-rw-r--r--  Documentation/ABI/testing/sysfs-bus-usb | 11
-rw-r--r--  Documentation/ABI/testing/sysfs-platform-asus-laptop | 12
-rw-r--r--  Documentation/ABI/testing/sysfs-platform-eeepc-laptop | 10
-rw-r--r--  Documentation/DocBook/device-drivers.tmpl | 2
-rw-r--r--  Documentation/DocBook/deviceiobook.tmpl | 2
-rw-r--r--  Documentation/cdrom/ide-cd | 39
-rw-r--r--  Documentation/feature-removal-schedule.txt | 44
-rw-r--r--  Documentation/filesystems/nilfs2.txt | 3
-rw-r--r--  Documentation/filesystems/sharedsubtree.txt | 16
-rw-r--r--  Documentation/i2c/busses/i2c-i801 | 3
-rw-r--r--  Documentation/i2c/busses/i2c-parport | 3
-rw-r--r--  Documentation/i2c/busses/i2c-parport-light | 11
-rw-r--r--  Documentation/i2c/smbus-protocol | 16
-rw-r--r--  Documentation/i2c/writing-clients | 5
-rw-r--r--  Documentation/ioctl/ioctl-number.txt | 1
-rw-r--r--  Documentation/kernel-parameters.txt | 6
-rw-r--r--  Documentation/kprobes.txt | 207
-rw-r--r--  Documentation/kvm/api.txt | 12
-rw-r--r--  Documentation/laptops/thinkpad-acpi.txt | 4
-rw-r--r--  Documentation/networking/00-INDEX | 2
-rw-r--r--  Documentation/networking/cxacru-cf.py | 48
-rw-r--r--  Documentation/networking/cxacru.txt | 16
-rw-r--r--  Documentation/powerpc/dts-bindings/fsl/dma.txt | 8
-rw-r--r--  Documentation/usb/error-codes.txt | 6
-rw-r--r--  Documentation/usb/power-management.txt | 235
-rw-r--r--  MAINTAINERS | 3
-rw-r--r--  arch/Kconfig | 13
-rw-r--r--  arch/alpha/include/asm/local.h | 17
-rw-r--r--  arch/arm/configs/rx51_defconfig | 11
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx-adma.h | 12
-rw-r--r--  arch/arm/mach-mx2/devices.c | 80
-rw-r--r--  arch/arm/mach-mx2/devices.h | 1
-rw-r--r--  arch/arm/mach-u300/include/mach/coh901318.h | 2
-rw-r--r--  arch/arm/plat-mxc/include/mach/mx21-usbhost.h | 38
-rw-r--r--  arch/avr32/mach-at32ap/at32ap700x.c | 7
-rw-r--r--  arch/blackfin/mach-common/entry.S | 4
-rw-r--r--  arch/cris/arch-v10/kernel/entry.S | 2
-rw-r--r--  arch/cris/arch-v32/mm/mmu.S | 2
-rw-r--r--  arch/ia64/include/asm/percpu.h | 4
-rw-r--r--  arch/ia64/include/asm/xen/events.h | 4
-rw-r--r--  arch/ia64/kernel/acpi.c | 4
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c | 4
-rw-r--r--  arch/ia64/kvm/Kconfig | 1
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 50
-rw-r--r--  arch/ia64/kvm/kvm_fw.c | 28
-rw-r--r--  arch/ia64/kvm/mmio.c | 4
-rw-r--r--  arch/ia64/kvm/vcpu.c | 4
-rw-r--r--  arch/ia64/mm/discontig.c | 2
-rw-r--r--  arch/m32r/include/asm/local.h | 25
-rw-r--r--  arch/microblaze/include/asm/entry.h | 2
-rw-r--r--  arch/mips/include/asm/local.h | 25
-rw-r--r--  arch/parisc/lib/fixup.S | 8
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 6
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 11
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64_asm.h | 18
-rw-r--r--  arch/powerpc/include/asm/kvm_e500.h | 3
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 23
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 83
-rw-r--r--  arch/powerpc/include/asm/local.h | 25
-rw-r--r--  arch/powerpc/include/asm/paca.h | 5
-rw-r--r--  arch/powerpc/include/asm/reg.h | 4
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 33
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 1
-rw-r--r--  arch/powerpc/kvm/44x_emulate.c | 25
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c | 20
-rw-r--r--  arch/powerpc/kvm/Kconfig | 1
-rw-r--r--  arch/powerpc/kvm/book3s.c | 309
-rw-r--r--  arch/powerpc/kvm/book3s_64_emulate.c | 77
-rw-r--r--  arch/powerpc/kvm/book3s_64_exports.c | 8
-rw-r--r--  arch/powerpc/kvm/book3s_64_interrupts.S | 336
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c | 10
-rw-r--r--  arch/powerpc/kvm/book3s_64_rmhandlers.S | 119
-rw-r--r--  arch/powerpc/kvm/book3s_64_slb.S | 160
-rw-r--r--  arch/powerpc/kvm/booke.c | 87
-rw-r--r--  arch/powerpc/kvm/booke_emulate.c | 107
-rw-r--r--  arch/powerpc/kvm/e500.c | 6
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c | 93
-rw-r--r--  arch/powerpc/kvm/e500_tlb.c | 10
-rw-r--r--  arch/powerpc/kvm/emulate.c | 118
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 40
-rw-r--r--  arch/s390/hypfs/inode.c | 42
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 26
-rw-r--r--  arch/s390/kvm/kvm-s390.h | 10
-rw-r--r--  arch/sparc/configs/sparc32_defconfig | 56
-rw-r--r--  arch/sparc/configs/sparc64_defconfig | 34
-rw-r--r--  arch/sparc/include/asm/io_32.h | 4
-rw-r--r--  arch/sparc/include/asm/io_64.h | 4
-rw-r--r--  arch/sparc/include/asm/perfctr.h | 4
-rw-r--r--  arch/sparc/include/asm/system_64.h | 15
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h | 25
-rw-r--r--  arch/sparc/kernel/entry.h | 1
-rw-r--r--  arch/sparc/kernel/nmi.c | 7
-rw-r--r--  arch/sparc/kernel/process_64.c | 23
-rw-r--r--  arch/sparc/kernel/rtrap_64.S | 62
-rw-r--r--  arch/sparc/kernel/sys32.S | 1
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c | 104
-rw-r--r--  arch/sparc/kernel/syscalls.S | 23
-rw-r--r--  arch/sparc/kernel/systbls.h | 2
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 4
-rw-r--r--  arch/sparc/kernel/traps_64.c | 9
-rw-r--r--  arch/sparc/prom/p1275.c | 12
-rw-r--r--  arch/um/drivers/mconsole_kern.c | 2
-rw-r--r--  arch/x86/Kconfig | 16
-rw-r--r--  arch/x86/ia32/ia32_aout.c | 1
-rw-r--r--  arch/x86/include/asm/Kbuild | 1
-rw-r--r--  arch/x86/include/asm/alternative.h | 4
-rw-r--r--  arch/x86/include/asm/e820.h | 5
-rw-r--r--  arch/x86/include/asm/highmem.h | 4
-rw-r--r--  arch/x86/include/asm/hyperv.h | 186
-rw-r--r--  arch/x86/include/asm/i8259.h | 2
-rw-r--r--  arch/x86/include/asm/io_apic.h | 1
-rw-r--r--  arch/x86/include/asm/irq.h | 1
-rw-r--r--  arch/x86/include/asm/irq_vectors.h | 48
-rw-r--r--  arch/x86/include/asm/kprobes.h | 31
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h | 17
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 60
-rw-r--r--  arch/x86/include/asm/kvm_para.h | 1
-rw-r--r--  arch/x86/include/asm/local.h | 37
-rw-r--r--  arch/x86/include/asm/paravirt.h | 9
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 4
-rw-r--r--  arch/x86/include/asm/pci.h | 2
-rw-r--r--  arch/x86/include/asm/pci_64.h | 2
-rw-r--r--  arch/x86/include/asm/percpu.h | 119
-rw-r--r--  arch/x86/include/asm/pgtable_32.h | 4
-rw-r--r--  arch/x86/include/asm/proto.h | 10
-rw-r--r--  arch/x86/include/asm/svm.h | 2
-rw-r--r--  arch/x86/include/asm/system.h | 8
-rw-r--r--  arch/x86/include/asm/vmx.h | 5
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 9
-rw-r--r--  arch/x86/kernel/alternative.c | 60
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 258
-rw-r--r--  arch/x86/kernel/apic/nmi.c | 12
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cleanup.c | 208
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c | 1
-rw-r--r--  arch/x86/kernel/e820.c | 349
-rw-r--r--  arch/x86/kernel/head32.c | 10
-rw-r--r--  arch/x86/kernel/head_32.S | 6
-rw-r--r--  arch/x86/kernel/i8259.c | 30
-rw-r--r--  arch/x86/kernel/irqinit.c | 35
-rw-r--r--  arch/x86/kernel/kprobes.c | 609
-rw-r--r--  arch/x86/kernel/mmconf-fam10h_64.c | 7
-rw-r--r--  arch/x86/kernel/paravirt.c | 4
-rw-r--r--  arch/x86/kernel/pci-dma.c | 13
-rw-r--r--  arch/x86/kernel/reboot.c | 8
-rw-r--r--  arch/x86/kernel/setup.c | 10
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 6
-rw-r--r--  arch/x86/kernel/smpboot.c | 13
-rw-r--r--  arch/x86/kernel/time.c | 4
-rw-r--r--  arch/x86/kernel/visws_quirks.c | 6
-rw-r--r--  arch/x86/kernel/vmi_32.c | 35
-rw-r--r--  arch/x86/kernel/vmiclock_32.c | 6
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S | 4
-rw-r--r--  arch/x86/kernel/vsyscall_64.c | 3
-rw-r--r--  arch/x86/kvm/Kconfig | 1
-rw-r--r--  arch/x86/kvm/emulate.c | 440
-rw-r--r--  arch/x86/kvm/i8254.c | 23
-rw-r--r--  arch/x86/kvm/i8254.h | 2
-rw-r--r--  arch/x86/kvm/i8259.c | 46
-rw-r--r--  arch/x86/kvm/irq.h | 3
-rw-r--r--  arch/x86/kvm/kvm_cache_regs.h | 31
-rw-r--r--  arch/x86/kvm/lapic.c | 31
-rw-r--r--  arch/x86/kvm/lapic.h | 8
-rw-r--r--  arch/x86/kvm/mmu.c | 137
-rw-r--r--  arch/x86/kvm/mmu.h | 35
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 13
-rw-r--r--  arch/x86/kvm/svm.c | 237
-rw-r--r--  arch/x86/kvm/trace.h | 59
-rw-r--r--  arch/x86/kvm/vmx.c | 396
-rw-r--r--  arch/x86/kvm/x86.c | 1098
-rw-r--r--  arch/x86/kvm/x86.h | 30
-rw-r--r--  arch/x86/mm/init_32.c | 7
-rw-r--r--  arch/x86/mm/init_64.c | 9
-rw-r--r--  arch/x86/mm/numa_32.c | 3
-rw-r--r--  arch/x86/mm/numa_64.c | 97
-rw-r--r--  arch/x86/pci/Makefile | 3
-rw-r--r--  arch/x86/pci/amd_bus.c | 127
-rw-r--r--  arch/x86/pci/bus_numa.c | 25
-rw-r--r--  arch/x86/pci/bus_numa.h | 9
-rw-r--r--  arch/x86/pci/i386.c | 4
-rw-r--r--  arch/x86/xen/enlighten.c | 7
-rw-r--r--  arch/x86/xen/mmu.c | 21
-rw-r--r--  arch/x86/xen/xen-asm_32.S | 4
-rw-r--r--  crypto/ahash.c | 1
-rw-r--r--  crypto/authenc.c | 27
-rw-r--r--  crypto/cryptd.c | 2
-rw-r--r--  crypto/md5.c | 1
-rw-r--r--  drivers/acpi/numa.c | 4
-rw-r--r--  drivers/acpi/processor_perflib.c | 2
-rw-r--r--  drivers/char/agp/intel-agp.c | 123
-rw-r--r--  drivers/char/cyclades.c | 16
-rw-r--r--  drivers/char/hvc_console.c | 2
-rw-r--r--  drivers/char/ip2/ip2main.c | 26
-rw-r--r--  drivers/char/isicom.c | 54
-rw-r--r--  drivers/char/moxa.c | 20
-rw-r--r--  drivers/char/mxser.c | 3
-rw-r--r--  drivers/char/nozomi.c | 157
-rw-r--r--  drivers/char/serial167.c | 3
-rw-r--r--  drivers/char/specialix.c | 2
-rw-r--r--  drivers/char/synclink.c | 4
-rw-r--r--  drivers/char/synclink_gt.c | 186
-rw-r--r--  drivers/char/tty_buffer.c | 17
-rw-r--r--  drivers/char/tty_ldisc.c | 50
-rw-r--r--  drivers/char/vt_ioctl.c | 39
-rw-r--r--  drivers/dma/Kconfig | 23
-rw-r--r--  drivers/dma/Makefile | 8
-rw-r--r--  drivers/dma/coh901318.c | 182
-rw-r--r--  drivers/dma/coh901318_lli.c | 23
-rw-r--r--  drivers/dma/dmaengine.c | 2
-rw-r--r--  drivers/dma/dmatest.c | 8
-rw-r--r--  drivers/dma/fsldma.c | 1177
-rw-r--r--  drivers/dma/fsldma.h | 35
-rw-r--r--  drivers/dma/ioat/dma.c | 46
-rw-r--r--  drivers/dma/ioat/dma.h | 11
-rw-r--r--  drivers/dma/ioat/dma_v2.c | 70
-rw-r--r--  drivers/dma/ioat/dma_v2.h | 6
-rw-r--r--  drivers/dma/ioat/dma_v3.c | 64
-rw-r--r--  drivers/dma/ioat/registers.h | 2
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 15
-rw-r--r--  drivers/dma/mpc512x_dma.c | 800
-rw-r--r--  drivers/dma/ppc4xx/adma.c | 2
-rw-r--r--  drivers/edac/amd64_edac.c | 39
-rw-r--r--  drivers/edac/amd64_edac.h | 3
-rw-r--r--  drivers/firewire/core-cdev.c | 368
-rw-r--r--  drivers/firewire/core-device.c | 198
-rw-r--r--  drivers/firewire/core-transaction.c | 17
-rw-r--r--  drivers/firewire/core.h | 2
-rw-r--r--  drivers/firewire/ohci.c | 364
-rw-r--r--  drivers/firewire/sbp2.c | 5
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/drm_buffer.c | 184
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 6
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 44
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 30
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 26
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 70
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 253
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 326
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 69
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 430
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 169
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 313
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 170
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 216
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 160
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 339
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 126
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_calc.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 167
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 108
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 53
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 508
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_tv.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 74
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_grctx.c | 2367
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 9
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 7300
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 456
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 64
-rw-r--r--  drivers/gpu/drm/radeon/avivod.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 767
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 176
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 176
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 157
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 280
-rw-r--r--  drivers/gpu/drm/radeon/r300_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 100
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 190
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_shaders.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 262
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 831
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 467
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 167
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 172
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 435
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 257
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 290
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 235
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 332
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 354
-rw-r--r--  drivers/gpu/drm/radeon/radeon_family.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 768
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 29
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 399
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 67
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 203
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r600 | 837
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 39
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 41
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 259
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 18
-rw-r--r--  drivers/gpu/vga/Kconfig | 11
-rw-r--r--  drivers/gpu/vga/Makefile | 1
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 450
-rw-r--r--  drivers/hid/usbhid/hiddev.c | 7
-rw-r--r--  drivers/i2c/Kconfig | 10
-rw-r--r--  drivers/i2c/Makefile | 1
-rw-r--r--  drivers/i2c/algos/i2c-algo-pca.c | 2
-rw-r--r--  drivers/i2c/busses/Kconfig | 7
-rw-r--r--  drivers/i2c/busses/i2c-ali1535.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-ali1563.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-ali15x3.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-amd756.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-amd8111.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-hydra.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-isch.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-nforce2.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-parport-light.c | 48
-rw-r--r--  drivers/i2c/busses/i2c-parport.c | 43
-rw-r--r--  drivers/i2c/busses/i2c-parport.h | 4
-rw-r--r--  drivers/i2c/busses/i2c-pasemi.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-sis5595.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-sis630.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-sis96x.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-tiny-usb.c | 10
-rw-r--r--  drivers/i2c/busses/i2c-via.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-viapro.c | 2
-rw-r--r--  drivers/i2c/i2c-core.c | 54
-rw-r--r--  drivers/i2c/i2c-smbus.c | 263
-rw-r--r--  drivers/ide/aec62xx.c | 13
-rw-r--r--  drivers/ide/ali14xx.c | 3
-rw-r--r--  drivers/ide/alim15x3.c | 171
-rw-r--r--  drivers/ide/amd74xx.c | 18
-rw-r--r--  drivers/ide/at91_ide.c | 5
-rw-r--r--  drivers/ide/atiixp.c | 14
-rw-r--r--  drivers/ide/au1xxx-ide.c | 13
-rw-r--r--  drivers/ide/cmd640.c | 5
-rw-r--r--  drivers/ide/cmd64x.c | 114
-rw-r--r--  drivers/ide/cs5520.c | 9
-rw-r--r--  drivers/ide/cs5530.c | 13
-rw-r--r--  drivers/ide/cs5535.c | 14
-rw-r--r--  drivers/ide/cs5536.c | 16
-rw-r--r--  drivers/ide/cy82c693.c | 146
-rw-r--r--  drivers/ide/dtc2278.c | 4
-rw-r--r--  drivers/ide/hpt366.c | 9
-rw-r--r--  drivers/ide/ht6560b.c | 3
-rw-r--r--  drivers/ide/icside.c | 67
-rw-r--r--  drivers/ide/ide-cs.c | 23
-rw-r--r--  drivers/ide/ide-devsets.c | 6
-rw-r--r--  drivers/ide/ide-iops.c | 2
-rw-r--r--  drivers/ide/ide-probe.c | 2
-rw-r--r--  drivers/ide/ide-tape.c | 14
-rw-r--r--  drivers/ide/ide-timings.c | 18
-rw-r--r--  drivers/ide/ide-xfer-mode.c | 18
-rw-r--r--  drivers/ide/it8172.c | 14
-rw-r--r--  drivers/ide/it8213.c | 20
-rw-r--r--  drivers/ide/it821x.c | 14
-rw-r--r--  drivers/ide/jmicron.c | 6
-rw-r--r--  drivers/ide/opti621.c | 77
-rw-r--r--  drivers/ide/palm_bk3710.c | 12
-rw-r--r--  drivers/ide/pdc202xx_new.c | 8
-rw-r--r--  drivers/ide/pdc202xx_old.c | 31
-rw-r--r--  drivers/ide/piix.c | 20
-rw-r--r--  drivers/ide/pmac.c | 13
-rw-r--r--  drivers/ide/qd65xx.c | 10
-rw-r--r--  drivers/ide/sc1200.c | 8
-rw-r--r--  drivers/ide/scc_pata.c | 24
-rw-r--r--  drivers/ide/serverworks.c | 50
-rw-r--r--  drivers/ide/sgiioc4.c | 2
-rw-r--r--  drivers/ide/siimage.c | 14
-rw-r--r--  drivers/ide/sis5513.c | 8
-rw-r--r--  drivers/ide/sl82c105.c | 8
-rw-r--r--  drivers/ide/slc90e66.c | 17
-rw-r--r--  drivers/ide/tc86c001.c | 9
-rw-r--r--  drivers/ide/triflex.c | 10
-rw-r--r--  drivers/ide/tx4938ide.c | 7
-rw-r--r--  drivers/ide/tx4939ide.c | 10
-rw-r--r--  drivers/ide/umc8672.c | 5
-rw-r--r--  drivers/ide/via82cxxx.c | 132
-rw-r--r--  drivers/infiniband/Kconfig | 1
-rw-r--r--  drivers/infiniband/core/ucm.c | 63
-rw-r--r--  drivers/infiniband/core/ud_header.c | 14
-rw-r--r--  drivers/infiniband/core/umem.c | 2
-rw-r--r--  drivers/infiniband/core/user_mad.c | 173
-rw-r--r--  drivers/infiniband/core/uverbs.h | 13
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 25
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 256
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c | 15
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.h | 4
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_wr.h | 17
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.c | 80
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.h | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c | 9
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 5
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c | 4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_sqp.c | 2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_pages.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes.c | 1
-rw-r--r--  drivers/infiniband/hw/nes/nes.h | 9
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 11
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 484
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.h | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c | 61
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 6
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 10
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 47
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 97
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 506
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 64
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 281
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 91
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 6
-rw-r--r--  drivers/md/raid5.c | 2
-rw-r--r--  drivers/md/raid5.h | 2
-rw-r--r--  drivers/media/dvb/firewire/firedtv-fw.c | 39
-rw-r--r--  drivers/media/video/dabusb.c | 8
-rw-r--r--  drivers/mmc/card/sdio_uart.c | 93
-rw-r--r--  drivers/net/cxgb3/adapter.h | 5
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 57
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.h | 5
-rw-r--r--  drivers/net/cxgb3/regs.h | 16
-rw-r--r--  drivers/net/cxgb3/sge.c | 10
-rw-r--r--  drivers/net/cxgb3/t3_hw.c | 5
-rw-r--r--  drivers/parport/parport_pc.c | 6
-rw-r--r--  drivers/pci/hotplug/ibmphp_res.c | 14
-rw-r--r--  drivers/pcmcia/Kconfig | 14
-rw-r--r--  drivers/pcmcia/cardbus.c | 2
-rw-r--r--  drivers/pcmcia/cistpl.c | 1908
-rw-r--r--  drivers/pcmcia/db1xxx_ss.c | 19
-rw-r--r--  drivers/pcmcia/pd6729.c | 18
-rw-r--r--  drivers/pcmcia/rsrc_mgr.c | 3
-rw-r--r--  drivers/pcmcia/xxs1500_ss.c | 16
-rw-r--r--  drivers/pcmcia/yenta_socket.c | 8
-rw-r--r--  drivers/platform/x86/Kconfig | 13
-rw-r--r--  drivers/platform/x86/asus-laptop.c | 1741
-rw-r--r--  drivers/platform/x86/asus_acpi.c | 3
-rw-r--r--  drivers/platform/x86/classmate-laptop.c | 4
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 9
-rw-r--r--  drivers/platform/x86/eeepc-laptop.c | 21
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 116
-rw-r--r--  drivers/platform/x86/toshiba_acpi.c | 30
-rw-r--r--  drivers/power/Kconfig | 4
-rw-r--r--  drivers/power/bq27x00_battery.c | 177
-rw-r--r--  drivers/power/da9030_battery.c | 2
-rw-r--r--  drivers/power/wm97xx_battery.c | 4
-rw-r--r--  drivers/regulator/Kconfig | 29
-rw-r--r--  drivers/regulator/Makefile | 3
-rw-r--r--  drivers/regulator/ab3100.c | 6
-rw-r--r--  drivers/regulator/core.c | 79
-rw-r--r--  drivers/regulator/dummy.c | 66
-rw-r--r--  drivers/regulator/dummy.h | 31
-rw-r--r--  drivers/regulator/fixed.c | 30
-rw-r--r--  drivers/regulator/lp3971.c | 68
-rw-r--r--  drivers/regulator/max1586.c | 9
-rw-r--r--  drivers/regulator/max8649.c | 408
-rw-r--r--  drivers/regulator/max8660.c | 11
-rw-r--r--  drivers/regulator/mc13783-regulator.c | 465
-rw-r--r--  drivers/regulator/pcap-regulator.c | 8
-rw-r--r--  drivers/regulator/tps65023-regulator.c | 35
-rw-r--r--  drivers/regulator/tps6507x-regulator.c | 34
-rw-r--r--  drivers/regulator/twl-regulator.c | 22
-rw-r--r--  drivers/regulator/virtual.c | 64
-rw-r--r--  drivers/regulator/wm831x-dcdc.c | 12
-rw-r--r--  drivers/regulator/wm831x-isink.c | 3
-rw-r--r--  drivers/regulator/wm831x-ldo.c | 5
-rw-r--r--  drivers/regulator/wm8350-regulator.c | 46
-rw-r--r--  drivers/regulator/wm8400-regulator.c | 7
-rw-r--r--  drivers/regulator/wm8994-regulator.c | 307
-rw-r--r--  drivers/serial/68328serial.c | 8
-rw-r--r--  drivers/serial/8250.c | 21
-rw-r--r--  drivers/serial/8250_pci.c | 31
-rw-r--r--  drivers/serial/Kconfig | 53
-rw-r--r--  drivers/serial/atmel_serial.c | 22
-rw-r--r--  drivers/serial/bcm63xx_uart.c | 7
-rw-r--r--  drivers/serial/bfin_5xx.c | 22
-rw-r--r--  drivers/serial/bfin_sport_uart.c | 701
-rw-r--r--  drivers/serial/bfin_sport_uart.h | 38
-rw-r--r--  drivers/serial/icom.c | 5
-rw-r--r--  drivers/serial/imx.c | 6
-rw-r--r--  drivers/serial/ioc3_serial.c | 3
-rw-r--r--  drivers/serial/jsm/jsm_driver.c | 1
-rw-r--r--  drivers/serial/jsm/jsm_tty.c | 9
-rw-r--r--  drivers/serial/msm_serial.c | 6
-rw-r--r--  drivers/serial/timbuart.c | 7
-rw-r--r--  drivers/staging/usbip/vhci_sysfs.c | 2
-rw-r--r--  drivers/usb/Kconfig | 2
-rw-r--r--  drivers/usb/Makefile | 1
-rw-r--r--  drivers/usb/atm/cxacru.c | 192
-rw-r--r--  drivers/usb/atm/usbatm.c | 3
-rw-r--r--  drivers/usb/atm/usbatm.h | 15
-rw-r--r--  drivers/usb/c67x00/c67x00-drv.c | 8
-rw-r--r--  drivers/usb/class/cdc-acm.c | 82
-rw-r--r--  drivers/usb/class/cdc-acm.h | 2
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 2
-rw-r--r--  drivers/usb/class/usblp.c | 22
-rw-r--r--  drivers/usb/class/usbtmc.c | 2
-rw-r--r--  drivers/usb/core/Kconfig | 4
-rw-r--r--  drivers/usb/core/devices.c | 32
-rw-r--r--  drivers/usb/core/devio.c | 127
-rw-r--r--  drivers/usb/core/driver.c | 918
-rw-r--r--  drivers/usb/core/file.c | 2
-rw-r--r--  drivers/usb/core/hcd.c | 27
-rw-r--r--  drivers/usb/core/hcd.h | 13
-rw-r--r--  drivers/usb/core/hub.c | 120
-rw-r--r--  drivers/usb/core/message.c | 5
-rw-r--r--  drivers/usb/core/quirks.c | 18
-rw-r--r--  drivers/usb/core/sysfs.c | 85
-rw-r--r--  drivers/usb/core/urb.c | 13
-rw-r--r--  drivers/usb/core/usb.c | 38
-rw-r--r--  drivers/usb/core/usb.h | 43
-rw-r--r--  drivers/usb/early/ehci-dbgp.c | 68
-rw-r--r--  drivers/usb/gadget/Kconfig | 10
-rw-r--r--  drivers/usb/gadget/Makefile | 2
-rw-r--r--  drivers/usb/gadget/at91_udc.c | 10
-rw-r--r--  drivers/usb/gadget/atmel_usba_udc.c | 9
-rw-r--r--  drivers/usb/gadget/atmel_usba_udc.h | 1
-rw-r--r--  drivers/usb/gadget/epautoconf.c | 24
-rw-r--r--  drivers/usb/gadget/ether.c | 2
-rw-r--r--  drivers/usb/gadget/f_acm.c | 8
-rw-r--r--  drivers/usb/gadget/f_ecm.c | 7
-rw-r--r--  drivers/usb/gadget/f_mass_storage.c | 52
-rw-r--r--  drivers/usb/gadget/f_rndis.c | 4
-rw-r--r--  drivers/usb/gadget/file_storage.c | 10
-rw-r--r--  drivers/usb/gadget/fsl_qe_udc.c | 2
-rw-r--r--  drivers/usb/gadget/gadget_chips.h | 59
-rw-r--r--  drivers/usb/gadget/gmidi.c | 5
-rw-r--r--  drivers/usb/gadget/goku_udc.c | 2
-rw-r--r--  drivers/usb/gadget/inode.c | 39
-rw-r--r--  drivers/usb/gadget/mass_storage.c | 8
-rw-r--r--  drivers/usb/gadget/nokia.c | 259
-rw-r--r--  drivers/usb/gadget/printer.c | 18
-rw-r--r--  drivers/usb/gadget/pxa27x_udc.c | 135
-rw-r--r--  drivers/usb/gadget/pxa27x_udc.h | 6
-rw-r--r--  drivers/usb/gadget/s3c-hsotg.c | 11
-rw-r--r--  drivers/usb/gadget/u_ether.c | 5
-rw-r--r--  drivers/usb/gadget/u_ether.h | 7
-rw-r--r--  drivers/usb/gadget/zero.c | 6
-rw-r--r--  drivers/usb/host/Kconfig | 11
-rw-r--r--  drivers/usb/host/Makefile | 2
-rw-r--r--  drivers/usb/host/ehci-atmel.c | 2
-rw-r--r--  drivers/usb/host/ehci-au1xxx.c | 6
-rw-r--r--  drivers/usb/host/ehci-fsl.c | 97
-rw-r--r--  drivers/usb/host/ehci-mxc.c | 23
-rw-r--r--  drivers/usb/host/ehci-omap.c | 47
-rw-r--r--  drivers/usb/host/ehci-orion.c | 8
-rw-r--r--  drivers/usb/host/ehci-ppc-of.c | 14
-rw-r--r--  drivers/usb/host/ehci-sched.c | 12
-rw-r--r--  drivers/usb/host/ehci-xilinx-of.c | 8
-rw-r--r--  drivers/usb/host/fhci-hcd.c | 4
-rw-r--r--  drivers/usb/host/imx21-dbg.c | 527
-rw-r--r--  drivers/usb/host/imx21-hcd.c | 1789
-rw-r--r--  drivers/usb/host/imx21-hcd.h | 436
-rw-r--r--  drivers/usb/host/isp1362-hcd.c | 15
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 10
-rw-r--r--  drivers/usb/host/isp1760-if.c | 2
-rw-r--r--  drivers/usb/host/ohci-da8xx.c | 456
-rw-r--r--  drivers/usb/host/ohci-dbg.c | 4
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 5
-rw-r--r--  drivers/usb/host/ohci-lh7a404.c | 11
-rw-r--r--  drivers/usb/host/ohci-pnx4008.c | 6
-rw-r--r--  drivers/usb/host/ohci-ppc-of.c | 10
-rw-r--r--  drivers/usb/host/ohci-ppc-soc.c | 8
-rw-r--r--  drivers/usb/host/ohci-sa1111.c | 8
-rw-r--r--  drivers/usb/host/sl811-hcd.c | 5
-rw-r--r--  drivers/usb/host/uhci-hcd.c | 1
-rw-r--r--  drivers/usb/host/xhci-dbg.c | 19
-rw-r--r--  drivers/usb/host/xhci-ext-caps.h | 7
-rw-r--r--  drivers/usb/host/xhci-hcd.c | 150
-rw-r--r--  drivers/usb/host/xhci-hub.c | 65
-rw-r--r--  drivers/usb/host/xhci-mem.c | 47
-rw-r--r--  drivers/usb/host/xhci-pci.c | 1
-rw-r--r--  drivers/usb/host/xhci-ring.c | 41
-rw-r--r--  drivers/usb/host/xhci.h | 11
-rw-r--r--  drivers/usb/image/mdc800.c | 2
-rw-r--r--  drivers/usb/image/microtek.c | 4
-rw-r--r--  drivers/usb/misc/Kconfig | 25
-rw-r--r--  drivers/usb/misc/Makefile | 2
-rw-r--r--  drivers/usb/misc/adutux.c | 8
-rw-r--r--  drivers/usb/misc/appledisplay.c | 5
-rw-r--r--  drivers/usb/misc/berry_charge.c | 183
-rw-r--r--  drivers/usb/misc/cypress_cy7c63.c | 2
-rw-r--r--  drivers/usb/misc/cytherm.c | 2
-rw-r--r--  drivers/usb/misc/emi26.c | 2
-rw-r--r--  drivers/usb/misc/emi62.c | 2
-rw-r--r--  drivers/usb/misc/ftdi-elan.c | 11
-rw-r--r--  drivers/usb/misc/idmouse.c | 2
-rw-r--r--  drivers/usb/misc/iowarrior.c | 6
-rw-r--r--  drivers/usb/misc/isight_firmware.c | 4
-rw-r--r--  drivers/usb/misc/ldusb.c | 4
-rw-r--r--  drivers/usb/misc/legousbtower.c | 13
-rw-r--r--  drivers/usb/misc/rio500.c | 11
-rw-r--r--  drivers/usb/misc/sisusbvga/sisusb.c | 20
-rw-r--r--  drivers/usb/misc/trancevibrator.c | 2
-rw-r--r--  drivers/usb/misc/usblcd.c | 7
-rw-r--r--  drivers/usb/misc/usbled.c | 2
-rw-r--r--  drivers/usb/misc/usbsevseg.c | 2
-rw-r--r--  drivers/usb/misc/usbtest.c | 6
-rw-r--r--  drivers/usb/misc/uss720.c | 2
-rw-r--r--  drivers/usb/misc/vstusb.c | 783
-rw-r--r--  drivers/usb/mon/mon_bin.c | 7
-rw-r--r--  drivers/usb/mon/mon_text.c | 6
-rw-r--r--  drivers/usb/musb/blackfin.c | 28
-rw-r--r--  drivers/usb/musb/cppi_dma.c | 33
-rw-r--r--  drivers/usb/musb/musb_core.c | 562
-rw-r--r--  drivers/usb/musb/musb_core.h | 72
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 20
-rw-r--r--  drivers/usb/musb/musb_host.c | 34
-rw-r--r--  drivers/usb/musb/musb_regs.h | 101
-rw-r--r--  drivers/usb/musb/musbhsdma.c | 25
-rw-r--r--  drivers/usb/musb/musbhsdma.h | 17
-rw-r--r--  drivers/usb/musb/omap2430.c | 48
-rw-r--r--  drivers/usb/musb/omap2430.h | 32
-rw-r--r--  drivers/usb/musb/tusb6010.c | 2
-rw-r--r--  drivers/usb/musb/tusb6010_omap.c | 2
-rw-r--r--  drivers/usb/otg/twl4030-usb.c | 45
-rw-r--r--  drivers/usb/serial/Kconfig | 19
-rw-r--r--  drivers/usb/serial/Makefile | 2
-rw-r--r--  drivers/usb/serial/aircable.c | 36
-rw-r--r--  drivers/usb/serial/ark3116.c | 3
-rw-r--r--  drivers/usb/serial/belkin_sa.c | 2
-rw-r--r--  drivers/usb/serial/ch341.c | 27
-rw-r--r--  drivers/usb/serial/cp210x.c | 7
-rw-r--r--  drivers/usb/serial/cyberjack.c | 5
-rw-r--r--  drivers/usb/serial/cypress_m8.c | 82
-rw-r--r--  drivers/usb/serial/digi_acceleport.c | 38
-rw-r--r--  drivers/usb/serial/empeg.c | 3
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 195
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 6
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 39
-rw-r--r--  drivers/usb/serial/funsoft.c | 2
-rw-r--r--  drivers/usb/serial/garmin_gps.c | 3
-rw-r--r--  drivers/usb/serial/generic.c | 7
-rw-r--r--  drivers/usb/serial/hp4x.c | 2
-rw-r--r--  drivers/usb/serial/io_edgeport.c | 69
-rw-r--r--  drivers/usb/serial/io_tables.h | 10
-rw-r--r--  drivers/usb/serial/io_ti.c | 75
-rw-r--r--  drivers/usb/serial/ipaq.c | 1
-rw-r--r--  drivers/usb/serial/ipw.c | 3
-rw-r--r--  drivers/usb/serial/ir-usb.c | 13
-rw-r--r--  drivers/usb/serial/iuu_phoenix.c | 2
-rw-r--r--  drivers/usb/serial/keyspan.c | 57
-rw-r--r--  drivers/usb/serial/keyspan.h | 10
-rw-r--r--  drivers/usb/serial/keyspan_pda.c | 60
-rw-r--r--  drivers/usb/serial/kl5kusb105.c | 66
-rw-r--r--  drivers/usb/serial/kobil_sct.c | 25
-rw-r--r--  drivers/usb/serial/mct_u232.c | 57
-rw-r--r--  drivers/usb/serial/mct_u232.h | 2
-rw-r--r--  drivers/usb/serial/mos7720.c | 185
-rw-r--r--  drivers/usb/serial/mos7840.c | 27
-rw-r--r--  drivers/usb/serial/moto_modem.c | 2
-rw-r--r--  drivers/usb/serial/navman.c | 3
-rw-r--r--  drivers/usb/serial/omninet.c | 8
-rw-r--r--  drivers/usb/serial/opticon.c | 17
-rw-r--r--  drivers/usb/serial/option.c | 71
-rw-r--r--  drivers/usb/serial/oti6858.c | 36
-rw-r--r--  drivers/usb/serial/pl2303.c | 38
-rw-r--r--  drivers/usb/serial/qcaux.c | 96
-rw-r--r--  drivers/usb/serial/qcserial.c | 2
-rw-r--r--  drivers/usb/serial/siemens_mpi.c | 2
-rw-r--r--  drivers/usb/serial/sierra.c | 59
-rw-r--r--  drivers/usb/serial/spcp8x5.c | 27
-rw-r--r--  drivers/usb/serial/symbolserial.c | 12
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 3
-rw-r--r--  drivers/usb/serial/usb-serial.c | 15
-rw-r--r--  drivers/usb/serial/usb_debug.c | 2
-rw-r--r--  drivers/usb/serial/visor.c | 40
-rw-r--r--  drivers/usb/serial/vivopay-serial.c | 76
-rw-r--r--  drivers/usb/serial/whiteheat.c | 24
-rw-r--r--  drivers/usb/storage/onetouch.c | 2
-rw-r--r--  drivers/usb/storage/scsiglue.c | 10
-rw-r--r--  drivers/usb/storage/shuttle_usbat.c | 15
-rw-r--r--  drivers/usb/storage/transport.c | 6
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 88
-rw-r--r--  drivers/usb/usb-skeleton.c | 2
-rw-r--r--  drivers/usb/wusbcore/cbaf.c | 2
-rw-r--r--  drivers/usb/wusbcore/devconnect.c | 2
-rw-r--r--  drivers/usb/wusbcore/mmc.c | 2
-rw-r--r--  drivers/video/console/fbcon.c | 18
-rw-r--r--  drivers/video/fbmem.c | 1
-rw-r--r--  drivers/xen/events.c | 8
-rw-r--r--  fs/adfs/adfs.h | 2
-rw-r--r--  fs/adfs/inode.c | 5
-rw-r--r--  fs/affs/affs.h | 3
-rw-r--r--  fs/affs/inode.c | 2
-rw-r--r--  fs/afs/internal.h | 1
-rw-r--r--  fs/afs/super.c | 1
-rw-r--r--  fs/afs/write.c | 21
-rw-r--r--  fs/autofs4/autofs_i.h | 7
-rw-r--r--  fs/autofs4/dev-ioctl.c | 11
-rw-r--r--  fs/autofs4/expire.c | 6
-rw-r--r--  fs/autofs4/inode.c | 63
-rw-r--r--  fs/autofs4/root.c | 474
-rw-r--r--  fs/bfs/inode.c | 5
-rw-r--r--  fs/btrfs/ctree.h | 2
-rw-r--r--  fs/btrfs/inode.c | 4
-rw-r--r--  fs/cifs/CHANGES | 3
-rw-r--r--  fs/cifs/cifsglob.h | 2
-rw-r--r--  fs/cifs/cifspdu.h | 6
-rw-r--r--  fs/cifs/cifsproto.h | 7
-rw-r--r--  fs/cifs/cifssmb.c | 360
-rw-r--r--  fs/cifs/connect.c | 8
-rw-r--r--  fs/cifs/file.c | 4
-rw-r--r--  fs/cifs/inode.c | 3
-rw-r--r--  fs/cifs/misc.c | 2
-rw-r--r--  fs/cifs/xattr.c | 8
-rw-r--r--  fs/dcache.c | 70
-rw-r--r--  fs/debugfs/inode.c | 2
-rw-r--r--  fs/exofs/common.h | 39
-rw-r--r--  fs/exofs/exofs.h | 55
-rw-r--r--  fs/exofs/inode.c | 198
-rw-r--r--  fs/exofs/ios.c | 575
-rw-r--r--  fs/exofs/super.c | 121
-rw-r--r--  fs/ext2/ext2.h | 2
-rw-r--r--  fs/ext2/inode.c | 11
-rw-r--r--  fs/ext3/inode.c | 4
-rw-r--r--  fs/ext4/balloc.c | 35
-rw-r--r--  fs/ext4/block_validity.c | 4
-rw-r--r--  fs/ext4/dir.c | 14
-rw-r--r--  fs/ext4/ext4.h | 110
-rw-r--r--  fs/ext4/ext4_jbd2.c | 4
-rw-r--r--  fs/ext4/ext4_jbd2.h | 24
-rw-r--r--  fs/ext4/extents.c | 260
-rw-r--r--  fs/ext4/file.c | 10
-rw-r--r--  fs/ext4/fsync.c | 2
-rw-r--r--  fs/ext4/ialloc.c | 32
-rw-r--r--  fs/ext4/inode.c | 465
-rw-r--r--  fs/ext4/ioctl.c | 12
-rw-r--r--  fs/ext4/mballoc.c | 73
-rw-r--r--  fs/ext4/mballoc.h | 9
-rw-r--r--  fs/ext4/migrate.c | 35
-rw-r--r--  fs/ext4/move_extent.c | 36
-rw-r--r--  fs/ext4/namei.c | 63
-rw-r--r--  fs/ext4/resize.c | 102
-rw-r--r--  fs/ext4/super.c | 342
-rw-r--r--  fs/ext4/xattr.c | 56
-rw-r--r--  fs/fat/inode.c | 9
-rw-r--r--  fs/fs-writeback.c | 22
-rw-r--r--  fs/fuse/dev.c | 30
-rw-r--r--  fs/gfs2/aops.c | 4
-rw-r--r--  fs/gfs2/glock.c | 75
-rw-r--r--  fs/gfs2/glock.h | 7
-rw-r--r--  fs/gfs2/glops.c | 16
-rw-r--r--  fs/gfs2/incore.h | 5
-rw-r--r--  fs/gfs2/inode.c | 6
-rw-r--r--  fs/gfs2/lock_dlm.c | 5
-rw-r--r--  fs/gfs2/lops.c | 4
-rw-r--r--  fs/gfs2/main.c | 28
-rw-r--r--  fs/gfs2/meta_io.c | 46
-rw-r--r--  fs/gfs2/meta_io.h | 12
-rw-r--r--  fs/gfs2/ops_fstype.c | 4
-rw-r--r--  fs/gfs2/ops_inode.c | 113
-rw-r--r--  fs/gfs2/super.c | 32
-rw-r--r--  fs/gfs2/sys.c | 2
-rw-r--r--  fs/gfs2/util.c | 1
-rw-r--r--  fs/gfs2/util.h | 1
-rw-r--r--  fs/hfs/hfs_fs.h | 2
-rw-r--r--  fs/hfs/inode.c | 2
-rw-r--r--  fs/hfsplus/super.c | 3
-rw-r--r--  fs/hpfs/anode.c | 2
-rw-r--r--  fs/hpfs/dentry.c | 14
-rw-r--r--  fs/hpfs/dir.c | 14
-rw-r--r--  fs/hpfs/dnode.c | 21
-rw-r--r--  fs/hpfs/ea.c | 7
-rw-r--r--  fs/hpfs/hpfs_fn.h | 30
-rw-r--r--  fs/hpfs/inode.c | 4
-rw-r--r--  fs/hpfs/map.c | 6
-rw-r--r--  fs/hpfs/name.c | 21
-rw-r--r--  fs/hpfs/namei.c | 75
-rw-r--r--  fs/hppfs/hppfs.c | 2
-rw-r--r--  fs/internal.h | 2
-rw-r--r--  fs/jbd2/checkpoint.c | 1
-rw-r--r--  fs/jbd2/commit.c | 13
-rw-r--r--  fs/jbd2/journal.c | 132
-rw-r--r--  fs/jbd2/transaction.c | 43
-rw-r--r--  fs/jfs/inode.c | 5
-rw-r--r--  fs/jfs/jfs_inode.h | 2
-rw-r--r--  fs/libfs.c | 77
-rw-r--r--  fs/locks.c | 5
-rw-r--r--  fs/minix/inode.c | 8
-rw-r--r--  fs/namei.c | 555
-rw-r--r--  fs/namespace.c | 53
-rw-r--r--  fs/nfs/inode.c | 18
-rw-r--r--  fs/nfs/internal.h | 2
-rw-r--r--  fs/nfs/iostat.h | 4
-rw-r--r--  fs/nfs/nfs4proc.c | 8
-rw-r--r--  fs/nfsctl.c | 5
-rw-r--r--  fs/nfsd/nfs4xdr.c | 12
-rw-r--r--  fs/nfsd/vfs.c | 4
-rw-r--r--  fs/nilfs2/dat.c | 3
-rw-r--r--  fs/nilfs2/dir.c | 14
-rw-r--r--  fs/nilfs2/ioctl.c | 66
-rw-r--r--  fs/nilfs2/namei.c | 13
-rw-r--r--  fs/nilfs2/nilfs.h | 4
-rw-r--r--  fs/nilfs2/recovery.c | 41
-rw-r--r--  fs/nilfs2/segbuf.c | 18
-rw-r--r--  fs/nilfs2/segbuf.h | 5
-rw-r--r--  fs/nilfs2/segment.c | 120
-rw-r--r--  fs/nilfs2/segment.h | 2
-rw-r--r--  fs/nilfs2/super.c | 15
-rw-r--r--  fs/nilfs2/the_nilfs.c | 38
-rw-r--r--  fs/nilfs2/the_nilfs.h | 3
-rw-r--r--  fs/notify/inotify/inotify_user.c | 59
-rw-r--r--  fs/ntfs/dir.c | 2
-rw-r--r--  fs/ntfs/file.c | 2
-rw-r--r--  fs/ntfs/inode.c | 2
-rw-r--r--  fs/ntfs/inode.h | 4
-rw-r--r--  fs/ntfs/super.c | 8
-rw-r--r--  fs/ocfs2/Makefile | 1
-rw-r--r--  fs/ocfs2/alloc.c | 5
-rw-r--r--  fs/ocfs2/aops.c | 5
-rw-r--r--  fs/ocfs2/cluster/masklog.c | 1
-rw-r--r--  fs/ocfs2/cluster/masklog.h | 7
-rw-r--r--  fs/ocfs2/dir.c | 2
-rw-r--r--  fs/ocfs2/dlm/Makefile | 3
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 2
-rw-r--r--  fs/ocfs2/dlmfs/Makefile | 5
-rw-r--r--  fs/ocfs2/dlmfs/dlmfs.c (renamed from fs/ocfs2/dlm/dlmfs.c) | 127
-rw-r--r--  fs/ocfs2/dlmfs/dlmfsver.c (renamed from fs/ocfs2/dlm/dlmfsver.c) | 0
-rw-r--r--  fs/ocfs2/dlmfs/dlmfsver.h (renamed from fs/ocfs2/dlm/dlmfsver.h) | 0
-rw-r--r--  fs/ocfs2/dlmfs/userdlm.c (renamed from fs/ocfs2/dlm/userdlm.c) | 308
-rw-r--r--  fs/ocfs2/dlmfs/userdlm.h (renamed from fs/ocfs2/dlm/userdlm.h) | 16
-rw-r--r--  fs/ocfs2/dlmglue.c | 284
-rw-r--r--  fs/ocfs2/file.c | 13
-rw-r--r--  fs/ocfs2/ioctl.h | 6
-rw-r--r--  fs/ocfs2/localalloc.c | 2
-rw-r--r--  fs/ocfs2/ocfs2.h | 32
-rw-r--r--  fs/ocfs2/ocfs2_fs.h | 57
-rw-r--r--  fs/ocfs2/ocfs2_ioctl.h | 79
-rw-r--r--  fs/ocfs2/ocfs2_lockingver.h | 2
-rw-r--r--  fs/ocfs2/refcounttree.c | 6
-rw-r--r--  fs/ocfs2/stack_o2cb.c | 37
-rw-r--r--  fs/ocfs2/stack_user.c | 49
-rw-r--r--  fs/ocfs2/stackglue.c | 98
-rw-r--r--  fs/ocfs2/stackglue.h | 95
-rw-r--r--  fs/ocfs2/suballoc.c | 171
-rw-r--r--  fs/ocfs2/suballoc.h | 1
-rw-r--r--  fs/ocfs2/super.c | 10
-rw-r--r--  fs/ocfs2/xattr.c | 2182
-rw-r--r--  fs/omfs/inode.c | 10
-rw-r--r--  fs/open.c | 2
-rw-r--r--  fs/pnode.c | 28
-rw-r--r--  fs/pnode.h | 5
-rw-r--r--  fs/proc/base.c | 10
-rw-r--r--  fs/proc/generic.c | 5
-rw-r--r--  fs/proc/kmsg.c | 14
-rw-r--r--  fs/proc/root.c | 6
-rw-r--r--  fs/reiserfs/inode.c | 4
-rw-r--r--  fs/squashfs/Makefile | 2
-rw-r--r--  fs/squashfs/block.c | 76
-rw-r--r--  fs/squashfs/cache.c | 1
-rw-r--r--  fs/squashfs/decompressor.c | 68
-rw-r--r--  fs/squashfs/decompressor.h | 55
-rw-r--r--  fs/squashfs/dir.c | 1
-rw-r--r--  fs/squashfs/export.c | 1
-rw-r--r--  fs/squashfs/file.c | 1
-rw-r--r--  fs/squashfs/fragment.c | 1
-rw-r--r--  fs/squashfs/id.c | 1
-rw-r--r--  fs/squashfs/inode.c | 1
-rw-r--r--  fs/squashfs/namei.c | 1
-rw-r--r--  fs/squashfs/squashfs.h | 8
-rw-r--r--  fs/squashfs/squashfs_fs.h | 6
-rw-r--r--  fs/squashfs/squashfs_fs_sb.h | 40
-rw-r--r--  fs/squashfs/super.c | 49
-rw-r--r--  fs/squashfs/symlink.c | 1
-rw-r--r--  fs/squashfs/zlib_wrapper.c | 150
-rw-r--r--  fs/super.c | 21
-rw-r--r--  fs/sysv/inode.c | 10
-rw-r--r--  fs/sysv/sysv.h | 2
-rw-r--r--  fs/ubifs/dir.c | 2
-rw-r--r--  fs/ubifs/file.c | 8
-rw-r--r--  fs/ubifs/super.c | 2
-rw-r--r--  fs/udf/balloc.c | 2
-rw-r--r--  fs/udf/dir.c | 4
-rw-r--r--  fs/udf/inode.c | 6
-rw-r--r--  fs/udf/namei.c | 20
-rw-r--r--  fs/udf/symlink.c | 10
-rw-r--r--  fs/udf/udfdecl.h | 2
-rw-r--r--  fs/ufs/dir.c | 10
-rw-r--r--  fs/ufs/inode.c | 5
-rw-r--r--  fs/ufs/ufs.h | 6
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 8
-rw-r--r--  fs/xfs/xfs_mount.h | 2
-rw-r--r--  include/acpi/processor.h | 2
-rw-r--r--  include/asm-generic/local.h | 19
-rw-r--r--  include/asm-generic/percpu.h | 18
-rw-r--r--  include/drm/drmP.h | 28
-rw-r--r--  include/drm/drm_buffer.h | 148
-rw-r--r--  include/drm/drm_crtc.h | 2
-rw-r--r--  include/drm/drm_edid.h | 3
-rw-r--r--  include/drm/drm_pciids.h | 36
-rw-r--r--  include/drm/nouveau_drm.h | 86
-rw-r--r--  include/drm/radeon_drm.h | 1
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 2
-rw-r--r--  include/linux/audit.h | 11
-rw-r--r--  include/linux/blktrace_api.h | 4
-rw-r--r--  include/linux/bootmem.h | 7
-rw-r--r--  include/linux/compiler.h | 2
-rw-r--r--  include/linux/dmaengine.h | 4
-rw-r--r--  include/linux/early_res.h | 23
-rw-r--r--  include/linux/ext3_fs.h | 2
-rw-r--r--  include/linux/fb.h | 2
-rw-r--r--  include/linux/firewire-cdev.h | 40
-rw-r--r--  include/linux/firewire.h | 11
-rw-r--r--  include/linux/fs.h | 28
-rw-r--r--  include/linux/fsnotify.h | 11
-rw-r--r--  include/linux/genhd.h | 2
-rw-r--r--  include/linux/gfs2_ondisk.h | 30
-rw-r--r--  include/linux/i2c-smbus.h | 50
-rw-r--r--  include/linux/i2c.h | 8
-rw-r--r--  include/linux/ide.h | 7
-rw-r--r--  include/linux/irq.h | 2
-rw-r--r--  include/linux/jbd2.h | 11
-rw-r--r--  include/linux/kernel.h | 10
-rw-r--r--  include/linux/kexec.h | 2
-rw-r--r--  include/linux/kprobes.h | 44
-rw-r--r--  include/linux/kvm.h | 10
-rw-r--r--  include/linux/kvm_host.h | 71
-rw-r--r--  include/linux/magic.h | 1
-rw-r--r--  include/linux/mfd/mc13783.h | 2
-rw-r--r--  include/linux/mm.h | 16
-rw-r--r--  include/linux/mmzone.h | 14
-rw-r--r--  include/linux/mnt_namespace.h | 1
-rw-r--r--  include/linux/module.h | 37
-rw-r--r--  include/linux/mount.h | 16
-rw-r--r--  include/linux/nfs_fs_sb.h | 2
-rw-r--r--  include/linux/nilfs2_fs.h | 1
-rw-r--r--  include/linux/pci_ids.h | 4
-rw-r--r--  include/linux/percpu-defs.h | 40
-rw-r--r--  include/linux/percpu.h | 44
-rw-r--r--  include/linux/percpu_counter.h | 2
-rw-r--r--  include/linux/range.h | 30
-rw-r--r--  include/linux/regulator/consumer.h | 4
-rw-r--r--  include/linux/regulator/driver.h | 6
-rw-r--r--  include/linux/regulator/fixed.h | 2
-rw-r--r--  include/linux/regulator/max8649.h | 44
-rw-r--r--  include/linux/reiserfs_fs.h | 2
-rw-r--r--  include/linux/security.h | 14
-rw-r--r--  include/linux/srcu.h | 2
-rw-r--r--  include/linux/syslog.h | 52
-rw-r--r--  include/linux/tty.h | 10
-rw-r--r--  include/linux/tty_flip.h | 7
-rw-r--r--  include/linux/usb.h | 53
-rw-r--r--  include/linux/usb/Kbuild | 1
-rw-r--r--  include/linux/usb/atmel_usba_udc.h | 1
-rw-r--r--  include/linux/usb/ch9.h | 2
-rw-r--r--  include/linux/usb/musb.h | 23
-rw-r--r--  include/linux/usb/otg.h | 25
-rw-r--r--  include/linux/usb/quirks.h | 3
-rw-r--r--  include/linux/usb/serial.h | 13
-rw-r--r--  include/linux/usb/vstusb.h | 71
-rw-r--r--  include/linux/vga_switcheroo.h | 57
-rw-r--r--  include/linux/vmstat.h | 8
-rw-r--r--  include/linux/vt.h | 3
-rw-r--r--  include/rdma/ib_pack.h | 1
-rw-r--r--  include/rdma/ib_verbs.h | 4
-rw-r--r--  include/rdma/rdma_cm.h | 1
-rw-r--r--  include/trace/events/ext4.h | 101
-rw-r--r--  include/trace/events/jbd2.h | 28
-rw-r--r--  include/trace/events/kvm.h | 41
-rw-r--r--  init/do_mounts_initrd.c | 4
-rw-r--r--  init/main.c | 27
-rw-r--r--  ipc/mqueue.c | 120
-rw-r--r--  kernel/Makefile | 3
-rw-r--r--  kernel/audit_tree.c | 100
-rw-r--r--  kernel/auditsc.c | 7
-rw-r--r--  kernel/capability.c | 4
-rw-r--r--  kernel/early_res.c | 578
-rw-r--r--  kernel/irq/chip.c | 52
-rw-r--r--  kernel/irq/handle.c | 58
-rw-r--r--  kernel/irq/internals.h | 6
-rw-r--r--  kernel/irq/numa_migrate.c | 4
-rw-r--r--  kernel/kexec.c | 2
-rw-r--r--  kernel/kprobes.c | 647
-rw-r--r--  kernel/module.c | 29
-rw-r--r--  kernel/padata.c | 8
-rw-r--r--  kernel/printk.c | 52
-rw-r--r--  kernel/range.c | 163
-rw-r--r--  kernel/rcutorture.c | 8
-rw-r--r--  kernel/resource.c | 9
-rw-r--r--  kernel/sched.c | 4
-rw-r--r--  kernel/signal.c | 43
-rw-r--r--  kernel/stop_machine.c | 2
-rw-r--r--  kernel/sysctl.c | 12
-rw-r--r--  kernel/sysctl_binary.c | 7
-rw-r--r--  kernel/trace/ring_buffer.c | 1
-rw-r--r--  kernel/trace/ring_buffer_benchmark.c | 1
-rw-r--r--  kernel/trace/trace.c | 6
-rw-r--r--  kernel/trace/trace_functions_graph.c | 4
-rw-r--r--  mm/Kconfig | 4
-rw-r--r--  mm/bootmem.c | 195
-rw-r--r--  mm/filemap.c | 2
-rw-r--r--  mm/page_alloc.c | 263
-rw-r--r--  mm/percpu.c | 36
-rw-r--r--  mm/sparse-vmemmap.c | 76
-rw-r--r--  mm/sparse.c | 196
-rw-r--r--  mm/vmstat.c | 15
-rw-r--r--  net/sunrpc/rpc_pipe.c | 9
-rw-r--r--  security/capability.c | 4
-rw-r--r--  security/commoncap.c | 9
-rw-r--r--  security/security.c | 49
-rw-r--r--  security/selinux/avc.c | 22
-rw-r--r--  security/selinux/hooks.c | 41
-rw-r--r--  security/selinux/include/security.h | 13
-rw-r--r--  security/selinux/selinuxfs.c | 12
-rw-r--r--  security/selinux/ss/context.h | 12
-rw-r--r--  security/selinux/ss/mls.c | 48
-rw-r--r--  security/selinux/ss/mls.h | 2
-rw-r--r--  security/selinux/ss/mls_types.h | 7
-rw-r--r--  security/selinux/ss/policydb.c | 127
-rw-r--r--  security/selinux/ss/policydb.h | 10
-rw-r--r--  security/selinux/ss/services.c | 273
-rw-r--r--  security/smack/smack_lsm.c | 6
-rw-r--r--  security/tomoyo/Makefile | 2
-rw-r--r--  security/tomoyo/common.c | 374
-rw-r--r--  security/tomoyo/common.h | 530
-rw-r--r--  security/tomoyo/domain.c | 391
-rw-r--r--  security/tomoyo/file.c | 731
-rw-r--r--  security/tomoyo/gc.c | 370
-rw-r--r--  security/tomoyo/realpath.c | 292
-rw-r--r--  security/tomoyo/realpath.h | 66
-rw-r--r--  security/tomoyo/tomoyo.c | 142
-rw-r--r--  security/tomoyo/tomoyo.h | 94
-rw-r--r--  tools/perf/Documentation/perf-probe.txt | 58
-rw-r--r--  tools/perf/Makefile | 10
-rw-r--r--  tools/perf/builtin-probe.c | 36
-rw-r--r--  tools/perf/util/probe-event.c | 55
-rw-r--r--  tools/perf/util/probe-finder.c | 1002
-rw-r--r--  tools/perf/util/probe-finder.h | 53
-rw-r--r--  tools/perf/util/string.c | 55
-rw-r--r--  tools/perf/util/string.h | 1
-rw-r--r--  virt/kvm/Kconfig | 3
-rw-r--r--  virt/kvm/assigned-dev.c | 12
-rw-r--r--  virt/kvm/coalesced_mmio.c | 43
-rw-r--r--  virt/kvm/coalesced_mmio.h | 15
-rw-r--r--  virt/kvm/eventfd.c | 21
-rw-r--r--  virt/kvm/ioapic.c | 38
-rw-r--r--  virt/kvm/ioapic.h | 2
-rw-r--r--  virt/kvm/iommu.c | 36
-rw-r--r--  virt/kvm/kvm_main.c | 392
1101 files changed, 51342 insertions, 26730 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index a07c0f3..a986e9b 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -159,3 +159,14 @@ Description:
device. This is useful to ensure auto probing won't
match the driver to the device. For example:
# echo "046d c315" > /sys/bus/usb/drivers/foo/remove_id
+
+What: /sys/bus/usb/device/.../avoid_reset
+Date: December 2009
+Contact: Oliver Neukum <oliver@neukum.org>
+Description:
+ Writing 1 to this file tells the kernel that this
+ device will morph into another mode when it is reset.
+ Drivers will not use reset for error handling for
+ such devices.
+Users:
+ usb_modeswitch
diff --git a/Documentation/ABI/testing/sysfs-platform-asus-laptop b/Documentation/ABI/testing/sysfs-platform-asus-laptop
index a1cb660..1d77539 100644
--- a/Documentation/ABI/testing/sysfs-platform-asus-laptop
+++ b/Documentation/ABI/testing/sysfs-platform-asus-laptop
@@ -1,4 +1,4 @@
-What: /sys/devices/platform/asus-laptop/display
+What: /sys/devices/platform/asus_laptop/display
Date: January 2007
KernelVersion: 2.6.20
Contact: "Corentin Chary" <corentincj@iksaif.net>
@@ -13,7 +13,7 @@ Description:
Ex: - 0 (0000b) means no display
- 3 (0011b) CRT+LCD.
-What: /sys/devices/platform/asus-laptop/gps
+What: /sys/devices/platform/asus_laptop/gps
Date: January 2007
KernelVersion: 2.6.20
Contact: "Corentin Chary" <corentincj@iksaif.net>
@@ -21,7 +21,7 @@ Description:
Control the gps device. 1 means on, 0 means off.
Users: Lapsus
-What: /sys/devices/platform/asus-laptop/ledd
+What: /sys/devices/platform/asus_laptop/ledd
Date: January 2007
KernelVersion: 2.6.20
Contact: "Corentin Chary" <corentincj@iksaif.net>
@@ -29,11 +29,11 @@ Description:
Some models like the W1N have a LED display that can be
used to display several informations.
To control the LED display, use the following :
- echo 0x0T000DDD > /sys/devices/platform/asus-laptop/
+ echo 0x0T000DDD > /sys/devices/platform/asus_laptop/
where T control the 3 letters display, and DDD the 3 digits display.
The DDD table can be found in Documentation/laptops/asus-laptop.txt
-What: /sys/devices/platform/asus-laptop/bluetooth
+What: /sys/devices/platform/asus_laptop/bluetooth
Date: January 2007
KernelVersion: 2.6.20
Contact: "Corentin Chary" <corentincj@iksaif.net>
@@ -42,7 +42,7 @@ Description:
This may control the led, the device or both.
Users: Lapsus
-What: /sys/devices/platform/asus-laptop/wlan
+What: /sys/devices/platform/asus_laptop/wlan
Date: January 2007
KernelVersion: 2.6.20
Contact: "Corentin Chary" <corentincj@iksaif.net>
diff --git a/Documentation/ABI/testing/sysfs-platform-eeepc-laptop b/Documentation/ABI/testing/sysfs-platform-eeepc-laptop
index 7445dfb..5b026c6 100644
--- a/Documentation/ABI/testing/sysfs-platform-eeepc-laptop
+++ b/Documentation/ABI/testing/sysfs-platform-eeepc-laptop
@@ -1,4 +1,4 @@
-What: /sys/devices/platform/eeepc-laptop/disp
+What: /sys/devices/platform/eeepc/disp
Date: May 2008
KernelVersion: 2.6.26
Contact: "Corentin Chary" <corentincj@iksaif.net>
@@ -9,21 +9,21 @@ Description:
- 3 = LCD+CRT
If you run X11, you should use xrandr instead.
-What: /sys/devices/platform/eeepc-laptop/camera
+What: /sys/devices/platform/eeepc/camera
Date: May 2008
KernelVersion: 2.6.26
Contact: "Corentin Chary" <corentincj@iksaif.net>
Description:
Control the camera. 1 means on, 0 means off.
-What: /sys/devices/platform/eeepc-laptop/cardr
+What: /sys/devices/platform/eeepc/cardr
Date: May 2008
KernelVersion: 2.6.26
Contact: "Corentin Chary" <corentincj@iksaif.net>
Description:
Control the card reader. 1 means on, 0 means off.
-What: /sys/devices/platform/eeepc-laptop/cpufv
+What: /sys/devices/platform/eeepc/cpufv
Date: Jun 2009
KernelVersion: 2.6.31
Contact: "Corentin Chary" <corentincj@iksaif.net>
@@ -42,7 +42,7 @@ Description:
`------------ Availables modes
For example, 0x301 means: mode 1 selected, 3 available modes.
-What: /sys/devices/platform/eeepc-laptop/available_cpufv
+What: /sys/devices/platform/eeepc/available_cpufv
Date: Jun 2009
KernelVersion: 2.6.31
Contact: "Corentin Chary" <corentincj@iksaif.net>
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index f9a6e2c..1b2dd4f 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -45,7 +45,7 @@
</sect1>
<sect1><title>Atomic and pointer manipulation</title>
-!Iarch/x86/include/asm/atomic_32.h
+!Iarch/x86/include/asm/atomic.h
!Iarch/x86/include/asm/unaligned.h
</sect1>
diff --git a/Documentation/DocBook/deviceiobook.tmpl b/Documentation/DocBook/deviceiobook.tmpl
index 3ed8812..c1ed6a4 100644
--- a/Documentation/DocBook/deviceiobook.tmpl
+++ b/Documentation/DocBook/deviceiobook.tmpl
@@ -316,7 +316,7 @@ CPU B: spin_unlock_irqrestore(&amp;dev_lock, flags)
<chapter id="pubfunctions">
<title>Public Functions Provided</title>
-!Iarch/x86/include/asm/io_32.h
+!Iarch/x86/include/asm/io.h
!Elib/iomap.c
</chapter>
diff --git a/Documentation/cdrom/ide-cd b/Documentation/cdrom/ide-cd
index 2c558cd..f4dc9de 100644
--- a/Documentation/cdrom/ide-cd
+++ b/Documentation/cdrom/ide-cd
@@ -159,42 +159,7 @@ two arguments: the CDROM device, and the slot number to which you wish
to change. If the slot number is -1, the drive is unloaded.
-4. Compilation options
-----------------------
-
-There are a few additional options which can be set when compiling the
-driver. Most people should not need to mess with any of these; they
-are listed here simply for completeness. A compilation option can be
-enabled by adding a line of the form `#define <option> 1' to the top
-of ide-cd.c. All these options are disabled by default.
-
-VERBOSE_IDE_CD_ERRORS
- If this is set, ATAPI error codes will be translated into textual
- descriptions. In addition, a dump is made of the command which
- provoked the error. This is off by default to save the memory used
- by the (somewhat long) table of error descriptions.
-
-STANDARD_ATAPI
- If this is set, the code needed to deal with certain drives which do
- not properly implement the ATAPI spec will be disabled. If you know
- your drive implements ATAPI properly, you can turn this on to get a
- slightly smaller kernel.
-
-NO_DOOR_LOCKING
- If this is set, the driver will never attempt to lock the door of
- the drive.
-
-CDROM_NBLOCKS_BUFFER
- This sets the size of the buffer to be used for a CDROMREADAUDIO
- ioctl. The default is 8.
-
-TEST
- This currently enables an additional ioctl which enables a user-mode
- program to execute an arbitrary packet command. See the source for
- details. This should be left off unless you know what you're doing.
-
-
-5. Common problems
+4. Common problems
------------------
This section discusses some common problems encountered when trying to
@@ -371,7 +336,7 @@ f. Data corruption.
expense of low system performance.
-6. cdchange.c
+5. cdchange.c
-------------
/*
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 73ef30d..31575e22 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -117,19 +117,25 @@ Who: Mauro Carvalho Chehab <mchehab@infradead.org>
---------------------------
What: PCMCIA control ioctl (needed for pcmcia-cs [cardmgr, cardctl])
-When: November 2005
+When: 2.6.35/2.6.36
Files: drivers/pcmcia/: pcmcia_ioctl.c
Why: With the 16-bit PCMCIA subsystem now behaving (almost) like a
normal hotpluggable bus, and with it using the default kernel
infrastructure (hotplug, driver core, sysfs) keeping the PCMCIA
control ioctl needed by cardmgr and cardctl from pcmcia-cs is
- unnecessary, and makes further cleanups and integration of the
+ unnecessary and potentially harmful (it does not provide for
+ proper locking), and makes further cleanups and integration of the
PCMCIA subsystem into the Linux kernel device driver model more
difficult. The features provided by cardmgr and cardctl are either
handled by the kernel itself now or are available in the new
pcmciautils package available at
http://kernel.org/pub/linux/utils/kernel/pcmcia/
-Who: Dominik Brodowski <linux@brodo.de>
+
+ For all architectures except ARM, the associated config symbol
+ has been removed from kernel 2.6.34; for ARM, it will likely
+ be removed from kernel 2.6.35. The actual code will then likely
+ be removed from kernel 2.6.36.
+Who: Dominik Brodowski <linux@dominikbrodowski.net>
---------------------------
@@ -550,3 +556,35 @@ Why: udev fully replaces this special file system that only contains CAPI
NCCI TTY device nodes. User space (pppdcapiplugin) works without
noticing the difference.
Who: Jan Kiszka <jan.kiszka@web.de>
+
+----------------------------
+
+What: KVM memory aliases support
+When: July 2010
+Why: Memory aliasing support is used for speeding up guest vga access
+ through the vga windows.
+
+ Modern userspace no longer uses this feature, so it's just bitrotted
+ code and can be removed with no impact.
+Who: Avi Kivity <avi@redhat.com>
+
+----------------------------
+
+What: KVM kernel-allocated memory slots
+When: July 2010
+Why: Since 2.6.25, kvm supports user-allocated memory slots, which are
+ much more flexible than kernel-allocated slots. All current userspace
+ supports the newer interface and this code can be removed with no
+ impact.
+Who: Avi Kivity <avi@redhat.com>
+
+----------------------------
+
+What: KVM paravirt mmu host support
+When: January 2011
+Why: The paravirt mmu host support is slower than non-paravirt mmu, both
+ on newer and older hardware. It is already not exposed to the guest,
+ and kept only for live migration purposes.
+Who: Avi Kivity <avi@redhat.com>
+
+----------------------------
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index 839efd8..cf6d0d8 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -74,6 +74,9 @@ norecovery Disable recovery of the filesystem on mount.
This disables every write access on the device for
read-only mounts or snapshots. This option will fail
for r/w mounts on an unclean volume.
+discard Issue discard/TRIM commands to the underlying block
+ device when blocks are freed. This is useful for SSD
+ devices and sparse/thinly-provisioned LUNs.
NILFS2 usage
============
diff --git a/Documentation/filesystems/sharedsubtree.txt b/Documentation/filesystems/sharedsubtree.txt
index 23a1810..fc0e39a 100644
--- a/Documentation/filesystems/sharedsubtree.txt
+++ b/Documentation/filesystems/sharedsubtree.txt
@@ -837,6 +837,9 @@ replicas continue to be exactly same.
individual lists does not affect propagation or the way propagation
tree is modified by operations.
+ All vfsmounts in a peer group have the same ->mnt_master. If it is
+ non-NULL, they form a contiguous (ordered) segment of slave list.
+
A example propagation tree looks as shown in the figure below.
[ NOTE: Though it looks like a forest, if we consider all the shared
mounts as a conceptual entity called 'pnode', it becomes a tree]
@@ -874,8 +877,19 @@ replicas continue to be exactly same.
NOTE: The propagation tree is orthogonal to the mount tree.
+8B Locking:
+
+ ->mnt_share, ->mnt_slave, ->mnt_slave_list, ->mnt_master are protected
+ by namespace_sem (exclusive for modifications, shared for reading).
+
+ Normally we have ->mnt_flags modifications serialized by vfsmount_lock.
+ There are two exceptions: do_add_mount() and clone_mnt().
+ The former modifies a vfsmount that has not been visible in any shared
+ data structures yet.
+ The latter holds namespace_sem and the only references to vfsmount
+ are in lists that can't be traversed without namespace_sem.
-8B Algorithm:
+8C Algorithm:
The crux of the implementation resides in rbind/move operation.
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 81c0c59..e1bb5b2 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -15,7 +15,8 @@ Supported adapters:
* Intel 82801I (ICH9)
* Intel EP80579 (Tolapai)
* Intel 82801JI (ICH10)
- * Intel PCH
+ * Intel 3400/5 Series (PCH)
+ * Intel Cougar Point (PCH)
Datasheets: Publicly available at the Intel website
Authors:
diff --git a/Documentation/i2c/busses/i2c-parport b/Documentation/i2c/busses/i2c-parport
index dceaba1..2461c7b 100644
--- a/Documentation/i2c/busses/i2c-parport
+++ b/Documentation/i2c/busses/i2c-parport
@@ -29,6 +29,9 @@ can be easily added when needed.
Earlier kernels defaulted to type=0 (Philips). But now, if the type
parameter is missing, the driver will simply fail to initialize.
+SMBus alert support is available on adapters which have this line properly
+connected to the parallel port's interrupt pin.
+
Building your own adapter
-------------------------
diff --git a/Documentation/i2c/busses/i2c-parport-light b/Documentation/i2c/busses/i2c-parport-light
index 2874364..bdc9cbb 100644
--- a/Documentation/i2c/busses/i2c-parport-light
+++ b/Documentation/i2c/busses/i2c-parport-light
@@ -9,3 +9,14 @@ parport handling is not an option. The drawback is a reduced portability
and the impossibility to daisy-chain other parallel port devices.
Please see i2c-parport for documentation.
+
+Module parameters:
+
+* type: type of adapter (see i2c-parport or modinfo)
+
+* base: base I/O address
+ Default is 0x378 which is fairly common for parallel ports, at least on PC.
+
+* irq: optional IRQ
+ This must be passed if you want SMBus alert support, assuming your adapter
+ actually supports this.
diff --git a/Documentation/i2c/smbus-protocol b/Documentation/i2c/smbus-protocol
index 9df4744..7c19d1a 100644
--- a/Documentation/i2c/smbus-protocol
+++ b/Documentation/i2c/smbus-protocol
@@ -185,6 +185,22 @@ the protocol. All ARP communications use slave address 0x61 and
require PEC checksums.
+SMBus Alert
+===========
+
+SMBus Alert was introduced in Revision 1.0 of the specification.
+
+The SMBus alert protocol allows several SMBus slave devices to share a
+single interrupt pin on the SMBus master, while still allowing the master
+to know which slave triggered the interrupt.
+
+In the Linux kernel, this is implemented as follows:
+* I2C bus drivers which support SMBus alert should call
+ i2c_setup_smbus_alert() to set up SMBus alert support.
+* I2C drivers for devices which can trigger SMBus alerts should implement
+ the optional alert() callback; a sketch follows below.
+
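+A minimal sketch of the client side (hypothetical driver; the alert()
+signature shown here is assumed from the i2c_driver callback):
+
+	#include <linux/i2c.h>
+
+	/* Called when this client answers the Alert Response Address. */
+	static void mydrv_alert(struct i2c_client *client, unsigned int data)
+	{
+		dev_info(&client->dev, "SMBus alert, data=%u\n", data);
+	}
+
+	static struct i2c_driver mydrv_driver = {
+		.driver = { .name = "mydrv" },
+		.alert  = mydrv_alert,
+		/* .probe, .remove, .id_table, etc. omitted */
+	};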
+
I2C Block Transactions
======================
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index 0a74603..3219ee0 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -318,8 +318,9 @@ Plain I2C communication
These routines read and write some bytes from/to a client. The client
contains the i2c address, so you do not have to include it. The second
parameter contains the bytes to read/write, the third the number of bytes
-to read/write (must be less than the length of the buffer.) Returned is
-the actual number of bytes read/written.
+to read/write (must be less than the length of the buffer; it should also
+be less than 64k, since msg.len is a u16.) Returned is the actual number
+of bytes read/written.
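+
+For instance, a bounded write might look like this (sketch; client and
+buf are hypothetical):
+
+	if (len >= sizeof(buf) || len > 0xffff)
+		return -EINVAL;
+	ret = i2c_master_send(client, buf, len);
+	if (ret < 0)
+		return ret;		/* transfer failed */
+	/* ret is the number of bytes actually written */
+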
int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msg,
int num);
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 35cf64d..35c9b51 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -139,7 +139,6 @@ Code Seq#(hex) Include File Comments
'K' all linux/kd.h
'L' 00-1F linux/loop.h conflict!
'L' 10-1F drivers/scsi/mpt2sas/mpt2sas_ctl.h conflict!
-'L' 20-2F linux/usb/vstusb.h
'L' E0-FF linux/ppdd.h encrypted disk device driver
<http://linux01.gwdg.de/~alatham/ppdd.html>
'M' all linux/soundcard.h conflict!
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index fbcddc5..d80930d 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1794,6 +1794,12 @@ and is between 256 and 4096 characters. It is defined in the file
purges which is reported from either PAL_VM_SUMMARY or
SAL PALO.
+ nr_cpus= [SMP] Maximum number of processors that an SMP kernel
+ could support. nr_cpus=n : n >= 1 limits the kernel to
+ supporting 'n' processors. After boot, the CPU hotplug
+ feature cannot be used to bring additional CPUs online,
+ just as if the kernel had been compiled with NR_CPUS=n.
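+ Example: nr_cpus=4 caps the kernel at 4 processors,
+ even on a machine with more CPUs present.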
+
nr_uarts= [SERIAL] maximum number of UARTs to be registered.
numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 053037a..2f9115c 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -1,6 +1,7 @@
Title : Kernel Probes (Kprobes)
Authors : Jim Keniston <jkenisto@us.ibm.com>
- : Prasanna S Panchamukhi <prasanna@in.ibm.com>
+ : Prasanna S Panchamukhi <prasanna.panchamukhi@gmail.com>
+ : Masami Hiramatsu <mhiramat@redhat.com>
CONTENTS
@@ -15,6 +16,7 @@ CONTENTS
9. Jprobes Example
10. Kretprobes Example
Appendix A: The kprobes debugfs interface
+Appendix B: The kprobes sysctl interface
1. Concepts: Kprobes, Jprobes, Return Probes
@@ -42,13 +44,13 @@ registration/unregistration of a group of *probes. These functions
can speed up unregistration process when you have to unregister
a lot of probes at once.
-The next three subsections explain how the different types of
-probes work. They explain certain things that you'll need to
-know in order to make the best use of Kprobes -- e.g., the
-difference between a pre_handler and a post_handler, and how
-to use the maxactive and nmissed fields of a kretprobe. But
-if you're in a hurry to start using Kprobes, you can skip ahead
-to section 2.
+The next four subsections explain how the different types of
+probes work and how jump optimization works. They explain certain
+things that you'll need to know in order to make the best use of
+Kprobes -- e.g., the difference between a pre_handler and
+a post_handler, and how to use the maxactive and nmissed fields of
+a kretprobe. But if you're in a hurry to start using Kprobes, you
+can skip ahead to section 2.
1.1 How Does a Kprobe Work?
@@ -161,13 +163,125 @@ In case probed function is entered but there is no kretprobe_instance
object available, then in addition to incrementing the nmissed count,
the user entry_handler invocation is also skipped.
+1.4 How Does Jump Optimization Work?
+
+If you configured your kernel with CONFIG_OPTPROBES=y (currently this
+option is supported on x86/x86-64 with non-preemptive kernels) and
+the "debug.kprobes_optimization" sysctl is set to 1 (see sysctl(8)
+and Appendix B), Kprobes tries to reduce probe-hit overhead by using
+a jump instruction instead of a breakpoint instruction at each
+probepoint.
+
+1.4.1 Init a Kprobe
+
+When a probe is registered, before attempting this optimization,
+Kprobes inserts an ordinary, breakpoint-based kprobe at the specified
+address. So, even if it's not possible to optimize this particular
+probepoint, there'll be a probe there.
+
+1.4.2 Safety Check
+
+Before optimizing a probe, Kprobes performs the following safety checks:
+
+- Kprobes verifies that the region that will be replaced by the jump
+instruction (the "optimized region") lies entirely within one function.
+(A jump instruction is multiple bytes, and so may overlay multiple
+instructions.)
+
+- Kprobes analyzes the entire function and verifies that there is no
+jump into the optimized region. Specifically:
+ - the function contains no indirect jump;
+ - the function contains no instruction that causes an exception (since
+ the fixup code triggered by the exception could jump back into the
+ optimized region -- Kprobes checks the exception tables to verify this);
+ and
+ - there is no near jump to the optimized region (other than to the first
+ byte).
+
+- For each instruction in the optimized region, Kprobes verifies that
+the instruction can be executed out of line.
+
+1.4.3 Preparing Detour Buffer
+
+Next, Kprobes prepares a "detour" buffer, which contains the following
+instruction sequence:
+- code to push the CPU's registers (emulating a breakpoint trap)
+- a call to the trampoline code which calls the user's probe handlers
+- code to restore registers
+- the instructions from the optimized region
+- a jump back to the original execution path
+
+1.4.4 Pre-optimization
+
+After preparing the detour buffer, Kprobes verifies that none of the
+following situations exist:
+- The probe has either a break_handler (i.e., it's a jprobe) or a
+post_handler.
+- Other instructions in the optimized region are probed.
+- The probe is disabled.
+In any of the above cases, Kprobes won't start optimizing the probe.
+Since these are temporary situations, Kprobes tries to optimize the
+probe again once the situation changes.
+
+If the kprobe can be optimized, Kprobes enqueues the kprobe to an
+optimizing list, and kicks the kprobe-optimizer workqueue to optimize
+it. If the to-be-optimized probepoint is hit before being optimized,
+Kprobes returns control to the original instruction path by setting
+the CPU's instruction pointer to the copied code in the detour buffer
+-- thus at least avoiding the single-step.
+
+1.4.5 Optimization
+
+The Kprobe-optimizer doesn't insert the jump instruction immediately;
+rather, it calls synchronize_sched() for safety first, because it's
+possible for a CPU to be interrupted in the middle of executing the
+optimized region(*). As you know, synchronize_sched() can ensure
+that all interruptions that were active when synchronize_sched()
+was called are done, but only if CONFIG_PREEMPT=n. So, this version
+of kprobe optimization supports only kernels with CONFIG_PREEMPT=n.(**)
+
+After that, the Kprobe-optimizer calls stop_machine() to replace
+the optimized region with a jump instruction to the detour buffer,
+using text_poke_smp().
+
+1.4.6 Unoptimization
+
+When an optimized kprobe is unregistered, disabled, or blocked by
+another kprobe, it will be unoptimized. If this happens before
+the optimization is complete, the kprobe is just dequeued from the
+optimized list. If the optimization has been done, the jump is
+replaced with the original code (except for an int3 breakpoint in
+the first byte) by using text_poke_smp().
+
+(*)Please imagine that the 2nd instruction is interrupted and then
+the optimizer replaces the 2nd instruction with the jump *address*
+while the interrupt handler is running. When the interrupt
+returns to the original address, there is no longer a valid
+instruction there, and the result is unpredictable.
+
+(**)This optimization-safety checking may be replaced with the
+stop-machine method that ksplice uses for supporting a CONFIG_PREEMPT=y
+kernel.
+
+NOTE for geeks:
+The jump optimization changes the kprobe's pre_handler behavior.
+Without optimization, the pre_handler can change the kernel's execution
+path by changing regs->ip and returning 1. However, when the probe
+is optimized, that modification is ignored. Thus, if you want to
+tweak the kernel's execution path, you need to suppress optimization,
+using one of the following techniques:
+- Specify an empty function for the kprobe's post_handler or break_handler.
+ or
+- Config CONFIG_OPTPROBES=n.
+ or
+- Execute 'sysctl -w debug.kprobes_optimization=0'
+
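+For example, the first technique might look like this (sketch; my_handler
+is a hypothetical function to divert execution to):
+
+	static int my_pre(struct kprobe *p, struct pt_regs *regs)
+	{
+		regs->ip = (unsigned long)my_handler;	/* reroute execution */
+		return 1;
+	}
+
+	/* Intentionally empty: its presence keeps the probe unoptimized,
+	 * so the regs->ip change above is honored. */
+	static void my_post(struct kprobe *p, struct pt_regs *regs,
+			    unsigned long flags)
+	{
+	}
+
+	static struct kprobe kp = {
+		.symbol_name	= "do_fork",
+		.pre_handler	= my_pre,
+		.post_handler	= my_post,
+	};
+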
2. Architectures Supported
Kprobes, jprobes, and return probes are implemented on the following
architectures:
-- i386
-- x86_64 (AMD-64, EM64T)
+- i386 (Supports jump optimization)
+- x86_64 (AMD-64, EM64T) (Supports jump optimization)
- ppc64
- ia64 (Does not support probes on instruction slot1.)
- sparc64 (Return probes not yet implemented.)
@@ -193,6 +307,10 @@ it useful to "Compile the kernel with debug info" (CONFIG_DEBUG_INFO),
so you can use "objdump -d -l vmlinux" to see the source-to-object
code mapping.
+If you want to reduce probing overhead, set "Kprobes jump optimization
+support" (CONFIG_OPTPROBES) to "y". You can find this option under the
+"Kprobes" line.
+
4. API Reference
The Kprobes API includes a "register" function and an "unregister"
@@ -389,7 +507,10 @@ the probe which has been registered.
Kprobes allows multiple probes at the same address. Currently,
however, there cannot be multiple jprobes on the same function at
-the same time.
+the same time. Also, a probepoint for which there is a jprobe or
+a post_handler cannot be optimized. So if you install a jprobe,
+or a kprobe with a post_handler, at an optimized probepoint, the
+probepoint will be unoptimized automatically.
In general, you can install a probe anywhere in the kernel.
In particular, you can probe interrupt handlers. Known exceptions
@@ -453,6 +574,38 @@ reason, Kprobes doesn't support return probes (or kprobes or jprobes)
on the x86_64 version of __switch_to(); the registration functions
return -EINVAL.
+On x86/x86-64, since the Jump Optimization of Kprobes modifies
+instructions widely, there are some limitations to optimization. To
+explain it, we introduce some terminology. Imagine a 3-instruction
+sequence consisting of two 2-byte instructions and one 3-byte
+instruction.
+
+ IA
+ |
+[-2][-1][0][1][2][3][4][5][6][7]
+ [ins1][ins2][ ins3 ]
+ [<- DCR ->]
+ [<- JTPR ->]
+
+ins1: 1st Instruction
+ins2: 2nd Instruction
+ins3: 3rd Instruction
+IA: Insertion Address
+JTPR: Jump Target Prohibition Region
+DCR: Detoured Code Region
+
+The instructions in DCR are copied to the out-of-line buffer
+of the kprobe, because the bytes in DCR are replaced by
+a 5-byte jump instruction. So there are several limitations.
+
+a) The instructions in DCR must be relocatable.
+b) The instructions in DCR must not include a call instruction.
+c) JTPR must not be targeted by any jump or call instruction.
+d) DCR must not straddle the border between functions.
+
+These limitations are all checked by the in-kernel instruction
+decoder, so you don't need to worry about them.
+
6. Probe Overhead
On a typical CPU in use in 2005, a kprobe hit takes 0.5 to 1.0
@@ -476,6 +629,19 @@ k = 0.49 usec; j = 0.76; r = 0.80; kr = 0.82; jr = 1.07
ppc64: POWER5 (gr), 1656 MHz (SMT disabled, 1 virtual CPU per physical CPU)
k = 0.77 usec; j = 1.31; r = 1.26; kr = 1.45; jr = 1.99
+6.1 Optimized Probe Overhead
+
+Typically, an optimized kprobe hit takes 0.07 to 0.1 microseconds to
+process. Here are sample overhead figures (in usec) for x86 architectures.
+k = unoptimized kprobe, b = boosted (single-step skipped), o = optimized kprobe,
+r = unoptimized kretprobe, rb = boosted kretprobe, ro = optimized kretprobe.
+
+i386: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips
+k = 0.80 usec; b = 0.33; o = 0.05; r = 1.10; rb = 0.61; ro = 0.33
+
+x86-64: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips
+k = 0.99 usec; b = 0.43; o = 0.06; r = 1.24; rb = 0.68; ro = 0.30
+
7. TODO
a. SystemTap (http://sourceware.org/systemtap): Provides a simplified
@@ -523,7 +689,8 @@ is also specified. Following columns show probe status. If the probe is on
a virtual address that is no longer valid (module init sections, module
virtual addresses that correspond to modules that've been unloaded),
such probes are marked with [GONE]. If the probe is temporarily disabled,
-such probes are marked with [DISABLED].
+such probes are marked with [DISABLED]. If the probe is optimized, it is
+marked with [OPTIMIZED].
/sys/kernel/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly.
@@ -533,3 +700,19 @@ registered probes will be disarmed, till such time a "1" is echoed to this
file. Note that this knob just disarms and arms all kprobes and doesn't
change each probe's disabling state. This means that disabled kprobes (marked
[DISABLED]) will be not enabled if you turn ON all kprobes by this knob.
+
+
+Appendix B: The kprobes sysctl interface
+
+/proc/sys/debug/kprobes-optimization: Turn kprobes optimization ON/OFF.
+
+When CONFIG_OPTPROBES=y, this sysctl interface appears and it provides
+a knob to globally and forcibly turn jump optimization (see section
+1.4) ON or OFF. By default, jump optimization is allowed (ON).
+If you echo "0" to this file or set "debug.kprobes_optimization" to
+0 via sysctl, all optimized probes will be unoptimized, and any new
+probes registered after that will not be optimized. Note that this
+knob *changes* the optimized state. This means that optimized probes
+(marked [OPTIMIZED]) will be unoptimized ([OPTIMIZED] tag will be
+removed). If the knob is turned on, they will be optimized again.
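+
+Example usage (equivalent ways to disable and re-enable optimization):
+
+  # echo 0 > /proc/sys/debug/kprobes-optimization
+  # sysctl -w debug.kprobes_optimization=1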
+
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index 2811e45..c6416a3 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -23,12 +23,12 @@ of a virtual machine. The ioctls belong to three classes
Only run vcpu ioctls from the same thread that was used to create the
vcpu.
-2. File descritpors
+2. File descriptors
The kvm API is centered around file descriptors. An initial
open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
-handle will create a VM file descripror which can be used to issue VM
+handle will create a VM file descriptor which can be used to issue VM
ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
and return a file descriptor pointing to it. Finally, ioctls on a vcpu
fd can be used to control the vcpu, including the important task of
@@ -643,7 +643,7 @@ Type: vm ioctl
Parameters: struct kvm_clock_data (in)
Returns: 0 on success, -1 on error
-Sets the current timestamp of kvmclock to the valued specific in its parameter.
+Sets the current timestamp of kvmclock to the value specified in its parameter.
In conjunction with KVM_GET_CLOCK, it is used to ensure monotonicity on scenarios
such as migration.
@@ -795,11 +795,11 @@ Unused.
__u64 data_offset; /* relative to kvm_run start */
} io;
-If exit_reason is KVM_EXIT_IO_IN or KVM_EXIT_IO_OUT, then the vcpu has
+If exit_reason is KVM_EXIT_IO, then the vcpu has
executed a port I/O instruction which could not be satisfied by kvm.
data_offset describes where the data is located (KVM_EXIT_IO_OUT) or
where kvm expects application code to place the data for the next
-KVM_RUN invocation (KVM_EXIT_IO_IN). Data format is a patcked array.
+KVM_RUN invocation (KVM_EXIT_IO_IN). Data format is a packed array.
struct {
struct kvm_debug_exit_arch arch;
@@ -815,7 +815,7 @@ Unused.
__u8 is_write;
} mmio;
-If exit_reason is KVM_EXIT_MMIO or KVM_EXIT_IO_OUT, then the vcpu has
+If exit_reason is KVM_EXIT_MMIO, then the vcpu has
executed a memory-mapped I/O instruction which could not be satisfied
by kvm. The 'data' member contains the written data if 'is_write' is
true, and should be filled by application code otherwise.
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 75afa12..39c0a09 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -650,6 +650,10 @@ LCD, CRT or DVI (if available). The following commands are available:
echo expand_toggle > /proc/acpi/ibm/video
echo video_switch > /proc/acpi/ibm/video
+NOTE: Access to this feature is restricted to processes owning the
+CAP_SYS_ADMIN capability for safety reasons, as it can interact badly
+enough with some versions of X.org to crash it.
+
Each video output device can be enabled or disabled individually.
Reading /proc/acpi/ibm/video shows the status of each device.
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 50189bf..fe5c099 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -32,6 +32,8 @@ cs89x0.txt
- the Crystal LAN (CS8900/20-based) Ethernet ISA adapter driver
cxacru.txt
- Conexant AccessRunner USB ADSL Modem
+cxacru-cf.py
+ - Conexant AccessRunner USB ADSL Modem configuration file parser
de4x5.txt
- the Digital EtherWORKS DE4?? and DE5?? PCI Ethernet driver
decnet.txt
diff --git a/Documentation/networking/cxacru-cf.py b/Documentation/networking/cxacru-cf.py
new file mode 100644
index 0000000..b41d298
--- /dev/null
+++ b/Documentation/networking/cxacru-cf.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# Copyright 2009 Simon Arlott
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Usage: cxacru-cf.py < cxacru-cf.bin
+# Output: values string suitable for the sysfs adsl_config attribute
+#
+# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
+# contains mis-aligned values which will stop the modem from being able
+# to make a connection. If the first and last two bytes are removed then
+# the values become valid, but the modulation will be forced to ANSI
+# T1.413 only which may not be appropriate.
+#
+# The original binary format is a packed list of le32 values.
+
+import sys
+import struct
+
+i = 0
+while True:
+ buf = sys.stdin.read(4)
+
+ if len(buf) == 0:
+ break
+ elif len(buf) != 4:
+ sys.stdout.write("\n")
+ sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
+ sys.exit(1)
+
+ if i > 0:
+ sys.stdout.write(" ")
+ sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
+ i += 1
+
+sys.stdout.write("\n")
diff --git a/Documentation/networking/cxacru.txt b/Documentation/networking/cxacru.txt
index b074681..2cce044 100644
--- a/Documentation/networking/cxacru.txt
+++ b/Documentation/networking/cxacru.txt
@@ -4,6 +4,12 @@ While it is capable of managing/maintaining the ADSL connection without the
module loaded, the device will sometimes stop responding after unloading the
driver and it is necessary to unplug/remove power to the device to fix this.
+Note: support for cxacru-cf.bin has been removed. It was not being loaded
+correctly, so it had no effect on the device configuration. Fixing it
+could have stopped existing devices from working if an invalid
+configuration were supplied.
+
+There is a script cxacru-cf.py to convert an existing file to the sysfs form.
+
Detected devices will appear as ATM devices named "cxacru". In /sys/class/atm/
these are directories named cxacruN where N is the device number. A symlink
named device points to the USB interface device's directory which contains
@@ -15,6 +21,15 @@ several sysfs attribute files for retrieving device statistics:
* adsl_headend_environment
Information about the remote headend.
+* adsl_config
+ Configuration writing interface.
+ Write parameters in hexadecimal format <index>=<value>,
+ separated by whitespace, e.g.:
+ "1=0 a=5"
+ Up to 7 parameters at a time will be sent, and the modem will restart
+ the ADSL connection when any value is set. The values written are
+ logged for future reference.
+
* downstream_attenuation (dB)
* downstream_bits_per_frame
* downstream_rate (kbps)
@@ -61,6 +76,7 @@ several sysfs attribute files for retrieving device statistics:
* mac_address
* modulation
+ "" (when not connected)
"ANSI T1.413"
"ITU-T G.992.1 (G.DMT)"
"ITU-T G.992.2 (G.LITE)"
diff --git a/Documentation/powerpc/dts-bindings/fsl/dma.txt b/Documentation/powerpc/dts-bindings/fsl/dma.txt
index 0732cdd..2a4b4bc 100644
--- a/Documentation/powerpc/dts-bindings/fsl/dma.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/dma.txt
@@ -44,21 +44,29 @@ Example:
compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
cell-index = <0>;
reg = <0 0x80>;
+ interrupt-parent = <&ipic>;
+ interrupts = <71 8>;
};
dma-channel@80 {
compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
cell-index = <1>;
reg = <0x80 0x80>;
+ interrupt-parent = <&ipic>;
+ interrupts = <71 8>;
};
dma-channel@100 {
compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
cell-index = <2>;
reg = <0x100 0x80>;
+ interrupt-parent = <&ipic>;
+ interrupts = <71 8>;
};
dma-channel@180 {
compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
cell-index = <3>;
reg = <0x180 0x80>;
+ interrupt-parent = <&ipic>;
+ interrupts = <71 8>;
};
};
diff --git a/Documentation/usb/error-codes.txt b/Documentation/usb/error-codes.txt
index 9cf83e8..d83703e 100644
--- a/Documentation/usb/error-codes.txt
+++ b/Documentation/usb/error-codes.txt
@@ -41,8 +41,8 @@ USB-specific:
-EFBIG Host controller driver can't schedule that many ISO frames.
--EPIPE Specified endpoint is stalled. For non-control endpoints,
- reset this status with usb_clear_halt().
+-EPIPE The pipe type specified in the URB doesn't match the
+ endpoint's actual type.
-EMSGSIZE (a) endpoint maxpacket size is zero; it is not usable
in the current interface altsetting.
@@ -60,6 +60,8 @@ USB-specific:
-EHOSTUNREACH URB was rejected because the device is suspended.
+-ENOEXEC A control URB doesn't contain a Setup packet.
+
**************************************************************************
* Error codes returned by in urb->status *
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index 3bf6818..2790ad4 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -2,7 +2,7 @@
Alan Stern <stern@rowland.harvard.edu>
- November 10, 2009
+ December 11, 2009
@@ -29,9 +29,9 @@ covered to some extent (see Documentation/power/*.txt for more
information about system PM).
Note: Dynamic PM support for USB is present only if the kernel was
-built with CONFIG_USB_SUSPEND enabled. System PM support is present
-only if the kernel was built with CONFIG_SUSPEND or CONFIG_HIBERNATION
-enabled.
+built with CONFIG_USB_SUSPEND enabled (which depends on
+CONFIG_PM_RUNTIME). System PM support is present only if the kernel
+was built with CONFIG_SUSPEND or CONFIG_HIBERNATION enabled.
What is Remote Wakeup?
@@ -229,6 +229,11 @@ necessary operations by hand or add them to a udev script. You can
also change the idle-delay time; 2 seconds is not the best choice for
every device.
+If a driver knows that its device has proper suspend/resume support,
+it can enable autosuspend all by itself. For example, the video
+driver for a laptop's webcam might do this, since these devices are
+rarely used and so should normally be autosuspended.
+
Sometimes it turns out that even when a device does work okay with
autosuspend there are still problems. For example, there are
experimental patches adding autosuspend support to the usbhid driver,
@@ -321,69 +326,81 @@ driver does so by calling these six functions:
void usb_autopm_get_interface_no_resume(struct usb_interface *intf);
void usb_autopm_put_interface_no_suspend(struct usb_interface *intf);
-The functions work by maintaining a counter in the usb_interface
-structure. When intf->pm_usage_count is > 0 then the interface is
-deemed to be busy, and the kernel will not autosuspend the interface's
-device. When intf->pm_usage_count is <= 0 then the interface is
-considered to be idle, and the kernel may autosuspend the device.
+The functions work by maintaining a usage counter in the
+usb_interface's embedded device structure. When the counter is > 0
+then the interface is deemed to be busy, and the kernel will not
+autosuspend the interface's device. When the usage counter is = 0
+then the interface is considered to be idle, and the kernel may
+autosuspend the device.
-(There is a similar pm_usage_count field in struct usb_device,
+(There is a similar usage counter field in struct usb_device,
associated with the device itself rather than any of its interfaces.
-This field is used only by the USB core.)
-
-Drivers must not modify intf->pm_usage_count directly; its value
-should be changed only be using the functions listed above. Drivers
-are responsible for insuring that the overall change to pm_usage_count
-during their lifetime balances out to 0 (it may be necessary for the
-disconnect method to call usb_autopm_put_interface() one or more times
-to fulfill this requirement). The first two routines use the PM mutex
-in struct usb_device for mutual exclusion; drivers using the async
-routines are responsible for their own synchronization and mutual
-exclusion.
-
- usb_autopm_get_interface() increments pm_usage_count and
- attempts an autoresume if the new value is > 0 and the
- device is suspended.
-
- usb_autopm_put_interface() decrements pm_usage_count and
- attempts an autosuspend if the new value is <= 0 and the
- device isn't suspended.
+This counter is used only by the USB core.)
+
+Drivers need not be concerned about balancing changes to the usage
+counter; the USB core will undo any remaining "get"s when a driver
+is unbound from its interface. As a corollary, drivers must not call
+any of the usb_autopm_* functions after their disconnect() routine has
+returned.
+
+Drivers using the async routines are responsible for their own
+synchronization and mutual exclusion.
+
+ usb_autopm_get_interface() increments the usage counter and
+ does an autoresume if the device is suspended. If the
+ autoresume fails, the counter is decremented back.
+
+ usb_autopm_put_interface() decrements the usage counter and
+ attempts an autosuspend if the new value is = 0.
usb_autopm_get_interface_async() and
usb_autopm_put_interface_async() do almost the same things as
- their non-async counterparts. The differences are: they do
- not acquire the PM mutex, and they use a workqueue to do their
+ their non-async counterparts. The big difference is that they
+ use a workqueue to do the resume or suspend part of their
jobs. As a result they can be called in an atomic context,
such as an URB's completion handler, but when they return the
- device will not generally not yet be in the desired state.
+ device will generally not yet be in the desired state.
usb_autopm_get_interface_no_resume() and
usb_autopm_put_interface_no_suspend() merely increment or
- decrement the pm_usage_count value; they do not attempt to
- carry out an autoresume or an autosuspend. Hence they can be
- called in an atomic context.
+ decrement the usage counter; they do not attempt to carry out
+ an autoresume or an autosuspend. Hence they can be called in
+ an atomic context.
-The conventional usage pattern is that a driver calls
+The simplest usage pattern is that a driver calls
usb_autopm_get_interface() in its open routine and
-usb_autopm_put_interface() in its close or release routine. But
-other patterns are possible.
+usb_autopm_put_interface() in its close or release routine. But other
+patterns are possible.
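+
+For instance (sketch; mydev is a hypothetical per-device structure that
+stores the usb_interface pointer):
+
+	static int mydrv_open(struct inode *inode, struct file *file)
+	{
+		int rc = usb_autopm_get_interface(mydev->intf);
+
+		if (rc < 0)
+			return rc;	/* autoresume failed */
+		/* ... normal open processing ... */
+		return 0;
+	}
+
+	static int mydrv_release(struct inode *inode, struct file *file)
+	{
+		/* ... */
+		usb_autopm_put_interface(mydev->intf);
+		return 0;
+	}
+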
The autosuspend attempts mentioned above will often fail for one
reason or another. For example, the power/level attribute might be
set to "on", or another interface in the same device might not be
idle. This is perfectly normal. If the reason for failure was that
-the device hasn't been idle for long enough, a delayed workqueue
-routine is automatically set up to carry out the operation when the
-autosuspend idle-delay has expired.
+the device hasn't been idle for long enough, a timer is scheduled to
+carry out the operation automatically when the autosuspend idle-delay
+has expired.
Autoresume attempts also can fail, although failure would mean that
the device is no longer present or operating properly. Unlike
-autosuspend, there's no delay for an autoresume.
+autosuspend, there's no idle-delay for an autoresume.
Other parts of the driver interface
-----------------------------------
+Drivers can enable autosuspend for their devices by calling
+
+ usb_enable_autosuspend(struct usb_device *udev);
+
+in their probe() routine, if they know that the device is capable of
+suspending and resuming correctly. This is exactly equivalent to
+writing "auto" to the device's power/level attribute. Likewise,
+drivers can disable autosuspend by calling
+
+ usb_disable_autosuspend(struct usb_device *udev);
+
+This is exactly the same as writing "on" to the power/level attribute.
+
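+A sketch of the probe-time call (hypothetical driver):
+
+	static int mydrv_probe(struct usb_interface *intf,
+			       const struct usb_device_id *id)
+	{
+		struct usb_device *udev = interface_to_usbdev(intf);
+
+		/* this hardware is known to suspend and resume correctly */
+		usb_enable_autosuspend(udev);
+		/* ... rest of probe ... */
+		return 0;
+	}
+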
Sometimes a driver needs to make sure that remote wakeup is enabled
during autosuspend. For example, there's not much point
autosuspending a keyboard if the user can't cause the keyboard to do a
@@ -395,26 +412,27 @@ though, setting this flag won't cause the kernel to autoresume it.
Normally a driver would set this flag in its probe method, at which
time the device is guaranteed not to be autosuspended.)
-The synchronous usb_autopm_* routines have to run in a sleepable
-process context; they must not be called from an interrupt handler or
-while holding a spinlock. In fact, the entire autosuspend mechanism
-is not well geared toward interrupt-driven operation. However there
-is one thing a driver can do in an interrupt handler:
+If a driver does its I/O asynchronously in interrupt context, it
+should call usb_autopm_get_interface_async() before starting output and
+usb_autopm_put_interface_async() when the output queue drains. When
+it receives an input event, it should call
usb_mark_last_busy(struct usb_device *udev);
-This sets udev->last_busy to the current time. udev->last_busy is the
-field used for idle-delay calculations; updating it will cause any
-pending autosuspend to be moved back. The usb_autopm_* routines will
-also set the last_busy field to the current time.
-
-Calling urb_mark_last_busy() from within an URB completion handler is
-subject to races: The kernel may have just finished deciding the
-device has been idle for long enough but not yet gotten around to
-calling the driver's suspend method. The driver would have to be
-responsible for synchronizing its suspend method with its URB
-completion handler and causing the autosuspend to fail with -EBUSY if
-an URB had completed too recently.
+in the event handler. This sets udev->last_busy to the current time.
+udev->last_busy is the field used for idle-delay calculations;
+updating it will cause any pending autosuspend to be moved back. Most
+of the usb_autopm_* routines will also set the last_busy field to the
+current time.
+
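+For example, an interrupt-driven input driver might do (sketch; the
+mydrv structure is hypothetical):
+
+	static void mydrv_urb_complete(struct urb *urb)
+	{
+		struct mydrv *dev = urb->context;
+
+		/* push back any pending autosuspend */
+		usb_mark_last_busy(dev->udev);
+		/* ... process the input event, resubmit the URB ... */
+	}
+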
+Asynchronous operation is always subject to races. For example, a
+driver may call one of the usb_autopm_*_interface_async() routines at
+a time when the core has just finished deciding the device has been
+idle for long enough but not yet gotten around to calling the driver's
+suspend method. The suspend method must be responsible for
+synchronizing with the output request routine and the URB completion
+handler; it should cause autosuspends to fail with -EBUSY if the
+driver needs to use the device.
External suspend calls should never be allowed to fail in this way,
only autosuspend calls. The driver can tell them apart by checking
@@ -422,75 +440,23 @@ the PM_EVENT_AUTO bit in the message.event argument to the suspend
method; this bit will be set for internal PM events (autosuspend) and
clear for external PM events.
-Many of the ingredients in the autosuspend framework are oriented
-towards interfaces: The usb_interface structure contains the
-pm_usage_cnt field, and the usb_autopm_* routines take an interface
-pointer as their argument. But somewhat confusingly, a few of the
-pieces (i.e., usb_mark_last_busy()) use the usb_device structure
-instead. Drivers need to keep this straight; they can call
-interface_to_usbdev() to find the device structure for a given
-interface.
-
- Locking requirements
- --------------------
+ Mutual exclusion
+ ----------------
-All three suspend/resume methods are always called while holding the
-usb_device's PM mutex. For external events -- but not necessarily for
-autosuspend or autoresume -- the device semaphore (udev->dev.sem) will
-also be held. This implies that external suspend/resume events are
-mutually exclusive with calls to probe, disconnect, pre_reset, and
-post_reset; the USB core guarantees that this is true of internal
-suspend/resume events as well.
+For external events -- but not necessarily for autosuspend or
+autoresume -- the device semaphore (udev->dev.sem) will be held when a
+suspend or resume method is called. This implies that external
+suspend/resume events are mutually exclusive with calls to probe,
+disconnect, pre_reset, and post_reset; the USB core guarantees that
+this is true of autosuspend/autoresume events as well.
If a driver wants to block all suspend/resume calls during some
-critical section, it can simply acquire udev->pm_mutex. Note that
-calls to resume may be triggered indirectly. Block IO due to memory
-allocations can make the vm subsystem resume a device. Thus while
-holding this lock you must not allocate memory with GFP_KERNEL or
-GFP_NOFS.
-
-Alternatively, if the critical section might call some of the
-usb_autopm_* routines, the driver can avoid deadlock by doing:
-
- down(&udev->dev.sem);
- rc = usb_autopm_get_interface(intf);
-
-and at the end of the critical section:
-
- if (!rc)
- usb_autopm_put_interface(intf);
- up(&udev->dev.sem);
-
-Holding the device semaphore will block all external PM calls, and the
-usb_autopm_get_interface() will prevent any internal PM calls, even if
-it fails. (Exercise: Why?)
-
-The rules for locking order are:
-
- Never acquire any device semaphore while holding any PM mutex.
-
- Never acquire udev->pm_mutex while holding the PM mutex for
- a device that isn't a descendant of udev.
-
-In other words, PM mutexes should only be acquired going up the device
-tree, and they should be acquired only after locking all the device
-semaphores you need to hold. These rules don't matter to drivers very
-much; they usually affect just the USB core.
-
-Still, drivers do need to be careful. For example, many drivers use a
-private mutex to synchronize their normal I/O activities with their
-disconnect method. Now if the driver supports autosuspend then it
-must call usb_autopm_put_interface() from somewhere -- maybe from its
-close method. It should make the call while holding the private mutex,
-since a driver shouldn't call any of the usb_autopm_* functions for an
-interface from which it has been unbound.
-
-But the usb_autpm_* routines always acquire the device's PM mutex, and
-consequently the locking order has to be: private mutex first, PM
-mutex second. Since the suspend method is always called with the PM
-mutex held, it mustn't try to acquire the private mutex. It has to
-synchronize with the driver's I/O activities in some other way.
+critical section, the best way is to lock the device and call
+usb_autopm_get_interface() (and do the reverse at the end of the
+critical section). Holding the device semaphore will block all
+external PM calls, and the usb_autopm_get_interface() will prevent any
+internal PM calls, even if it fails. (Exercise: Why?)
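+
+In code, the pattern might look like this (sketch; usb_lock_device() is
+assumed to be how the driver locks the device):
+
+	usb_lock_device(udev);
+	rc = usb_autopm_get_interface(intf);
+	/* ... critical section: no suspend/resume calls can occur ... */
+	if (rc == 0)
+		usb_autopm_put_interface(intf);
+	usb_unlock_device(udev);
+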
Interaction between dynamic PM and system PM
@@ -499,22 +465,11 @@ synchronize with the driver's I/O activities in some other way.
Dynamic power management and system power management can interact in
a couple of ways.
-Firstly, a device may already be manually suspended or autosuspended
-when a system suspend occurs. Since system suspends are supposed to
-be as transparent as possible, the device should remain suspended
-following the system resume. The 2.6.23 kernel obeys this principle
-for manually suspended devices but not for autosuspended devices; they
-do get resumed when the system wakes up. (Presumably they will be
-autosuspended again after their idle-delay time expires.) In later
-kernels this behavior will be fixed.
-
-(There is an exception. If a device would undergo a reset-resume
-instead of a normal resume, and the device is enabled for remote
-wakeup, then the reset-resume takes place even if the device was
-already suspended when the system suspend began. The justification is
-that a reset-resume is a kind of remote-wakeup event. Or to put it
-another way, a device which needs a reset won't be able to generate
-normal remote-wakeup signals, so it ought to be resumed immediately.)
+Firstly, a device may already be autosuspended when a system suspend
+occurs. Since system suspends are supposed to be as transparent as
+possible, the device should remain suspended following the system
+resume. But this theory may not work out well in practice; over time
+the kernel's behavior in this regard has changed.
Secondly, a dynamic power-management event may occur as a system
suspend is underway. The window for this is short, since system
diff --git a/MAINTAINERS b/MAINTAINERS
index 34f52a1..51d8b52 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3173,7 +3173,7 @@ F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c
KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
-M: Hollis Blanchard <hollisb@us.ibm.com>
+M: Alexander Graf <agraf@suse.de>
L: kvm-ppc@vger.kernel.org
W: http://kvm.qumranet.com
S: Supported
@@ -6097,6 +6097,7 @@ F: arch/x86/
X86 PLATFORM DRIVERS
M: Matthew Garrett <mjg@redhat.com>
L: platform-driver-x86@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.git
S: Maintained
F: drivers/platform/x86
diff --git a/arch/Kconfig b/arch/Kconfig
index 215e460..e5eb133 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -41,6 +41,17 @@ config KPROBES
for kernel debugging, non-intrusive instrumentation and testing.
If in doubt, say "N".
+config OPTPROBES
+ bool "Kprobes jump optimization support (EXPERIMENTAL)"
+ default y
+ depends on KPROBES
+ depends on !PREEMPT
+ depends on HAVE_OPTPROBES
+ select KALLSYMS_ALL
+ help
+ This option allows kprobes to optimize a breakpoint into
+ a jump, reducing probe-hit overhead.
+
config HAVE_EFFICIENT_UNALIGNED_ACCESS
bool
help
@@ -83,6 +94,8 @@ config HAVE_KPROBES
config HAVE_KRETPROBES
bool
+config HAVE_OPTPROBES
+ bool
#
# An arch should select this if it provides all these things:
#
diff --git a/arch/alpha/include/asm/local.h b/arch/alpha/include/asm/local.h
index 6ad3ea6..b9e3e33 100644
--- a/arch/alpha/include/asm/local.h
+++ b/arch/alpha/include/asm/local.h
@@ -98,21 +98,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
#define __local_add(i,l) ((l)->a.counter+=(i))
#define __local_sub(i,l) ((l)->a.counter-=(i))
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- */
-#define cpu_local_read(l) local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
-
-#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
-
-#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
-
#endif /* _ALPHA_LOCAL_H */
diff --git a/arch/arm/configs/rx51_defconfig b/arch/arm/configs/rx51_defconfig
index 426ae94..193bd33 100644
--- a/arch/arm/configs/rx51_defconfig
+++ b/arch/arm/configs/rx51_defconfig
@@ -445,6 +445,8 @@ CONFIG_IP_NF_FILTER=m
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+CONFIG_PHONET=y
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set
@@ -1325,27 +1327,34 @@ CONFIG_USB_GADGET_SELECTED=y
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
# CONFIG_USB_GADGET_PXA27X is not set
-# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
# CONFIG_USB_GADGET_M66592 is not set
# CONFIG_USB_GADGET_AMD5536UDC is not set
# CONFIG_USB_GADGET_FSL_QE is not set
# CONFIG_USB_GADGET_CI13XXX is not set
# CONFIG_USB_GADGET_NET2280 is not set
# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
# CONFIG_USB_ZERO_HNPTEST is not set
+# CONFIG_USB_AUDIO is not set
# CONFIG_USB_ETH is not set
# CONFIG_USB_GADGETFS is not set
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_MASS_STORAGE is not set
# CONFIG_USB_G_SERIAL is not set
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
# CONFIG_USB_CDC_COMPOSITE is not set
+CONFIG_USB_G_NOKIA=m
+# CONFIG_USB_G_MULTI is not set
#
# OTG and related infrastructure
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
index 1a8c727..9b28f12 100644
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ b/arch/arm/include/asm/hardware/iop3xx-adma.h
@@ -366,8 +366,7 @@ static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
slot_cnt += *slots_per_op;
}
- if (len)
- slot_cnt += *slots_per_op;
+ slot_cnt += *slots_per_op;
return slot_cnt;
}
@@ -389,8 +388,7 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
slot_cnt += *slots_per_op;
}
- if (len)
- slot_cnt += *slots_per_op;
+ slot_cnt += *slots_per_op;
return slot_cnt;
}
@@ -737,10 +735,8 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
i += slots_per_op;
} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
- if (len) {
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- iter->byte_count = len;
- }
+ iter = iop_hw_desc_slot_idx(hw_desc, i);
+ iter->byte_count = len;
}
}
diff --git a/arch/arm/mach-mx2/devices.c b/arch/arm/mach-mx2/devices.c
index 3d398ce..3956d82 100644
--- a/arch/arm/mach-mx2/devices.c
+++ b/arch/arm/mach-mx2/devices.c
@@ -31,6 +31,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
+#include <linux/dma-mapping.h>
#include <mach/irqs.h>
#include <mach/hardware.h>
@@ -292,7 +293,7 @@ struct platform_device mxc_fb_device = {
.num_resources = ARRAY_SIZE(mxc_fb),
.resource = mxc_fb,
.dev = {
- .coherent_dma_mask = 0xFFFFFFFF,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -395,17 +396,17 @@ static struct resource mxc_sdhc1_resources[] = {
},
};
-static u64 mxc_sdhc1_dmamask = 0xffffffffUL;
+static u64 mxc_sdhc1_dmamask = DMA_BIT_MASK(32);
struct platform_device mxc_sdhc_device0 = {
- .name = "mxc-mmc",
- .id = 0,
- .dev = {
- .dma_mask = &mxc_sdhc1_dmamask,
- .coherent_dma_mask = 0xffffffff,
- },
- .num_resources = ARRAY_SIZE(mxc_sdhc1_resources),
- .resource = mxc_sdhc1_resources,
+ .name = "mxc-mmc",
+ .id = 0,
+ .dev = {
+ .dma_mask = &mxc_sdhc1_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(mxc_sdhc1_resources),
+ .resource = mxc_sdhc1_resources,
};
static struct resource mxc_sdhc2_resources[] = {
@@ -424,17 +425,17 @@ static struct resource mxc_sdhc2_resources[] = {
},
};
-static u64 mxc_sdhc2_dmamask = 0xffffffffUL;
+static u64 mxc_sdhc2_dmamask = DMA_BIT_MASK(32);
struct platform_device mxc_sdhc_device1 = {
- .name = "mxc-mmc",
- .id = 1,
- .dev = {
- .dma_mask = &mxc_sdhc2_dmamask,
- .coherent_dma_mask = 0xffffffff,
- },
- .num_resources = ARRAY_SIZE(mxc_sdhc2_resources),
- .resource = mxc_sdhc2_resources,
+ .name = "mxc-mmc",
+ .id = 1,
+ .dev = {
+ .dma_mask = &mxc_sdhc2_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(mxc_sdhc2_resources),
+ .resource = mxc_sdhc2_resources,
};
#ifdef CONFIG_MACH_MX27
@@ -450,7 +451,7 @@ static struct resource otg_resources[] = {
},
};
-static u64 otg_dmamask = 0xffffffffUL;
+static u64 otg_dmamask = DMA_BIT_MASK(32);
/* OTG gadget device */
struct platform_device mxc_otg_udc_device = {
@@ -458,7 +459,7 @@ struct platform_device mxc_otg_udc_device = {
.id = -1,
.dev = {
.dma_mask = &otg_dmamask,
- .coherent_dma_mask = 0xffffffffUL,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
.resource = otg_resources,
.num_resources = ARRAY_SIZE(otg_resources),
@@ -469,7 +470,7 @@ struct platform_device mxc_otg_host = {
.name = "mxc-ehci",
.id = 0,
.dev = {
- .coherent_dma_mask = 0xffffffff,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.dma_mask = &otg_dmamask,
},
.resource = otg_resources,
@@ -478,7 +479,7 @@ struct platform_device mxc_otg_host = {
/* USB host 1 */
-static u64 usbh1_dmamask = 0xffffffffUL;
+static u64 usbh1_dmamask = DMA_BIT_MASK(32);
static struct resource mxc_usbh1_resources[] = {
{
@@ -496,7 +497,7 @@ struct platform_device mxc_usbh1 = {
.name = "mxc-ehci",
.id = 1,
.dev = {
- .coherent_dma_mask = 0xffffffff,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.dma_mask = &usbh1_dmamask,
},
.resource = mxc_usbh1_resources,
@@ -504,7 +505,7 @@ struct platform_device mxc_usbh1 = {
};
/* USB host 2 */
-static u64 usbh2_dmamask = 0xffffffffUL;
+static u64 usbh2_dmamask = DMA_BIT_MASK(32);
static struct resource mxc_usbh2_resources[] = {
{
@@ -522,7 +523,7 @@ struct platform_device mxc_usbh2 = {
.name = "mxc-ehci",
.id = 2,
.dev = {
- .coherent_dma_mask = 0xffffffff,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.dma_mask = &usbh2_dmamask,
},
.resource = mxc_usbh2_resources,
@@ -642,3 +643,30 @@ int __init mxc_register_gpios(void)
{
return mxc_gpio_init(imx_gpio_ports, ARRAY_SIZE(imx_gpio_ports));
}
+
+#ifdef CONFIG_MACH_MX21
+static struct resource mx21_usbhc_resources[] = {
+ {
+ .start = USBOTG_BASE_ADDR,
+ .end = USBOTG_BASE_ADDR + 0x1FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MXC_INT_USBHOST,
+ .end = MXC_INT_USBHOST,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device mx21_usbhc_device = {
+ .name = "imx21-hcd",
+ .id = 0,
+ .dev = {
+ .dma_mask = &mx21_usbhc_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(mx21_usbhc_resources),
+ .resource = mx21_usbhc_resources,
+};
+#endif
+
diff --git a/arch/arm/mach-mx2/devices.h b/arch/arm/mach-mx2/devices.h
index 97306aa..f12694b 100644
--- a/arch/arm/mach-mx2/devices.h
+++ b/arch/arm/mach-mx2/devices.h
@@ -26,5 +26,6 @@ extern struct platform_device mxc_usbh2;
extern struct platform_device mxc_spi_device0;
extern struct platform_device mxc_spi_device1;
extern struct platform_device mxc_spi_device2;
+extern struct platform_device mx21_usbhc_device;
extern struct platform_device imx_ssi_device0;
extern struct platform_device imx_ssi_device1;
diff --git a/arch/arm/mach-u300/include/mach/coh901318.h b/arch/arm/mach-u300/include/mach/coh901318.h
index f4cfee9..b8155b4 100644
--- a/arch/arm/mach-u300/include/mach/coh901318.h
+++ b/arch/arm/mach-u300/include/mach/coh901318.h
@@ -53,7 +53,7 @@ struct coh901318_params {
* struct coh_dma_channel - dma channel base
* @name: ascii name of dma channel
* @number: channel id number
- * @desc_nbr_max: number of preallocated descriptortors
+ * @desc_nbr_max: number of preallocated descriptors
* @priority_high: prio of channel, 0 low otherwise high.
* @param: configuration parameters
* @dev_addr: physical address of periphal connected to channel
diff --git a/arch/arm/plat-mxc/include/mach/mx21-usbhost.h b/arch/arm/plat-mxc/include/mach/mx21-usbhost.h
new file mode 100644
index 0000000..22d0b59
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/mx21-usbhost.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2009 Martin Fuzzey <mfuzzey@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MX21_USBH
+#define __ASM_ARCH_MX21_USBH
+
+enum mx21_usbh_xcvr {
+ /* Values below as used by hardware (HWMODE register) */
+ MX21_USBXCVR_TXDIF_RXDIF = 0,
+ MX21_USBXCVR_TXDIF_RXSE = 1,
+ MX21_USBXCVR_TXSE_RXDIF = 2,
+ MX21_USBXCVR_TXSE_RXSE = 3,
+};
+
+struct mx21_usbh_platform_data {
+ enum mx21_usbh_xcvr host_xcvr; /* transceiver mode host 1,2 ports */
+ enum mx21_usbh_xcvr otg_xcvr; /* transceiver mode otg (as host) port */
+ u16 enable_host1:1,
+ enable_host2:1,
+ enable_otg_host:1, /* enable "OTG" port (as host) */
+ host1_xcverless:1, /* transceiverless host1 port */
+ host1_txenoe:1, /* output enable host1 transmit enable */
+ otg_ext_xcvr:1, /* external transceiver for OTG port */
+ unused:10;
+};
+
+#endif /* __ASM_ARCH_MX21_USBH */
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index b13d187..3a4bc1a 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1770,10 +1770,13 @@ at32_add_device_usba(unsigned int id, struct usba_platform_data *data)
ARRAY_SIZE(usba0_resource)))
goto out_free_pdev;
- if (data)
+ if (data) {
usba_data.pdata.vbus_pin = data->vbus_pin;
- else
+ usba_data.pdata.vbus_pin_inverted = data->vbus_pin_inverted;
+ } else {
usba_data.pdata.vbus_pin = -EINVAL;
+ usba_data.pdata.vbus_pin_inverted = -EINVAL;
+ }
data = &usba_data.pdata;
data->num_ep = ARRAY_SIZE(at32_usba_ep);
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index b0ed0b4..01b2f58 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -816,8 +816,8 @@ ENDPROC(_resume)
ENTRY(_ret_from_exception)
#ifdef CONFIG_IPIPE
- p2.l = _per_cpu__ipipe_percpu_domain;
- p2.h = _per_cpu__ipipe_percpu_domain;
+ p2.l = _ipipe_percpu_domain;
+ p2.h = _ipipe_percpu_domain;
r0.l = _ipipe_root;
r0.h = _ipipe_root;
r2 = [p2];
diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S
index 2c18d08..c52bef3 100644
--- a/arch/cris/arch-v10/kernel/entry.S
+++ b/arch/cris/arch-v10/kernel/entry.S
@@ -358,7 +358,7 @@ mmu_bus_fault:
1: btstq 12, $r1 ; Refill?
bpl 2f
lsrq 24, $r1 ; Get PGD index (bit 24-31)
- move.d [per_cpu__current_pgd], $r0 ; PGD for the current process
+ move.d [current_pgd], $r0 ; PGD for the current process
move.d [$r0+$r1.d], $r0 ; Get PMD
beq 2f
nop
diff --git a/arch/cris/arch-v32/mm/mmu.S b/arch/cris/arch-v32/mm/mmu.S
index 2238d15..f125d91 100644
--- a/arch/cris/arch-v32/mm/mmu.S
+++ b/arch/cris/arch-v32/mm/mmu.S
@@ -115,7 +115,7 @@
#ifdef CONFIG_SMP
move $s7, $acr ; PGD
#else
- move.d per_cpu__current_pgd, $acr ; PGD
+ move.d current_pgd, $acr ; PGD
#endif
; Look up PMD in PGD
lsrq 24, $r0 ; Get PMD index into PGD (bit 24-31)
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index 30cf465..f7c00a5 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -9,7 +9,7 @@
#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE
#ifdef __ASSEMBLY__
-# define THIS_CPU(var) (per_cpu__##var) /* use this to mark accesses to per-CPU variables... */
+# define THIS_CPU(var) (var) /* use this to mark accesses to per-CPU variables... */
#else /* !__ASSEMBLY__ */
@@ -39,7 +39,7 @@ extern void *per_cpu_init(void);
* On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
* more efficient.
*/
-#define __ia64_per_cpu_var(var) per_cpu__##var
+#define __ia64_per_cpu_var(var) var
#include <asm-generic/percpu.h>
diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h
index b8370c8..baa74c8 100644
--- a/arch/ia64/include/asm/xen/events.h
+++ b/arch/ia64/include/asm/xen/events.h
@@ -36,10 +36,6 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return !(ia64_psr(regs)->i);
}
-static inline void handle_irq(int irq, struct pt_regs *regs)
-{
- __do_IRQ(irq);
-}
#define irq_ctx_init(cpu) do { } while (0)
#endif /* _ASM_IA64_XEN_EVENTS_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index c16fb03..a7ca07f 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -852,8 +852,8 @@ __init void prefill_possible_map(void)
possible = available_cpus + additional_cpus;
- if (possible > NR_CPUS)
- possible = NR_CPUS;
+ if (possible > nr_cpu_ids)
+ possible = nr_cpu_ids;
printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
possible, max((possible - available_cpus), 0));
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 461b999..7f4a0ed 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,9 +30,9 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic
#endif
#include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
+EXPORT_SYMBOL(ia64_cpu_info);
#ifdef CONFIG_SMP
-EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
+EXPORT_SYMBOL(local_per_cpu_offset);
#endif
#include <asm/uaccess.h>
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 01c7579..fa4d1e5 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -26,6 +26,7 @@ config KVM
select ANON_INODES
select HAVE_KVM_IRQCHIP
select KVM_APIC_ARCHITECTURE
+ select KVM_MMIO
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 5fdeec5..26e0e08 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -241,10 +241,10 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 0;
mmio:
if (p->dir)
- r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr,
+ r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr,
p->size, &p->data);
else
- r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr,
+ r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr,
p->size, &p->data);
if (r)
printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
@@ -636,12 +636,9 @@ static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
union context *host_ctx, *guest_ctx;
- int r;
+ int r, idx;
- /*
- * down_read() may sleep and return with interrupts enabled
- */
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
again:
if (signal_pending(current)) {
@@ -663,7 +660,7 @@ again:
if (r < 0)
goto vcpu_run_fail;
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm_guest_enter();
/*
@@ -687,7 +684,7 @@ again:
kvm_guest_exit();
preempt_enable();
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_handle_exit(kvm_run, vcpu);
@@ -697,10 +694,10 @@ again:
}
out:
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (r > 0) {
kvm_resched(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
goto again;
}
@@ -971,7 +968,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
goto out;
r = kvm_setup_default_irq_routing(kvm);
if (r) {
- kfree(kvm->arch.vioapic);
+ kvm_ioapic_destroy(kvm);
goto out;
}
break;
@@ -1377,12 +1374,14 @@ static void free_kvm(struct kvm *kvm)
static void kvm_release_vm_pages(struct kvm *kvm)
{
+ struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int i, j;
unsigned long base_gfn;
- for (i = 0; i < kvm->nmemslots; i++) {
- memslot = &kvm->memslots[i];
+ slots = rcu_dereference(kvm->memslots);
+ for (i = 0; i < slots->nmemslots; i++) {
+ memslot = &slots->memslots[i];
base_gfn = memslot->base_gfn;
for (j = 0; j < memslot->npages; j++) {
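
kvm_release_vm_pages() now fetches the slot array through rcu_dereference() and walks that snapshot instead of touching kvm->memslots fields directly. A self-contained sketch of reading a published pointer once and iterating the snapshot; the atomic load merely models rcu_dereference(), and the struct names are invented for illustration.

#include <stdio.h>

#define MAX_SLOTS 4

struct memslot_model  { unsigned long base_gfn, npages; };
struct memslots_model {
    int nmemslots;
    struct memslot_model memslots[MAX_SLOTS];
};

/* Published pointer; readers load it once and use the snapshot. */
static struct memslots_model *published_slots;

static void release_pages_model(void)
{
    /* One consume-ordered load, modeling rcu_dereference(kvm->memslots). */
    struct memslots_model *slots =
        __atomic_load_n(&published_slots, __ATOMIC_CONSUME);

    for (int i = 0; i < slots->nmemslots; i++) {
        struct memslot_model *m = &slots->memslots[i];
        for (unsigned long j = 0; j < m->npages; j++)
            printf("release gfn %lu\n", m->base_gfn + j);
    }
}

int main(void)
{
    static struct memslots_model s = {
        .nmemslots = 2,
        .memslots  = { { .base_gfn = 0,  .npages = 2 },
                       { .base_gfn = 16, .npages = 1 } },
    };
    published_slots = &s;
    release_pages_model();
    return 0;
}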
@@ -1405,6 +1404,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kfree(kvm->arch.vioapic);
kvm_release_vm_pages(kvm);
kvm_free_physmem(kvm);
+ cleanup_srcu_struct(&kvm->srcu);
free_kvm(kvm);
}
@@ -1576,15 +1576,15 @@ out:
return r;
}
-int kvm_arch_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
int user_alloc)
{
unsigned long i;
unsigned long pfn;
- int npages = mem->memory_size >> PAGE_SHIFT;
- struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+ int npages = memslot->npages;
unsigned long base_gfn = memslot->base_gfn;
if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
@@ -1608,6 +1608,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
return 0;
}
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ return;
+}
+
void kvm_arch_flush_shadow(struct kvm *kvm)
{
kvm_flush_remote_tlbs(kvm);
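
The old single-shot kvm_arch_set_memory_region() is split into a prepare step, which may fail before anything is published, and a commit step that runs after publication and must not fail; on ia64 the commit is a no-op. A sketch of that two-phase shape with hypothetical names:

#include <stdio.h>

struct slot { unsigned long base, npages; };

/* Phase 1: may fail; nothing visible to readers has changed yet. */
static int arch_prepare(const struct slot *new_slot)
{
    if (new_slot->base + new_slot->npages > 1024) {
        fprintf(stderr, "slot out of range\n");
        return -1;
    }
    return 0;
}

/* Phase 2: runs after the update is published; must not fail. */
static void arch_commit(const struct slot *new_slot)
{
    printf("committed slot @%lu (+%lu pages)\n",
           new_slot->base, new_slot->npages);
}

static int set_memory_region(struct slot *cur, const struct slot *new_slot)
{
    int r = arch_prepare(new_slot);
    if (r)
        return r;       /* old state untouched on failure */
    *cur = *new_slot;   /* "publish" the new slot array */
    arch_commit(cur);
    return 0;
}

int main(void)
{
    struct slot cur = { 0, 0 }, want = { 16, 8 };
    return set_memory_region(&cur, &want);
}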
@@ -1802,7 +1810,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_MEMORY_SLOTS)
goto out;
- memslot = &kvm->memslots[log->slot];
+ memslot = &kvm->memslots->memslots[log->slot];
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
@@ -1827,6 +1835,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_memory_slot *memslot;
int is_dirty = 0;
+ mutex_lock(&kvm->slots_lock);
spin_lock(&kvm->arch.dirty_log_lock);
r = kvm_ia64_sync_dirty_log(kvm, log);
@@ -1840,12 +1849,13 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
kvm_flush_remote_tlbs(kvm);
- memslot = &kvm->memslots[log->slot];
+ memslot = &kvm->memslots->memslots[log->slot];
n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
memset(memslot->dirty_bitmap, 0, n);
}
r = 0;
out:
+ mutex_unlock(&kvm->slots_lock);
spin_unlock(&kvm->arch.dirty_log_lock);
return r;
}
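
Both dirty-log paths clear the bitmap with n = ALIGN(npages, BITS_PER_LONG) / 8, i.e. one bit per page rounded up to a whole long. A quick stand-alone check of that arithmetic:

#include <stdio.h>
#include <string.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define ALIGN(x, a)   (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    unsigned long npages = 1000;                 /* pages in the slot */
    size_t n = ALIGN(npages, BITS_PER_LONG) / 8; /* bytes to clear    */

    unsigned char bitmap[1024];
    memset(bitmap, 0xff, sizeof(bitmap));        /* pretend all dirty */
    memset(bitmap, 0, n);                        /* the kernel's clear */

    printf("%lu pages -> %zu bitmap bytes\n", npages, n);
    return 0;
}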
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index e4b8231..cb548ee 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -75,7 +75,7 @@ static void set_pal_result(struct kvm_vcpu *vcpu,
struct exit_ctl_data *p;
p = kvm_get_exit_data(vcpu);
- if (p && p->exit_reason == EXIT_REASON_PAL_CALL) {
+ if (p->exit_reason == EXIT_REASON_PAL_CALL) {
p->u.pal_data.ret = result;
return ;
}
@@ -87,7 +87,7 @@ static void set_sal_result(struct kvm_vcpu *vcpu,
struct exit_ctl_data *p;
p = kvm_get_exit_data(vcpu);
- if (p && p->exit_reason == EXIT_REASON_SAL_CALL) {
+ if (p->exit_reason == EXIT_REASON_SAL_CALL) {
p->u.sal_data.ret = result;
return ;
}
@@ -322,7 +322,7 @@ static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
struct exit_ctl_data *p;
p = kvm_get_exit_data(vcpu);
- if (p && (p->exit_reason == EXIT_REASON_PAL_CALL))
+ if (p->exit_reason == EXIT_REASON_PAL_CALL)
index = p->u.pal_data.gr28;
return index;
@@ -646,18 +646,16 @@ static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1,
p = kvm_get_exit_data(vcpu);
- if (p) {
- if (p->exit_reason == EXIT_REASON_SAL_CALL) {
- *in0 = p->u.sal_data.in0;
- *in1 = p->u.sal_data.in1;
- *in2 = p->u.sal_data.in2;
- *in3 = p->u.sal_data.in3;
- *in4 = p->u.sal_data.in4;
- *in5 = p->u.sal_data.in5;
- *in6 = p->u.sal_data.in6;
- *in7 = p->u.sal_data.in7;
- return ;
- }
+ if (p->exit_reason == EXIT_REASON_SAL_CALL) {
+ *in0 = p->u.sal_data.in0;
+ *in1 = p->u.sal_data.in1;
+ *in2 = p->u.sal_data.in2;
+ *in3 = p->u.sal_data.in3;
+ *in4 = p->u.sal_data.in4;
+ *in5 = p->u.sal_data.in5;
+ *in6 = p->u.sal_data.in6;
+ *in7 = p->u.sal_data.in7;
+ return ;
}
*in0 = 0;
}
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index 9bf55af..fb8f9f5 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -316,8 +316,8 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
return;
} else {
inst_type = -1;
- panic_vm(vcpu, "Unsupported MMIO access instruction! \
- Bunld[0]=0x%lx, Bundle[1]=0x%lx\n",
+ panic_vm(vcpu, "Unsupported MMIO access instruction! "
+ "Bunld[0]=0x%lx, Bundle[1]=0x%lx\n",
bundle.i64[0], bundle.i64[1]);
}
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index dce75b70..958815c 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -1639,8 +1639,8 @@ void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
* Otherwise panic
*/
if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
- panic_vm(vcpu, "Only support guests with vpsr.pk =0 \
- & vpsr.is=0\n");
+ panic_vm(vcpu, "Only support guests with vpsr.pk =0 "
+ "& vpsr.is=0\n");
/*
* For those IA64_PSR bits: id/da/dd/ss/ed/ia
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 19c4b21..8d586d1 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -459,7 +459,7 @@ static void __init initialize_pernode_data(void)
cpu = 0;
node = node_cpuid[cpu].nid;
cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
- ((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
+ ((char *)&ia64_cpu_info - __per_cpu_start));
cpu0_cpu_info->node_data = mem_data[node].node_data;
}
#endif /* CONFIG_SMP */
diff --git a/arch/m32r/include/asm/local.h b/arch/m32r/include/asm/local.h
index 22256d1..734bca8 100644
--- a/arch/m32r/include/asm/local.h
+++ b/arch/m32r/include/asm/local.h
@@ -338,29 +338,4 @@ static inline void local_set_mask(unsigned long mask, local_t *addr)
* a variable, not an address.
*/
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non local way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
-#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* __M32R_LOCAL_H */
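
The deleted cpu_local_* helpers all reduced to the same shape: disable preemption, touch this CPU's local_t, re-enable. They go away because the generic this_cpu operations now cover the same ground. The statement-expression wrapper pattern, modeled stand-alone (GNU C; the preempt calls are stubs, not the kernel's):

#include <stdio.h>

typedef struct { long counter; } local_t;

static void preempt_disable(void) { /* would disable preemption */ }
static void preempt_enable(void)  { /* would re-enable it       */ }

/* Same shape as the removed cpu_local_wrap_v(): evaluate an expression
 * with preemption notionally off and hand back its value. */
#define cpu_local_wrap_v(expr)            \
    ({ long res__;                        \
       preempt_disable();                 \
       res__ = (expr);                    \
       preempt_enable();                  \
       res__; })

static local_t this_cpu_counter = { 41 };

int main(void)
{
    long v = cpu_local_wrap_v(++this_cpu_counter.counter);
    printf("counter is now %ld\n", v);   /* prints 42 */
    return 0;
}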
diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h
index 61abbd2..ec89f2a 100644
--- a/arch/microblaze/include/asm/entry.h
+++ b/arch/microblaze/include/asm/entry.h
@@ -21,7 +21,7 @@
* places
*/
-#define PER_CPU(var) per_cpu__##var
+#define PER_CPU(var) var
# ifndef __ASSEMBLY__
DECLARE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
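
These hunks track the tree-wide removal of the per_cpu__ symbol prefix: per-cpu variables are now linked under their plain names, so assembly macros and export sites drop the prefix. The token-pasting difference in miniature (the variable names below are hypothetical):

#include <stdio.h>

/* Old scheme: accessor macros pasted a per_cpu__ prefix on. */
#define PER_CPU_OLD(var) per_cpu__##var
/* New scheme: the symbol is just the variable name. */
#define PER_CPU_NEW(var) var

static int per_cpu__ticks = 1;   /* what the old linker symbol looked like */
static int ticks = 2;            /* what it looks like after the rename    */

int main(void)
{
    printf("old name resolves to %d, new name to %d\n",
           PER_CPU_OLD(ticks), PER_CPU_NEW(ticks));
    return 0;
}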
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 361f4f16..bdcdef0 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -193,29 +193,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
#define __local_add(i, l) ((l)->a.counter+=(i))
#define __local_sub(i, l) ((l)->a.counter-=(i))
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
-#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* _ARCH_MIPS_LOCAL_H */
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
index d172d42..f8c45cc 100644
--- a/arch/parisc/lib/fixup.S
+++ b/arch/parisc/lib/fixup.S
@@ -36,8 +36,8 @@
#endif
/* t2 = &__per_cpu_offset[smp_processor_id()]; */
LDREGX \t2(\t1),\t2
- addil LT%per_cpu__exception_data,%r27
- LDREG RT%per_cpu__exception_data(%r1),\t1
+ addil LT%exception_data,%r27
+ LDREG RT%exception_data(%r1),\t1
/* t1 = &__get_cpu_var(exception_data) */
add,l \t1,\t2,\t1
/* t1 = t1->fault_ip */
@@ -46,8 +46,8 @@
#else
.macro get_fault_ip t1 t2
/* t1 = &__get_cpu_var(exception_data) */
- addil LT%per_cpu__exception_data,%r27
- LDREG RT%per_cpu__exception_data(%r1),\t2
+ addil LT%exception_data,%r27
+ LDREG RT%exception_data(%r1),\t2
/* t1 = t2->fault_ip */
LDREG EXCDATA_IP(\t2), \t1
.endm
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index af2abe7..aadf2dd 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -97,4 +97,10 @@
#define RESUME_HOST RESUME_FLAG_HOST
#define RESUME_HOST_NV (RESUME_FLAG_HOST|RESUME_FLAG_NV)
+#define KVM_GUEST_MODE_NONE 0
+#define KVM_GUEST_MODE_GUEST 1
+#define KVM_GUEST_MODE_SKIP 2
+
+#define KVM_INST_FETCH_FAILED -1
+
#endif /* __POWERPC_KVM_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 74b7369..db7db0a 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -22,7 +22,7 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
-#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s_64_asm.h>
struct kvmppc_slb {
u64 esid;
@@ -33,7 +33,8 @@ struct kvmppc_slb {
bool Ks;
bool Kp;
bool nx;
- bool large;
+ bool large; /* PTEs are 16MB */
+ bool tb; /* 1TB segment */
bool class;
};
@@ -69,6 +70,7 @@ struct kvmppc_sid_map {
struct kvmppc_vcpu_book3s {
struct kvm_vcpu vcpu;
+ struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
struct kvmppc_sid_map sid_map[SID_MAP_NUM];
struct kvmppc_slb slb[64];
struct {
@@ -89,6 +91,7 @@ struct kvmppc_vcpu_book3s {
u64 vsid_next;
u64 vsid_max;
int context_id;
+ ulong prog_flags; /* flags to inject when giving a 700 trap */
};
#define CONTEXT_HOST 0
@@ -119,6 +122,10 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
extern u32 kvmppc_trampoline_lowmem;
extern u32 kvmppc_trampoline_enter;
+extern void kvmppc_rmcall(ulong srr0, ulong srr1);
+extern void kvmppc_load_up_fpu(void);
+extern void kvmppc_load_up_altivec(void);
+extern void kvmppc_load_up_vsx(void);
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
diff --git a/arch/powerpc/include/asm/kvm_book3s_64_asm.h b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
index 2e06ee8..183461b 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
@@ -20,6 +20,8 @@
#ifndef __ASM_KVM_BOOK3S_ASM_H__
#define __ASM_KVM_BOOK3S_ASM_H__
+#ifdef __ASSEMBLY__
+
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_asm.h>
@@ -55,4 +57,20 @@ kvmppc_resume_\intno:
#endif /* CONFIG_KVM_BOOK3S_64_HANDLER */
+#else /*__ASSEMBLY__ */
+
+struct kvmppc_book3s_shadow_vcpu {
+ ulong gpr[14];
+ u32 cr;
+ u32 xer;
+ ulong host_r1;
+ ulong host_r2;
+ ulong handler;
+ ulong scratch0;
+ ulong scratch1;
+ ulong vmhandler;
+};
+
+#endif /*__ASSEMBLY__ */
+
#endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
index 9d497ce..7fea26f 100644
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ b/arch/powerpc/include/asm/kvm_e500.h
@@ -52,9 +52,12 @@ struct kvmppc_vcpu_e500 {
u32 mas5;
u32 mas6;
u32 mas7;
+ u32 l1csr0;
u32 l1csr1;
u32 hid0;
u32 hid1;
+ u32 tlb0cfg;
+ u32 tlb1cfg;
struct kvm_vcpu vcpu;
};
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1201f62..5e5bae7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -167,23 +167,40 @@ struct kvm_vcpu_arch {
ulong trampoline_lowmem;
ulong trampoline_enter;
ulong highmem_handler;
+ ulong rmcall;
ulong host_paca_phys;
struct kvmppc_mmu mmu;
#endif
- u64 fpr[32];
ulong gpr[32];
+ u64 fpr[32];
+ u32 fpscr;
+
+#ifdef CONFIG_ALTIVEC
+ vector128 vr[32];
+ vector128 vscr;
+#endif
+
+#ifdef CONFIG_VSX
+ u64 vsr[32];
+#endif
+
ulong pc;
- u32 cr;
ulong ctr;
ulong lr;
+
+#ifdef CONFIG_BOOKE
ulong xer;
+ u32 cr;
+#endif
ulong msr;
#ifdef CONFIG_PPC64
ulong shadow_msr;
+ ulong shadow_srr1;
ulong hflags;
+ ulong guest_owned_ext;
#endif
u32 mmucr;
ulong sprg0;
@@ -242,6 +259,8 @@ struct kvm_vcpu_arch {
#endif
ulong fault_dear;
ulong fault_esr;
+ ulong queued_dear;
+ ulong queued_esr;
gpa_t paddr_accessed;
u8 io_gpr; /* GPR used as IO source/target */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 269ee46..e264282 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -28,6 +28,9 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/kvm_book3s.h>
+#endif
enum emulation_result {
EMULATE_DONE, /* no further processing */
@@ -80,8 +83,9 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq);
@@ -95,4 +99,81 @@ extern void kvmppc_booke_exit(void);
extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
+#ifdef CONFIG_PPC_BOOK3S
+
+/* We assume we're always acting on the current vcpu */
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+ if ( num < 14 ) {
+ get_paca()->shadow_vcpu.gpr[num] = val;
+ to_book3s(vcpu)->shadow_vcpu.gpr[num] = val;
+ } else
+ vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+ if ( num < 14 )
+ return get_paca()->shadow_vcpu.gpr[num];
+ else
+ return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+ get_paca()->shadow_vcpu.cr = val;
+ to_book3s(vcpu)->shadow_vcpu.cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+ return get_paca()->shadow_vcpu.cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+ get_paca()->shadow_vcpu.xer = val;
+ to_book3s(vcpu)->shadow_vcpu.xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+ return get_paca()->shadow_vcpu.xer;
+}
+
+#else
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+ vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+ return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.xer;
+}
+
+#endif
+
#endif /* __POWERPC_KVM_PPC_H__ */
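
On Book3S the first 14 GPRs live in the PACA's shadow vcpu, reachable from real-mode handlers with nothing but r13, while the rest stay in vcpu->arch; the accessors above hide that split from callers. A stand-alone model of the split-storage accessor idea (struct names are illustrative, and the real setter also mirrors into the book3s copy):

#include <stdio.h>

typedef unsigned long ulong;

struct shadow_model { ulong gpr[14]; };  /* fast area (the PACA copy) */
struct vcpu_model   {
    struct shadow_model shadow;
    ulong gpr[32];                       /* slow area (vcpu->arch)    */
};

static void set_gpr(struct vcpu_model *v, int num, ulong val)
{
    if (num < 14)
        v->shadow.gpr[num] = val;
    else
        v->gpr[num] = val;
}

static ulong get_gpr(struct vcpu_model *v, int num)
{
    return num < 14 ? v->shadow.gpr[num] : v->gpr[num];
}

int main(void)
{
    struct vcpu_model v = { 0 };
    set_gpr(&v, 3, 0xdead);    /* lands in the shadow area */
    set_gpr(&v, 20, 0xbeef);   /* lands in the arch array  */
    printf("r3=%#lx r20=%#lx\n", get_gpr(&v, 3), get_gpr(&v, 20));
    return 0;
}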
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index ce58c80..c2410af 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -172,29 +172,4 @@ static __inline__ long local_dec_if_positive(local_t *l)
#define __local_add(i,l) ((l)->a.counter+=(i))
#define __local_sub(i,l) ((l)->a.counter-=(i))
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
-#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* _ARCH_POWERPC_LOCAL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 5e9b4ef..d8a6931 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -19,6 +19,9 @@
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/exception-64e.h>
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64_asm.h>
+#endif
register struct paca_struct *local_paca asm("r13");
@@ -135,6 +138,8 @@ struct paca_struct {
u64 esid;
u64 vsid;
} kvm_slb[64]; /* guest SLB */
+ /* We use this to store guest state in */
+ struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
u8 kvm_slb_max; /* highest used guest slb entry */
u8 kvm_in_guest; /* are we inside the guest? */
#endif
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index bc8dd53..5572e86 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -426,6 +426,10 @@
#define SRR1_WAKEMT 0x00280000 /* mtctrl */
#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
+#define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */
+#define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */
+#define SRR1_PROGTRAP 0x00020000 /* Trap */
+#define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */
#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
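
The new SRR1_PROG* masks let the exit path forward the hardware's program-interrupt reason to the guest; book3s.c later extracts them with shadow_srr1 & 0x1f0000. Decoding the field with these masks, as a stand-alone check (the mask values are copied from above; the sample inputs are made up):

#include <stdio.h>

#define SRR1_PROGFPE   0x00100000UL  /* Floating Point Enabled        */
#define SRR1_PROGPRIV  0x00040000UL  /* Privileged instruction        */
#define SRR1_PROGTRAP  0x00020000UL  /* Trap                          */
#define SRR1_PROGADDR  0x00010000UL  /* SRR0 contains subsequent addr */

static void decode_prog_flags(unsigned long srr1)
{
    unsigned long flags = srr1 & 0x1f0000UL;  /* same extraction as book3s.c */

    printf("flags %#lx:%s%s%s%s\n", flags,
           (flags & SRR1_PROGFPE)  ? " fpe"  : "",
           (flags & SRR1_PROGPRIV) ? " priv" : "",
           (flags & SRR1_PROGTRAP) ? " trap" : "",
           (flags & SRR1_PROGADDR) ? " addr" : "");
}

int main(void)
{
    decode_prog_flags(0x00020000UL);             /* a trap-type program check */
    decode_prog_flags(0x00040000UL | 0x8000UL);  /* priv; low bits ignored    */
    return 0;
}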
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index a6c2b63..957ceb7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -194,6 +194,30 @@ int main(void)
DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest));
DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb));
DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max));
+ DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr));
+ DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer));
+ DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0]));
+ DEFINE(PACA_KVM_R1, offsetof(struct paca_struct, shadow_vcpu.gpr[1]));
+ DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2]));
+ DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3]));
+ DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4]));
+ DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5]));
+ DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6]));
+ DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7]));
+ DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8]));
+ DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9]));
+ DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10]));
+ DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11]));
+ DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12]));
+ DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13]));
+ DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1));
+ DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
+ DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
+ shadow_vcpu.vmhandler));
+ DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
+ shadow_vcpu.scratch0));
+ DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
+ shadow_vcpu.scratch1));
#endif
#endif /* CONFIG_PPC64 */
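
The new PACA_KVM_* constants come from the usual asm-offsets trick: a C file computes offsetof() values and emits them in a form assembly can consume, so the .S handlers can address shadow_vcpu fields off r13. The mechanism in miniature (structs simplified; the kernel's DEFINE() embeds the value in an asm comment rather than printing it):

#include <stdio.h>
#include <stddef.h>

struct shadow { unsigned long gpr[14]; unsigned int cr, xer; };
struct paca   { long pad[4]; struct shadow shadow_vcpu; };

/* Emit one "#define NAME value" line, like the kernel's DEFINE() macro. */
#define DEFINE(sym, val) printf("#define %-20s %zu\n", #sym, (size_t)(val))

int main(void)
{
    DEFINE(PACA_KVM_CR,  offsetof(struct paca, shadow_vcpu.cr));
    DEFINE(PACA_KVM_XER, offsetof(struct paca, shadow_vcpu.xer));
    DEFINE(PACA_KVM_R0,  offsetof(struct paca, shadow_vcpu.gpr[0]));
    return 0;
}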
@@ -389,8 +413,6 @@ int main(void)
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
- DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
- DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
@@ -411,11 +433,16 @@ int main(void)
DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2));
DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
+ DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
+ DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
-#endif
+#else
+ DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+ DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+#endif /* CONFIG_PPC64 */
#endif
#ifdef CONFIG_44x
DEFINE(PGD_T_LOG2, PGD_T_LOG2);
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 42545145..ab3e392 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -107,6 +107,7 @@ EXPORT_SYMBOL(giveup_altivec);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
EXPORT_SYMBOL(giveup_vsx);
+EXPORT_SYMBOL_GPL(__giveup_vsx);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
EXPORT_SYMBOL(giveup_spe);
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 61af58f..65ea083 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -65,13 +65,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
switch (dcrn) {
case DCRN_CPR0_CONFIG_ADDR:
- vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
break;
case DCRN_CPR0_CONFIG_DATA:
local_irq_disable();
mtdcr(DCRN_CPR0_CONFIG_ADDR,
vcpu->arch.cpr0_cfgaddr);
- vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
+ kvmppc_set_gpr(vcpu, rt,
+ mfdcr(DCRN_CPR0_CONFIG_DATA));
local_irq_enable();
break;
default:
@@ -93,11 +94,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* emulate some access in kernel */
switch (dcrn) {
case DCRN_CPR0_CONFIG_ADDR:
- vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
+ vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
break;
default:
run->dcr.dcrn = dcrn;
- run->dcr.data = vcpu->arch.gpr[rs];
+ run->dcr.data = kvmppc_get_gpr(vcpu, rs);
run->dcr.is_write = 1;
vcpu->arch.dcr_needed = 1;
kvmppc_account_exit(vcpu, DCR_EXITS);
@@ -146,13 +147,13 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
switch (sprn) {
case SPRN_PID:
- kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
+ kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
case SPRN_MMUCR:
- vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
case SPRN_CCR0:
- vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
case SPRN_CCR1:
- vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
default:
emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
}
@@ -167,13 +168,13 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
switch (sprn) {
case SPRN_PID:
- vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break;
case SPRN_MMUCR:
- vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break;
case SPRN_CCR0:
- vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break;
case SPRN_CCR1:
- vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break;
default:
emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
}
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index ff3cb63..2570fcc 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -439,7 +439,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
struct kvmppc_44x_tlbe *tlbe;
unsigned int gtlb_index;
- gtlb_index = vcpu->arch.gpr[ra];
+ gtlb_index = kvmppc_get_gpr(vcpu, ra);
if (gtlb_index > KVM44x_GUEST_TLB_SIZE) {
printk("%s: index %d\n", __func__, gtlb_index);
kvmppc_dump_vcpu(vcpu);
@@ -455,15 +455,15 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
switch (ws) {
case PPC44x_TLB_PAGEID:
tlbe->tid = get_mmucr_stid(vcpu);
- tlbe->word0 = vcpu->arch.gpr[rs];
+ tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
break;
case PPC44x_TLB_XLAT:
- tlbe->word1 = vcpu->arch.gpr[rs];
+ tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
break;
case PPC44x_TLB_ATTRIB:
- tlbe->word2 = vcpu->arch.gpr[rs];
+ tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
break;
default:
@@ -500,18 +500,20 @@ int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
unsigned int as = get_mmucr_sts(vcpu);
unsigned int pid = get_mmucr_stid(vcpu);
- ea = vcpu->arch.gpr[rb];
+ ea = kvmppc_get_gpr(vcpu, rb);
if (ra)
- ea += vcpu->arch.gpr[ra];
+ ea += kvmppc_get_gpr(vcpu, ra);
gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
if (rc) {
+ u32 cr = kvmppc_get_cr(vcpu);
+
if (gtlb_index < 0)
- vcpu->arch.cr &= ~0x20000000;
+ kvmppc_set_cr(vcpu, cr & ~0x20000000);
else
- vcpu->arch.cr |= 0x20000000;
+ kvmppc_set_cr(vcpu, cr | 0x20000000);
}
- vcpu->arch.gpr[rt] = gtlb_index;
+ kvmppc_set_gpr(vcpu, rt, gtlb_index);
kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
return EMULATE_DONE;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index fe037fd..60624cc 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -20,6 +20,7 @@ config KVM
bool
select PREEMPT_NOTIFIERS
select ANON_INODES
+ select KVM_MMIO
config KVM_BOOK3S_64_HANDLER
bool
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 3e294bd..9a271f0 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -33,12 +33,9 @@
/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
+/* #define DEBUG_EXT */
-/* Without AGGRESSIVE_DEC we only fire off a DEC interrupt when DEC turns 0.
- * When set, we retrigger a DEC interrupt after that if DEC <= 0.
- * PPC32 Linux runs faster without AGGRESSIVE_DEC, PPC64 Linux requires it. */
-
-/* #define AGGRESSIVE_DEC */
+static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "exits", VCPU_STAT(sum_exits) },
@@ -72,16 +69,24 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
+ memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
+ sizeof(get_paca()->shadow_vcpu));
get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
+ memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+ sizeof(get_paca()->shadow_vcpu));
to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
+
+ kvmppc_giveup_ext(vcpu, MSR_FP);
+ kvmppc_giveup_ext(vcpu, MSR_VEC);
+ kvmppc_giveup_ext(vcpu, MSR_VSX);
}
-#if defined(AGGRESSIVE_DEC) || defined(EXIT_DEBUG)
+#if defined(EXIT_DEBUG)
static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
{
u64 jd = mftb() - vcpu->arch.dec_jiffies;
@@ -89,6 +94,23 @@ static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
}
#endif
+static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.shadow_msr = vcpu->arch.msr;
+ /* Guest MSR values */
+ vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
+ MSR_BE | MSR_DE;
+ /* Process MSR values */
+ vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
+ MSR_EE;
+ /* External providers (FPU/Altivec/VSX) the guest owns */
+ vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
+ /* 64-bit Process MSR values */
+#ifdef CONFIG_PPC_BOOK3S_64
+ vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
+#endif
+}
+
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
ulong old_msr = vcpu->arch.msr;
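
kvmppc_recalc_shadow_msr() composes the MSR the hardware actually runs with: a guest-controlled mask, host-mandated bits, plus whichever extension bits (FP/VEC/VSX) the guest currently owns. The mask arithmetic as a runnable check; the bit assignments below are invented for illustration, the real MSR_* values differ.

#include <stdio.h>

typedef unsigned long ulong;

/* Illustrative bit assignments only; not the real PowerPC MSR layout. */
enum {
    MSR_FP = 1 << 0, MSR_VEC = 1 << 1, MSR_VSX = 1 << 2,  /* ext providers */
    MSR_SE = 1 << 3, MSR_BE  = 1 << 4,                    /* guest-visible */
    MSR_ME = 1 << 5, MSR_IR  = 1 << 6, MSR_DR  = 1 << 7,
    MSR_PR = 1 << 8, MSR_EE  = 1 << 9,                    /* host-mandated */
};

static ulong recalc_shadow_msr(ulong guest_msr, ulong guest_owned_ext)
{
    ulong shadow = guest_msr;

    shadow &= MSR_SE | MSR_BE;                            /* guest-controlled  */
    shadow |= MSR_ME | MSR_IR | MSR_DR | MSR_PR | MSR_EE; /* always-on bits    */
    shadow |= guest_msr & guest_owned_ext;                /* owned ext bits    */
    return shadow;
}

int main(void)
{
    /* Guest wants FP and single-step; it currently owns only FP. */
    ulong s = recalc_shadow_msr(MSR_FP | MSR_SE, MSR_FP);
    printf("shadow msr = %#lx (FP %s, VEC %s)\n", s,
           (s & MSR_FP)  ? "on" : "off",
           (s & MSR_VEC) ? "on" : "off");
    return 0;
}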
@@ -96,12 +118,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
#ifdef EXIT_DEBUG
printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif
+
msr &= to_book3s(vcpu)->msr_mask;
vcpu->arch.msr = msr;
- vcpu->arch.shadow_msr = msr | MSR_USER32;
- vcpu->arch.shadow_msr &= ( MSR_VEC | MSR_VSX | MSR_FP | MSR_FE0 |
- MSR_USER64 | MSR_SE | MSR_BE | MSR_DE |
- MSR_FE1);
+ kvmppc_recalc_shadow_msr(vcpu);
if (msr & (MSR_WE|MSR_POW)) {
if (!vcpu->arch.pending_exceptions) {
@@ -125,11 +145,10 @@ void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
vcpu->arch.mmu.reset_msr(vcpu);
}
-void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
+static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
unsigned int prio;
- vcpu->stat.queue_intr++;
switch (vec) {
case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break;
case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
@@ -149,15 +168,31 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
default: prio = BOOK3S_IRQPRIO_MAX; break;
}
- set_bit(prio, &vcpu->arch.pending_exceptions);
+ return prio;
+}
+
+static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
+ unsigned int vec)
+{
+ clear_bit(kvmppc_book3s_vec2irqprio(vec),
+ &vcpu->arch.pending_exceptions);
+}
+
+void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
+{
+ vcpu->stat.queue_intr++;
+
+ set_bit(kvmppc_book3s_vec2irqprio(vec),
+ &vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
-void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
+ to_book3s(vcpu)->prog_flags = flags;
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
}
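
Factoring out kvmppc_book3s_vec2irqprio() lets one vector-to-priority map serve queueing, dequeueing and delivery, with pending interrupts tracked as bits in a single word. A compact model of that map-plus-bitmask pattern; the vector and priority values below are invented:

#include <stdio.h>

enum { PRIO_MC, PRIO_DEC, PRIO_EXT, PRIO_MAX };  /* invented priorities */

static int vec2irqprio(unsigned int vec)
{
    switch (vec) {
    case 0x200: return PRIO_MC;
    case 0x500: return PRIO_EXT;
    case 0x900: return PRIO_DEC;
    default:    return PRIO_MAX;
    }
}

static unsigned long pending;

static void queue_irqprio(unsigned int vec)
{
    pending |= 1UL << vec2irqprio(vec);
}

static void dequeue_irqprio(unsigned int vec)
{
    pending &= ~(1UL << vec2irqprio(vec));
}

int main(void)
{
    queue_irqprio(0x900);        /* decrementer */
    queue_irqprio(0x500);        /* external    */
    dequeue_irqprio(0x900);      /* mtdec-style clear */
    printf("pending mask: %#lx\n", pending);  /* only PRIO_EXT left */
    return 0;
}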
@@ -171,6 +206,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions);
}
+void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
+{
+ kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
+}
+
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
@@ -181,6 +221,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
int deliver = 1;
int vec = 0;
+ ulong flags = 0ULL;
switch (priority) {
case BOOK3S_IRQPRIO_DECREMENTER:
@@ -214,6 +255,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
break;
case BOOK3S_IRQPRIO_PROGRAM:
vec = BOOK3S_INTERRUPT_PROGRAM;
+ flags = to_book3s(vcpu)->prog_flags;
break;
case BOOK3S_IRQPRIO_VSX:
vec = BOOK3S_INTERRUPT_VSX;
@@ -244,7 +286,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
#endif
if (deliver)
- kvmppc_inject_interrupt(vcpu, vec, 0ULL);
+ kvmppc_inject_interrupt(vcpu, vec, flags);
return deliver;
}
@@ -254,21 +296,15 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
unsigned long *pending = &vcpu->arch.pending_exceptions;
unsigned int priority;
- /* XXX be more clever here - no need to mftb() on every entry */
- /* Issue DEC again if it's still active */
-#ifdef AGGRESSIVE_DEC
- if (vcpu->arch.msr & MSR_EE)
- if (kvmppc_get_dec(vcpu) & 0x80000000)
- kvmppc_core_queue_dec(vcpu);
-#endif
-
#ifdef EXIT_DEBUG
if (vcpu->arch.pending_exceptions)
printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
priority = __ffs(*pending);
while (priority <= (sizeof(unsigned int) * 8)) {
- if (kvmppc_book3s_irqprio_deliver(vcpu, priority)) {
+ if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
+ (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
+ /* DEC interrupts get cleared by mtdec */
clear_bit(priority, &vcpu->arch.pending_exceptions);
break;
}
@@ -503,14 +539,14 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Page not found in guest PTE entries */
vcpu->arch.dear = vcpu->arch.fault_dear;
to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
- vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+ vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EPERM) {
/* Storage protection */
vcpu->arch.dear = vcpu->arch.fault_dear;
to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
- vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+ vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EINVAL) {
/* Page not found in guest SLB */
@@ -532,13 +568,122 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = kvmppc_emulate_mmio(run, vcpu);
if ( r == RESUME_HOST_NV )
r = RESUME_HOST;
- if ( r == RESUME_GUEST_NV )
- r = RESUME_GUEST;
}
return r;
}
+static inline int get_fpr_index(int i)
+{
+#ifdef CONFIG_VSX
+ i *= 2;
+#endif
+ return i;
+}
+
+/* Give up external provider (FPU, Altivec, VSX) */
+static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
+{
+ struct thread_struct *t = &current->thread;
+ u64 *vcpu_fpr = vcpu->arch.fpr;
+ u64 *vcpu_vsx = vcpu->arch.vsr;
+ u64 *thread_fpr = (u64*)t->fpr;
+ int i;
+
+ if (!(vcpu->arch.guest_owned_ext & msr))
+ return;
+
+#ifdef DEBUG_EXT
+ printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
+#endif
+
+ switch (msr) {
+ case MSR_FP:
+ giveup_fpu(current);
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+ vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
+
+ vcpu->arch.fpscr = t->fpscr.val;
+ break;
+ case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+ giveup_altivec(current);
+ memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
+ vcpu->arch.vscr = t->vscr;
+#endif
+ break;
+ case MSR_VSX:
+#ifdef CONFIG_VSX
+ __giveup_vsx(current);
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+ vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
+#endif
+ break;
+ default:
+ BUG();
+ }
+
+ vcpu->arch.guest_owned_ext &= ~msr;
+ current->thread.regs->msr &= ~msr;
+ kvmppc_recalc_shadow_msr(vcpu);
+}
+
+/* Handle external providers (FPU, Altivec, VSX) */
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+ ulong msr)
+{
+ struct thread_struct *t = &current->thread;
+ u64 *vcpu_fpr = vcpu->arch.fpr;
+ u64 *vcpu_vsx = vcpu->arch.vsr;
+ u64 *thread_fpr = (u64*)t->fpr;
+ int i;
+
+ if (!(vcpu->arch.msr & msr)) {
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ return RESUME_GUEST;
+ }
+
+#ifdef DEBUG_EXT
+ printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
+#endif
+
+ current->thread.regs->msr |= msr;
+
+ switch (msr) {
+ case MSR_FP:
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+ thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
+
+ t->fpscr.val = vcpu->arch.fpscr;
+ t->fpexc_mode = 0;
+ kvmppc_load_up_fpu();
+ break;
+ case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+ memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+ t->vscr = vcpu->arch.vscr;
+ t->vrsave = -1;
+ kvmppc_load_up_altivec();
+#endif
+ break;
+ case MSR_VSX:
+#ifdef CONFIG_VSX
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+ thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
+ kvmppc_load_up_vsx();
+#endif
+ break;
+ default:
+ BUG();
+ }
+
+ vcpu->arch.guest_owned_ext |= msr;
+
+ kvmppc_recalc_shadow_msr(vcpu);
+
+ return RESUME_GUEST;
+}
+
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int exit_nr)
{
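
The giveup/handle pair above implements lazy FPU/Altivec/VSX switching: a facility is only copied in when the guest first faults on it, and guest_owned_ext remembers which units must be flushed back out on vcpu_put or exit. The ownership bookkeeping in miniature, with the register copying reduced to printouts:

#include <stdio.h>

enum { EXT_FP = 1, EXT_VEC = 2, EXT_VSX = 4 };

static unsigned guest_owned_ext;

/* Guest touched a unit it doesn't own yet: load it and mark it owned. */
static void handle_ext(unsigned ext)
{
    if (guest_owned_ext & ext)
        return;                   /* already loaded: nothing to do */
    printf("loading unit %#x into the guest\n", ext);
    guest_owned_ext |= ext;
}

/* Flush a unit's state back to the vcpu and drop ownership. */
static void giveup_ext(unsigned ext)
{
    if (!(guest_owned_ext & ext))
        return;                   /* never owned: nothing to save */
    printf("saving unit %#x back to the vcpu\n", ext);
    guest_owned_ext &= ~ext;
}

int main(void)
{
    handle_ext(EXT_FP);     /* first FP fault: load   */
    handle_ext(EXT_FP);     /* second fault: no-op    */
    giveup_ext(EXT_FP);     /* vcpu_put-style flush   */
    giveup_ext(EXT_VEC);    /* never owned: no-op     */
    return 0;
}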
@@ -563,7 +708,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_INST_STORAGE:
vcpu->stat.pf_instruc++;
/* only care about PTEG not found errors, but leave NX alone */
- if (vcpu->arch.shadow_msr & 0x40000000) {
+ if (vcpu->arch.shadow_srr1 & 0x40000000) {
r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
vcpu->stat.sp_instruc++;
} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -575,7 +720,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
} else {
- vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000);
+ vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000;
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
r = RESUME_GUEST;
@@ -621,6 +766,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_PROGRAM:
{
enum emulation_result er;
+ ulong flags;
+
+ flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
if (vcpu->arch.msr & MSR_PR) {
#ifdef EXIT_DEBUG
@@ -628,7 +776,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
#endif
if ((vcpu->arch.last_inst & 0xff0007ff) !=
(INS_DCBZ & 0xfffffff7)) {
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ kvmppc_core_queue_program(vcpu, flags);
r = RESUME_GUEST;
break;
}
@@ -638,12 +786,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
er = kvmppc_emulate_instruction(run, vcpu);
switch (er) {
case EMULATE_DONE:
- r = RESUME_GUEST;
+ r = RESUME_GUEST_NV;
break;
case EMULATE_FAIL:
printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
__func__, vcpu->arch.pc, vcpu->arch.last_inst);
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ kvmppc_core_queue_program(vcpu, flags);
r = RESUME_GUEST;
break;
default:
@@ -653,23 +801,30 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
case BOOK3S_INTERRUPT_SYSCALL:
#ifdef EXIT_DEBUG
- printk(KERN_INFO "Syscall Nr %d\n", (int)vcpu->arch.gpr[0]);
+ printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
#endif
vcpu->stat.syscall_exits++;
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
r = RESUME_GUEST;
break;
- case BOOK3S_INTERRUPT_MACHINE_CHECK:
case BOOK3S_INTERRUPT_FP_UNAVAIL:
- case BOOK3S_INTERRUPT_TRACE:
+ r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP);
+ break;
case BOOK3S_INTERRUPT_ALTIVEC:
+ r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC);
+ break;
case BOOK3S_INTERRUPT_VSX:
+ r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX);
+ break;
+ case BOOK3S_INTERRUPT_MACHINE_CHECK:
+ case BOOK3S_INTERRUPT_TRACE:
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
r = RESUME_GUEST;
break;
default:
/* Ugh - bork here! What did we get? */
- printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr);
+ printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
+ exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1);
r = RESUME_HOST;
BUG();
break;
@@ -712,10 +867,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
int i;
regs->pc = vcpu->arch.pc;
- regs->cr = vcpu->arch.cr;
+ regs->cr = kvmppc_get_cr(vcpu);
regs->ctr = vcpu->arch.ctr;
regs->lr = vcpu->arch.lr;
- regs->xer = vcpu->arch.xer;
+ regs->xer = kvmppc_get_xer(vcpu);
regs->msr = vcpu->arch.msr;
regs->srr0 = vcpu->arch.srr0;
regs->srr1 = vcpu->arch.srr1;
@@ -729,7 +884,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->sprg7 = vcpu->arch.sprg6;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
- regs->gpr[i] = vcpu->arch.gpr[i];
+ regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
return 0;
}
@@ -739,10 +894,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
int i;
vcpu->arch.pc = regs->pc;
- vcpu->arch.cr = regs->cr;
+ kvmppc_set_cr(vcpu, regs->cr);
vcpu->arch.ctr = regs->ctr;
vcpu->arch.lr = regs->lr;
- vcpu->arch.xer = regs->xer;
+ kvmppc_set_xer(vcpu, regs->xer);
kvmppc_set_msr(vcpu, regs->msr);
vcpu->arch.srr0 = regs->srr0;
vcpu->arch.srr1 = regs->srr1;
@@ -754,8 +909,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.sprg6 = regs->sprg5;
vcpu->arch.sprg7 = regs->sprg6;
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
- vcpu->arch.gpr[i] = regs->gpr[i];
+ for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+ kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
return 0;
}
@@ -850,7 +1005,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
int is_dirty = 0;
int r, n;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
r = kvm_get_dirty_log(kvm, log, &is_dirty);
if (r)
@@ -858,7 +1013,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
- memslot = &kvm->memslots[log->slot];
+ memslot = &kvm->memslots->memslots[log->slot];
ga = memslot->base_gfn << PAGE_SHIFT;
ga_end = ga + (memslot->npages << PAGE_SHIFT);
@@ -872,7 +1027,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
r = 0;
out:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}
@@ -910,6 +1065,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+ vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
vcpu->arch.shadow_msr = MSR_USER64;
@@ -943,6 +1099,10 @@ extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int ret;
+ struct thread_struct ext_bkp;
+ bool save_vec = current->thread.used_vr;
+ bool save_vsx = current->thread.used_vsr;
+ ulong ext_msr;
/* No need to go into the guest when all we do is going out */
if (signal_pending(current)) {
@@ -950,6 +1110,35 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
return -EINTR;
}
+ /* Save FPU state in stack */
+ if (current->thread.regs->msr & MSR_FP)
+ giveup_fpu(current);
+ memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr));
+ ext_bkp.fpscr = current->thread.fpscr;
+ ext_bkp.fpexc_mode = current->thread.fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+ /* Save Altivec state in stack */
+ if (save_vec) {
+ if (current->thread.regs->msr & MSR_VEC)
+ giveup_altivec(current);
+ memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr));
+ ext_bkp.vscr = current->thread.vscr;
+ ext_bkp.vrsave = current->thread.vrsave;
+ }
+ ext_bkp.used_vr = current->thread.used_vr;
+#endif
+
+#ifdef CONFIG_VSX
+ /* Save VSX state in stack */
+ if (save_vsx && (current->thread.regs->msr & MSR_VSX))
+ __giveup_vsx(current);
+ ext_bkp.used_vsr = current->thread.used_vsr;
+#endif
+
+ /* Remember the MSR with disabled extensions */
+ ext_msr = current->thread.regs->msr;
+
/* XXX we get called with irq disabled - change that! */
local_irq_enable();
@@ -957,6 +1146,32 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
local_irq_disable();
+ current->thread.regs->msr = ext_msr;
+
+ /* Make sure we save the guest FPU/Altivec/VSX state */
+ kvmppc_giveup_ext(vcpu, MSR_FP);
+ kvmppc_giveup_ext(vcpu, MSR_VEC);
+ kvmppc_giveup_ext(vcpu, MSR_VSX);
+
+ /* Restore FPU state from stack */
+ memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr));
+ current->thread.fpscr = ext_bkp.fpscr;
+ current->thread.fpexc_mode = ext_bkp.fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+ /* Restore Altivec state from stack */
+ if (save_vec && current->thread.used_vr) {
+ memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr));
+ current->thread.vscr = ext_bkp.vscr;
+ current->thread.vrsave = ext_bkp.vrsave;
+ }
+ current->thread.used_vr = ext_bkp.used_vr;
+#endif
+
+#ifdef CONFIG_VSX
+ current->thread.used_vsr = ext_bkp.used_vsr;
+#endif
+
return ret;
}
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c
index 1027eac..2b0ee7e 100644
--- a/arch/powerpc/kvm/book3s_64_emulate.c
+++ b/arch/powerpc/kvm/book3s_64_emulate.c
@@ -65,11 +65,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
case 31:
switch (get_xop(inst)) {
case OP_31_XOP_MFMSR:
- vcpu->arch.gpr[get_rt(inst)] = vcpu->arch.msr;
+ kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr);
break;
case OP_31_XOP_MTMSRD:
{
- ulong rs = vcpu->arch.gpr[get_rs(inst)];
+ ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
if (inst & 0x10000) {
vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
@@ -78,30 +78,30 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
}
case OP_31_XOP_MTMSR:
- kvmppc_set_msr(vcpu, vcpu->arch.gpr[get_rs(inst)]);
+ kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
break;
case OP_31_XOP_MFSRIN:
{
int srnum;
- srnum = (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf;
+ srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
if (vcpu->arch.mmu.mfsrin) {
u32 sr;
sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
- vcpu->arch.gpr[get_rt(inst)] = sr;
+ kvmppc_set_gpr(vcpu, get_rt(inst), sr);
}
break;
}
case OP_31_XOP_MTSRIN:
vcpu->arch.mmu.mtsrin(vcpu,
- (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf,
- vcpu->arch.gpr[get_rs(inst)]);
+ (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
+ kvmppc_get_gpr(vcpu, get_rs(inst)));
break;
case OP_31_XOP_TLBIE:
case OP_31_XOP_TLBIEL:
{
bool large = (inst & 0x00200000) ? true : false;
- ulong addr = vcpu->arch.gpr[get_rb(inst)];
+ ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
vcpu->arch.mmu.tlbie(vcpu, addr, large);
break;
}
@@ -111,14 +111,16 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (!vcpu->arch.mmu.slbmte)
return EMULATE_FAIL;
- vcpu->arch.mmu.slbmte(vcpu, vcpu->arch.gpr[get_rs(inst)],
- vcpu->arch.gpr[get_rb(inst)]);
+ vcpu->arch.mmu.slbmte(vcpu,
+ kvmppc_get_gpr(vcpu, get_rs(inst)),
+ kvmppc_get_gpr(vcpu, get_rb(inst)));
break;
case OP_31_XOP_SLBIE:
if (!vcpu->arch.mmu.slbie)
return EMULATE_FAIL;
- vcpu->arch.mmu.slbie(vcpu, vcpu->arch.gpr[get_rb(inst)]);
+ vcpu->arch.mmu.slbie(vcpu,
+ kvmppc_get_gpr(vcpu, get_rb(inst)));
break;
case OP_31_XOP_SLBIA:
if (!vcpu->arch.mmu.slbia)
@@ -132,9 +134,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
} else {
ulong t, rb;
- rb = vcpu->arch.gpr[get_rb(inst)];
+ rb = kvmppc_get_gpr(vcpu, get_rb(inst));
t = vcpu->arch.mmu.slbmfee(vcpu, rb);
- vcpu->arch.gpr[get_rt(inst)] = t;
+ kvmppc_set_gpr(vcpu, get_rt(inst), t);
}
break;
case OP_31_XOP_SLBMFEV:
@@ -143,20 +145,20 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
} else {
ulong t, rb;
- rb = vcpu->arch.gpr[get_rb(inst)];
+ rb = kvmppc_get_gpr(vcpu, get_rb(inst));
t = vcpu->arch.mmu.slbmfev(vcpu, rb);
- vcpu->arch.gpr[get_rt(inst)] = t;
+ kvmppc_set_gpr(vcpu, get_rt(inst), t);
}
break;
case OP_31_XOP_DCBZ:
{
- ulong rb = vcpu->arch.gpr[get_rb(inst)];
+ ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
ulong ra = 0;
ulong addr;
u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
if (get_ra(inst))
- ra = vcpu->arch.gpr[get_ra(inst)];
+ ra = kvmppc_get_gpr(vcpu, get_ra(inst));
addr = (ra + rb) & ~31ULL;
if (!(vcpu->arch.msr & MSR_SF))
@@ -233,43 +235,44 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
int emulated = EMULATE_DONE;
+ ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_SDR1:
- to_book3s(vcpu)->sdr1 = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->sdr1 = spr_val;
break;
case SPRN_DSISR:
- to_book3s(vcpu)->dsisr = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->dsisr = spr_val;
break;
case SPRN_DAR:
- vcpu->arch.dear = vcpu->arch.gpr[rs];
+ vcpu->arch.dear = spr_val;
break;
case SPRN_HIOR:
- to_book3s(vcpu)->hior = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->hior = spr_val;
break;
case SPRN_IBAT0U ... SPRN_IBAT3L:
case SPRN_IBAT4U ... SPRN_IBAT7L:
case SPRN_DBAT0U ... SPRN_DBAT3L:
case SPRN_DBAT4U ... SPRN_DBAT7L:
- kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]);
+ kvmppc_write_bat(vcpu, sprn, (u32)spr_val);
/* BAT writes happen so rarely that we're ok to flush
* everything here */
kvmppc_mmu_pte_flush(vcpu, 0, 0);
break;
case SPRN_HID0:
- to_book3s(vcpu)->hid[0] = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->hid[0] = spr_val;
break;
case SPRN_HID1:
- to_book3s(vcpu)->hid[1] = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->hid[1] = spr_val;
break;
case SPRN_HID2:
- to_book3s(vcpu)->hid[2] = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->hid[2] = spr_val;
break;
case SPRN_HID4:
- to_book3s(vcpu)->hid[4] = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->hid[4] = spr_val;
break;
case SPRN_HID5:
- to_book3s(vcpu)->hid[5] = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->hid[5] = spr_val;
/* guest HID5 set can change is_dcbz32 */
if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
(mfmsr() & MSR_HV))
@@ -299,38 +302,38 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
switch (sprn) {
case SPRN_SDR1:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->sdr1;
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
break;
case SPRN_DSISR:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->dsisr;
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr);
break;
case SPRN_DAR:
- vcpu->arch.gpr[rt] = vcpu->arch.dear;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear);
break;
case SPRN_HIOR:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->hior;
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
break;
case SPRN_HID0:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[0];
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
break;
case SPRN_HID1:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[1];
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
break;
case SPRN_HID2:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[2];
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
break;
case SPRN_HID4:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[4];
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
break;
case SPRN_HID5:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[5];
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
break;
case SPRN_THRM1:
case SPRN_THRM2:
case SPRN_THRM3:
case SPRN_CTRLF:
case SPRN_CTRLT:
- vcpu->arch.gpr[rt] = 0;
+ kvmppc_set_gpr(vcpu, rt, 0);
break;
default:
printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_64_exports.c
index 5b2db38..1dd5a1d 100644
--- a/arch/powerpc/kvm/book3s_64_exports.c
+++ b/arch/powerpc/kvm/book3s_64_exports.c
@@ -22,3 +22,11 @@
EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter);
EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem);
+EXPORT_SYMBOL_GPL(kvmppc_rmcall);
+EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
+#ifdef CONFIG_ALTIVEC
+EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
+#endif
+#ifdef CONFIG_VSX
+EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx);
+#endif
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S
index 7b55d80..c1584d0 100644
--- a/arch/powerpc/kvm/book3s_64_interrupts.S
+++ b/arch/powerpc/kvm/book3s_64_interrupts.S
@@ -28,11 +28,6 @@
#define ULONG_SIZE 8
#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
-.macro mfpaca tmp_reg, src_reg, offset, vcpu_reg
- ld \tmp_reg, (PACA_EXMC+\offset)(r13)
- std \tmp_reg, VCPU_GPR(\src_reg)(\vcpu_reg)
-.endm
-
.macro DISABLE_INTERRUPTS
mfmsr r0
rldicl r0,r0,48,1
@@ -40,6 +35,26 @@
mtmsrd r0,1
.endm
+#define VCPU_LOAD_NVGPRS(vcpu) \
+ ld r14, VCPU_GPR(r14)(vcpu); \
+ ld r15, VCPU_GPR(r15)(vcpu); \
+ ld r16, VCPU_GPR(r16)(vcpu); \
+ ld r17, VCPU_GPR(r17)(vcpu); \
+ ld r18, VCPU_GPR(r18)(vcpu); \
+ ld r19, VCPU_GPR(r19)(vcpu); \
+ ld r20, VCPU_GPR(r20)(vcpu); \
+ ld r21, VCPU_GPR(r21)(vcpu); \
+ ld r22, VCPU_GPR(r22)(vcpu); \
+ ld r23, VCPU_GPR(r23)(vcpu); \
+ ld r24, VCPU_GPR(r24)(vcpu); \
+ ld r25, VCPU_GPR(r25)(vcpu); \
+ ld r26, VCPU_GPR(r26)(vcpu); \
+ ld r27, VCPU_GPR(r27)(vcpu); \
+ ld r28, VCPU_GPR(r28)(vcpu); \
+ ld r29, VCPU_GPR(r29)(vcpu); \
+ ld r30, VCPU_GPR(r30)(vcpu); \
+ ld r31, VCPU_GPR(r31)(vcpu); \
+
/*****************************************************************************
* *
* Guest entry / exit code that is in kernel module memory (highmem) *
@@ -67,61 +82,32 @@ kvm_start_entry:
SAVE_NVGPRS(r1)
/* Save LR */
- mflr r14
- std r14, _LINK(r1)
-
-/* XXX optimize non-volatile loading away */
-kvm_start_lightweight:
+ std r0, _LINK(r1)
- DISABLE_INTERRUPTS
+ /* Load non-volatile guest state from the vcpu */
+ VCPU_LOAD_NVGPRS(r4)
/* Save R1/R2 in the PACA */
- std r1, PACAR1(r13)
- std r2, (PACA_EXMC+EX_SRR0)(r13)
+ std r1, PACA_KVM_HOST_R1(r13)
+ std r2, PACA_KVM_HOST_R2(r13)
+
+ /* XXX swap in/out on load? */
ld r3, VCPU_HIGHMEM_HANDLER(r4)
- std r3, PACASAVEDMSR(r13)
+ std r3, PACA_KVM_VMHANDLER(r13)
- /* Load non-volatile guest state from the vcpu */
- ld r14, VCPU_GPR(r14)(r4)
- ld r15, VCPU_GPR(r15)(r4)
- ld r16, VCPU_GPR(r16)(r4)
- ld r17, VCPU_GPR(r17)(r4)
- ld r18, VCPU_GPR(r18)(r4)
- ld r19, VCPU_GPR(r19)(r4)
- ld r20, VCPU_GPR(r20)(r4)
- ld r21, VCPU_GPR(r21)(r4)
- ld r22, VCPU_GPR(r22)(r4)
- ld r23, VCPU_GPR(r23)(r4)
- ld r24, VCPU_GPR(r24)(r4)
- ld r25, VCPU_GPR(r25)(r4)
- ld r26, VCPU_GPR(r26)(r4)
- ld r27, VCPU_GPR(r27)(r4)
- ld r28, VCPU_GPR(r28)(r4)
- ld r29, VCPU_GPR(r29)(r4)
- ld r30, VCPU_GPR(r30)(r4)
- ld r31, VCPU_GPR(r31)(r4)
+kvm_start_lightweight:
ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */
ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
- ld r3, VCPU_TRAMPOLINE_ENTER(r4)
- mtsrr0 r3
-
- LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
- mtsrr1 r3
-
- /* Load guest state in the respective registers */
- lwz r3, VCPU_CR(r4) /* r3 = vcpu->arch.cr */
- stw r3, (PACA_EXMC + EX_CCR)(r13)
-
- ld r3, VCPU_CTR(r4) /* r3 = vcpu->arch.ctr */
- mtctr r3 /* CTR = r3 */
+ /* Load some guest state in the respective registers */
+ ld r5, VCPU_CTR(r4) /* r5 = vcpu->arch.ctr */
+ /* will be swapped in by rmcall */
ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */
mtlr r3 /* LR = r3 */
- ld r3, VCPU_XER(r4) /* r3 = vcpu->arch.xer */
- std r3, (PACA_EXMC + EX_R3)(r13)
+ DISABLE_INTERRUPTS
/* Some guests may need to have dcbz set to 32 byte length.
*
@@ -141,36 +127,15 @@ kvm_start_lightweight:
mtspr SPRN_HID5,r3
no_dcbz32_on:
- /* Load guest GPRs */
-
- ld r3, VCPU_GPR(r9)(r4)
- std r3, (PACA_EXMC + EX_R9)(r13)
- ld r3, VCPU_GPR(r10)(r4)
- std r3, (PACA_EXMC + EX_R10)(r13)
- ld r3, VCPU_GPR(r11)(r4)
- std r3, (PACA_EXMC + EX_R11)(r13)
- ld r3, VCPU_GPR(r12)(r4)
- std r3, (PACA_EXMC + EX_R12)(r13)
- ld r3, VCPU_GPR(r13)(r4)
- std r3, (PACA_EXMC + EX_R13)(r13)
-
- ld r0, VCPU_GPR(r0)(r4)
- ld r1, VCPU_GPR(r1)(r4)
- ld r2, VCPU_GPR(r2)(r4)
- ld r3, VCPU_GPR(r3)(r4)
- ld r5, VCPU_GPR(r5)(r4)
- ld r6, VCPU_GPR(r6)(r4)
- ld r7, VCPU_GPR(r7)(r4)
- ld r8, VCPU_GPR(r8)(r4)
- ld r4, VCPU_GPR(r4)(r4)
-
- /* This sets the Magic value for the trampoline */
-
- li r11, 1
- stb r11, PACA_KVM_IN_GUEST(r13)
+
+ ld r6, VCPU_RMCALL(r4)
+ mtctr r6
+
+ ld r3, VCPU_TRAMPOLINE_ENTER(r4)
+ LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
/* Jump to SLB patching handler and into our guest */
- RFI
+ bctr
/*
* This is the handler in module memory. It gets jumped at from the
@@ -184,125 +149,70 @@ kvmppc_handler_highmem:
/*
* Register usage at this point:
*
- * R00 = guest R13
- * R01 = host R1
- * R02 = host R2
- * R10 = guest PC
- * R11 = guest MSR
- * R12 = exit handler id
- * R13 = PACA
- * PACA.exmc.R9 = guest R1
- * PACA.exmc.R10 = guest R10
- * PACA.exmc.R11 = guest R11
- * PACA.exmc.R12 = guest R12
- * PACA.exmc.R13 = guest R2
- * PACA.exmc.DAR = guest DAR
- * PACA.exmc.DSISR = guest DSISR
- * PACA.exmc.LR = guest instruction
- * PACA.exmc.CCR = guest CR
- * PACA.exmc.SRR0 = guest R0
+ * R0 = guest last inst
+ * R1 = host R1
+ * R2 = host R2
+ * R3 = guest PC
+ * R4 = guest MSR
+ * R5 = guest DAR
+ * R6 = guest DSISR
+ * R13 = PACA
+ * PACA.KVM.* = guest *
*
*/
- std r3, (PACA_EXMC+EX_R3)(r13)
+ /* R7 = vcpu */
+ ld r7, GPR4(r1)
- /* save the exit id in R3 */
- mr r3, r12
+ /* Now save the guest state */
- /* R12 = vcpu */
- ld r12, GPR4(r1)
+ stw r0, VCPU_LAST_INST(r7)
- /* Now save the guest state */
+ std r3, VCPU_PC(r7)
+ std r4, VCPU_SHADOW_SRR1(r7)
+ std r5, VCPU_FAULT_DEAR(r7)
+ std r6, VCPU_FAULT_DSISR(r7)
- std r0, VCPU_GPR(r13)(r12)
- std r4, VCPU_GPR(r4)(r12)
- std r5, VCPU_GPR(r5)(r12)
- std r6, VCPU_GPR(r6)(r12)
- std r7, VCPU_GPR(r7)(r12)
- std r8, VCPU_GPR(r8)(r12)
- std r9, VCPU_GPR(r9)(r12)
-
- /* get registers from PACA */
- mfpaca r5, r0, EX_SRR0, r12
- mfpaca r5, r3, EX_R3, r12
- mfpaca r5, r1, EX_R9, r12
- mfpaca r5, r10, EX_R10, r12
- mfpaca r5, r11, EX_R11, r12
- mfpaca r5, r12, EX_R12, r12
- mfpaca r5, r2, EX_R13, r12
-
- lwz r5, (PACA_EXMC+EX_LR)(r13)
- stw r5, VCPU_LAST_INST(r12)
-
- lwz r5, (PACA_EXMC+EX_CCR)(r13)
- stw r5, VCPU_CR(r12)
-
- ld r5, VCPU_HFLAGS(r12)
+ ld r5, VCPU_HFLAGS(r7)
rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */
beq no_dcbz32_off
+ li r4, 0
mfspr r5,SPRN_HID5
- rldimi r5,r5,6,56
+ rldimi r5,r4,6,56
mtspr SPRN_HID5,r5
no_dcbz32_off:
- /* XXX maybe skip on lightweight? */
- std r14, VCPU_GPR(r14)(r12)
- std r15, VCPU_GPR(r15)(r12)
- std r16, VCPU_GPR(r16)(r12)
- std r17, VCPU_GPR(r17)(r12)
- std r18, VCPU_GPR(r18)(r12)
- std r19, VCPU_GPR(r19)(r12)
- std r20, VCPU_GPR(r20)(r12)
- std r21, VCPU_GPR(r21)(r12)
- std r22, VCPU_GPR(r22)(r12)
- std r23, VCPU_GPR(r23)(r12)
- std r24, VCPU_GPR(r24)(r12)
- std r25, VCPU_GPR(r25)(r12)
- std r26, VCPU_GPR(r26)(r12)
- std r27, VCPU_GPR(r27)(r12)
- std r28, VCPU_GPR(r28)(r12)
- std r29, VCPU_GPR(r29)(r12)
- std r30, VCPU_GPR(r30)(r12)
- std r31, VCPU_GPR(r31)(r12)
-
- /* Restore non-volatile host registers (r14 - r31) */
- REST_NVGPRS(r1)
-
- /* Save guest PC (R10) */
- std r10, VCPU_PC(r12)
-
- /* Save guest msr (R11) */
- std r11, VCPU_SHADOW_MSR(r12)
-
- /* Save guest CTR (in R12) */
+ std r14, VCPU_GPR(r14)(r7)
+ std r15, VCPU_GPR(r15)(r7)
+ std r16, VCPU_GPR(r16)(r7)
+ std r17, VCPU_GPR(r17)(r7)
+ std r18, VCPU_GPR(r18)(r7)
+ std r19, VCPU_GPR(r19)(r7)
+ std r20, VCPU_GPR(r20)(r7)
+ std r21, VCPU_GPR(r21)(r7)
+ std r22, VCPU_GPR(r22)(r7)
+ std r23, VCPU_GPR(r23)(r7)
+ std r24, VCPU_GPR(r24)(r7)
+ std r25, VCPU_GPR(r25)(r7)
+ std r26, VCPU_GPR(r26)(r7)
+ std r27, VCPU_GPR(r27)(r7)
+ std r28, VCPU_GPR(r28)(r7)
+ std r29, VCPU_GPR(r29)(r7)
+ std r30, VCPU_GPR(r30)(r7)
+ std r31, VCPU_GPR(r31)(r7)
+
+ /* Save guest CTR */
mfctr r5
- std r5, VCPU_CTR(r12)
+ std r5, VCPU_CTR(r7)
/* Save guest LR */
mflr r5
- std r5, VCPU_LR(r12)
-
- /* Save guest XER */
- mfxer r5
- std r5, VCPU_XER(r12)
-
- /* Save guest DAR */
- ld r5, (PACA_EXMC+EX_DAR)(r13)
- std r5, VCPU_FAULT_DEAR(r12)
-
- /* Save guest DSISR */
- lwz r5, (PACA_EXMC+EX_DSISR)(r13)
- std r5, VCPU_FAULT_DSISR(r12)
+ std r5, VCPU_LR(r7)
/* Restore host msr -> SRR1 */
- ld r7, VCPU_HOST_MSR(r12)
- mtsrr1 r7
-
- /* Restore host IP -> SRR0 */
- ld r6, VCPU_HOST_RETIP(r12)
- mtsrr0 r6
+ ld r6, VCPU_HOST_MSR(r7)
/*
* For some interrupts, we need to call the real Linux
@@ -314,13 +224,14 @@ no_dcbz32_off:
* r3 = address of interrupt handler (exit reason)
*/
- cmpwi r3, BOOK3S_INTERRUPT_EXTERNAL
+ cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
beq call_linux_handler
- cmpwi r3, BOOK3S_INTERRUPT_DECREMENTER
+ cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
beq call_linux_handler
- /* Back to Interruptable Mode! (goto kvm_return_point) */
- RFI
+ /* Back to EE=1 */
+ mtmsr r6
+ b kvm_return_point
call_linux_handler:
@@ -333,16 +244,22 @@ call_linux_handler:
* interrupt handler!
*
* R3 still contains the exit code,
- * R6 VCPU_HOST_RETIP and
- * R7 VCPU_HOST_MSR
+ * R5 VCPU_HOST_RETIP and
+ * R6 VCPU_HOST_MSR
*/
- mtlr r3
+ /* Restore host IP -> SRR0 */
+ ld r5, VCPU_HOST_RETIP(r7)
+
+ /* XXX Better move to a safe function?
+ * What if we get an HTAB flush in between mtsrr0 and mtsrr1? */
- ld r5, VCPU_TRAMPOLINE_LOWMEM(r12)
- mtsrr0 r5
- LOAD_REG_IMMEDIATE(r5, MSR_KERNEL & ~(MSR_IR | MSR_DR))
- mtsrr1 r5
+ mtlr r12
+
+ ld r4, VCPU_TRAMPOLINE_LOWMEM(r7)
+ mtsrr0 r4
+ LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
+ mtsrr1 r3
RFI
@@ -351,42 +268,51 @@ kvm_return_point:
/* Jump back to lightweight entry if we're supposed to */
/* go back into the guest */
- mr r5, r3
+
+ /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
+ mr r5, r12
+
/* Restore r3 (kvm_run) and r4 (vcpu) */
REST_2GPRS(3, r1)
bl KVMPPC_HANDLE_EXIT
-#if 0 /* XXX get lightweight exits back */
+ /* If RESUME_GUEST, get back in the loop */
cmpwi r3, RESUME_GUEST
- bne kvm_exit_heavyweight
+ beq kvm_loop_lightweight
- /* put VCPU and KVM_RUN back into place and roll again! */
- REST_2GPRS(3, r1)
- b kvm_start_lightweight
+ cmpwi r3, RESUME_GUEST_NV
+ beq kvm_loop_heavyweight
-kvm_exit_heavyweight:
- /* Restore non-volatile host registers */
- ld r14, _LINK(r1)
- mtlr r14
- REST_NVGPRS(r1)
+kvm_exit_loop:
- addi r1, r1, SWITCH_FRAME_SIZE
-#else
ld r4, _LINK(r1)
mtlr r4
- cmpwi r3, RESUME_GUEST
- bne kvm_exit_heavyweight
+ /* Restore non-volatile host registers (r14 - r31) */
+ REST_NVGPRS(r1)
+
+ addi r1, r1, SWITCH_FRAME_SIZE
+ blr
+
+kvm_loop_heavyweight:
+
+ ld r4, _LINK(r1)
+ std r4, (16 + SWITCH_FRAME_SIZE)(r1)
+ /* Load vcpu and cpu_run */
REST_2GPRS(3, r1)
- addi r1, r1, SWITCH_FRAME_SIZE
+ /* Load non-volatile guest state from the vcpu */
+ VCPU_LOAD_NVGPRS(r4)
- b kvm_start_entry
+ /* Jump back into the beginning of this function */
+ b kvm_start_lightweight
-kvm_exit_heavyweight:
+kvm_loop_lightweight:
- addi r1, r1, SWITCH_FRAME_SIZE
-#endif
+ /* We'll need the vcpu pointer */
+ REST_GPR(4, r1)
+
+ /* Jump back into the beginning of this function */
+ b kvm_start_lightweight
- blr
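
In C terms, the restructured entry/exit path above behaves roughly like the loop below; enter_guest() and the macro-as-call usage are illustrative stand-ins for the assembly labels, not kernel API:

    for (;;) {
            enter_guest(vcpu);                    /* kvm_start_lightweight */
            r = kvmppc_handle_exit(run, vcpu, exit_nr);
            if (r == RESUME_GUEST)
                    continue;                     /* kvm_loop_lightweight: NV GPRs still live */
            if (r == RESUME_GUEST_NV) {
                    VCPU_LOAD_NVGPRS(vcpu);       /* kvm_loop_heavyweight */
                    continue;
            }
            break;                                /* kvm_exit_loop: restore host state, blr */
    }

The point of the split is that a plain RESUME_GUEST re-enters without touching r14-r31 at all, which is what makes the lightweight exits cheap.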
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index e4beeb3..512dcff 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -54,7 +54,7 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
if (!vcpu_book3s->slb[i].valid)
continue;
- if (vcpu_book3s->slb[i].large)
+ if (vcpu_book3s->slb[i].tb)
cmp_esid = esid_1t;
if (vcpu_book3s->slb[i].esid == cmp_esid)
@@ -65,9 +65,10 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
eaddr, esid, esid_1t);
for (i = 0; i < vcpu_book3s->slb_nr; i++) {
if (vcpu_book3s->slb[i].vsid)
- dprintk(" %d: %c%c %llx %llx\n", i,
+ dprintk(" %d: %c%c%c %llx %llx\n", i,
vcpu_book3s->slb[i].valid ? 'v' : ' ',
vcpu_book3s->slb[i].large ? 'l' : ' ',
+ vcpu_book3s->slb[i].tb ? 't' : ' ',
vcpu_book3s->slb[i].esid,
vcpu_book3s->slb[i].vsid);
}
@@ -84,7 +85,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
if (!slb)
return 0;
- if (slb->large)
+ if (slb->tb)
return (((u64)eaddr >> 12) & 0xfffffff) |
(((u64)slb->vsid) << 28);
@@ -309,7 +310,8 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
slbe = &vcpu_book3s->slb[slb_nr];
slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
- slbe->esid = slbe->large ? esid_1t : esid;
+ slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0;
+ slbe->esid = slbe->tb ? esid_1t : esid;
slbe->vsid = rs >> 12;
slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0;
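
A compact rendering of the segment-size selection this hunk introduces: with 1T segments the ESID is the effective address shifted by 40 bits rather than 28 (SID_SHIFT_1T vs. SID_SHIFT in the kernel headers), so both the stored ESID and the lookup comparison must key off the new tb bit:

    u64 esid    = eaddr >> SID_SHIFT;      /* 256M segment: EA >> 28 */
    u64 esid_1t = eaddr >> SID_SHIFT_1T;   /* 1T segment:   EA >> 40 */
    u64 cmp     = slbe->tb ? esid_1t : esid;

The old code keyed this off slbe->large, i.e. the page-size L bit, which is unrelated to the segment-size B field; that is the bug being fixed here.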
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S
index fb7dd2e..c83c60a 100644
--- a/arch/powerpc/kvm/book3s_64_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_64_rmhandlers.S
@@ -45,36 +45,25 @@ kvmppc_trampoline_\intno:
* To distinguish, we check a magic byte in the PACA
*/
mfspr r13, SPRN_SPRG_PACA /* r13 = PACA */
- std r12, (PACA_EXMC + EX_R12)(r13)
+ std r12, PACA_KVM_SCRATCH0(r13)
mfcr r12
- stw r12, (PACA_EXMC + EX_CCR)(r13)
+ stw r12, PACA_KVM_SCRATCH1(r13)
lbz r12, PACA_KVM_IN_GUEST(r13)
- cmpwi r12, 0
+ cmpwi r12, KVM_GUEST_MODE_NONE
bne ..kvmppc_handler_hasmagic_\intno
/* No KVM guest? Then jump back to the Linux handler! */
- lwz r12, (PACA_EXMC + EX_CCR)(r13)
+ lwz r12, PACA_KVM_SCRATCH1(r13)
mtcr r12
- ld r12, (PACA_EXMC + EX_R12)(r13)
+ ld r12, PACA_KVM_SCRATCH0(r13)
mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
b kvmppc_resume_\intno /* Get back original handler */
/* Now we know we're handling a KVM guest */
..kvmppc_handler_hasmagic_\intno:
- /* Unset guest state */
- li r12, 0
- stb r12, PACA_KVM_IN_GUEST(r13)
- std r1, (PACA_EXMC+EX_R9)(r13)
- std r10, (PACA_EXMC+EX_R10)(r13)
- std r11, (PACA_EXMC+EX_R11)(r13)
- std r2, (PACA_EXMC+EX_R13)(r13)
-
- mfsrr0 r10
- mfsrr1 r11
-
- /* Restore R1/R2 so we can handle faults */
- ld r1, PACAR1(r13)
- ld r2, (PACA_EXMC+EX_SRR0)(r13)
+ /* Should we just skip the faulting instruction? */
+ cmpwi r12, KVM_GUEST_MODE_SKIP
+ beq kvmppc_handler_skip_ins
/* Let's store which interrupt we're handling */
li r12, \intno
@@ -102,23 +91,107 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
/*
+ * Bring us back to the faulting code, but skip the
+ * faulting instruction.
+ *
+ * This is a generic exit path from the interrupt
+ * trampolines above.
+ *
+ * Input Registers:
+ *
+ * R12 = free
+ * R13 = PACA
+ * PACA.KVM.SCRATCH0 = guest R12
+ * PACA.KVM.SCRATCH1 = guest CR
+ * SPRG_SCRATCH0 = guest R13
+ *
+ */
+kvmppc_handler_skip_ins:
+
+ /* Patch the IP to the next instruction */
+ mfsrr0 r12
+ addi r12, r12, 4
+ mtsrr0 r12
+
+ /* Clean up all state */
+ lwz r12, PACA_KVM_SCRATCH1(r13)
+ mtcr r12
+ ld r12, PACA_KVM_SCRATCH0(r13)
+ mfspr r13, SPRN_SPRG_SCRATCH0
+
+ /* And get back into the code */
+ RFI
+
+/*
* This trampoline brings us back to a real mode handler
*
* Input Registers:
*
- * R6 = SRR0
- * R7 = SRR1
+ * R5 = SRR0
+ * R6 = SRR1
* LR = real-mode IP
*
*/
.global kvmppc_handler_lowmem_trampoline
kvmppc_handler_lowmem_trampoline:
- mtsrr0 r6
- mtsrr1 r7
+ mtsrr0 r5
+ mtsrr1 r6
blr
kvmppc_handler_lowmem_trampoline_end:
+/*
+ * Call a function in real mode
+ *
+ * Input Registers:
+ *
+ * R3 = function
+ * R4 = MSR
+ * R5 = CTR
+ *
+ */
+_GLOBAL(kvmppc_rmcall)
+ mtmsr r4 /* Disable relocation, so mtsrr
+ doesn't get interrupted */
+ mtctr r5
+ mtsrr0 r3
+ mtsrr1 r4
+ RFI
+
+/*
+ * Activate current's external feature (FPU/Altivec/VSX)
+ */
+#define define_load_up(what) \
+ \
+_GLOBAL(kvmppc_load_up_ ## what); \
+ subi r1, r1, INT_FRAME_SIZE; \
+ mflr r3; \
+ std r3, _LINK(r1); \
+ mfmsr r4; \
+ std r31, GPR3(r1); \
+ mr r31, r4; \
+ li r5, MSR_DR; \
+ oris r5, r5, MSR_EE@h; \
+ andc r4, r4, r5; \
+ mtmsr r4; \
+ \
+ bl .load_up_ ## what; \
+ \
+ mtmsr r31; \
+ ld r3, _LINK(r1); \
+ ld r31, GPR3(r1); \
+ addi r1, r1, INT_FRAME_SIZE; \
+ mtlr r3; \
+ blr
+
+define_load_up(fpu)
+#ifdef CONFIG_ALTIVEC
+define_load_up(altivec)
+#endif
+#ifdef CONFIG_VSX
+define_load_up(vsx)
+#endif
+
.global kvmppc_trampoline_lowmem
kvmppc_trampoline_lowmem:
.long kvmppc_handler_lowmem_trampoline - _stext
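
The define_load_up() macro wraps the kernel's lazy-state loaders so they can run with interrupts and data relocation off. In C terms it expands to roughly the following sketch (mfmsr()/mtmsr() stand in for the usual MSR accessors, and treating load_up_fpu as a plain callable simplifies the asm bl):

    void kvmppc_load_up_fpu(void)
    {
            unsigned long msr = mfmsr();

            mtmsr(msr & ~(MSR_EE | MSR_DR));  /* no interrupts, no data relocation */
            load_up_fpu();                    /* the regular lazy-FPU restore */
            mtmsr(msr);                       /* back to the previous MSR */
    }

kvmppc_rmcall is the complementary piece: it pre-loads CTR, points SRR0/SRR1 at the target and its MSR, and RFIs, so the switch to real mode and the jump happen atomically. That atomicity is exactly what the "HTAB flush in between mtsrr0 and mtsrr1" worry in the entry code is about.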
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index ecd237a..35b76272 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -31,7 +31,7 @@
#define REBOLT_SLB_ENTRY(num) \
ld r10, SHADOW_SLB_ESID(num)(r11); \
cmpdi r10, 0; \
- beq slb_exit_skip_1; \
+ beq slb_exit_skip_ ## num; \
oris r10, r10, SLB_ESID_V@h; \
ld r9, SHADOW_SLB_VSID(num)(r11); \
slbmte r9, r10; \
@@ -51,23 +51,21 @@ kvmppc_handler_trampoline_enter:
*
* MSR = ~IR|DR
* R13 = PACA
+ * R1 = host R1
+ * R2 = host R2
* R9 = guest IP
* R10 = guest MSR
- * R11 = free
- * R12 = free
- * PACA[PACA_EXMC + EX_R9] = guest R9
- * PACA[PACA_EXMC + EX_R10] = guest R10
- * PACA[PACA_EXMC + EX_R11] = guest R11
- * PACA[PACA_EXMC + EX_R12] = guest R12
- * PACA[PACA_EXMC + EX_R13] = guest R13
- * PACA[PACA_EXMC + EX_CCR] = guest CR
- * PACA[PACA_EXMC + EX_R3] = guest XER
+ * all other GPRS = free
+ * PACA[KVM_CR] = guest CR
+ * PACA[KVM_XER] = guest XER
*/
mtsrr0 r9
mtsrr1 r10
- mtspr SPRN_SPRG_SCRATCH0, r0
+ /* Activate guest mode, so faults get handled by KVM */
+ li r11, KVM_GUEST_MODE_GUEST
+ stb r11, PACA_KVM_IN_GUEST(r13)
/* Remove LPAR shadow entries */
@@ -131,20 +129,27 @@ slb_do_enter:
/* Enter guest */
- mfspr r0, SPRN_SPRG_SCRATCH0
-
- ld r9, (PACA_EXMC+EX_R9)(r13)
- ld r10, (PACA_EXMC+EX_R10)(r13)
- ld r12, (PACA_EXMC+EX_R12)(r13)
-
- lwz r11, (PACA_EXMC+EX_CCR)(r13)
+ ld r0, (PACA_KVM_R0)(r13)
+ ld r1, (PACA_KVM_R1)(r13)
+ ld r2, (PACA_KVM_R2)(r13)
+ ld r3, (PACA_KVM_R3)(r13)
+ ld r4, (PACA_KVM_R4)(r13)
+ ld r5, (PACA_KVM_R5)(r13)
+ ld r6, (PACA_KVM_R6)(r13)
+ ld r7, (PACA_KVM_R7)(r13)
+ ld r8, (PACA_KVM_R8)(r13)
+ ld r9, (PACA_KVM_R9)(r13)
+ ld r10, (PACA_KVM_R10)(r13)
+ ld r12, (PACA_KVM_R12)(r13)
+
+ lwz r11, (PACA_KVM_CR)(r13)
mtcr r11
- ld r11, (PACA_EXMC+EX_R3)(r13)
+ ld r11, (PACA_KVM_XER)(r13)
mtxer r11
- ld r11, (PACA_EXMC+EX_R11)(r13)
- ld r13, (PACA_EXMC+EX_R13)(r13)
+ ld r11, (PACA_KVM_R11)(r13)
+ ld r13, (PACA_KVM_R13)(r13)
RFI
kvmppc_handler_trampoline_enter_end:
@@ -162,28 +167,54 @@ kvmppc_handler_trampoline_exit:
/* Register usage at this point:
*
- * SPRG_SCRATCH0 = guest R13
- * R01 = host R1
- * R02 = host R2
- * R10 = guest PC
- * R11 = guest MSR
- * R12 = exit handler id
- * R13 = PACA
- * PACA.exmc.CCR = guest CR
- * PACA.exmc.R9 = guest R1
- * PACA.exmc.R10 = guest R10
- * PACA.exmc.R11 = guest R11
- * PACA.exmc.R12 = guest R12
- * PACA.exmc.R13 = guest R2
+ * SPRG_SCRATCH0 = guest R13
+ * R12 = exit handler id
+ * R13 = PACA
+ * PACA.KVM.SCRATCH0 = guest R12
+ * PACA.KVM.SCRATCH1 = guest CR
*
*/
/* Save registers */
- std r0, (PACA_EXMC+EX_SRR0)(r13)
- std r9, (PACA_EXMC+EX_R3)(r13)
- std r10, (PACA_EXMC+EX_LR)(r13)
- std r11, (PACA_EXMC+EX_DAR)(r13)
+ std r0, PACA_KVM_R0(r13)
+ std r1, PACA_KVM_R1(r13)
+ std r2, PACA_KVM_R2(r13)
+ std r3, PACA_KVM_R3(r13)
+ std r4, PACA_KVM_R4(r13)
+ std r5, PACA_KVM_R5(r13)
+ std r6, PACA_KVM_R6(r13)
+ std r7, PACA_KVM_R7(r13)
+ std r8, PACA_KVM_R8(r13)
+ std r9, PACA_KVM_R9(r13)
+ std r10, PACA_KVM_R10(r13)
+ std r11, PACA_KVM_R11(r13)
+
+ /* Restore R1/R2 so we can handle faults */
+ ld r1, PACA_KVM_HOST_R1(r13)
+ ld r2, PACA_KVM_HOST_R2(r13)
+
+ /* Save guest PC and MSR in GPRs */
+ mfsrr0 r3
+ mfsrr1 r4
+
+ /* Get the scratched-off registers back */
+ mfspr r9, SPRN_SPRG_SCRATCH0
+ std r9, PACA_KVM_R13(r13)
+
+ ld r8, PACA_KVM_SCRATCH0(r13)
+ std r8, PACA_KVM_R12(r13)
+
+ lwz r7, PACA_KVM_SCRATCH1(r13)
+ stw r7, PACA_KVM_CR(r13)
+
+ /* Save more register state */
+
+ mfxer r6
+ stw r6, PACA_KVM_XER(r13)
+
+ mfdar r5
+ mfdsisr r6
/*
* In order for us to easily get the last instruction,
@@ -202,17 +233,28 @@ kvmppc_handler_trampoline_exit:
ld_last_inst:
/* Save off the guest instruction we're at */
+
+ /* Set guest mode to 'jump over instruction' so if lwz faults
+ * we'll just continue at the next IP. */
+ li r9, KVM_GUEST_MODE_SKIP
+ stb r9, PACA_KVM_IN_GUEST(r13)
+
/* 1) enable paging for data */
mfmsr r9
ori r11, r9, MSR_DR /* Enable paging for data */
mtmsr r11
/* 2) fetch the instruction */
- lwz r0, 0(r10)
+ li r0, KVM_INST_FETCH_FAILED /* In case lwz faults */
+ lwz r0, 0(r3)
/* 3) disable paging again */
mtmsr r9
no_ld_last_inst:
+ /* Unset guest mode */
+ li r9, KVM_GUEST_MODE_NONE
+ stb r9, PACA_KVM_IN_GUEST(r13)
+
/* Restore bolted entries from the shadow and fix it along the way */
/* We don't store anything in entry 0, so we don't need to take care of it */
@@ -233,29 +275,27 @@ no_ld_last_inst:
slb_do_exit:
- /* Restore registers */
-
- ld r11, (PACA_EXMC+EX_DAR)(r13)
- ld r10, (PACA_EXMC+EX_LR)(r13)
- ld r9, (PACA_EXMC+EX_R3)(r13)
-
- /* Save last inst */
- stw r0, (PACA_EXMC+EX_LR)(r13)
-
- /* Save DAR and DSISR before going to paged mode */
- mfdar r0
- std r0, (PACA_EXMC+EX_DAR)(r13)
- mfdsisr r0
- stw r0, (PACA_EXMC+EX_DSISR)(r13)
+ /* Register usage at this point:
+ *
+ * R0 = guest last inst
+ * R1 = host R1
+ * R2 = host R2
+ * R3 = guest PC
+ * R4 = guest MSR
+ * R5 = guest DAR
+ * R6 = guest DSISR
+ * R12 = exit handler id
+ * R13 = PACA
+ * PACA.KVM.* = guest *
+ *
+ */
/* RFI into the highmem handler */
- mfmsr r0
- ori r0, r0, MSR_IR|MSR_DR|MSR_RI /* Enable paging */
- mtsrr1 r0
- ld r0, PACASAVEDMSR(r13) /* Highmem handler address */
- mtsrr0 r0
-
- mfspr r0, SPRN_SPRG_SCRATCH0
+ mfmsr r7
+ ori r7, r7, MSR_IR|MSR_DR|MSR_RI /* Enable paging */
+ mtsrr1 r7
+ ld r8, PACA_KVM_VMHANDLER(r13) /* Highmem handler address */
+ mtsrr0 r8
RFI
kvmppc_handler_trampoline_exit_end:
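
The PACA_KVM_IN_GUEST byte now works as a three-state flag rather than a boolean. A C-flavoured sketch of how the trampolines dispatch on it (field and helper names here are illustrative):

    switch (paca->kvm_in_guest) {
    case KVM_GUEST_MODE_NONE:
            resume_linux_handler();    /* not ours: fall back to the stock path */
            break;
    case KVM_GUEST_MODE_GUEST:
            save_guest_state();        /* real guest exit: stash state in the PACA */
            break;
    case KVM_GUEST_MODE_SKIP:
            srr0 += 4;                 /* the lwz of the last inst faulted: skip it */
            rfi();
            break;
    }

SKIP is only ever set around the guarded instruction fetch in ld_last_inst, with r0 pre-loaded with KVM_INST_FETCH_FAILED so the emulator can tell the fetch did not happen.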
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 06f5a9e..4d686cc 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -69,10 +69,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
for (i = 0; i < 32; i += 4) {
printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
- vcpu->arch.gpr[i],
- vcpu->arch.gpr[i+1],
- vcpu->arch.gpr[i+2],
- vcpu->arch.gpr[i+3]);
+ kvmppc_get_gpr(vcpu, i),
+ kvmppc_get_gpr(vcpu, i+1),
+ kvmppc_get_gpr(vcpu, i+2),
+ kvmppc_get_gpr(vcpu, i+3));
}
}
@@ -82,8 +82,32 @@ static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
set_bit(priority, &vcpu->arch.pending_exceptions);
}
-void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
+static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
+ ulong dear_flags, ulong esr_flags)
{
+ vcpu->arch.queued_dear = dear_flags;
+ vcpu->arch.queued_esr = esr_flags;
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
+}
+
+static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
+ ulong dear_flags, ulong esr_flags)
+{
+ vcpu->arch.queued_dear = dear_flags;
+ vcpu->arch.queued_esr = esr_flags;
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
+}
+
+static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
+ ulong esr_flags)
+{
+ vcpu->arch.queued_esr = esr_flags;
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
+}
+
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
+{
+ vcpu->arch.queued_esr = esr_flags;
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}
@@ -97,6 +121,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
+void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
+{
+ clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
+}
+
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
@@ -109,14 +138,19 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
{
int allowed = 0;
ulong msr_mask;
+ bool update_esr = false, update_dear = false;
switch (priority) {
- case BOOKE_IRQPRIO_PROGRAM:
case BOOKE_IRQPRIO_DTLB_MISS:
- case BOOKE_IRQPRIO_ITLB_MISS:
- case BOOKE_IRQPRIO_SYSCALL:
case BOOKE_IRQPRIO_DATA_STORAGE:
+ update_dear = true;
+ /* fall through */
case BOOKE_IRQPRIO_INST_STORAGE:
+ case BOOKE_IRQPRIO_PROGRAM:
+ update_esr = true;
+ /* fall through */
+ case BOOKE_IRQPRIO_ITLB_MISS:
+ case BOOKE_IRQPRIO_SYSCALL:
case BOOKE_IRQPRIO_FP_UNAVAIL:
case BOOKE_IRQPRIO_SPE_UNAVAIL:
case BOOKE_IRQPRIO_SPE_FP_DATA:
@@ -151,6 +185,10 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
vcpu->arch.srr0 = vcpu->arch.pc;
vcpu->arch.srr1 = vcpu->arch.msr;
vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
+ if (update_esr == true)
+ vcpu->arch.esr = vcpu->arch.queued_esr;
+ if (update_dear == true)
+ vcpu->arch.dear = vcpu->arch.queued_dear;
kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);
clear_bit(priority, &vcpu->arch.pending_exceptions);
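
The reordered switch uses fall-through to accumulate the two flags; flattened into plain C, the effect is:

    update_dear = (priority == BOOKE_IRQPRIO_DTLB_MISS ||
                   priority == BOOKE_IRQPRIO_DATA_STORAGE);
    update_esr  = update_dear ||
                  priority == BOOKE_IRQPRIO_INST_STORAGE ||
                  priority == BOOKE_IRQPRIO_PROGRAM;

so DTLB misses and data storage interrupts latch both queued_dear and queued_esr into the architected registers at delivery time, instruction storage and program interrupts latch only queued_esr, and everything else touches neither.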
@@ -223,8 +261,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (vcpu->arch.msr & MSR_PR) {
/* Program traps generated by user-level software must be handled
* by the guest kernel. */
- vcpu->arch.esr = vcpu->arch.fault_esr;
- kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
+ kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
r = RESUME_GUEST;
kvmppc_account_exit(vcpu, USR_PR_INST);
break;
@@ -280,16 +317,14 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOKE_INTERRUPT_DATA_STORAGE:
- vcpu->arch.dear = vcpu->arch.fault_dear;
- vcpu->arch.esr = vcpu->arch.fault_esr;
- kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
+ kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
+ vcpu->arch.fault_esr);
kvmppc_account_exit(vcpu, DSI_EXITS);
r = RESUME_GUEST;
break;
case BOOKE_INTERRUPT_INST_STORAGE:
- vcpu->arch.esr = vcpu->arch.fault_esr;
- kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
+ kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
kvmppc_account_exit(vcpu, ISI_EXITS);
r = RESUME_GUEST;
break;
@@ -310,9 +345,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
if (gtlb_index < 0) {
/* The guest didn't have a mapping for it. */
- kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
- vcpu->arch.dear = vcpu->arch.fault_dear;
- vcpu->arch.esr = vcpu->arch.fault_esr;
+ kvmppc_core_queue_dtlb_miss(vcpu,
+ vcpu->arch.fault_dear,
+ vcpu->arch.fault_esr);
kvmppc_mmu_dtlb_miss(vcpu);
kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
r = RESUME_GUEST;
@@ -426,7 +461,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
vcpu->arch.pc = 0;
vcpu->arch.msr = 0;
- vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
+ kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
vcpu->arch.shadow_pid = 1;
@@ -444,10 +479,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
int i;
regs->pc = vcpu->arch.pc;
- regs->cr = vcpu->arch.cr;
+ regs->cr = kvmppc_get_cr(vcpu);
regs->ctr = vcpu->arch.ctr;
regs->lr = vcpu->arch.lr;
- regs->xer = vcpu->arch.xer;
+ regs->xer = kvmppc_get_xer(vcpu);
regs->msr = vcpu->arch.msr;
regs->srr0 = vcpu->arch.srr0;
regs->srr1 = vcpu->arch.srr1;
@@ -461,7 +496,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->sprg7 = vcpu->arch.sprg6;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
- regs->gpr[i] = vcpu->arch.gpr[i];
+ regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
return 0;
}
@@ -471,10 +506,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
int i;
vcpu->arch.pc = regs->pc;
- vcpu->arch.cr = regs->cr;
+ kvmppc_set_cr(vcpu, regs->cr);
vcpu->arch.ctr = regs->ctr;
vcpu->arch.lr = regs->lr;
- vcpu->arch.xer = regs->xer;
+ kvmppc_set_xer(vcpu, regs->xer);
kvmppc_set_msr(vcpu, regs->msr);
vcpu->arch.srr0 = regs->srr0;
vcpu->arch.srr1 = regs->srr1;
@@ -486,8 +521,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.sprg6 = regs->sprg5;
vcpu->arch.sprg7 = regs->sprg6;
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
- vcpu->arch.gpr[i] = regs->gpr[i];
+ for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+ kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
return 0;
}
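
All of these conversions lean on the GPR accessors added elsewhere in this series. They are presumably thin inline wrappers along these lines — a sketch, not the header's verbatim content:

    static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
    {
            vcpu->arch.gpr[num] = val;
    }

    static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
    {
            return vcpu->arch.gpr[num];
    }

Funnelling every access through one pair of functions is what later allows the backing store to change (for example, keeping hot GPRs somewhere other than vcpu->arch.gpr) without touching every call site again.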
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index aebc65e..cbc790e 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -62,20 +62,20 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
case OP_31_XOP_MFMSR:
rt = get_rt(inst);
- vcpu->arch.gpr[rt] = vcpu->arch.msr;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.msr);
kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
break;
case OP_31_XOP_MTMSR:
rs = get_rs(inst);
kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
- kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
+ kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_WRTEE:
rs = get_rs(inst);
vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
- | (vcpu->arch.gpr[rs] & MSR_EE);
+ | (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
break;
@@ -101,22 +101,23 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
int emulated = EMULATE_DONE;
+ ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_DEAR:
- vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.dear = spr_val; break;
case SPRN_ESR:
- vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.esr = spr_val; break;
case SPRN_DBCR0:
- vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.dbcr0 = spr_val; break;
case SPRN_DBCR1:
- vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.dbcr1 = spr_val; break;
case SPRN_DBSR:
- vcpu->arch.dbsr &= ~vcpu->arch.gpr[rs]; break;
+ vcpu->arch.dbsr &= ~spr_val; break;
case SPRN_TSR:
- vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
+ vcpu->arch.tsr &= ~spr_val; break;
case SPRN_TCR:
- vcpu->arch.tcr = vcpu->arch.gpr[rs];
+ vcpu->arch.tcr = spr_val;
kvmppc_emulate_dec(vcpu);
break;
@@ -124,64 +125,64 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
* loaded into the real SPRGs when resuming the
* guest. */
case SPRN_SPRG4:
- vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg4 = spr_val; break;
case SPRN_SPRG5:
- vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg5 = spr_val; break;
case SPRN_SPRG6:
- vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg6 = spr_val; break;
case SPRN_SPRG7:
- vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg7 = spr_val; break;
case SPRN_IVPR:
- vcpu->arch.ivpr = vcpu->arch.gpr[rs];
+ vcpu->arch.ivpr = spr_val;
break;
case SPRN_IVOR0:
- vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
break;
case SPRN_IVOR1:
- vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val;
break;
case SPRN_IVOR2:
- vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
break;
case SPRN_IVOR3:
- vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
break;
case SPRN_IVOR4:
- vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val;
break;
case SPRN_IVOR5:
- vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val;
break;
case SPRN_IVOR6:
- vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val;
break;
case SPRN_IVOR7:
- vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val;
break;
case SPRN_IVOR8:
- vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
break;
case SPRN_IVOR9:
- vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
break;
case SPRN_IVOR10:
- vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val;
break;
case SPRN_IVOR11:
- vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val;
break;
case SPRN_IVOR12:
- vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val;
break;
case SPRN_IVOR13:
- vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val;
break;
case SPRN_IVOR14:
- vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val;
break;
case SPRN_IVOR15:
- vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val;
break;
default:
@@ -197,65 +198,65 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
switch (sprn) {
case SPRN_IVPR:
- vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break;
case SPRN_DEAR:
- vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); break;
case SPRN_ESR:
- vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break;
case SPRN_DBCR0:
- vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
case SPRN_DBCR1:
- vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
case SPRN_DBSR:
- vcpu->arch.gpr[rt] = vcpu->arch.dbsr; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
case SPRN_IVOR0:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
break;
case SPRN_IVOR1:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
break;
case SPRN_IVOR2:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
break;
case SPRN_IVOR3:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
break;
case SPRN_IVOR4:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
break;
case SPRN_IVOR5:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
break;
case SPRN_IVOR6:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]);
break;
case SPRN_IVOR7:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]);
break;
case SPRN_IVOR8:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
break;
case SPRN_IVOR9:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]);
break;
case SPRN_IVOR10:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]);
break;
case SPRN_IVOR11:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]);
break;
case SPRN_IVOR12:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]);
break;
case SPRN_IVOR13:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]);
break;
case SPRN_IVOR14:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]);
break;
case SPRN_IVOR15:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]);
break;
default:
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 64949ee..efa1198 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -60,6 +60,12 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
kvmppc_e500_tlb_setup(vcpu_e500);
+ /* Registers init */
+ vcpu->arch.pvr = mfspr(SPRN_PVR);
+
+ /* Since booke kvm only supports one core, update all vcpus' PIR to 0 */
+ vcpu->vcpu_id = 0;
+
return 0;
}
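
This pairs with the generic SPRN_PIR read emulation further down (emulate.c), which returns vcpu->vcpu_id; forcing vcpu_id to 0 here is what makes every vcpu read PIR as 0:

    case SPRN_PIR:
            kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id);   /* always 0 after setup */
            break;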
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index be95b8d..8e3edfb 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -74,54 +74,59 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
+ ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_PID:
vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
- vcpu->arch.pid = vcpu->arch.gpr[rs];
+ vcpu->arch.pid = spr_val;
break;
case SPRN_PID1:
- vcpu_e500->pid[1] = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->pid[1] = spr_val; break;
case SPRN_PID2:
- vcpu_e500->pid[2] = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->pid[2] = spr_val; break;
case SPRN_MAS0:
- vcpu_e500->mas0 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->mas0 = spr_val; break;
case SPRN_MAS1:
- vcpu_e500->mas1 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->mas1 = spr_val; break;
case SPRN_MAS2:
- vcpu_e500->mas2 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->mas2 = spr_val; break;
case SPRN_MAS3:
- vcpu_e500->mas3 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->mas3 = spr_val; break;
case SPRN_MAS4:
- vcpu_e500->mas4 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->mas4 = spr_val; break;
case SPRN_MAS6:
- vcpu_e500->mas6 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->mas6 = spr_val; break;
case SPRN_MAS7:
- vcpu_e500->mas7 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->mas7 = spr_val; break;
+ case SPRN_L1CSR0:
+ vcpu_e500->l1csr0 = spr_val;
+ vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
+ break;
case SPRN_L1CSR1:
- vcpu_e500->l1csr1 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->l1csr1 = spr_val; break;
case SPRN_HID0:
- vcpu_e500->hid0 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->hid0 = spr_val; break;
case SPRN_HID1:
- vcpu_e500->hid1 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->hid1 = spr_val; break;
case SPRN_MMUCSR0:
emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
- vcpu->arch.gpr[rs]);
+ spr_val);
break;
/* extra exceptions */
case SPRN_IVOR32:
- vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
break;
case SPRN_IVOR33:
- vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
break;
case SPRN_IVOR34:
- vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
break;
case SPRN_IVOR35:
- vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
break;
default:
@@ -138,63 +143,57 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
switch (sprn) {
case SPRN_PID:
- vcpu->arch.gpr[rt] = vcpu_e500->pid[0]; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break;
case SPRN_PID1:
- vcpu->arch.gpr[rt] = vcpu_e500->pid[1]; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break;
case SPRN_PID2:
- vcpu->arch.gpr[rt] = vcpu_e500->pid[2]; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
case SPRN_MAS0:
- vcpu->arch.gpr[rt] = vcpu_e500->mas0; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break;
case SPRN_MAS1:
- vcpu->arch.gpr[rt] = vcpu_e500->mas1; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break;
case SPRN_MAS2:
- vcpu->arch.gpr[rt] = vcpu_e500->mas2; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break;
case SPRN_MAS3:
- vcpu->arch.gpr[rt] = vcpu_e500->mas3; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas3); break;
case SPRN_MAS4:
- vcpu->arch.gpr[rt] = vcpu_e500->mas4; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break;
case SPRN_MAS6:
- vcpu->arch.gpr[rt] = vcpu_e500->mas6; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break;
case SPRN_MAS7:
- vcpu->arch.gpr[rt] = vcpu_e500->mas7; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break;
case SPRN_TLB0CFG:
- vcpu->arch.gpr[rt] = mfspr(SPRN_TLB0CFG);
- vcpu->arch.gpr[rt] &= ~0xfffUL;
- vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[0];
- break;
-
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break;
case SPRN_TLB1CFG:
- vcpu->arch.gpr[rt] = mfspr(SPRN_TLB1CFG);
- vcpu->arch.gpr[rt] &= ~0xfffUL;
- vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[1];
- break;
-
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break;
+ case SPRN_L1CSR0:
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break;
case SPRN_L1CSR1:
- vcpu->arch.gpr[rt] = vcpu_e500->l1csr1; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break;
case SPRN_HID0:
- vcpu->arch.gpr[rt] = vcpu_e500->hid0; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
case SPRN_HID1:
- vcpu->arch.gpr[rt] = vcpu_e500->hid1; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
case SPRN_MMUCSR0:
- vcpu->arch.gpr[rt] = 0; break;
+ kvmppc_set_gpr(vcpu, rt, 0); break;
case SPRN_MMUCFG:
- vcpu->arch.gpr[rt] = mfspr(SPRN_MMUCFG); break;
+ kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break;
/* extra exceptions */
case SPRN_IVOR32:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]);
break;
case SPRN_IVOR33:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]);
break;
case SPRN_IVOR34:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]);
break;
case SPRN_IVOR35:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]);
break;
default:
emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index fb1e1dc..0d772e6 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -417,7 +417,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
int esel, tlbsel;
gva_t ea;
- ea = ((ra) ? vcpu->arch.gpr[ra] : 0) + vcpu->arch.gpr[rb];
+ ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);
ia = (ea >> 2) & 0x1;
@@ -470,7 +470,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
struct tlbe *gtlbe = NULL;
gva_t ea;
- ea = vcpu->arch.gpr[rb];
+ ea = kvmppc_get_gpr(vcpu, rb);
for (tlbsel = 0; tlbsel < 2; tlbsel++) {
esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
@@ -728,6 +728,12 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
if (vcpu_e500->shadow_pages[1] == NULL)
goto err_out_page0;
+ /* Init TLB configuration register */
+ vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
+ vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0];
+ vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
+ vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1];
+
return 0;
err_out_page0:
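
Moving the TLBnCFG composition here means the mfspr path above degenerates to a plain cached read. The cached value is the host register with its low 12 bits (the NENTRY field) replaced by the guest-visible TLB size, which is fixed for the vcpu's lifetime, so computing it once at init is safe:

    vcpu_e500->tlb0cfg = (mfspr(SPRN_TLB0CFG) & ~0xfffUL)
                       | vcpu_e500->guest_tlb_size[0];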
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 4a9ac66..cb72a65 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -83,6 +83,9 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
pr_debug("mtDEC: %x\n", vcpu->arch.dec);
#ifdef CONFIG_PPC64
+ /* mtdec lowers the interrupt line when positive. */
+ kvmppc_core_dequeue_dec(vcpu);
+
/* POWER4+ triggers a dec interrupt if the value is < 0 */
if (vcpu->arch.dec & 0x80000000) {
hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
@@ -140,14 +143,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
+ /* Try again next time */
+ if (inst == KVM_INST_FETCH_FAILED)
+ return EMULATE_DONE;
+
switch (get_op(inst)) {
case OP_TRAP:
#ifdef CONFIG_PPC64
case OP_TRAP_64:
+ kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
- vcpu->arch.esr |= ESR_PTR;
+ kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
#endif
- kvmppc_core_queue_program(vcpu);
advance = 0;
break;
@@ -167,14 +174,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case OP_31_XOP_STWX:
rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
- vcpu->arch.gpr[rs],
+ kvmppc_get_gpr(vcpu, rs),
4, 1);
break;
case OP_31_XOP_STBX:
rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
- vcpu->arch.gpr[rs],
+ kvmppc_get_gpr(vcpu, rs),
1, 1);
break;
@@ -183,14 +190,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst);
rb = get_rb(inst);
- ea = vcpu->arch.gpr[rb];
+ ea = kvmppc_get_gpr(vcpu, rb);
if (ra)
- ea += vcpu->arch.gpr[ra];
+ ea += kvmppc_get_gpr(vcpu, ra);
emulated = kvmppc_handle_store(run, vcpu,
- vcpu->arch.gpr[rs],
+ kvmppc_get_gpr(vcpu, rs),
1, 1);
- vcpu->arch.gpr[rs] = ea;
+ kvmppc_set_gpr(vcpu, rs, ea);
break;
case OP_31_XOP_LHZX:
@@ -203,12 +210,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst);
rb = get_rb(inst);
- ea = vcpu->arch.gpr[rb];
+ ea = kvmppc_get_gpr(vcpu, rb);
if (ra)
- ea += vcpu->arch.gpr[ra];
+ ea += kvmppc_get_gpr(vcpu, ra);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
- vcpu->arch.gpr[ra] = ea;
+ kvmppc_set_gpr(vcpu, ra, ea);
break;
case OP_31_XOP_MFSPR:
@@ -217,47 +224,49 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
switch (sprn) {
case SPRN_SRR0:
- vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break;
case SPRN_SRR1:
- vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break;
case SPRN_PVR:
- vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
case SPRN_PIR:
- vcpu->arch.gpr[rt] = vcpu->vcpu_id; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
case SPRN_MSSSR0:
- vcpu->arch.gpr[rt] = 0; break;
+ kvmppc_set_gpr(vcpu, rt, 0); break;
/* Note: mftb and TBRL/TBWL are user-accessible, so
* the guest can always access the real TB anyway.
* In fact, we probably will never see these traps. */
case SPRN_TBWL:
- vcpu->arch.gpr[rt] = get_tb() >> 32; break;
+ kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
case SPRN_TBWU:
- vcpu->arch.gpr[rt] = get_tb(); break;
+ kvmppc_set_gpr(vcpu, rt, get_tb()); break;
case SPRN_SPRG0:
- vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break;
case SPRN_SPRG1:
- vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break;
case SPRN_SPRG2:
- vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break;
case SPRN_SPRG3:
- vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break;
/* Note: SPRG4-7 are user-readable, so we don't get
* a trap. */
case SPRN_DEC:
{
u64 jd = get_tb() - vcpu->arch.dec_jiffies;
- vcpu->arch.gpr[rt] = vcpu->arch.dec - jd;
- pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]);
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
+ pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n",
+ vcpu->arch.dec, jd,
+ kvmppc_get_gpr(vcpu, rt));
break;
}
default:
emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
if (emulated == EMULATE_FAIL) {
printk("mfspr: unknown spr %x\n", sprn);
- vcpu->arch.gpr[rt] = 0;
+ kvmppc_set_gpr(vcpu, rt, 0);
}
break;
}
@@ -269,7 +278,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
rb = get_rb(inst);
emulated = kvmppc_handle_store(run, vcpu,
- vcpu->arch.gpr[rs],
+ kvmppc_get_gpr(vcpu, rs),
2, 1);
break;
@@ -278,14 +287,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst);
rb = get_rb(inst);
- ea = vcpu->arch.gpr[rb];
+ ea = kvmppc_get_gpr(vcpu, rb);
if (ra)
- ea += vcpu->arch.gpr[ra];
+ ea += kvmppc_get_gpr(vcpu, ra);
emulated = kvmppc_handle_store(run, vcpu,
- vcpu->arch.gpr[rs],
+ kvmppc_get_gpr(vcpu, rs),
2, 1);
- vcpu->arch.gpr[ra] = ea;
+ kvmppc_set_gpr(vcpu, ra, ea);
break;
case OP_31_XOP_MTSPR:
@@ -293,9 +302,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
rs = get_rs(inst);
switch (sprn) {
case SPRN_SRR0:
- vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break;
case SPRN_SRR1:
- vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break;
/* XXX We need to context-switch the timebase for
* watchdog and FIT. */
@@ -305,18 +314,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case SPRN_MSSSR0: break;
case SPRN_DEC:
- vcpu->arch.dec = vcpu->arch.gpr[rs];
+ vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
kvmppc_emulate_dec(vcpu);
break;
case SPRN_SPRG0:
- vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break;
case SPRN_SPRG1:
- vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break;
case SPRN_SPRG2:
- vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break;
case SPRN_SPRG3:
- vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break;
default:
emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
@@ -348,7 +357,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
rb = get_rb(inst);
emulated = kvmppc_handle_store(run, vcpu,
- vcpu->arch.gpr[rs],
+ kvmppc_get_gpr(vcpu, rs),
4, 0);
break;
@@ -363,7 +372,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
rb = get_rb(inst);
emulated = kvmppc_handle_store(run, vcpu,
- vcpu->arch.gpr[rs],
+ kvmppc_get_gpr(vcpu, rs),
2, 0);
break;
@@ -382,7 +391,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst);
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
- vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
break;
case OP_LBZ:
@@ -394,35 +403,39 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst);
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
- vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
break;
case OP_STW:
rs = get_rs(inst);
- emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs),
4, 1);
break;
case OP_STWU:
ra = get_ra(inst);
rs = get_rs(inst);
- emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs),
4, 1);
- vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
break;
case OP_STB:
rs = get_rs(inst);
- emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs),
1, 1);
break;
case OP_STBU:
ra = get_ra(inst);
rs = get_rs(inst);
- emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs),
1, 1);
- vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
break;
case OP_LHZ:
@@ -434,21 +447,23 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst);
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
- vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
break;
case OP_STH:
rs = get_rs(inst);
- emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs),
2, 1);
break;
case OP_STHU:
ra = get_ra(inst);
rs = get_rs(inst);
- emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs),
2, 1);
- vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
break;
default:
@@ -461,6 +476,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
advance = 0;
printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
"(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
+ kvmppc_core_queue_program(vcpu, 0);
}
}
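
Two behavioural notes fall out of this file's changes. First, KVM_INST_FETCH_FAILED is the consumer side of the guarded lwz in the exit trampoline: returning EMULATE_DONE without advancing simply re-enters the guest, and the retried fetch succeeds once the queued MMU fault has been handled. Second, an instruction we cannot emulate now injects a program interrupt into the guest instead of failing silently. A condensed sketch of the two paths:

    if (inst == KVM_INST_FETCH_FAILED)
            return EMULATE_DONE;                /* retry: the fetch itself faulted */
    ...
    default:
            kvmppc_core_queue_program(vcpu, 0); /* unknown inst: let the guest decide */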
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index f06cf93..51aedd7 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -137,6 +137,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvmppc_free_vcpus(kvm);
kvm_free_physmem(kvm);
+ cleanup_srcu_struct(&kvm->srcu);
kfree(kvm);
}
@@ -165,14 +166,24 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -EINVAL;
}
-int kvm_arch_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old,
- int user_alloc)
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
+ int user_alloc)
{
return 0;
}
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ return;
+}
+
+
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
@@ -260,34 +271,35 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
struct kvm_run *run)
{
- ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
- *gpr = run->dcr.data;
+ kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run)
{
- ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+ ulong gpr;
- if (run->mmio.len > sizeof(*gpr)) {
+ if (run->mmio.len > sizeof(gpr)) {
printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
return;
}
if (vcpu->arch.mmio_is_bigendian) {
switch (run->mmio.len) {
- case 4: *gpr = *(u32 *)run->mmio.data; break;
- case 2: *gpr = *(u16 *)run->mmio.data; break;
- case 1: *gpr = *(u8 *)run->mmio.data; break;
+ case 4: gpr = *(u32 *)run->mmio.data; break;
+ case 2: gpr = *(u16 *)run->mmio.data; break;
+ case 1: gpr = *(u8 *)run->mmio.data; break;
}
} else {
/* Convert BE data from userland back to LE. */
switch (run->mmio.len) {
- case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
- case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
- case 1: *gpr = *(u8 *)run->mmio.data; break;
+ case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
+ case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
+ case 1: gpr = *(u8 *)run->mmio.data; break;
}
}
+
+ kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
}
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
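
A worked example of the rewritten completion path, assuming a 2-byte big-endian MMIO read destined for guest r5:

    /* run->mmio.len == 2, run->mmio.data == { 0x12, 0x34 },
     * vcpu->arch.mmio_is_bigendian == 1, vcpu->arch.io_gpr == 5 */
    gpr = *(u16 *)run->mmio.data;        /* gpr = 0x1234 */
    kvmppc_set_gpr(vcpu, 5, gpr);        /* guest r5 = 0x1234 */

Building the value in a local and storing it once through the accessor is what lets the GPR array stop being addressable by pointer — a prerequisite for the shared accessor scheme above.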
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 341aff2..cd128b0 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -288,46 +288,30 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = HYPFS_MAGIC;
sb->s_op = &hypfs_s_ops;
- if (hypfs_parse_options(data, sb)) {
- rc = -EINVAL;
- goto err_alloc;
- }
+ if (hypfs_parse_options(data, sb))
+ return -EINVAL;
root_inode = hypfs_make_inode(sb, S_IFDIR | 0755);
- if (!root_inode) {
- rc = -ENOMEM;
- goto err_alloc;
- }
+ if (!root_inode)
+ return -ENOMEM;
root_inode->i_op = &simple_dir_inode_operations;
root_inode->i_fop = &simple_dir_operations;
- root_dentry = d_alloc_root(root_inode);
+ sb->s_root = root_dentry = d_alloc_root(root_inode);
if (!root_dentry) {
iput(root_inode);
- rc = -ENOMEM;
- goto err_alloc;
+ return -ENOMEM;
}
if (MACHINE_IS_VM)
rc = hypfs_vm_create_files(sb, root_dentry);
else
rc = hypfs_diag_create_files(sb, root_dentry);
if (rc)
- goto err_tree;
+ return rc;
sbi->update_file = hypfs_create_update_file(sb, root_dentry);
- if (IS_ERR(sbi->update_file)) {
- rc = PTR_ERR(sbi->update_file);
- goto err_tree;
- }
+ if (IS_ERR(sbi->update_file))
+ return PTR_ERR(sbi->update_file);
hypfs_update_update(sb);
- sb->s_root = root_dentry;
pr_info("Hypervisor filesystem mounted\n");
return 0;
-
-err_tree:
- hypfs_delete_tree(root_dentry);
- d_genocide(root_dentry);
- dput(root_dentry);
-err_alloc:
- kfree(sbi);
- return rc;
}
static int hypfs_get_super(struct file_system_type *fst, int flags,
@@ -340,12 +324,12 @@ static void hypfs_kill_super(struct super_block *sb)
{
struct hypfs_sb_info *sb_info = sb->s_fs_info;
- if (sb->s_root) {
+ if (sb->s_root)
hypfs_delete_tree(sb->s_root);
+ if (sb_info->update_file)
hypfs_remove(sb_info->update_file);
- kfree(sb->s_fs_info);
- sb->s_fs_info = NULL;
- }
+ kfree(sb->s_fs_info);
+ sb->s_fs_info = NULL;
kill_litter_super(sb);
}
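
The error-path simplification works because the superblock teardown path (->kill_sb) still runs even when fill_super fails partway, so hypfs_kill_super is made tolerant of partial setup and becomes the single cleanup point:

    /* hypfs_kill_super: each teardown step guards on what actually exists */
    if (sb->s_root)
            hypfs_delete_tree(sb->s_root);
    if (sb_info->update_file)
            hypfs_remove(sb_info->update_file);
    kfree(sb->s_fs_info);
    sb->s_fs_info = NULL;

Assigning sb->s_root immediately after d_alloc_root, rather than at the very end, is what makes this safe: once the dentry tree starts growing, kill_super can find and free it.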
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3fa0a10..4929286 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -242,6 +242,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_free_physmem(kvm);
free_page((unsigned long)(kvm->arch.sca));
debug_unregister(kvm->arch.dbf);
+ cleanup_srcu_struct(&kvm->srcu);
kfree(kvm);
}
@@ -690,14 +691,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}
/* Section: memory related */
-int kvm_arch_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old,
- int user_alloc)
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
+ int user_alloc)
{
- int i;
- struct kvm_vcpu *vcpu;
-
/* A few sanity checks. We can have exactly one memory slot which has
to start at guest virtual zero and which has to be located at a
page boundary in userland and which has to end at a page boundary.
@@ -720,14 +719,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
if (!user_alloc)
return -EINVAL;
+ return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+
/* request update of sie control block for all available vcpus */
kvm_for_each_vcpu(i, vcpu, kvm) {
if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
continue;
kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
}
-
- return 0;
}
void kvm_arch_flush_shadow(struct kvm *kvm)
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 06cce82..60f09ab 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -67,10 +67,14 @@ static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
{
+ int idx;
struct kvm_memory_slot *mem;
+ struct kvm_memslots *memslots;
- down_read(&vcpu->kvm->slots_lock);
- mem = &vcpu->kvm->memslots[0];
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ memslots = rcu_dereference(vcpu->kvm->memslots);
+
+ mem = &memslots->memslots[0];
vcpu->arch.sie_block->gmsor = mem->userspace_addr;
vcpu->arch.sie_block->gmslm =
@@ -78,7 +82,7 @@ static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
(mem->npages << PAGE_SHIFT) +
VIRTIODESCSPACE - 1ul;
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
/* implemented in priv.c */
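
The conversion follows the stock SRCU read-side idiom — nothing here is new API:

    int idx = srcu_read_lock(&kvm->srcu);
    struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
    /* ... read slots->memslots[...] ... */
    srcu_read_unlock(&kvm->srcu, idx);

Readers pay no lock contention; writers publish a new memslots array and synchronize before freeing the old one. The cleanup_srcu_struct() calls added in the arch destroy paths above release the per-kvm SRCU state at teardown.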
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig
index 99a1f19..6a8d078 100644
--- a/arch/sparc/configs/sparc32_defconfig
+++ b/arch/sparc/configs/sparc32_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 11 23:20:31 2010
+# Linux kernel version: 2.6.33
+# Wed Mar 3 02:52:23 2010
#
# CONFIG_64BIT is not set
CONFIG_SPARC=y
@@ -9,6 +9,8 @@ CONFIG_SPARC32=y
# CONFIG_SPARC64 is not set
CONFIG_ARCH_DEFCONFIG="arch/sparc/configs/sparc32_defconfig"
CONFIG_BITS=32
+CONFIG_GENERIC_TIME=y
+CONFIG_ARCH_USES_GETTIMEOFFSET=y
CONFIG_AUDIT_ARCH=y
CONFIG_MMU=y
CONFIG_HIGHMEM=y
@@ -48,11 +50,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -68,6 +65,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -211,7 +209,6 @@ CONFIG_SBUSCHAR=y
CONFIG_PCI=y
CONFIG_PCI_SYSCALL=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
# CONFIG_PCI_DEBUG is not set
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
@@ -232,7 +229,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
CONFIG_XFRM_USER=m
@@ -379,11 +375,13 @@ CONFIG_MISC_DEVICES=y
# CONFIG_TIFM_CORE is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_HP_ILO is not set
+# CONFIG_TI_DAC7512 is not set
# CONFIG_C2PORT is not set
#
# EEPROM support
#
+# CONFIG_EEPROM_AT25 is not set
# CONFIG_EEPROM_93CX6 is not set
# CONFIG_CB710_CORE is not set
CONFIG_HAVE_IDE=y
@@ -507,7 +505,9 @@ CONFIG_SUNQE=m
# CONFIG_SUNGEM is not set
# CONFIG_CASSINI is not set
# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_ENC28J60 is not set
# CONFIG_ETHOC is not set
+# CONFIG_GRETH is not set
# CONFIG_DNET is not set
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
@@ -521,6 +521,7 @@ CONFIG_SUNQE=m
# CONFIG_NET_PCI is not set
# CONFIG_B44 is not set
# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
# CONFIG_KS8851_MLL is not set
# CONFIG_ATL2 is not set
CONFIG_NETDEV_1000=y
@@ -563,6 +564,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
+# CONFIG_QLCNIC is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
@@ -665,6 +667,7 @@ CONFIG_DEVKMEM=y
#
# Non-8250 serial port support
#
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_SUNCORE=y
CONFIG_SERIAL_SUNZILOG=y
CONFIG_SERIAL_SUNZILOG_CONSOLE=y
@@ -689,7 +692,23 @@ CONFIG_HW_RANDOM=m
# CONFIG_TCG_TPM is not set
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
-# CONFIG_SPI is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BITBANG=m
+CONFIG_SPI_XILINX=m
+CONFIG_SPI_XILINX_PLTFM=m
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
#
# PPS support
@@ -706,10 +725,13 @@ CONFIG_HWMON=y
#
# Native drivers
#
+# CONFIG_SENSORS_ADCXX is not set
# CONFIG_SENSORS_I5K_AMB is not set
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_MAX1111 is not set
# CONFIG_SENSORS_PC87360 is not set
# CONFIG_SENSORS_PC87427 is not set
# CONFIG_SENSORS_SIS5595 is not set
@@ -720,6 +742,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_VT8231 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
# CONFIG_THERMAL is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -736,6 +759,8 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB4500_CORE is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -743,6 +768,7 @@ CONFIG_SSB_POSSIBLE=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
# CONFIG_FB is not set
@@ -808,6 +834,14 @@ CONFIG_RTC_INTF_DEV=y
#
# SPI RTC drivers
#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
@@ -1180,9 +1214,11 @@ CONFIG_CRC32=y
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_DECOMPRESS=y
CONFIG_DECOMPRESS_GZIP=y
CONFIG_DECOMPRESS_BZIP2=y
CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_LZO=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 41c5a56..56e3163 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Wed Jan 20 16:31:47 2010
+# Linux kernel version: 2.6.33
+# Wed Mar 3 02:54:29 2010
#
CONFIG_64BIT=y
CONFIG_SPARC=y
@@ -55,14 +55,10 @@ CONFIG_TREE_RCU=y
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=64
# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_RCU_FAST_NO_HZ is not set
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=18
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_RELAY=y
@@ -77,6 +73,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -105,7 +102,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
CONFIG_PERF_COUNTERS=y
# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
@@ -266,7 +262,6 @@ CONFIG_PCI_DOMAINS=y
CONFIG_PCI_SYSCALL=y
CONFIG_ARCH_SUPPORTS_MSI=y
CONFIG_PCI_MSI=y
-# CONFIG_PCI_LEGACY is not set
# CONFIG_PCI_DEBUG is not set
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
@@ -290,7 +285,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
CONFIG_XFRM=y
CONFIG_XFRM_USER=m
@@ -425,10 +419,6 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
-
-#
-# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
-#
# CONFIG_BLK_DEV_DRBD is not set
CONFIG_BLK_DEV_NBD=m
# CONFIG_BLK_DEV_SX8 is not set
@@ -677,6 +667,7 @@ CONFIG_SUNGEM=m
CONFIG_SUNVNET=m
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_ETHOC is not set
+# CONFIG_GRETH is not set
# CONFIG_DNET is not set
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
@@ -691,6 +682,7 @@ CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_E100 is not set
@@ -741,6 +733,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_CHELSIO_T3 is not set
# CONFIG_ENIC is not set
# CONFIG_IXGBE is not set
+# CONFIG_IXGBEVF is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
# CONFIG_VXGE is not set
@@ -751,6 +744,7 @@ CONFIG_NIU=m
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
+# CONFIG_QLCNIC is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
@@ -1028,6 +1022,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_SMSC47M192 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_AMC6821 is not set
# CONFIG_SENSORS_THMC50 is not set
# CONFIG_SENSORS_TMP401 is not set
# CONFIG_SENSORS_TMP421 is not set
@@ -1076,6 +1071,7 @@ CONFIG_SSB_POSSIBLE=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1279,6 +1275,7 @@ CONFIG_SND_ALI5451=m
# CONFIG_SND_YMFPCI is not set
CONFIG_SND_USB=y
# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_UA101 is not set
# CONFIG_SND_USB_CAIAQ is not set
CONFIG_SND_SPARC=y
# CONFIG_SND_SUN_AMD7930 is not set
@@ -1301,6 +1298,7 @@ CONFIG_USB_HIDDEV=y
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
@@ -1317,14 +1315,19 @@ CONFIG_HID_KENSINGTON=y
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
CONFIG_HID_MICROSOFT=y
+# CONFIG_HID_MOSART is not set
CONFIG_HID_MONTEREY=y
CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
CONFIG_HID_PANTHERLORD=y
# CONFIG_PANTHERLORD_FF is not set
CONFIG_HID_PETALYNX=y
+# CONFIG_HID_QUANTA is not set
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
+# CONFIG_HID_STANTUM is not set
CONFIG_HID_SUNPLUS=y
CONFIG_HID_GREENASIA=y
# CONFIG_GREENASIA_FF is not set
@@ -1807,6 +1810,7 @@ CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_MANAGER2=y
CONFIG_CRYPTO_GF128MUL=m
CONFIG_CRYPTO_NULL=m
+# CONFIG_CRYPTO_PCRYPT is not set
CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_AUTHENC=y
@@ -1904,9 +1908,11 @@ CONFIG_CRC32=y
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_DECOMPRESS=y
CONFIG_DECOMPRESS_GZIP=y
CONFIG_DECOMPRESS_BZIP2=y
CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_LZO=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h
index 679c750..2889574 100644
--- a/arch/sparc/include/asm/io_32.h
+++ b/arch/sparc/include/asm/io_32.h
@@ -249,10 +249,14 @@ extern void iounmap(volatile void __iomem *addr);
#define ioread8(X) readb(X)
#define ioread16(X) readw(X)
+#define ioread16be(X) __raw_readw(X)
#define ioread32(X) readl(X)
+#define ioread32be(X) __raw_readl(X)
#define iowrite8(val,X) writeb(val,X)
#define iowrite16(val,X) writew(val,X)
+#define iowrite16be(val,X) __raw_writew(val,X)
#define iowrite32(val,X) writel(val,X)
+#define iowrite32be(val,X) __raw_writel(val,X)
static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
{
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 4aee21d..9517d06 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -468,10 +468,14 @@ static inline void iounmap(volatile void __iomem *addr)
#define ioread8(X) readb(X)
#define ioread16(X) readw(X)
+#define ioread16be(X) __raw_readw(X)
#define ioread32(X) readl(X)
+#define ioread32be(X) __raw_readl(X)
#define iowrite8(val,X) writeb(val,X)
#define iowrite16(val,X) writew(val,X)
+#define iowrite16be(val,X) __raw_writew(val,X)
#define iowrite32(val,X) writel(val,X)
+#define iowrite32be(val,X) __raw_writel(val,X)
/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
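Note: both sparc variants now gain the big-endian MMIO accessors by aliasing them to the __raw_* helpers, which do no byte swapping; since the CPU is big-endian, a raw access already has big-endian semantics. A hedged usage sketch, with regs a void __iomem * and the register constants made up:

	u32 ctrl = ioread32be(regs + CTRL_REG);		/* CTRL_REG is hypothetical */
	iowrite32be(ctrl | CTRL_ENABLE, regs + CTRL_REG);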
diff --git a/arch/sparc/include/asm/perfctr.h b/arch/sparc/include/asm/perfctr.h
index 8368730..8d8720a 100644
--- a/arch/sparc/include/asm/perfctr.h
+++ b/arch/sparc/include/asm/perfctr.h
@@ -10,8 +10,8 @@
* from enumeration below. The meaning of further arguments
* are determined by the operation code.
*
- * int sys_perfctr(int opcode, unsigned long arg0,
- * unsigned long arg1, unsigned long arg2)
+ * NOTE: This system call is no longer provided, use the perf_events
+ * infrastructure.
*
* Pointers which are passed by the user are pointers to 64-bit
* integers.
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index d47a98e..d24cfe1 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -143,15 +143,7 @@ do { \
* and 2 stores in this critical code path. -DaveM
*/
#define switch_to(prev, next, last) \
-do { if (test_thread_flag(TIF_PERFCTR)) { \
- unsigned long __tmp; \
- read_pcr(__tmp); \
- current_thread_info()->pcr_reg = __tmp; \
- read_pic(__tmp); \
- current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp);\
- current_thread_info()->kernel_cntd1 += ((__tmp) >> 32); \
- } \
- flush_tlb_pending(); \
+do { flush_tlb_pending(); \
save_and_clear_fpu(); \
/* If you are tempted to conditionalize the following */ \
/* so that ASI is only written if it changes, think again. */ \
@@ -197,11 +189,6 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
"l1", "l2", "l3", "l4", "l5", "l6", "l7", \
"i0", "i1", "i2", "i3", "i4", "i5", \
"o0", "o1", "o2", "o3", "o4", "o5", "o7"); \
- /* If you fuck with this, update ret_from_syscall code too. */ \
- if (test_thread_flag(TIF_PERFCTR)) { \
- write_pcr(current_thread_info()->pcr_reg); \
- reset_pic(); \
- } \
} while(0)
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 39be9f2..9e2d944 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -58,11 +58,6 @@ struct thread_info {
unsigned long gsr[7];
unsigned long xfsr[7];
- __u64 __user *user_cntd0;
- __u64 __user *user_cntd1;
- __u64 kernel_cntd0, kernel_cntd1;
- __u64 pcr_reg;
-
struct restart_block restart_block;
struct pt_regs *kern_una_regs;
@@ -96,15 +91,10 @@ struct thread_info {
#define TI_RWIN_SPTRS 0x000003c8
#define TI_GSR 0x00000400
#define TI_XFSR 0x00000438
-#define TI_USER_CNTD0 0x00000470
-#define TI_USER_CNTD1 0x00000478
-#define TI_KERN_CNTD0 0x00000480
-#define TI_KERN_CNTD1 0x00000488
-#define TI_PCR 0x00000490
-#define TI_RESTART_BLOCK 0x00000498
-#define TI_KUNA_REGS 0x000004c8
-#define TI_KUNA_INSN 0x000004d0
-#define TI_FPREGS 0x00000500
+#define TI_RESTART_BLOCK 0x00000470
+#define TI_KUNA_REGS 0x000004a0
+#define TI_KUNA_INSN 0x000004a8
+#define TI_FPREGS 0x000004c0
/* We embed this in the uppermost byte of thread_info->flags */
#define FAULT_CODE_WRITE 0x01 /* Write access, implies D-TLB */
@@ -199,7 +189,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
*
* On trap return we need to test several values:
*
- * user: need_resched, notify_resume, sigpending, wsaved, perfctr
+ * user: need_resched, notify_resume, sigpending, wsaved
* kernel: fpdepth
*
* So to check for work in the kernel case we simply load the fpdepth
@@ -220,7 +210,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
-#define TIF_PERFCTR 4 /* performance counters active */
+/* flag bit 4 is available */
#define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
/* flag bit 6 is available */
#define TIF_32BIT 7 /* 32-bit binary */
@@ -241,7 +231,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
-#define _TIF_PERFCTR (1<<TIF_PERFCTR)
#define _TIF_UNALIGNED (1<<TIF_UNALIGNED)
#define _TIF_32BIT (1<<TIF_32BIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
@@ -252,7 +241,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
_TIF_DO_NOTIFY_RESUME_MASK | \
- _TIF_NEED_RESCHED | _TIF_PERFCTR)
+ _TIF_NEED_RESCHED)
#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
/*
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index 4f53a23..c011b93 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -48,7 +48,6 @@ extern void __init boot_cpu_id_too_large(int cpu);
extern unsigned int dcache_parity_tl1_occurred;
extern unsigned int icache_parity_tl1_occurred;
-extern asmlinkage void update_perfctrs(void);
extern asmlinkage void sparc_breakpoint(struct pt_regs *regs);
extern void timer_interrupt(int irq, struct pt_regs *regs);
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index d242a73..b287b62 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -21,7 +21,6 @@
#include <asm/perf_event.h>
#include <asm/ptrace.h>
-#include <asm/local.h>
#include <asm/pcr.h>
/* We don't have a real NMI on sparc64, but we can fake one
@@ -113,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
touched = 1;
}
if (!touched && __get_cpu_var(last_irq_sum) == sum) {
- __this_cpu_inc(per_cpu_var(alert_counter));
- if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz)
+ __this_cpu_inc(alert_counter);
+ if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
die_nmi("BUG: NMI Watchdog detected LOCKUP",
regs, panic_on_timeout);
} else {
__get_cpu_var(last_irq_sum) = sum;
- __this_cpu_write(per_cpu_var(alert_counter), 0);
+ __this_cpu_write(alert_counter, 0);
}
if (__get_cpu_var(wd_enabled)) {
write_pic(picl_value(nmi_hz));
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index cb70476..a5cf386 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -352,12 +352,6 @@ void exit_thread(void)
else
t->utraps[0]--;
}
-
- if (test_and_clear_thread_flag(TIF_PERFCTR)) {
- t->user_cntd0 = t->user_cntd1 = NULL;
- t->pcr_reg = 0;
- write_pcr(0);
- }
}
void flush_thread(void)
@@ -371,13 +365,6 @@ void flush_thread(void)
set_thread_wsaved(0);
- /* Turn off performance counters if on. */
- if (test_and_clear_thread_flag(TIF_PERFCTR)) {
- t->user_cntd0 = t->user_cntd1 = NULL;
- t->pcr_reg = 0;
- write_pcr(0);
- }
-
/* Clear FPU register state. */
t->fpsaved[0] = 0;
@@ -591,16 +578,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
t->kregs->u_regs[UREG_FP] =
((unsigned long) child_sf) - STACK_BIAS;
- /* Special case, if we are spawning a kernel thread from
- * a userspace task (usermode helper, NFS or similar), we
- * must disable performance counters in the child because
- * the address space and protection realm are changing.
- */
- if (t->flags & _TIF_PERFCTR) {
- t->user_cntd0 = t->user_cntd1 = NULL;
- t->pcr_reg = 0;
- t->flags &= ~_TIF_PERFCTR;
- }
t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
t->kregs->u_regs[UREG_G6] = (unsigned long) t;
t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index fd3cee4..83f1873 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -65,48 +65,6 @@ __handle_user_windows:
ba,pt %xcc, __handle_user_windows_continue
andn %l1, %l4, %l1
-__handle_perfctrs:
- call update_perfctrs
- wrpr %g0, RTRAP_PSTATE, %pstate
- wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- ldub [%g6 + TI_WSAVED], %o2
- brz,pt %o2, 1f
- nop
- /* Redo userwin+sched+sig checks */
- call fault_in_user_windows
-
- wrpr %g0, RTRAP_PSTATE, %pstate
- wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- ldx [%g6 + TI_FLAGS], %l0
- andcc %l0, _TIF_NEED_RESCHED, %g0
- be,pt %xcc, 1f
-
- nop
- call schedule
- wrpr %g0, RTRAP_PSTATE, %pstate
- wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- ldx [%g6 + TI_FLAGS], %l0
-1: andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
-
- be,pt %xcc, __handle_perfctrs_continue
- sethi %hi(TSTATE_PEF), %o0
- mov %l5, %o1
- add %sp, PTREGS_OFF, %o0
- mov %l0, %o2
- call do_notify_resume
-
- wrpr %g0, RTRAP_PSTATE, %pstate
- wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- /* Signal delivery can modify pt_regs tstate, so we must
- * reload it.
- */
- ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
- sethi %hi(0xf << 20), %l4
- and %l1, %l4, %l4
- andn %l1, %l4, %l1
- ba,pt %xcc, __handle_perfctrs_continue
-
- sethi %hi(TSTATE_PEF), %o0
__handle_userfpu:
rd %fprs, %l5
andcc %l5, FPRS_FEF, %g0
@@ -149,11 +107,11 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
rtrap_irq:
rtrap:
#ifndef CONFIG_SMP
- sethi %hi(per_cpu____cpu_data), %l0
- lduw [%l0 + %lo(per_cpu____cpu_data)], %l1
+ sethi %hi(__cpu_data), %l0
+ lduw [%l0 + %lo(__cpu_data)], %l1
#else
- sethi %hi(per_cpu____cpu_data), %l0
- or %l0, %lo(per_cpu____cpu_data), %l0
+ sethi %hi(__cpu_data), %l0
+ or %l0, %lo(__cpu_data), %l0
lduw [%l0 + %g5], %l1
#endif
cmp %l1, 0
@@ -191,9 +149,9 @@ rtrap_no_irq_enable:
* take until the next local IRQ before the signal/resched
* event would be handled.
*
- * This also means that if we have to deal with performance
- * counters or user windows, we have to redo all of these
- * sched+signal checks with IRQs disabled.
+ * This also means that if we have to deal with user
+ * windows, we have to redo all of these sched+signal checks
+ * with IRQs disabled.
*/
to_user: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
wrpr 0, %pil
@@ -214,12 +172,8 @@ __handle_signal_continue:
brnz,pn %o2, __handle_user_windows
nop
__handle_user_windows_continue:
- ldx [%g6 + TI_FLAGS], %l5
- andcc %l5, _TIF_PERFCTR, %g0
sethi %hi(TSTATE_PEF), %o0
- bne,pn %xcc, __handle_perfctrs
-__handle_perfctrs_continue:
- andcc %l1, %o0, %g0
+ andcc %l1, %o0, %g0
/* This fpdepth clear is necessary for non-syscall rtraps only */
user_nowork:
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index e706113..46a76ba 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -51,7 +51,6 @@ SIGN1(sys32_exit_group, sys_exit_group, %o0)
SIGN1(sys32_wait4, compat_sys_wait4, %o2)
SIGN1(sys32_creat, sys_creat, %o1)
SIGN1(sys32_mknod, sys_mknod, %o1)
-SIGN1(sys32_perfctr, sys_perfctr, %o0)
SIGN1(sys32_umount, sys_umount, %o1)
SIGN1(sys32_signal, sys_signal, %o0)
SIGN1(sys32_access, sys_access, %o1)
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index d77f543..cb1bef6 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -27,7 +27,6 @@
#include <asm/uaccess.h>
#include <asm/utrap.h>
-#include <asm/perfctr.h>
#include <asm/unistd.h>
#include "entry.h"
@@ -766,109 +765,6 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
return ret;
}
-/* Invoked by rtrap code to update performance counters in
- * user space.
- */
-asmlinkage void update_perfctrs(void)
-{
- unsigned long pic, tmp;
-
- read_pic(pic);
- tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
- __put_user(tmp, current_thread_info()->user_cntd0);
- tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
- __put_user(tmp, current_thread_info()->user_cntd1);
- reset_pic();
-}
-
-SYSCALL_DEFINE4(perfctr, int, opcode, unsigned long, arg0,
- unsigned long, arg1, unsigned long, arg2)
-{
- int err = 0;
-
- switch(opcode) {
- case PERFCTR_ON:
- current_thread_info()->pcr_reg = arg2;
- current_thread_info()->user_cntd0 = (u64 __user *) arg0;
- current_thread_info()->user_cntd1 = (u64 __user *) arg1;
- current_thread_info()->kernel_cntd0 =
- current_thread_info()->kernel_cntd1 = 0;
- write_pcr(arg2);
- reset_pic();
- set_thread_flag(TIF_PERFCTR);
- break;
-
- case PERFCTR_OFF:
- err = -EINVAL;
- if (test_thread_flag(TIF_PERFCTR)) {
- current_thread_info()->user_cntd0 =
- current_thread_info()->user_cntd1 = NULL;
- current_thread_info()->pcr_reg = 0;
- write_pcr(0);
- clear_thread_flag(TIF_PERFCTR);
- err = 0;
- }
- break;
-
- case PERFCTR_READ: {
- unsigned long pic, tmp;
-
- if (!test_thread_flag(TIF_PERFCTR)) {
- err = -EINVAL;
- break;
- }
- read_pic(pic);
- tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
- err |= __put_user(tmp, current_thread_info()->user_cntd0);
- tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
- err |= __put_user(tmp, current_thread_info()->user_cntd1);
- reset_pic();
- break;
- }
-
- case PERFCTR_CLRPIC:
- if (!test_thread_flag(TIF_PERFCTR)) {
- err = -EINVAL;
- break;
- }
- current_thread_info()->kernel_cntd0 =
- current_thread_info()->kernel_cntd1 = 0;
- reset_pic();
- break;
-
- case PERFCTR_SETPCR: {
- u64 __user *user_pcr = (u64 __user *)arg0;
-
- if (!test_thread_flag(TIF_PERFCTR)) {
- err = -EINVAL;
- break;
- }
- err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
- write_pcr(current_thread_info()->pcr_reg);
- current_thread_info()->kernel_cntd0 =
- current_thread_info()->kernel_cntd1 = 0;
- reset_pic();
- break;
- }
-
- case PERFCTR_GETPCR: {
- u64 __user *user_pcr = (u64 __user *)arg0;
-
- if (!test_thread_flag(TIF_PERFCTR)) {
- err = -EINVAL;
- break;
- }
- err |= __put_user(current_thread_info()->pcr_reg, user_pcr);
- break;
- }
-
- default:
- err = -EINVAL;
- break;
- };
- return err;
-}
-
/*
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index dc4a458..1d7e274 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -110,31 +110,12 @@ sys_clone:
.globl ret_from_syscall
ret_from_syscall:
- /* Clear current_thread_info()->new_child, and
- * check performance counter stuff too.
- */
+ /* Clear current_thread_info()->new_child. */
stb %g0, [%g6 + TI_NEW_CHILD]
ldx [%g6 + TI_FLAGS], %l0
call schedule_tail
mov %g7, %o0
- andcc %l0, _TIF_PERFCTR, %g0
- be,pt %icc, 1f
- nop
- ldx [%g6 + TI_PCR], %o7
- wr %g0, %o7, %pcr
-
- /* Blackbird errata workaround. See commentary in
- * smp.c:smp_percpu_timer_interrupt() for more
- * information.
- */
- ba,pt %xcc, 99f
- nop
-
- .align 64
-99: wr %g0, %g0, %pic
- rd %pic, %g0
-
-1: ba,pt %xcc, ret_sys_call
+ ba,pt %xcc, ret_sys_call
ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0
.globl sparc_exit
diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h
index d2f999a..68312fe 100644
--- a/arch/sparc/kernel/systbls.h
+++ b/arch/sparc/kernel/systbls.h
@@ -36,8 +36,6 @@ extern asmlinkage long sys_rt_sigaction(int sig,
struct sigaction __user *oact,
void __user *restorer,
size_t sigsetsize);
-extern asmlinkage long sys_perfctr(int opcode, unsigned long arg0,
- unsigned long arg1, unsigned long arg2);
extern asmlinkage void sparc64_set_context(struct pt_regs *regs);
extern asmlinkage void sparc64_get_context(struct pt_regs *regs);
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index e575b46..1761425 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -21,7 +21,7 @@ sys_call_table32:
/*0*/ .word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write
/*5*/ .word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link
/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys32_mknod
-/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys32_perfctr, sys32_lseek
+/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys32_lseek
/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
/*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys_pause
/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
@@ -96,7 +96,7 @@ sys_call_table:
/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
/*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link
/*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
-/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_perfctr, sys_lseek
+/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_nis_syscall, sys_lseek
/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
/*25*/ .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
/*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 10f7bb9..bdc05a2 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2548,15 +2548,6 @@ void __init trap_init(void)
rwbuf_stkptrs) ||
TI_GSR != offsetof(struct thread_info, gsr) ||
TI_XFSR != offsetof(struct thread_info, xfsr) ||
- TI_USER_CNTD0 != offsetof(struct thread_info,
- user_cntd0) ||
- TI_USER_CNTD1 != offsetof(struct thread_info,
- user_cntd1) ||
- TI_KERN_CNTD0 != offsetof(struct thread_info,
- kernel_cntd0) ||
- TI_KERN_CNTD1 != offsetof(struct thread_info,
- kernel_cntd1) ||
- TI_PCR != offsetof(struct thread_info, pcr_reg) ||
TI_PRE_COUNT != offsetof(struct thread_info,
preempt_count) ||
TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
index 4b7c937..2d8b70d 100644
--- a/arch/sparc/prom/p1275.c
+++ b/arch/sparc/prom/p1275.c
@@ -32,10 +32,9 @@ extern void prom_cif_interface(void);
extern void prom_cif_callback(void);
/*
- * This provides SMP safety on the p1275buf. prom_callback() drops this lock
- * to allow recursive acquisition.
+ * This provides SMP safety on the p1275buf.
*/
-DEFINE_SPINLOCK(prom_entry_lock);
+DEFINE_RAW_SPINLOCK(prom_entry_lock);
long p1275_cmd(const char *service, long fmt, ...)
{
@@ -47,7 +46,9 @@ long p1275_cmd(const char *service, long fmt, ...)
p = p1275buf.prom_buffer;
- spin_lock_irqsave(&prom_entry_lock, flags);
+ raw_local_save_flags(flags);
+ raw_local_irq_restore(PIL_NMI);
+ raw_spin_lock(&prom_entry_lock);
p1275buf.prom_args[0] = (unsigned long)p; /* service */
strcpy (p, service);
@@ -139,7 +140,8 @@ long p1275_cmd(const char *service, long fmt, ...)
va_end(list);
x = p1275buf.prom_args [nargs + 3];
- spin_unlock_irqrestore(&prom_entry_lock, flags);
+ raw_spin_unlock(&prom_entry_lock);
+ raw_local_irq_restore(flags);
return x;
}
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 3b3c366..de317d0 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -140,7 +140,7 @@ void mconsole_proc(struct mc_request *req)
goto out;
}
- err = may_open(&nd.path, MAY_READ, FMODE_READ);
+ err = may_open(&nd.path, MAY_READ, O_RDONLY);
if (result) {
mconsole_reply(req, "Failed to open file", 1, 0);
path_put(&nd.path);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0896008..f15f37b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -31,6 +31,7 @@ config X86
select ARCH_WANT_FRAME_POINTERS
select HAVE_DMA_ATTRS
select HAVE_KRETPROBES
+ select HAVE_OPTPROBES
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER
@@ -184,6 +185,9 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y
+config HAVE_EARLY_RES
+ def_bool y
+
config HAVE_INTEL_TXT
def_bool y
depends on EXPERIMENTAL && DMAR && ACPI
@@ -569,6 +573,18 @@ config PARAVIRT_DEBUG
Enable to debug paravirt_ops internals. Specifically, BUG if
a paravirt_op is missing when it is called.
+config NO_BOOTMEM
+ default y
+ bool "Disable Bootmem code"
+ ---help---
+ Use early_res directly instead of bootmem before slab is ready.
+ The allocator layers, from final to earliest, are:
+ - allocator (buddy) [generic]
+ - early allocator (bootmem) [generic]
+ - very early allocator (reserve_early*()) [x86]
+ - very very early allocator (early brk model) [x86]
+ This removes the bootmem layer between the early allocators and
+ the final (buddy) allocator.
+
+
config MEMTEST
bool "Memtest"
---help---
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 9046e4a..280c019 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -327,7 +327,6 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
current->mm->free_area_cache = TASK_UNMAPPED_BASE;
current->mm->cached_hole_size = 0;
- current->mm->mmap = NULL;
install_exec_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 9f828f8..493092e 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -11,6 +11,7 @@ header-y += sigcontext32.h
header-y += ucontext.h
header-y += processor-flags.h
header-y += hw_breakpoint.h
+header-y += hyperv.h
unifdef-y += e820.h
unifdef-y += ist.h
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index f1e253c..b09ec55 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -165,10 +165,12 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
* invalid instruction possible) or if the instructions are changed from a
* consistent state to another consistent state atomically.
* More care must be taken when modifying code in the SMP case because of
- * Intel's errata.
+ * Intel's errata. text_poke_smp() takes care of that errata, but it still
+ * doesn't support modifying code from NMI/MCE handlers.
* On the local CPU you need to be protected against NMI or MCE handlers seeing an
* inconsistent instruction while you patch.
*/
extern void *text_poke(void *addr, const void *opcode, size_t len);
+extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
#endif /* _ASM_X86_ALTERNATIVE_H */
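Note: text_poke_smp() is what the optimized-kprobes support added elsewhere in this series uses to plant its detour jumps. A rough sketch of patching in a 5-byte relative jump, assuming addr and target point into kernel text (the opcode and size constants are the ones defined in the kprobes.h hunk further down; the function name is made up):

	/* Sketch: overwrite the code at addr with "jmp target". */
	static void plant_jump(void *addr, void *target)
	{
		u8 jmp[RELATIVEJUMP_SIZE];
		s32 rel = (s32)((long)target - ((long)addr + RELATIVEJUMP_SIZE));

		jmp[0] = RELATIVEJUMP_OPCODE;			/* 0xe9: jmp rel32 */
		memcpy(&jmp[1], &rel, RELATIVE_ADDR_SIZE);	/* rel32 displacement */
		text_poke_smp(addr, jmp, RELATIVEJUMP_SIZE);	/* SMP-safe; not NMI/MCE-safe */
	}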
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 761249e..0e22296 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -111,11 +111,8 @@ extern unsigned long end_user_pfn;
extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
-extern void reserve_early(u64 start, u64 end, char *name);
-extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
-extern void free_early(u64 start, u64 end);
-extern void early_res_to_bootmem(u64 start, u64 end);
extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
+#include <linux/early_res.h>
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 014c2b8..a726650 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -66,10 +66,6 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
-#ifndef CONFIG_PARAVIRT
-#define kmap_atomic_pte(page, type) kmap_atomic(page, type)
-#endif
-
#define flush_cache_kmaps() do { } while (0)
extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/asm/hyperv.h
new file mode 100644
index 0000000..e153a2b
--- /dev/null
+++ b/arch/x86/include/asm/hyperv.h
@@ -0,0 +1,186 @@
+#ifndef _ASM_X86_KVM_HYPERV_H
+#define _ASM_X86_KVM_HYPERV_H
+
+#include <linux/types.h>
+
+/*
+ * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
+ * is set by CPUID(HvCpuIdFunctionVersionAndFeatures).
+ */
+#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000
+#define HYPERV_CPUID_INTERFACE 0x40000001
+#define HYPERV_CPUID_VERSION 0x40000002
+#define HYPERV_CPUID_FEATURES 0x40000003
+#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004
+#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
+
+/*
+ * Feature identification. EAX indicates which features are available
+ * to the partition based upon the current partition privileges.
+ */
+
+/* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */
+#define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0)
+/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/
+#define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1)
+/*
+ * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM
+ * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available
+ */
+#define HV_X64_MSR_SYNIC_AVAILABLE (1 << 2)
+/*
+ * Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through
+ * HV_X64_MSR_STIMER3_COUNT) available
+ */
+#define HV_X64_MSR_SYNTIMER_AVAILABLE (1 << 3)
+/*
+ * APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR)
+ * are available
+ */
+#define HV_X64_MSR_APIC_ACCESS_AVAILABLE (1 << 4)
+/* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available*/
+#define HV_X64_MSR_HYPERCALL_AVAILABLE (1 << 5)
+/* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available*/
+#define HV_X64_MSR_VP_INDEX_AVAILABLE (1 << 6)
+/* Virtual system reset MSR (HV_X64_MSR_RESET) is available*/
+#define HV_X64_MSR_RESET_AVAILABLE (1 << 7)
+ /*
+ * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE,
+ * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE,
+ * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available
+ */
+#define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8)
+
+/*
+ * Feature identification: EBX indicates which flags were specified at
+ * partition creation. The format is the same as the partition creation
+ * flag structure defined in section Partition Creation Flags.
+ */
+#define HV_X64_CREATE_PARTITIONS (1 << 0)
+#define HV_X64_ACCESS_PARTITION_ID (1 << 1)
+#define HV_X64_ACCESS_MEMORY_POOL (1 << 2)
+#define HV_X64_ADJUST_MESSAGE_BUFFERS (1 << 3)
+#define HV_X64_POST_MESSAGES (1 << 4)
+#define HV_X64_SIGNAL_EVENTS (1 << 5)
+#define HV_X64_CREATE_PORT (1 << 6)
+#define HV_X64_CONNECT_PORT (1 << 7)
+#define HV_X64_ACCESS_STATS (1 << 8)
+#define HV_X64_DEBUGGING (1 << 11)
+#define HV_X64_CPU_POWER_MANAGEMENT (1 << 12)
+#define HV_X64_CONFIGURE_PROFILER (1 << 13)
+
+/*
+ * Feature identification. EDX indicates which miscellaneous features
+ * are available to the partition.
+ */
+/* The MWAIT instruction is available (per section MONITOR / MWAIT) */
+#define HV_X64_MWAIT_AVAILABLE (1 << 0)
+/* Guest debugging support is available */
+#define HV_X64_GUEST_DEBUGGING_AVAILABLE (1 << 1)
+/* Performance Monitor support is available*/
+#define HV_X64_PERF_MONITOR_AVAILABLE (1 << 2)
+/* Support for physical CPU dynamic partitioning events is available*/
+#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1 << 3)
+/*
+ * Support for passing hypercall input parameter block via XMM
+ * registers is available
+ */
+#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4)
+/* Support for a virtual guest idle state is available */
+#define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5)
+
+/*
+ * Implementation recommendations. Indicates which behaviors the hypervisor
+ * recommends the OS implement for optimal performance.
+ */
+ /*
+ * Recommend using hypercall for address space switches rather
+ * than MOV to CR3 instruction
+ */
+#define HV_X64_MWAIT_RECOMMENDED (1 << 0)
+/* Recommend using hypercall for local TLB flushes rather
+ * than INVLPG or MOV to CR3 instructions */
+#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED (1 << 1)
+/*
+ * Recommend using hypercall for remote TLB flushes rather
+ * than inter-processor interrupts
+ */
+#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED (1 << 2)
+/*
+ * Recommend using MSRs for accessing APIC registers
+ * EOI, ICR and TPR rather than their memory-mapped counterparts
+ */
+#define HV_X64_APIC_ACCESS_RECOMMENDED (1 << 3)
+/* Recommend using the hypervisor-provided MSR to initiate a system RESET */
+#define HV_X64_SYSTEM_RESET_RECOMMENDED (1 << 4)
+/*
+ * Recommend using relaxed timing for this partition. If used,
+ * the VM should disable any watchdog timeouts that rely on the
+ * timely delivery of external interrupts
+ */
+#define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5)
+
+/* MSR used to identify the guest OS. */
+#define HV_X64_MSR_GUEST_OS_ID 0x40000000
+
+/* MSR used to setup pages used to communicate with the hypervisor. */
+#define HV_X64_MSR_HYPERCALL 0x40000001
+
+/* MSR used to provide vcpu index */
+#define HV_X64_MSR_VP_INDEX 0x40000002
+
+/* Define the virtual APIC registers */
+#define HV_X64_MSR_EOI 0x40000070
+#define HV_X64_MSR_ICR 0x40000071
+#define HV_X64_MSR_TPR 0x40000072
+#define HV_X64_MSR_APIC_ASSIST_PAGE 0x40000073
+
+/* Define synthetic interrupt controller model specific registers. */
+#define HV_X64_MSR_SCONTROL 0x40000080
+#define HV_X64_MSR_SVERSION 0x40000081
+#define HV_X64_MSR_SIEFP 0x40000082
+#define HV_X64_MSR_SIMP 0x40000083
+#define HV_X64_MSR_EOM 0x40000084
+#define HV_X64_MSR_SINT0 0x40000090
+#define HV_X64_MSR_SINT1 0x40000091
+#define HV_X64_MSR_SINT2 0x40000092
+#define HV_X64_MSR_SINT3 0x40000093
+#define HV_X64_MSR_SINT4 0x40000094
+#define HV_X64_MSR_SINT5 0x40000095
+#define HV_X64_MSR_SINT6 0x40000096
+#define HV_X64_MSR_SINT7 0x40000097
+#define HV_X64_MSR_SINT8 0x40000098
+#define HV_X64_MSR_SINT9 0x40000099
+#define HV_X64_MSR_SINT10 0x4000009A
+#define HV_X64_MSR_SINT11 0x4000009B
+#define HV_X64_MSR_SINT12 0x4000009C
+#define HV_X64_MSR_SINT13 0x4000009D
+#define HV_X64_MSR_SINT14 0x4000009E
+#define HV_X64_MSR_SINT15 0x4000009F
+
+
+#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001
+#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12
+#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \
+ (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
+
+/* Declare the various hypercall operations. */
+#define HV_X64_HV_NOTIFY_LONG_SPIN_WAIT 0x0008
+
+#define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE 0x00000001
+#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT 12
+#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_MASK \
+ (~((1ull << HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
+
+#define HV_PROCESSOR_POWER_STATE_C0 0
+#define HV_PROCESSOR_POWER_STATE_C1 1
+#define HV_PROCESSOR_POWER_STATE_C2 2
+#define HV_PROCESSOR_POWER_STATE_C3 3
+
+/* hypercall status code */
+#define HV_STATUS_SUCCESS 0
+#define HV_STATUS_INVALID_HYPERCALL_CODE 2
+#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
+#define HV_STATUS_INVALID_ALIGNMENT 4
+
+#endif
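Note: as a worked example of the hypercall-page definitions above, a guest enabling the page would write the page-aligned guest-physical address plus the enable bit into HV_X64_MSR_HYPERCALL (the address here is made up):

	u64 gpa = 0x12345000ULL;	/* hypothetical page-aligned guest-physical address */
	u64 msr = (gpa & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK) |
		  HV_X64_MSR_HYPERCALL_ENABLE;
	/* wrmsrl(HV_X64_MSR_HYPERCALL, msr) in the guest enables the page. */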
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 58d7091..7ec65b1 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -24,7 +24,7 @@ extern unsigned int cached_irq_mask;
#define SLAVE_ICW4_DEFAULT 0x01
#define PIC_ICW4_AEOI 2
-extern spinlock_t i8259A_lock;
+extern raw_spinlock_t i8259A_lock;
extern void init_8259A(int auto_eoi);
extern void enable_8259A_irq(unsigned int irq);
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 7c7c16c..5f61f6e 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -160,6 +160,7 @@ extern int io_apic_get_redir_entries(int ioapic);
struct io_apic_irq_attr;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr);
+void setup_IO_APIC_irq_extra(u32 gsi);
extern int (*ioapic_renumber_irq)(int ioapic, int irq);
extern void ioapic_init_mappings(void);
extern void ioapic_insert_resources(void);
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 5458380..2622927 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -48,5 +48,6 @@ extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
extern int vector_used_by_percpu_irq(unsigned int vector);
extern void init_ISA_irqs(void);
+extern int nr_legacy_irqs;
#endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 4611f08..8767d99 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -28,28 +28,33 @@
#define MCE_VECTOR 0x12
/*
- * IDT vectors usable for external interrupt sources start
- * at 0x20:
+ * IDT vectors usable for external interrupt sources start at 0x20.
+ * (0x80 is the syscall vector, 0x30-0x3f are for ISA)
*/
#define FIRST_EXTERNAL_VECTOR 0x20
-
-#ifdef CONFIG_X86_32
-# define SYSCALL_VECTOR 0x80
-# define IA32_SYSCALL_VECTOR 0x80
-#else
-# define IA32_SYSCALL_VECTOR 0x80
-#endif
+/*
+ * We start allocating at 0x21 to spread out vectors evenly between
+ * priority levels. (0x80 is the syscall vector)
+ */
+#define VECTOR_OFFSET_START 1
/*
- * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
- * cleanup after irq migration.
+ * Reserve the lowest usable vector (and hence lowest priority) 0x20 for
+ * triggering cleanup after irq migration. 0x21-0x2f will still be used
+ * for device interrupts.
*/
#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
+#define IA32_SYSCALL_VECTOR 0x80
+#ifdef CONFIG_X86_32
+# define SYSCALL_VECTOR 0x80
+#endif
+
/*
* Vectors 0x30-0x3f are used for ISA interrupts.
+ * Round IRQ0_VECTOR up to the next 16-vector boundary.
*/
-#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
+#define IRQ0_VECTOR ((FIRST_EXTERNAL_VECTOR + 16) & ~15)
#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
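Note: with FIRST_EXTERNAL_VECTOR at 0x20 the new expression still evaluates to the old value, (0x20 + 16) & ~15 = 0x30, and it keeps IRQ0_VECTOR on a 16-vector boundary even if FIRST_EXTERNAL_VECTOR moves, e.g. (0x22 + 16) & ~15 = 0x30 as well.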
@@ -120,13 +125,6 @@
*/
#define MCE_SELF_VECTOR 0xeb
-/*
- * First APIC vector available to drivers: (vectors 0x30-0xee) we
- * start at 0x31(0x41) to spread out vectors evenly between priority
- * levels. (0x80 is the syscall vector)
- */
-#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
-
#define NR_VECTORS 256
#define FPU_IRQ 13
@@ -154,21 +152,21 @@ static inline int invalid_vm86_irq(int irq)
#define NR_IRQS_LEGACY 16
-#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS )
#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS )
#ifdef CONFIG_X86_IO_APIC
# ifdef CONFIG_SPARSE_IRQ
+# define CPU_VECTOR_LIMIT (64 * NR_CPUS)
# define NR_IRQS \
(CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
(NR_VECTORS + CPU_VECTOR_LIMIT) : \
(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
# else
-# if NR_CPUS < MAX_IO_APICS
-# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT)
-# else
-# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
-# endif
+# define CPU_VECTOR_LIMIT (32 * NR_CPUS)
+# define NR_IRQS \
+ (CPU_VECTOR_LIMIT < IO_APIC_VECTOR_LIMIT ? \
+ (NR_VECTORS + CPU_VECTOR_LIMIT) : \
+ (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
# endif
#else /* !CONFIG_X86_IO_APIC: */
# define NR_IRQS NR_IRQS_LEGACY
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 4fe681d..4ffa345 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -32,7 +32,10 @@ struct kprobe;
typedef u8 kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION 0xcc
-#define RELATIVEJUMP_INSTRUCTION 0xe9
+#define RELATIVEJUMP_OPCODE 0xe9
+#define RELATIVEJUMP_SIZE 5
+#define RELATIVECALL_OPCODE 0xe8
+#define RELATIVE_ADDR_SIZE 4
#define MAX_INSN_SIZE 16
#define MAX_STACK_SIZE 64
#define MIN_STACK_SIZE(ADDR) \
@@ -44,6 +47,17 @@ typedef u8 kprobe_opcode_t;
#define flush_insn_slot(p) do { } while (0)
+/* optinsn template addresses */
+extern kprobe_opcode_t optprobe_template_entry;
+extern kprobe_opcode_t optprobe_template_val;
+extern kprobe_opcode_t optprobe_template_call;
+extern kprobe_opcode_t optprobe_template_end;
+#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
+#define MAX_OPTINSN_SIZE \
+ (((unsigned long)&optprobe_template_end - \
+ (unsigned long)&optprobe_template_entry) + \
+ MAX_OPTIMIZED_LENGTH + RELATIVEJUMP_SIZE)
+
extern const int kretprobe_blacklist_size;
void arch_remove_kprobe(struct kprobe *p);
@@ -64,6 +78,21 @@ struct arch_specific_insn {
int boostable;
};
+struct arch_optimized_insn {
+ /* copy of the original instructions */
+ kprobe_opcode_t copied_insn[RELATIVE_ADDR_SIZE];
+ /* detour code buffer */
+ kprobe_opcode_t *insn;
+ /* the size of instructions copied to detour code buffer */
+ size_t size;
+};
+
+/* Return true (!0) if optinsn is prepared for optimization. */
+static inline int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
+{
+ return optinsn->size;
+}
+
struct prev_kprobe {
struct kprobe *kp;
unsigned long status;
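Note: plugging in the constants above, MAX_OPTIMIZED_LENGTH = MAX_INSN_SIZE + RELATIVE_ADDR_SIZE = 16 + 4 = 20 bytes, enough to cover whatever instruction straddles the end of the 5-byte jump site, and MAX_OPTINSN_SIZE is the optprobe template body plus those 20 bytes plus one more RELATIVEJUMP_SIZE (5-byte) jump back to the original instruction stream.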
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7c18e12..7a6f54f 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -54,13 +54,23 @@ struct x86_emulate_ctxt;
struct x86_emulate_ops {
/*
* read_std: Read bytes of standard (non-emulated/special) memory.
- * Used for instruction fetch, stack operations, and others.
+ * Used for descriptor reading.
* @addr: [IN ] Linear address from which to read.
* @val: [OUT] Value read from memory, zero-extended to 'u_long'.
* @bytes: [IN ] Number of bytes to read from memory.
*/
int (*read_std)(unsigned long addr, void *val,
- unsigned int bytes, struct kvm_vcpu *vcpu);
+ unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+
+ /*
+ * fetch: Read bytes of standard (non-emulated/special) memory.
+ * Used for instruction fetch.
+ * @addr: [IN ] Linear address from which to read.
+ * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
+ * @bytes: [IN ] Number of bytes to read from memory.
+ */
+ int (*fetch)(unsigned long addr, void *val,
+ unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
/*
* read_emulated: Read bytes from emulated/special memory area.
@@ -74,7 +84,7 @@ struct x86_emulate_ops {
struct kvm_vcpu *vcpu);
/*
- * write_emulated: Read bytes from emulated/special memory area.
+ * write_emulated: Write bytes to emulated/special memory area.
* @addr: [IN ] Linear address to which to write.
* @val: [IN ] Value to write to memory (low-order bytes used as
* required).
@@ -168,6 +178,7 @@ struct x86_emulate_ctxt {
/* Execution mode, passed to the emulator. */
#define X86EMUL_MODE_REAL 0 /* Real mode. */
+#define X86EMUL_MODE_VM86 1 /* Virtual 8086 mode. */
#define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */
#define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */
#define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */
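Note: an implementation would now wire the two standard-memory callbacks up separately. The helper names below are hypothetical; only the signatures follow the ops table above (definitions omitted):

	static int my_read_std(unsigned long addr, void *val, unsigned int bytes,
			       struct kvm_vcpu *vcpu, u32 *error);	/* hypothetical */
	static int my_fetch(unsigned long addr, void *val, unsigned int bytes,
			    struct kvm_vcpu *vcpu, u32 *error);		/* hypothetical */

	static struct x86_emulate_ops my_emulate_ops = {
		.read_std = my_read_std,	/* descriptor reads */
		.fetch    = my_fetch,		/* instruction fetch */
	};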
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4f865e8..06d9e79 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -25,7 +25,7 @@
#include <asm/mtrr.h>
#include <asm/msr-index.h>
-#define KVM_MAX_VCPUS 16
+#define KVM_MAX_VCPUS 64
#define KVM_MEMORY_SLOTS 32
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
@@ -38,19 +38,6 @@
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
0xFFFFFF0000000000ULL)
-#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
- (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
-#define KVM_GUEST_CR0_MASK \
- (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
-#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \
- (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
-#define KVM_VM_CR0_ALWAYS_ON \
- (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
-#define KVM_GUEST_CR4_MASK \
- (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
-#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
-#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
-
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)
@@ -256,7 +243,8 @@ struct kvm_mmu {
void (*new_cr3)(struct kvm_vcpu *vcpu);
int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
void (*free)(struct kvm_vcpu *vcpu);
- gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
+ gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
+ u32 *error);
void (*prefetch_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *page);
int (*sync_page)(struct kvm_vcpu *vcpu,
@@ -282,13 +270,15 @@ struct kvm_vcpu_arch {
u32 regs_dirty;
unsigned long cr0;
+ unsigned long cr0_guest_owned_bits;
unsigned long cr2;
unsigned long cr3;
unsigned long cr4;
+ unsigned long cr4_guest_owned_bits;
unsigned long cr8;
u32 hflags;
u64 pdptrs[4]; /* pae */
- u64 shadow_efer;
+ u64 efer;
u64 apic_base;
struct kvm_lapic *apic; /* kernel irqchip context */
int32_t apic_arb_prio;
@@ -374,17 +364,27 @@ struct kvm_vcpu_arch {
/* used for guest single stepping over the given code position */
u16 singlestep_cs;
unsigned long singlestep_rip;
+ /* fields used by HYPER-V emulation */
+ u64 hv_vapic;
};
struct kvm_mem_alias {
gfn_t base_gfn;
unsigned long npages;
gfn_t target_gfn;
+#define KVM_ALIAS_INVALID 1UL
+ unsigned long flags;
};
-struct kvm_arch{
- int naliases;
+#define KVM_ARCH_HAS_UNALIAS_INSTANTIATION
+
+struct kvm_mem_aliases {
struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
+ int naliases;
+};
+
+struct kvm_arch {
+ struct kvm_mem_aliases *aliases;
unsigned int n_free_mmu_pages;
unsigned int n_requested_mmu_pages;
@@ -416,6 +416,10 @@ struct kvm_arch{
s64 kvmclock_offset;
struct kvm_xen_hvm_config xen_hvm_config;
+
+ /* fields used by HYPER-V emulation */
+ u64 hv_guest_os_id;
+ u64 hv_hypercall;
};
struct kvm_vm_stat {
@@ -471,6 +475,7 @@ struct kvm_x86_ops {
int (*hardware_setup)(void); /* __init */
void (*hardware_unsetup)(void); /* __exit */
bool (*cpu_has_accelerated_tpr)(void);
+ void (*cpuid_update)(struct kvm_vcpu *vcpu);
/* Create, but do not attach this VCPU */
struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
@@ -492,6 +497,7 @@ struct kvm_x86_ops {
void (*set_segment)(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
+ void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -501,12 +507,13 @@ struct kvm_x86_ops {
void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
- unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
- void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
- int *exception);
+ int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
+ int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+ void (*fpu_activate)(struct kvm_vcpu *vcpu);
+ void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
void (*tlb_flush)(struct kvm_vcpu *vcpu);
@@ -531,7 +538,8 @@ struct kvm_x86_ops {
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*get_tdp_level)(void);
u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
- bool (*gb_page_enable)(void);
+ int (*get_lpage_level)(void);
+ bool (*rdtscp_supported)(void);
const struct trace_print_flags *exit_reasons_str;
};
@@ -606,8 +614,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
unsigned long value);
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
-int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
- int type_bits, int seg);
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
@@ -653,6 +660,10 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
@@ -666,6 +677,7 @@ void kvm_disable_tdp(void);
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int complete_pio(struct kvm_vcpu *vcpu);
+bool kvm_check_iopl(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index c584076..ffae142 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -2,6 +2,7 @@
#define _ASM_X86_KVM_PARA_H
#include <linux/types.h>
+#include <asm/hyperv.h>
/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
* should be used to determine that a VM is running under KVM.
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 47b9b6f..2e99724 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -195,41 +195,4 @@ static inline long local_sub_return(long i, local_t *l)
#define __local_add(i, l) local_add((i), (l))
#define __local_sub(i, l) local_sub((i), (l))
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- *
- * X86_64: This could be done better if we moved the per cpu data directly
- * after GS.
- */
-
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
-({ \
- local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; \
-})
-#define cpu_local_wrap(l) \
-({ \
- preempt_disable(); \
- (l); \
- preempt_enable(); \
-}) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l))))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l))))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
-
-#define __cpu_local_inc(l) cpu_local_inc((l))
-#define __cpu_local_dec(l) cpu_local_dec((l))
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* _ASM_X86_LOCAL_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index dd59a85..5653f43 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -435,15 +435,6 @@ static inline void paravirt_release_pud(unsigned long pfn)
PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}
-#ifdef CONFIG_HIGHPTE
-static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
-{
- unsigned long ret;
- ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
- return (void *)ret;
-}
-#endif
-
static inline void pte_update(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index b1e70d5..db9ef55 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -304,10 +304,6 @@ struct pv_mmu_ops {
#endif /* PAGETABLE_LEVELS == 4 */
#endif /* PAGETABLE_LEVELS >= 3 */
-#ifdef CONFIG_HIGHPTE
- void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
-#endif
-
struct pv_lazy_ops lazy_mode;
/* dom0 ops */
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index ada8c20..b4a00dd 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -124,6 +124,8 @@ extern void pci_iommu_alloc(void);
#include "pci_64.h"
#endif
+void dma32_reserve_bootmem(void);
+
/* implement the pci_ DMA API in terms of the generic device dma_ one */
#include <asm-generic/pci-dma-compat.h>
diff --git a/arch/x86/include/asm/pci_64.h b/arch/x86/include/asm/pci_64.h
index ae5e40f..fe15cfb 100644
--- a/arch/x86/include/asm/pci_64.h
+++ b/arch/x86/include/asm/pci_64.h
@@ -22,8 +22,6 @@ extern int (*pci_config_read)(int seg, int bus, int dev, int fn,
extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
int reg, int len, u32 value);
-extern void dma32_reserve_bootmem(void);
-
#endif /* __KERNEL__ */
#endif /* _ASM_X86_PCI_64_H */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0c44196..66a272d 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -25,19 +25,18 @@
*/
#ifdef CONFIG_SMP
#define PER_CPU(var, reg) \
- __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \
- lea per_cpu__##var(reg), reg
-#define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var
+ __percpu_mov_op %__percpu_seg:this_cpu_off, reg; \
+ lea var(reg), reg
+#define PER_CPU_VAR(var) %__percpu_seg:var
#else /* ! SMP */
-#define PER_CPU(var, reg) \
- __percpu_mov_op $per_cpu__##var, reg
-#define PER_CPU_VAR(var) per_cpu__##var
+#define PER_CPU(var, reg) __percpu_mov_op $var, reg
+#define PER_CPU_VAR(var) var
#endif /* SMP */
#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
#else
-#define INIT_PER_CPU_VAR(var) per_cpu__##var
+#define INIT_PER_CPU_VAR(var) var
#endif
#else /* ...!ASSEMBLY */
@@ -60,12 +59,12 @@
* There also must be an entry in vmlinux_64.lds.S
*/
#define DECLARE_INIT_PER_CPU(var) \
- extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
+ extern typeof(var) init_per_cpu_var(var)
#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var) init_per_cpu__##var
#else
-#define init_per_cpu_var(var) per_cpu_var(var)
+#define init_per_cpu_var(var) var
#endif
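With the per_cpu__ name mangling gone, the bare symbol is the per-cpu variable. A hedged sketch of what a caller now looks like (hits and note_hit are invented for illustration; percpu_add() is the accessor redefined later in this hunk):

#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, hits);          /* illustrative variable */

static void note_hit(void)
{
        /* The bare symbol name passes straight through; with the
         * percpu_add_op() introduced just below, this compiles to a
         * single "incl/incq %gs:hits" when the addend is constant 1. */
        percpu_add(hits, 1);
}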
/* For arch-specific code, we can use direct single-insn ops (they
@@ -104,6 +103,64 @@ do { \
} \
} while (0)
+/*
+ * Generate a percpu add-to-memory instruction, and optimize the code
+ * when the value added or subtracted is the constant 1 or -1.
+ */
+#define percpu_add_op(var, val) \
+do { \
+ typedef typeof(var) pao_T__; \
+ const int pao_ID__ = (__builtin_constant_p(val) && \
+ ((val) == 1 || (val) == -1)) ? (val) : 0; \
+ if (0) { \
+ pao_T__ pao_tmp__; \
+ pao_tmp__ = (val); \
+ } \
+ switch (sizeof(var)) { \
+ case 1: \
+ if (pao_ID__ == 1) \
+ asm("incb "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decb "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addb %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "qi" ((pao_T__)(val))); \
+ break; \
+ case 2: \
+ if (pao_ID__ == 1) \
+ asm("incw "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decw "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addw %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "ri" ((pao_T__)(val))); \
+ break; \
+ case 4: \
+ if (pao_ID__ == 1) \
+ asm("incl "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decl "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addl %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "ri" ((pao_T__)(val))); \
+ break; \
+ case 8: \
+ if (pao_ID__ == 1) \
+ asm("incq "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decq "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addq %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "re" ((pao_T__)(val))); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+} while (0)
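A user-space approximation of the dispatch above, assuming x86-64 and a GNU-compatible compiler; ADD_OP is invented and operates on a register variable rather than a %gs-relative per-cpu slot, but the __builtin_constant_p() selection is the same trick:

#include <stdio.h>

/* When the compiler can prove val is the constant 1 or -1, the
 * cheaper single-operand inc/dec is chosen at compile time.       */
#define ADD_OP(var, val)                                              \
do {                                                                  \
        const int id__ = (__builtin_constant_p(val) &&                \
                          ((val) == 1 || (val) == -1)) ? (val) : 0;   \
        if (id__ == 1)                                                \
                __asm__("incq %0" : "+r" (var));                      \
        else if (id__ == -1)                                          \
                __asm__("decq %0" : "+r" (var));                      \
        else                                                          \
                __asm__("addq %1, %0" : "+r" (var)                    \
                                      : "re" ((long)(val)));          \
} while (0)

int main(void)
{
        long x = 40, step = 2;

        ADD_OP(x, 1);        /* constant 1   -> incq */
        ADD_OP(x, step);     /* non-constant -> addq */
        printf("%ld\n", x);  /* 43 */
        return 0;
}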
+
#define percpu_from_op(op, var, constraint) \
({ \
typeof(var) pfo_ret__; \
@@ -142,16 +199,14 @@ do { \
* per-thread variables implemented as per-cpu variables and thus
* stable for the duration of the respective task.
*/
-#define percpu_read(var) percpu_from_op("mov", per_cpu__##var, \
- "m" (per_cpu__##var))
-#define percpu_read_stable(var) percpu_from_op("mov", per_cpu__##var, \
- "p" (&per_cpu__##var))
-#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
-#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val)
-#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val)
-#define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val)
-#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
-#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
+#define percpu_read(var) percpu_from_op("mov", var, "m" (var))
+#define percpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var)))
+#define percpu_write(var, val) percpu_to_op("mov", var, val)
+#define percpu_add(var, val) percpu_add_op(var, val)
+#define percpu_sub(var, val) percpu_add_op(var, -(val))
+#define percpu_and(var, val) percpu_to_op("and", var, val)
+#define percpu_or(var, val) percpu_to_op("or", var, val)
+#define percpu_xor(var, val) percpu_to_op("xor", var, val)
#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
@@ -160,9 +215,9 @@ do { \
#define __this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
-#define __this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
-#define __this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
-#define __this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
+#define __this_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
+#define __this_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
#define __this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
#define __this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
#define __this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
@@ -179,9 +234,9 @@ do { \
#define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
-#define this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
-#define this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
-#define this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
+#define this_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
+#define this_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
@@ -192,9 +247,9 @@ do { \
#define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
-#define irqsafe_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
-#define irqsafe_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
+#define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
+#define irqsafe_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
#define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
@@ -212,19 +267,19 @@ do { \
#ifdef CONFIG_X86_64
#define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
-#define __this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
#define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
-#define this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
#define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
@@ -236,7 +291,7 @@ do { \
({ \
int old__; \
asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
- : "=r" (old__), "+m" (per_cpu__##var) \
+ : "=r" (old__), "+m" (var) \
: "dIr" (bit)); \
old__; \
})
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index a286683..47339a1 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -54,10 +54,10 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
in_irq() ? KM_IRQ_PTE : \
KM_PTE0)
#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) + \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) + \
pte_index((address)))
#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) + \
pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index 4009f65..6f414ed 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -23,14 +23,4 @@ extern int reboot_force;
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
-/*
- * This looks more complex than it should be. But we need to
- * get the type for the ~ right in round_down (it needs to be
- * as wide as the result!), and we want to evaluate the macro
- * arguments just once each.
- */
-#define __round_mask(x,y) ((__typeof__(x))((y)-1))
-#define round_up(x,y) ((((x)-1) | __round_mask(x,y))+1)
-#define round_down(x,y) ((x) & ~__round_mask(x,y))
-
#endif /* _ASM_X86_PROTO_H */
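The removed comment is about integer-promotion pitfalls: the mask is cast to typeof(x) so that ~mask in round_down is as wide as the operand. These macros appear to move to a generic header in this series (later trees carry them in linux/kernel.h); a small stand-alone demonstration:

#include <stdio.h>

#define __round_mask(x, y) ((__typeof__(x))((y)-1))     /* y: power of 2 */
#define round_up(x, y)     ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y)   ((x) & ~__round_mask(x, y))

int main(void)
{
        unsigned long long addr = 0x12345;

        /* Casting the mask to typeof(x) keeps ~mask 64 bits wide, so
         * round_down does not truncate a u64 to int width.          */
        printf("%llx\n", round_up(addr, 0x1000ULL));   /* 13000 */
        printf("%llx\n", round_down(addr, 0x1000ULL)); /* 12000 */
        return 0;
}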
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 1fecb7e..38638cd 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -313,7 +313,7 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_EXIT_ERR -1
-#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */
+#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
#define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8"
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index e04740f..b8fe48e 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -32,7 +32,7 @@ extern void show_regs_common(void);
"movl %P[task_canary](%[next]), %%ebx\n\t" \
"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam \
- , [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
+ , [stack_canary] "=m" (stack_canary.canary)
#define __switch_canary_iparam \
, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else /* CC_STACKPROTECTOR */
@@ -114,7 +114,7 @@ do { \
"movq %P[task_canary](%%rsi),%%r8\n\t" \
"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam \
- , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
+ , [gs_canary] "=m" (irq_stack_union.stack_canary)
#define __switch_canary_iparam \
, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else /* CC_STACKPROTECTOR */
@@ -133,7 +133,7 @@ do { \
__switch_canary \
"movq %P[thread_info](%%rsi),%%r8\n\t" \
"movq %%rax,%%rdi\n\t" \
- "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
"jnz ret_from_fork\n\t" \
RESTORE_CONTEXT \
: "=a" (last) \
@@ -143,7 +143,7 @@ do { \
[ti_flags] "i" (offsetof(struct thread_info, flags)), \
[_tif_fork] "i" (_TIF_FORK), \
[thread_info] "i" (offsetof(struct task_struct, stack)), \
- [current_task] "m" (per_cpu_var(current_task)) \
+ [current_task] "m" (current_task) \
__switch_canary_iparam \
: "memory", "cc" __EXTRA_CLOBBER)
#endif
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 2b49454..fb9a080 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -53,6 +53,7 @@
*/
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define SECONDARY_EXEC_ENABLE_EPT 0x00000002
+#define SECONDARY_EXEC_RDTSCP 0x00000008
#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
@@ -251,6 +252,7 @@ enum vmcs_field {
#define EXIT_REASON_MSR_READ 31
#define EXIT_REASON_MSR_WRITE 32
#define EXIT_REASON_MWAIT_INSTRUCTION 36
+#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION 40
#define EXIT_REASON_MCE_DURING_VMENTRY 41
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
@@ -362,6 +364,7 @@ enum vmcs_field {
#define VMX_EPTP_UC_BIT (1ull << 8)
#define VMX_EPTP_WB_BIT (1ull << 14)
#define VMX_EPT_2MB_PAGE_BIT (1ull << 16)
+#define VMX_EPT_1GB_PAGE_BIT (1ull << 17)
#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24)
#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
@@ -374,7 +377,7 @@ enum vmcs_field {
#define VMX_EPT_READABLE_MASK 0x1ull
#define VMX_EPT_WRITABLE_MASK 0x2ull
#define VMX_EPT_EXECUTABLE_MASK 0x4ull
-#define VMX_EPT_IGMT_BIT (1ull << 6)
+#define VMX_EPT_IPAT_BIT (1ull << 6)
#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index f957030..738fcb6 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -447,6 +447,12 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
*irq = gsi;
+
+#ifdef CONFIG_X86_IO_APIC
+ if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
+ setup_IO_APIC_irq_extra(gsi);
+#endif
+
return 0;
}
@@ -474,7 +480,8 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
}
#endif
- acpi_gsi_to_irq(plat_gsi, &irq);
+ irq = plat_gsi;
+
return irq;
}
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index e6ea034..3a4bf35 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
+#include <linux/stop_machine.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
@@ -572,3 +573,62 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
local_irq_restore(flags);
return addr;
}
+
+/*
+ * Cross-modifying kernel text with stop_machine().
+ * This code originally comes from the immediate values work.
+ */
+static atomic_t stop_machine_first;
+static int wrote_text;
+
+struct text_poke_params {
+ void *addr;
+ const void *opcode;
+ size_t len;
+};
+
+static int __kprobes stop_machine_text_poke(void *data)
+{
+ struct text_poke_params *tpp = data;
+
+ if (atomic_dec_and_test(&stop_machine_first)) {
+ text_poke(tpp->addr, tpp->opcode, tpp->len);
+ smp_wmb(); /* Make sure other cpus see that this has run */
+ wrote_text = 1;
+ } else {
+ while (!wrote_text)
+ cpu_relax();
+ smp_mb(); /* Load wrote_text before following execution */
+ }
+
+ flush_icache_range((unsigned long)tpp->addr,
+ (unsigned long)tpp->addr + tpp->len);
+ return 0;
+}
+
+/**
+ * text_poke_smp - Update instructions on a live kernel on SMP
+ * @addr: address to modify
+ * @opcode: source of the copy
+ * @len: length to copy
+ *
+ * Modify a multi-byte instruction by using stop_machine() on SMP. This allows
+ * a user to poke/set multi-byte text on SMP. Only code that is never run from
+ * NMI/MCE context should be modified this way, since stop_machine() does
+ * _not_ protect code against NMI and MCE.
+ *
+ * Note: Must be called under get_online_cpus() and text_mutex.
+ */
+void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
+{
+ struct text_poke_params tpp;
+
+ tpp.addr = addr;
+ tpp.opcode = opcode;
+ tpp.len = len;
+ atomic_set(&stop_machine_first, 1);
+ wrote_text = 0;
+ stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
+ return addr;
+}
+
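A hedged usage sketch of the new helper, following the locking rules its kerneldoc states; patch_site() and its arguments are illustrative:

#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/memory.h>   /* text_mutex */
#include <asm/alternative.h>

static void patch_site(void *addr, const u8 *new_insn, size_t len)
{
        get_online_cpus();          /* CPUs may not come and go ... */
        mutex_lock(&text_mutex);    /* ... and we own the text      */
        text_poke_smp(addr, new_insn, len);
        mutex_unlock(&text_mutex);
        put_online_cpus();
}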
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 6bdd2c7..14862f1 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -73,8 +73,8 @@
*/
int sis_apic_bug = -1;
-static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+static DEFINE_RAW_SPINLOCK(ioapic_lock);
+static DEFINE_RAW_SPINLOCK(vector_lock);
/*
* # of IRQ routing registers
@@ -94,8 +94,6 @@ struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
/* # of MP IRQ source entries */
int mp_irq_entries;
-/* Number of legacy interrupts */
-static int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
/* GSI interrupts */
static int nr_irqs_gsi = NR_IRQS_LEGACY;
@@ -140,27 +138,10 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int node)
/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
#ifdef CONFIG_SPARSE_IRQ
-static struct irq_cfg irq_cfgx[] = {
+static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
#else
-static struct irq_cfg irq_cfgx[NR_IRQS] = {
+static struct irq_cfg irq_cfgx[NR_IRQS];
#endif
- [0] = { .vector = IRQ0_VECTOR, },
- [1] = { .vector = IRQ1_VECTOR, },
- [2] = { .vector = IRQ2_VECTOR, },
- [3] = { .vector = IRQ3_VECTOR, },
- [4] = { .vector = IRQ4_VECTOR, },
- [5] = { .vector = IRQ5_VECTOR, },
- [6] = { .vector = IRQ6_VECTOR, },
- [7] = { .vector = IRQ7_VECTOR, },
- [8] = { .vector = IRQ8_VECTOR, },
- [9] = { .vector = IRQ9_VECTOR, },
- [10] = { .vector = IRQ10_VECTOR, },
- [11] = { .vector = IRQ11_VECTOR, },
- [12] = { .vector = IRQ12_VECTOR, },
- [13] = { .vector = IRQ13_VECTOR, },
- [14] = { .vector = IRQ14_VECTOR, },
- [15] = { .vector = IRQ15_VECTOR, },
-};
void __init io_apic_disable_legacy(void)
{
@@ -185,8 +166,14 @@ int __init arch_early_irq_init(void)
desc->chip_data = &cfg[i];
zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
- if (i < nr_legacy_irqs)
- cpumask_setall(cfg[i].domain);
+ /*
+ * For legacy IRQs, start with assigning irq0 to irq15 to
+ * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
+ */
+ if (i < nr_legacy_irqs) {
+ cfg[i].vector = IRQ0_VECTOR + i;
+ cpumask_set_cpu(0, cfg[i].domain);
+ }
}
return 0;
@@ -406,7 +393,7 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
struct irq_pin_list *entry;
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
for_each_irq_pin(entry, cfg->irq_2_pin) {
unsigned int reg;
int pin;
@@ -415,11 +402,11 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
reg = io_apic_read(entry->apic, 0x10 + pin*2);
/* Is the remote IRR bit set? */
if (reg & IO_APIC_REDIR_REMOTE_IRR) {
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return true;
}
}
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return false;
}
@@ -433,10 +420,10 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
union entry_union eu;
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return eu.entry;
}
@@ -459,9 +446,9 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
__ioapic_write_entry(apic, pin, e);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
@@ -474,10 +461,10 @@ static void ioapic_mask_entry(int apic, int pin)
unsigned long flags;
union entry_union eu = { .entry.mask = 1 };
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0x10 + 2*pin, eu.w1);
io_apic_write(apic, 0x11 + 2*pin, eu.w2);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
@@ -604,9 +591,9 @@ static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
BUG_ON(!cfg);
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
__mask_IO_APIC_irq(cfg);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
@@ -614,9 +601,9 @@ static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
struct irq_cfg *cfg = desc->chip_data;
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
__unmask_IO_APIC_irq(cfg);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void mask_IO_APIC_irq(unsigned int irq)
@@ -1140,12 +1127,12 @@ void lock_vector_lock(void)
/* Used to ensure the online set of cpus does not change
* during assign_irq_vector.
*/
- spin_lock(&vector_lock);
+ raw_spin_lock(&vector_lock);
}
void unlock_vector_lock(void)
{
- spin_unlock(&vector_lock);
+ raw_spin_unlock(&vector_lock);
}
static int
@@ -1162,7 +1149,8 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
* Also, we've got to be careful not to trash gate
* 0x80, because int 0x80 is hm, kind of importantish. ;)
*/
- static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
+ static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
+ static int current_offset = VECTOR_OFFSET_START % 8;
unsigned int old_vector;
int cpu, err;
cpumask_var_t tmp_mask;
@@ -1198,7 +1186,7 @@ next:
if (vector >= first_system_vector) {
/* If out of vectors on large boxen, must share them. */
offset = (offset + 1) % 8;
- vector = FIRST_DEVICE_VECTOR + offset;
+ vector = FIRST_EXTERNAL_VECTOR + offset;
}
if (unlikely(current_vector == vector))
continue;
@@ -1232,9 +1220,9 @@ int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
int err;
unsigned long flags;
- spin_lock_irqsave(&vector_lock, flags);
+ raw_spin_lock_irqsave(&vector_lock, flags);
err = __assign_irq_vector(irq, cfg, mask);
- spin_unlock_irqrestore(&vector_lock, flags);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
return err;
}
@@ -1268,11 +1256,16 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
void __setup_vector_irq(int cpu)
{
/* Initialize vector_irq on a new cpu */
- /* This function must be called with vector_lock held */
int irq, vector;
struct irq_cfg *cfg;
struct irq_desc *desc;
+ /*
+ * vector_lock will make sure that we don't run into irq vector
+ * assignments that might be happening on another cpu in parallel,
+ * while we set up our initial vector-to-irq mappings.
+ */
+ raw_spin_lock(&vector_lock);
/* Mark the inuse vectors */
for_each_irq_desc(irq, desc) {
cfg = desc->chip_data;
@@ -1291,6 +1284,7 @@ void __setup_vector_irq(int cpu)
if (!cpumask_test_cpu(cpu, cfg->domain))
per_cpu(vector_irq, cpu)[vector] = -1;
}
+ raw_spin_unlock(&vector_lock);
}
static struct irq_chip ioapic_chip;
@@ -1440,6 +1434,14 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
cfg = desc->chip_data;
+ /*
+ * For legacy irqs, cfg->domain starts with cpu 0 for legacy
+ * controllers like 8259. Now that IO-APIC can handle this irq, update
+ * the cfg->domain.
+ */
+ if (irq < nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
+ apic->vector_allocation_domain(0, cfg->domain);
+
if (assign_irq_vector(irq, cfg, apic->target_cpus()))
return;
@@ -1473,7 +1475,7 @@ static struct {
static void __init setup_IO_APIC_irqs(void)
{
- int apic_id = 0, pin, idx, irq;
+ int apic_id, pin, idx, irq;
int notcon = 0;
struct irq_desc *desc;
struct irq_cfg *cfg;
@@ -1481,14 +1483,7 @@ static void __init setup_IO_APIC_irqs(void)
apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
-#ifdef CONFIG_ACPI
- if (!acpi_disabled && acpi_ioapic) {
- apic_id = mp_find_ioapic(0);
- if (apic_id < 0)
- apic_id = 0;
- }
-#endif
-
+ for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
idx = find_irq_entry(apic_id, pin, mp_INT);
if (idx == -1) {
@@ -1510,6 +1505,9 @@ static void __init setup_IO_APIC_irqs(void)
irq = pin_2_irq(idx, apic_id, pin);
+ if ((apic_id > 0) && (irq > 16))
+ continue;
+
/*
* Skip the timer IRQ if there's a quirk handler
* installed and if it returns 1:
@@ -1539,6 +1537,56 @@ static void __init setup_IO_APIC_irqs(void)
}
/*
+ * for GSIs that are not on the first ioapic but
+ * could not use acpi_register_gsi(),
+ * like the special SCI on the IBM x3330
+ */
+void setup_IO_APIC_irq_extra(u32 gsi)
+{
+ int apic_id = 0, pin, idx, irq;
+ int node = cpu_to_node(boot_cpu_id);
+ struct irq_desc *desc;
+ struct irq_cfg *cfg;
+
+ /*
+ * Convert 'gsi' to 'ioapic.pin'.
+ */
+ apic_id = mp_find_ioapic(gsi);
+ if (apic_id < 0)
+ return;
+
+ pin = mp_find_ioapic_pin(apic_id, gsi);
+ idx = find_irq_entry(apic_id, pin, mp_INT);
+ if (idx == -1)
+ return;
+
+ irq = pin_2_irq(idx, apic_id, pin);
+#ifdef CONFIG_SPARSE_IRQ
+ desc = irq_to_desc(irq);
+ if (desc)
+ return;
+#endif
+ desc = irq_to_desc_alloc_node(irq, node);
+ if (!desc) {
+ printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+ return;
+ }
+
+ cfg = desc->chip_data;
+ add_pin_to_irq_node(cfg, node, apic_id, pin);
+
+ if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
+ pr_debug("Pin %d-%d already programmed\n",
+ mp_ioapics[apic_id].apicid, pin);
+ return;
+ }
+ set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);
+
+ setup_IO_APIC_irq(apic_id, pin, irq, desc,
+ irq_trigger(idx), irq_polarity(idx));
+}
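setup_IO_APIC_irq_extra() first maps the GSI back to an (ioapic, pin) pair via mp_find_ioapic()/mp_find_ioapic_pin(). Conceptually each IO-APIC owns a contiguous GSI window; a stand-alone model of that lookup with invented types:

#include <stdio.h>

/* Each IO-APIC covers the GSI window [gsi_base, gsi_base + nr_pins). */
struct ioapic_win { unsigned int gsi_base, nr_pins; };

static int gsi_to_pin(const struct ioapic_win *io, int n,
                      unsigned int gsi, int *apic_id)
{
        int i;

        for (i = 0; i < n; i++)
                if (gsi >= io[i].gsi_base &&
                    gsi <  io[i].gsi_base + io[i].nr_pins) {
                        *apic_id = i;
                        return gsi - io[i].gsi_base;   /* the pin */
                }
        return -1;                                     /* no such GSI */
}

int main(void)
{
        struct ioapic_win io[] = { { 0, 24 }, { 24, 24 } };
        int apic;
        int pin = gsi_to_pin(io, 2, 30, &apic);

        printf("gsi 30 -> ioapic %d pin %d\n", apic, pin); /* 1, 6 */
        return 0;
}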
+
+/*
* Set up the timer pin, possibly with the 8259A-master behind.
*/
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
@@ -1601,14 +1649,14 @@ __apicdebuginit(void) print_IO_APIC(void)
for (apic = 0; apic < nr_ioapics; apic++) {
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(apic, 0);
reg_01.raw = io_apic_read(apic, 1);
if (reg_01.bits.version >= 0x10)
reg_02.raw = io_apic_read(apic, 2);
if (reg_01.bits.version >= 0x20)
reg_03.raw = io_apic_read(apic, 3);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
printk("\n");
printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
@@ -1830,7 +1878,7 @@ __apicdebuginit(void) print_PIC(void)
printk(KERN_DEBUG "\nprinting PIC contents\n");
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
v = inb(0xa1) << 8 | inb(0x21);
printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
@@ -1844,7 +1892,7 @@ __apicdebuginit(void) print_PIC(void)
outb(0x0a,0xa0);
outb(0x0a,0x20);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
@@ -1903,9 +1951,9 @@ void __init enable_IO_APIC(void)
* The number of IO-APIC IRQ registers (== #pins):
*/
for (apic = 0; apic < nr_ioapics; apic++) {
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_01.raw = io_apic_read(apic, 1);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
nr_ioapic_registers[apic] = reg_01.bits.entries+1;
}
@@ -2045,9 +2093,9 @@ void __init setup_ioapic_ids_from_mpc(void)
for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
/* Read the register 0 value */
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(apic_id, 0);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
old_id = mp_ioapics[apic_id].apicid;
@@ -2106,16 +2154,16 @@ void __init setup_ioapic_ids_from_mpc(void)
mp_ioapics[apic_id].apicid);
reg_00.bits.ID = mp_ioapics[apic_id].apicid;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic_id, 0, reg_00.raw);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
/*
* Sanity check
*/
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(apic_id, 0);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
printk("could not set ID!\n");
else
@@ -2198,7 +2246,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
unsigned long flags;
struct irq_cfg *cfg;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
if (irq < nr_legacy_irqs) {
disable_8259A_irq(irq);
if (i8259A_irq_pending(irq))
@@ -2206,7 +2254,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
}
cfg = irq_cfg(irq);
__unmask_IO_APIC_irq(cfg);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return was_pending;
}
@@ -2217,9 +2265,9 @@ static int ioapic_retrigger_irq(unsigned int irq)
struct irq_cfg *cfg = irq_cfg(irq);
unsigned long flags;
- spin_lock_irqsave(&vector_lock, flags);
+ raw_spin_lock_irqsave(&vector_lock, flags);
apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
- spin_unlock_irqrestore(&vector_lock, flags);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
return 1;
}
@@ -2312,14 +2360,14 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
irq = desc->irq;
cfg = desc->chip_data;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
ret = set_desc_affinity(desc, mask, &dest);
if (!ret) {
/* Only the high 8 bits are valid. */
dest = SET_APIC_LOGICAL_ID(dest);
__target_IO_APIC_irq(irq, dest, cfg);
}
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return ret;
}
@@ -2554,9 +2602,9 @@ static void eoi_ioapic_irq(struct irq_desc *desc)
irq = desc->irq;
cfg = desc->chip_data;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
__eoi_ioapic_irq(irq, cfg);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void ack_apic_level(unsigned int irq)
@@ -3138,13 +3186,13 @@ static int ioapic_resume(struct sys_device *dev)
data = container_of(dev, struct sysfs_ioapic_data, dev);
entry = data->entry;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(dev->id, 0);
if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
reg_00.bits.ID = mp_ioapics[dev->id].apicid;
io_apic_write(dev->id, 0, reg_00.raw);
}
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
ioapic_write_entry(dev->id, i, entry[i]);
@@ -3207,7 +3255,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
if (irq_want < nr_irqs_gsi)
irq_want = nr_irqs_gsi;
- spin_lock_irqsave(&vector_lock, flags);
+ raw_spin_lock_irqsave(&vector_lock, flags);
for (new = irq_want; new < nr_irqs; new++) {
desc_new = irq_to_desc_alloc_node(new, node);
if (!desc_new) {
@@ -3226,14 +3274,11 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
irq = new;
break;
}
- spin_unlock_irqrestore(&vector_lock, flags);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+
+ if (irq > 0)
+ dynamic_irq_init_keep_chip_data(irq);
- if (irq > 0) {
- dynamic_irq_init(irq);
- /* restore it, in case dynamic_irq_init clear it */
- if (desc_new)
- desc_new->chip_data = cfg_new;
- }
return irq;
}
@@ -3255,20 +3300,13 @@ int create_irq(void)
void destroy_irq(unsigned int irq)
{
unsigned long flags;
- struct irq_cfg *cfg;
- struct irq_desc *desc;
- /* store it, in case dynamic_irq_cleanup clear it */
- desc = irq_to_desc(irq);
- cfg = desc->chip_data;
- dynamic_irq_cleanup(irq);
- /* connect back irq_cfg */
- desc->chip_data = cfg;
+ dynamic_irq_cleanup_keep_chip_data(irq);
free_irte(irq);
- spin_lock_irqsave(&vector_lock, flags);
- __clear_irq_vector(irq, cfg);
- spin_unlock_irqrestore(&vector_lock, flags);
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ __clear_irq_vector(irq, get_irq_chip_data(irq));
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
}
/*
@@ -3805,9 +3843,9 @@ int __init io_apic_get_redir_entries (int ioapic)
union IO_APIC_reg_01 reg_01;
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_01.raw = io_apic_read(ioapic, 1);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return reg_01.bits.entries;
}
@@ -3969,9 +4007,9 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
if (physids_empty(apic_id_map))
apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(ioapic, 0);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
if (apic_id >= get_physical_broadcast()) {
printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
@@ -4005,10 +4043,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
if (reg_00.bits.ID != apic_id) {
reg_00.bits.ID = apic_id;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(ioapic, 0, reg_00.raw);
reg_00.raw = io_apic_read(ioapic, 0);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
/* Sanity check */
if (reg_00.bits.ID != apic_id) {
@@ -4029,9 +4067,9 @@ int __init io_apic_get_version(int ioapic)
union IO_APIC_reg_01 reg_01;
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_01.raw = io_apic_read(ioapic, 1);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return reg_01.bits.version;
}
@@ -4063,27 +4101,23 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
- int pin, ioapic = 0, irq, irq_entry;
+ int pin, ioapic, irq, irq_entry;
struct irq_desc *desc;
const struct cpumask *mask;
if (skip_ioapic_setup == 1)
return;
-#ifdef CONFIG_ACPI
- if (!acpi_disabled && acpi_ioapic) {
- ioapic = mp_find_ioapic(0);
- if (ioapic < 0)
- ioapic = 0;
- }
-#endif
-
+ for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
irq_entry = find_irq_entry(ioapic, pin, mp_INT);
if (irq_entry == -1)
continue;
irq = pin_2_irq(irq_entry, ioapic, pin);
+ if ((ioapic > 0) && (irq > 16))
+ continue;
+
desc = irq_to_desc(irq);
/*
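Most of the io_apic.c churn (and, below, i8259.c and nmi.c) converts ioapic_lock, vector_lock and i8259A_lock to raw spinlocks, which keep spinning even in configurations where spinlock_t may sleep (the preempt-rt direction). A minimal sketch of the converted pattern, with an invented lock and register accessor:

#include <linux/spinlock.h>
#include <linux/io.h>

static DEFINE_RAW_SPINLOCK(chip_lock);   /* illustrative low-level lock */

static unsigned int chip_read(void __iomem *reg)
{
        unsigned long flags;
        unsigned int val;

        /* raw_ variants never sleep, which irq-chip paths require. */
        raw_spin_lock_irqsave(&chip_lock, flags);
        val = readl(reg);
        raw_spin_unlock_irqrestore(&chip_lock, flags);
        return val;
}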
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 0159a69..bd7c96b 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -416,13 +416,13 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
/* We can be called before check_nmi_watchdog, hence NULL check. */
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
- static DEFINE_SPINLOCK(lock); /* Serialise the printks */
+ static DEFINE_RAW_SPINLOCK(lock); /* Serialise the printks */
- spin_lock(&lock);
+ raw_spin_lock(&lock);
printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
show_regs(regs);
dump_stack();
- spin_unlock(&lock);
+ raw_spin_unlock(&lock);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
rc = 1;
@@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
* Ayiee, looks like this CPU is stuck ...
* wait a few IRQs (5 seconds) before doing the oops ...
*/
- __this_cpu_inc(per_cpu_var(alert_counter));
- if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
+ __this_cpu_inc(alert_counter);
+ if (__this_cpu_read(alert_counter) == 5 * nmi_hz)
/*
* die_nmi will return ONLY if NOTIFY_STOP happens..
*/
@@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
regs, panic_on_timeout);
} else {
__get_cpu_var(last_irq_sum) = sum;
- __this_cpu_write(per_cpu_var(alert_counter), 0);
+ __this_cpu_write(alert_counter, 0);
}
/* see if the nmi watchdog went off */
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 09b1698..06130b5 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -22,10 +22,10 @@
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>
-#include <linux/sort.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/kvm_para.h>
+#include <linux/range.h>
#include <asm/processor.h>
#include <asm/e820.h>
@@ -34,11 +34,6 @@
#include "mtrr.h"
-struct res_range {
- unsigned long start;
- unsigned long end;
-};
-
struct var_mtrr_range_state {
unsigned long base_pfn;
unsigned long size_pfn;
@@ -56,7 +51,7 @@ struct var_mtrr_state {
/* Should be related to MTRR_VAR_RANGES nums */
#define RANGE_NUM 256
-static struct res_range __initdata range[RANGE_NUM];
+static struct range __initdata range[RANGE_NUM];
static int __initdata nr_range;
static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
@@ -64,152 +59,11 @@ static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
static int __initdata debug_print;
#define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0)
-
-static int __init
-add_range(struct res_range *range, int nr_range,
- unsigned long start, unsigned long end)
-{
- /* Out of slots: */
- if (nr_range >= RANGE_NUM)
- return nr_range;
-
- range[nr_range].start = start;
- range[nr_range].end = end;
-
- nr_range++;
-
- return nr_range;
-}
-
-static int __init
-add_range_with_merge(struct res_range *range, int nr_range,
- unsigned long start, unsigned long end)
-{
- int i;
-
- /* Try to merge it with old one: */
- for (i = 0; i < nr_range; i++) {
- unsigned long final_start, final_end;
- unsigned long common_start, common_end;
-
- if (!range[i].end)
- continue;
-
- common_start = max(range[i].start, start);
- common_end = min(range[i].end, end);
- if (common_start > common_end + 1)
- continue;
-
- final_start = min(range[i].start, start);
- final_end = max(range[i].end, end);
-
- range[i].start = final_start;
- range[i].end = final_end;
- return nr_range;
- }
-
- /* Need to add it: */
- return add_range(range, nr_range, start, end);
-}
-
-static void __init
-subtract_range(struct res_range *range, unsigned long start, unsigned long end)
-{
- int i, j;
-
- for (j = 0; j < RANGE_NUM; j++) {
- if (!range[j].end)
- continue;
-
- if (start <= range[j].start && end >= range[j].end) {
- range[j].start = 0;
- range[j].end = 0;
- continue;
- }
-
- if (start <= range[j].start && end < range[j].end &&
- range[j].start < end + 1) {
- range[j].start = end + 1;
- continue;
- }
-
-
- if (start > range[j].start && end >= range[j].end &&
- range[j].end > start - 1) {
- range[j].end = start - 1;
- continue;
- }
-
- if (start > range[j].start && end < range[j].end) {
- /* Find the new spare: */
- for (i = 0; i < RANGE_NUM; i++) {
- if (range[i].end == 0)
- break;
- }
- if (i < RANGE_NUM) {
- range[i].end = range[j].end;
- range[i].start = end + 1;
- } else {
- printk(KERN_ERR "run of slot in ranges\n");
- }
- range[j].end = start - 1;
- continue;
- }
- }
-}
-
-static int __init cmp_range(const void *x1, const void *x2)
-{
- const struct res_range *r1 = x1;
- const struct res_range *r2 = x2;
- long start1, start2;
-
- start1 = r1->start;
- start2 = r2->start;
-
- return start1 - start2;
-}
-
-static int __init clean_sort_range(struct res_range *range, int az)
-{
- int i, j, k = az - 1, nr_range = 0;
-
- for (i = 0; i < k; i++) {
- if (range[i].end)
- continue;
- for (j = k; j > i; j--) {
- if (range[j].end) {
- k = j;
- break;
- }
- }
- if (j == i)
- break;
- range[i].start = range[k].start;
- range[i].end = range[k].end;
- range[k].start = 0;
- range[k].end = 0;
- k--;
- }
- /* count it */
- for (i = 0; i < az; i++) {
- if (!range[i].end) {
- nr_range = i;
- break;
- }
- }
-
- /* sort them */
- sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
-
- return nr_range;
-}
-
#define BIOS_BUG_MSG KERN_WARNING \
"WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"
static int __init
-x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
+x86_get_mtrr_mem_range(struct range *range, int nr_range,
unsigned long extra_remove_base,
unsigned long extra_remove_size)
{
@@ -223,14 +77,14 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
continue;
base = range_state[i].base_pfn;
size = range_state[i].size_pfn;
- nr_range = add_range_with_merge(range, nr_range, base,
- base + size - 1);
+ nr_range = add_range_with_merge(range, RANGE_NUM, nr_range,
+ base, base + size);
}
if (debug_print) {
printk(KERN_DEBUG "After WB checking\n");
for (i = 0; i < nr_range; i++)
- printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
- range[i].start, range[i].end + 1);
+ printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+ range[i].start, range[i].end);
}
/* Take out UC ranges: */
@@ -252,19 +106,19 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
size -= (1<<(20-PAGE_SHIFT)) - base;
base = 1<<(20-PAGE_SHIFT);
}
- subtract_range(range, base, base + size - 1);
+ subtract_range(range, RANGE_NUM, base, base + size);
}
if (extra_remove_size)
- subtract_range(range, extra_remove_base,
- extra_remove_base + extra_remove_size - 1);
+ subtract_range(range, RANGE_NUM, extra_remove_base,
+ extra_remove_base + extra_remove_size);
if (debug_print) {
printk(KERN_DEBUG "After UC checking\n");
for (i = 0; i < RANGE_NUM; i++) {
if (!range[i].end)
continue;
- printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
- range[i].start, range[i].end + 1);
+ printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+ range[i].start, range[i].end);
}
}
@@ -273,26 +127,22 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
if (debug_print) {
printk(KERN_DEBUG "After sorting\n");
for (i = 0; i < nr_range; i++)
- printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
- range[i].start, range[i].end + 1);
+ printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+ range[i].start, range[i].end);
}
- /* clear those is not used */
- for (i = nr_range; i < RANGE_NUM; i++)
- memset(&range[i], 0, sizeof(range[i]));
-
return nr_range;
}
#ifdef CONFIG_MTRR_SANITIZER
-static unsigned long __init sum_ranges(struct res_range *range, int nr_range)
+static unsigned long __init sum_ranges(struct range *range, int nr_range)
{
unsigned long sum = 0;
int i;
for (i = 0; i < nr_range; i++)
- sum += range[i].end + 1 - range[i].start;
+ sum += range[i].end - range[i].start;
return sum;
}
@@ -621,7 +471,7 @@ static int __init parse_mtrr_spare_reg(char *arg)
early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg);
static int __init
-x86_setup_var_mtrrs(struct res_range *range, int nr_range,
+x86_setup_var_mtrrs(struct range *range, int nr_range,
u64 chunk_size, u64 gran_size)
{
struct var_mtrr_state var_state;
@@ -639,7 +489,7 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range,
/* Write the range: */
for (i = 0; i < nr_range; i++) {
set_var_mtrr_range(&var_state, range[i].start,
- range[i].end - range[i].start + 1);
+ range[i].end - range[i].start);
}
/* Write the last range: */
@@ -742,7 +592,7 @@ mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
unsigned long x_remove_base,
unsigned long x_remove_size, int i)
{
- static struct res_range range_new[RANGE_NUM];
+ static struct range range_new[RANGE_NUM];
unsigned long range_sums_new;
static int nr_range_new;
int num_reg;
@@ -869,10 +719,10 @@ int __init mtrr_cleanup(unsigned address_bits)
* [0, 1M) should always be covered by var mtrr with WB
* and fixed mtrrs should take effect before var mtrr for it:
*/
- nr_range = add_range_with_merge(range, nr_range, 0,
- (1ULL<<(20 - PAGE_SHIFT)) - 1);
+ nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0,
+ 1ULL<<(20 - PAGE_SHIFT));
/* Sort the ranges: */
- sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
+ sort_range(range, nr_range);
range_sums = sum_ranges(range, nr_range);
printk(KERN_INFO "total RAM covered: %ldM\n",
@@ -1089,9 +939,9 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
nr_range = 0;
if (mtrr_tom2) {
range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT));
- range[nr_range].end = (mtrr_tom2 >> PAGE_SHIFT) - 1;
- if (highest_pfn < range[nr_range].end + 1)
- highest_pfn = range[nr_range].end + 1;
+ range[nr_range].end = mtrr_tom2 >> PAGE_SHIFT;
+ if (highest_pfn < range[nr_range].end)
+ highest_pfn = range[nr_range].end;
nr_range++;
}
nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);
@@ -1103,15 +953,15 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
/* Check the holes: */
for (i = 0; i < nr_range - 1; i++) {
- if (range[i].end + 1 < range[i+1].start)
- total_trim_size += real_trim_memory(range[i].end + 1,
+ if (range[i].end < range[i+1].start)
+ total_trim_size += real_trim_memory(range[i].end,
range[i+1].start);
}
/* Check the top: */
i = nr_range - 1;
- if (range[i].end + 1 < end_pfn)
- total_trim_size += real_trim_memory(range[i].end + 1,
+ if (range[i].end < end_pfn)
+ total_trim_size += real_trim_memory(range[i].end,
end_pfn);
if (total_trim_size) {
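The res_range to struct range conversion also switches from inclusive ends to half-open [start, end) intervals, which is why every "end + 1" in this file disappears. A tiny stand-alone illustration of why half-open ends simplify the arithmetic (struct declared locally for the sketch):

#include <stdio.h>

struct range { unsigned long long start, end; };  /* end is exclusive */

static unsigned long long range_len(const struct range *r)
{
        return r->end - r->start;                 /* no +1 needed */
}

static int ranges_adjacent(const struct range *a, const struct range *b)
{
        return a->end == b->start;                /* no off-by-one */
}

int main(void)
{
        struct range r1 = { 0x1000, 0x2000 }, r2 = { 0x2000, 0x3000 };

        printf("len=%llx adjacent=%d\n", range_len(&r1),
               ranges_adjacent(&r1, &r2));        /* len=1000 adjacent=1 */
        return 0;
}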
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index fe4622e..79556bd 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -145,6 +145,7 @@ struct set_mtrr_data {
/**
* ipi_handler - Synchronisation handler. Executed by "other" CPUs.
+ * @info: pointer to mtrr configuration data
*
* Returns nothing.
*/
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index a966b75..740b440 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -12,21 +12,13 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
-#include <linux/ioport.h>
-#include <linux/string.h>
-#include <linux/kexec.h>
-#include <linux/module.h>
-#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/firmware-map.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/setup.h>
-#include <asm/trampoline.h>
/*
* The e820 map is the map that gets modified e.g. with command line parameters
@@ -730,319 +722,44 @@ core_initcall(e820_mark_nvs_memory);
#endif
/*
- * Early reserved memory areas.
- */
-#define MAX_EARLY_RES 32
-
-struct early_res {
- u64 start, end;
- char name[16];
- char overlap_ok;
-};
-static struct early_res early_res[MAX_EARLY_RES] __initdata = {
- { 0, PAGE_SIZE, "BIOS data page", 1 }, /* BIOS data page */
-#if defined(CONFIG_X86_32) && defined(CONFIG_X86_TRAMPOLINE)
- /*
- * But first pinch a few for the stack/trampoline stuff
- * FIXME: Don't need the extra page at 4K, but need to fix
- * trampoline before removing it. (see the GDT stuff)
- */
- { PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE", 1 },
-#endif
-
- {}
-};
-
-static int __init find_overlapped_early(u64 start, u64 end)
-{
- int i;
- struct early_res *r;
-
- for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
- r = &early_res[i];
- if (end > r->start && start < r->end)
- break;
- }
-
- return i;
-}
-
-/*
- * Drop the i-th range from the early reservation map,
- * by copying any higher ranges down one over it, and
- * clearing what had been the last slot.
- */
-static void __init drop_range(int i)
-{
- int j;
-
- for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
- ;
-
- memmove(&early_res[i], &early_res[i + 1],
- (j - 1 - i) * sizeof(struct early_res));
-
- early_res[j - 1].end = 0;
-}
-
-/*
- * Split any existing ranges that:
- * 1) are marked 'overlap_ok', and
- * 2) overlap with the stated range [start, end)
- * into whatever portion (if any) of the existing range is entirely
- * below or entirely above the stated range. Drop the portion
- * of the existing range that overlaps with the stated range,
- * which will allow the caller of this routine to then add that
- * stated range without conflicting with any existing range.
+ * Find a free area with specified alignment in a specific range.
*/
-static void __init drop_overlaps_that_are_ok(u64 start, u64 end)
+u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
{
int i;
- struct early_res *r;
- u64 lower_start, lower_end;
- u64 upper_start, upper_end;
- char name[16];
- for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
- r = &early_res[i];
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ u64 addr;
+ u64 ei_start, ei_last;
- /* Continue past non-overlapping ranges */
- if (end <= r->start || start >= r->end)
+ if (ei->type != E820_RAM)
continue;
- /*
- * Leave non-ok overlaps as is; let caller
- * panic "Overlapping early reservations"
- * when it hits this overlap.
- */
- if (!r->overlap_ok)
- return;
-
- /*
- * We have an ok overlap. We will drop it from the early
- * reservation map, and add back in any non-overlapping
- * portions (lower or upper) as separate, overlap_ok,
- * non-overlapping ranges.
- */
-
- /* 1. Note any non-overlapping (lower or upper) ranges. */
- strncpy(name, r->name, sizeof(name) - 1);
-
- lower_start = lower_end = 0;
- upper_start = upper_end = 0;
- if (r->start < start) {
- lower_start = r->start;
- lower_end = start;
- }
- if (r->end > end) {
- upper_start = end;
- upper_end = r->end;
- }
-
- /* 2. Drop the original ok overlapping range */
- drop_range(i);
-
- i--; /* resume for-loop on copied down entry */
-
- /* 3. Add back in any non-overlapping ranges. */
- if (lower_end)
- reserve_early_overlap_ok(lower_start, lower_end, name);
- if (upper_end)
- reserve_early_overlap_ok(upper_start, upper_end, name);
- }
-}
-
-static void __init __reserve_early(u64 start, u64 end, char *name,
- int overlap_ok)
-{
- int i;
- struct early_res *r;
-
- i = find_overlapped_early(start, end);
- if (i >= MAX_EARLY_RES)
- panic("Too many early reservations");
- r = &early_res[i];
- if (r->end)
- panic("Overlapping early reservations "
- "%llx-%llx %s to %llx-%llx %s\n",
- start, end - 1, name?name:"", r->start,
- r->end - 1, r->name);
- r->start = start;
- r->end = end;
- r->overlap_ok = overlap_ok;
- if (name)
- strncpy(r->name, name, sizeof(r->name) - 1);
-}
-
-/*
- * A few early reservtations come here.
- *
- * The 'overlap_ok' in the name of this routine does -not- mean it
- * is ok for these reservations to overlap an earlier reservation.
- * Rather it means that it is ok for subsequent reservations to
- * overlap this one.
- *
- * Use this entry point to reserve early ranges when you are doing
- * so out of "Paranoia", reserving perhaps more memory than you need,
- * just in case, and don't mind a subsequent overlapping reservation
- * that is known to be needed.
- *
- * The drop_overlaps_that_are_ok() call here isn't really needed.
- * It would be needed if we had two colliding 'overlap_ok'
- * reservations, so that the second such would not panic on the
- * overlap with the first. We don't have any such as of this
- * writing, but might as well tolerate such if it happens in
- * the future.
- */
-void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
-{
- drop_overlaps_that_are_ok(start, end);
- __reserve_early(start, end, name, 1);
-}
-
-/*
- * Most early reservations come here.
- *
- * We first have drop_overlaps_that_are_ok() drop any pre-existing
- * 'overlap_ok' ranges, so that we can then reserve this memory
- * range without risk of panic'ing on an overlapping overlap_ok
- * early reservation.
- */
-void __init reserve_early(u64 start, u64 end, char *name)
-{
- if (start >= end)
- return;
-
- drop_overlaps_that_are_ok(start, end);
- __reserve_early(start, end, name, 0);
-}
-
-void __init free_early(u64 start, u64 end)
-{
- struct early_res *r;
- int i;
-
- i = find_overlapped_early(start, end);
- r = &early_res[i];
- if (i >= MAX_EARLY_RES || r->end != end || r->start != start)
- panic("free_early on not reserved area: %llx-%llx!",
- start, end - 1);
-
- drop_range(i);
-}
-
-void __init early_res_to_bootmem(u64 start, u64 end)
-{
- int i, count;
- u64 final_start, final_end;
-
- count = 0;
- for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++)
- count++;
-
- printk(KERN_INFO "(%d early reservations) ==> bootmem [%010llx - %010llx]\n",
- count, start, end);
- for (i = 0; i < count; i++) {
- struct early_res *r = &early_res[i];
- printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i,
- r->start, r->end, r->name);
- final_start = max(start, r->start);
- final_end = min(end, r->end);
- if (final_start >= final_end) {
- printk(KERN_CONT "\n");
- continue;
- }
- printk(KERN_CONT " ==> [%010llx - %010llx]\n",
- final_start, final_end);
- reserve_bootmem_generic(final_start, final_end - final_start,
- BOOTMEM_DEFAULT);
- }
-}
+ ei_last = ei->addr + ei->size;
+ ei_start = ei->addr;
+ addr = find_early_area(ei_start, ei_last, start, end,
+ size, align);
-/* Check for already reserved areas */
-static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
-{
- int i;
- u64 addr = *addrp;
- int changed = 0;
- struct early_res *r;
-again:
- i = find_overlapped_early(addr, addr + size);
- r = &early_res[i];
- if (i < MAX_EARLY_RES && r->end) {
- *addrp = addr = round_up(r->end, align);
- changed = 1;
- goto again;
+ if (addr != -1ULL)
+ return addr;
}
- return changed;
+ return -1ULL;
}
-/* Check for already reserved areas */
-static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
+u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align)
{
- int i;
- u64 addr = *addrp, last;
- u64 size = *sizep;
- int changed = 0;
-again:
- last = addr + size;
- for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
- struct early_res *r = &early_res[i];
- if (last > r->start && addr < r->start) {
- size = r->start - addr;
- changed = 1;
- goto again;
- }
- if (last > r->end && addr < r->end) {
- addr = round_up(r->end, align);
- size = last - addr;
- changed = 1;
- goto again;
- }
- if (last <= r->end && addr >= r->start) {
- (*sizep)++;
- return 0;
- }
- }
- if (changed) {
- *addrp = addr;
- *sizep = size;
- }
- return changed;
+ return find_e820_area(start, end, size, align);
}
-/*
- * Find a free area with specified alignment in a specific range.
- */
-u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
+u64 __init get_max_mapped(void)
{
- int i;
+ u64 end = max_pfn_mapped;
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
- u64 addr, last;
- u64 ei_last;
+ end <<= PAGE_SHIFT;
- if (ei->type != E820_RAM)
- continue;
- addr = round_up(ei->addr, align);
- ei_last = ei->addr + ei->size;
- if (addr < start)
- addr = round_up(start, align);
- if (addr >= ei_last)
- continue;
- while (bad_addr(&addr, size, align) && addr+size <= ei_last)
- ;
- last = addr + size;
- if (last > ei_last)
- continue;
- if (last > end)
- continue;
- return addr;
- }
- return -1ULL;
+ return end;
}
-
/*
* Find next free range after *start
*/
@@ -1052,25 +769,19 @@ u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
for (i = 0; i < e820.nr_map; i++) {
struct e820entry *ei = &e820.map[i];
- u64 addr, last;
- u64 ei_last;
+ u64 addr;
+ u64 ei_start, ei_last;
if (ei->type != E820_RAM)
continue;
- addr = round_up(ei->addr, align);
+
ei_last = ei->addr + ei->size;
- if (addr < start)
- addr = round_up(start, align);
- if (addr >= ei_last)
- continue;
- *sizep = ei_last - addr;
- while (bad_addr_size(&addr, sizep, align) &&
- addr + *sizep <= ei_last)
- ;
- last = addr + *sizep;
- if (last > ei_last)
- continue;
- return addr;
+ ei_start = ei->addr;
+ addr = find_early_area_size(ei_start, ei_last, start,
+ sizep, align);
+
+ if (addr != -1ULL)
+ return addr;
}
return -1ULL;
@@ -1429,6 +1140,8 @@ void __init e820_reserve_resources_late(void)
end = MAX_RESOURCE_SIZE;
if (start >= end)
continue;
+ printk(KERN_DEBUG "reserve RAM buffer: %016llx - %016llx ",
+ start, end);
reserve_region_with_split(&iomem_resource, start, end,
"RAM buffer");
}
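find_e820_area() now only walks E820_RAM entries and delegates the actual hole search to find_early_area(), provided by the early_res code this series factors out of e820.c. The deleted bad_addr() loop shows the underlying idea: keep bumping an aligned candidate past any reservation it overlaps. A compressed user-space sketch of that scan, with invented types:

#include <stdio.h>

struct res { unsigned long long start, end; };            /* [start,end) */

static unsigned long long roundup2(unsigned long long x,
                                   unsigned long long a)
{
        return (x + a - 1) & ~(a - 1);                    /* a: power of 2 */
}

/* Bump addr past any reservation it overlaps, like bad_addr() did. */
static unsigned long long find_free(const struct res *res, int n,
                                    unsigned long long lo,
                                    unsigned long long hi,
                                    unsigned long long size,
                                    unsigned long long align)
{
        unsigned long long addr = roundup2(lo, align);
        int i, moved;

        do {
                moved = 0;
                for (i = 0; i < n; i++)
                        if (addr < res[i].end && addr + size > res[i].start) {
                                addr = roundup2(res[i].end, align);
                                moved = 1;
                        }
        } while (moved);
        return addr + size <= hi ? addr : ~0ULL;
}

int main(void)
{
        struct res r[] = { { 0x0, 0x1000 }, { 0x3000, 0x5000 } };

        printf("%llx\n", find_free(r, 2, 0, 0x10000, 0x2000, 0x1000));
        /* prints 1000: first aligned 8K hole clear of both reservations */
        return 0;
}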
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 5051b94..adedeef 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -29,6 +29,16 @@ static void __init i386_default_early_setup(void)
void __init i386_start_kernel(void)
{
+#ifdef CONFIG_X86_TRAMPOLINE
+ /*
+ * But first pinch a few for the stack/trampoline stuff
+ * FIXME: Don't need the extra page at 4K, but need to fix
+ * trampoline before removing it. (see the GDT stuff)
+ */
+ reserve_early_overlap_ok(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE,
+ "EX TRAMPOLINE");
+#endif
+
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 7fd318b..37c3d4b 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -442,8 +442,8 @@ is386: movl $2,%ecx # set MP
*/
cmpb $0,ready
jne 1f
- movl $per_cpu__gdt_page,%eax
- movl $per_cpu__stack_canary,%ecx
+ movl $gdt_page,%eax
+ movl $stack_canary,%ecx
movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
shrl $16, %ecx
movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
@@ -706,7 +706,7 @@ idt_descr:
.word 0 # 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
.word GDT_ENTRIES*8-1
- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
+ .long gdt_page /* Overwritten for secondary CPUs */
/*
* The boot_gdt must mirror the equivalent in setup.S and is
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index df89102..8c93a84 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -32,7 +32,7 @@
*/
static int i8259A_auto_eoi;
-DEFINE_SPINLOCK(i8259A_lock);
+DEFINE_RAW_SPINLOCK(i8259A_lock);
static void mask_and_ack_8259A(unsigned int);
struct irq_chip i8259A_chip = {
@@ -68,13 +68,13 @@ void disable_8259A_irq(unsigned int irq)
unsigned int mask = 1 << irq;
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask |= mask;
if (irq & 8)
outb(cached_slave_mask, PIC_SLAVE_IMR);
else
outb(cached_master_mask, PIC_MASTER_IMR);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
void enable_8259A_irq(unsigned int irq)
@@ -82,13 +82,13 @@ void enable_8259A_irq(unsigned int irq)
unsigned int mask = ~(1 << irq);
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask &= mask;
if (irq & 8)
outb(cached_slave_mask, PIC_SLAVE_IMR);
else
outb(cached_master_mask, PIC_MASTER_IMR);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
int i8259A_irq_pending(unsigned int irq)
@@ -97,12 +97,12 @@ int i8259A_irq_pending(unsigned int irq)
unsigned long flags;
int ret;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
if (irq < 8)
ret = inb(PIC_MASTER_CMD) & mask;
else
ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return ret;
}
@@ -150,7 +150,7 @@ static void mask_and_ack_8259A(unsigned int irq)
unsigned int irqmask = 1 << irq;
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
/*
* Lightweight spurious IRQ detection. We do not want
* to overdo spurious IRQ handling - it's usually a sign
@@ -183,7 +183,7 @@ handle_real_irq:
outb(cached_master_mask, PIC_MASTER_IMR);
outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
}
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return;
spurious_8259A_irq:
@@ -285,24 +285,24 @@ void mask_8259A(void)
{
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
void unmask_8259A(void)
{
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
void init_8259A(int auto_eoi)
@@ -311,7 +311,7 @@ void init_8259A(int auto_eoi)
i8259A_auto_eoi = auto_eoi;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
@@ -356,5 +356,5 @@ void init_8259A(int auto_eoi)
outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
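/*
 * For reference: the spin_lock -> raw_spin_lock conversion above matters on
 * preempt-rt style kernels, where a plain spinlock_t can become a sleeping
 * lock; the PIC's IMR/OCW registers must be programmed with interrupts hard
 * disabled and no preemption. A minimal sketch of the resulting pattern,
 * with a hypothetical lock and critical section:
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);		/* hypothetical lock */

static void demo_pic_access(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	/* touch hardware registers that must not be interrupted */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}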
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index d593222..fce55d5 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -84,24 +84,7 @@ static struct irqaction irq2 = {
};
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
- [0 ... IRQ0_VECTOR - 1] = -1,
- [IRQ0_VECTOR] = 0,
- [IRQ1_VECTOR] = 1,
- [IRQ2_VECTOR] = 2,
- [IRQ3_VECTOR] = 3,
- [IRQ4_VECTOR] = 4,
- [IRQ5_VECTOR] = 5,
- [IRQ6_VECTOR] = 6,
- [IRQ7_VECTOR] = 7,
- [IRQ8_VECTOR] = 8,
- [IRQ9_VECTOR] = 9,
- [IRQ10_VECTOR] = 10,
- [IRQ11_VECTOR] = 11,
- [IRQ12_VECTOR] = 12,
- [IRQ13_VECTOR] = 13,
- [IRQ14_VECTOR] = 14,
- [IRQ15_VECTOR] = 15,
- [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
+ [0 ... NR_VECTORS - 1] = -1,
};
int vector_used_by_percpu_irq(unsigned int vector)
@@ -116,6 +99,9 @@ int vector_used_by_percpu_irq(unsigned int vector)
return 0;
}
+/* Number of legacy interrupts */
+int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
+
void __init init_ISA_irqs(void)
{
int i;
@@ -142,6 +128,19 @@ void __init init_ISA_irqs(void)
void __init init_IRQ(void)
{
+ int i;
+
+ /*
+ * On CPU 0, assign IRQ0_VECTOR..IRQ15_VECTOR to IRQs 0..15.
+ * If these IRQs are handled by legacy interrupt controllers such as
+ * the PIC, this configuration will likely be static after boot. If
+ * they are handled by more modern controllers such as the IO-APIC,
+ * this vector space can be freed and re-used dynamically as the
+ * IRQs migrate etc.
+ */
+ for (i = 0; i < nr_legacy_irqs; i++)
+ per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;
+
x86_init.irqs.intr_init();
}
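/*
 * For reference: the simplified initializer plus the loop above leave every
 * vector unassigned except the legacy block. An illustrative user-space
 * model of the resulting table, assuming IRQ0_VECTOR == 0x30 and
 * NR_IRQS_LEGACY == 16 (both are arch-defined; values here are only for
 * illustration):
 */
#include <stdio.h>

#define NR_VECTORS	256
#define IRQ0_VECTOR	0x30		/* assumed for illustration */
#define NR_IRQS_LEGACY	16

int main(void)
{
	int vector_irq[NR_VECTORS], i;

	for (i = 0; i < NR_VECTORS; i++)	/* boot-time initializer */
		vector_irq[i] = -1;
	for (i = 0; i < NR_IRQS_LEGACY; i++)	/* done in init_IRQ() */
		vector_irq[IRQ0_VECTOR + i] = i;

	printf("vector %#x -> irq %d\n",
	       IRQ0_VECTOR + 3, vector_irq[IRQ0_VECTOR + 3]);
	return 0;
}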
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 5de9f4a..b43bbae 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -49,6 +49,7 @@
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
+#include <linux/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
@@ -106,16 +107,22 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
};
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
-/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
-static void __kprobes set_jmp_op(void *from, void *to)
+static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
{
- struct __arch_jmp_op {
- char op;
+ struct __arch_relative_insn {
+ u8 op;
s32 raddr;
- } __attribute__((packed)) * jop;
- jop = (struct __arch_jmp_op *)from;
- jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
- jop->op = RELATIVEJUMP_INSTRUCTION;
+ } __attribute__((packed)) *insn;
+
+ insn = (struct __arch_relative_insn *)from;
+ insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
+ insn->op = op;
+}
+
+/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
+static void __kprobes synthesize_reljump(void *from, void *to)
+{
+ __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
}
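/*
 * For reference: a relative jump or call is one opcode byte followed by a
 * signed 32-bit displacement counted from the end of the 5-byte
 * instruction, which is why the code above subtracts (from + 5). A worked
 * user-space sketch with hypothetical addresses:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t from = 0x1000;			/* hypothetical probe site  */
	int64_t to   = 0x2000;			/* hypothetical jump target */
	int32_t raddr = (int32_t)(to - (from + 5));

	printf("raddr = %#x\n", raddr);		/* 0xffb: e9 fb 0f 00 00 */
	return 0;
}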
/*
@@ -202,7 +209,7 @@ static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
/*
* Basically, kp->ainsn.insn has an original instruction.
* However, RIP-relative instruction can not do single-stepping
- * at different place, fix_riprel() tweaks the displacement of
+ * at a different place; __copy_instruction() tweaks the displacement of
* that instruction. In that case, we can't recover the instruction
* from the kp->ainsn.insn.
*
@@ -284,21 +291,37 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
}
/*
- * Adjust the displacement if the instruction uses the %rip-relative
- * addressing mode.
+ * Copy an instruction and adjust the displacement if the instruction
+ * uses the %rip-relative addressing mode.
* If it does, return the address of the 32-bit displacement word.
* If not, return null.
* Only applicable to 64-bit x86.
*/
-static void __kprobes fix_riprel(struct kprobe *p)
+static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
{
-#ifdef CONFIG_X86_64
struct insn insn;
- kernel_insn_init(&insn, p->ainsn.insn);
+ int ret;
+ kprobe_opcode_t buf[MAX_INSN_SIZE];
+ kernel_insn_init(&insn, src);
+ if (recover) {
+ insn_get_opcode(&insn);
+ if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
+ ret = recover_probed_instruction(buf,
+ (unsigned long)src);
+ if (ret)
+ return 0;
+ kernel_insn_init(&insn, buf);
+ }
+ }
+ insn_get_length(&insn);
+ memcpy(dest, insn.kaddr, insn.length);
+
+#ifdef CONFIG_X86_64
if (insn_rip_relative(&insn)) {
s64 newdisp;
u8 *disp;
+ kernel_insn_init(&insn, dest);
insn_get_displacement(&insn);
/*
* The copied instruction uses the %rip-relative addressing
@@ -312,20 +335,23 @@ static void __kprobes fix_riprel(struct kprobe *p)
* extension of the original signed 32-bit displacement would
* have given.
*/
- newdisp = (u8 *) p->addr + (s64) insn.displacement.value -
- (u8 *) p->ainsn.insn;
+ newdisp = (u8 *) src + (s64) insn.displacement.value -
+ (u8 *) dest;
BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
- disp = (u8 *) p->ainsn.insn + insn_offset_displacement(&insn);
+ disp = (u8 *) dest + insn_offset_displacement(&insn);
*(s32 *) disp = (s32) newdisp;
}
#endif
+ return insn.length;
}
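/*
 * For reference: the newdisp computation keeps a %rip-relative operand
 * pointing at the same absolute address once the copy at 'dest' runs,
 * since (src + len) + disp must equal (dest + len) + newdisp. A worked
 * sketch with hypothetical addresses:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t src  = 0x1000;		/* hypothetical original site    */
	int64_t dest = 0x9000;		/* hypothetical out-of-line copy */
	int32_t disp = 0x100;		/* original displacement         */
	int64_t newdisp = src + disp - dest;

	/* both forms resolve to the absolute address src + len + 0x100 */
	printf("newdisp = %lld\n", (long long)newdisp);
	return 0;
}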
static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-
- fix_riprel(p);
+ /*
+ * Copy the instruction without recovering an int3, because the int3
+ * may have been put there by another subsystem.
+ */
+ __copy_instruction(p->ainsn.insn, p->addr, 0);
if (can_boost(p->addr))
p->ainsn.boostable = 0;
@@ -406,18 +432,6 @@ static void __kprobes restore_btf(void)
update_debugctlmsr(current->thread.debugctlmsr);
}
-static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
-{
- clear_btf();
- regs->flags |= X86_EFLAGS_TF;
- regs->flags &= ~X86_EFLAGS_IF;
- /* single step inline if the instruction is an int3 */
- if (p->opcode == BREAKPOINT_INSTRUCTION)
- regs->ip = (unsigned long)p->addr;
- else
- regs->ip = (unsigned long)p->ainsn.insn;
-}
-
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
@@ -429,20 +443,50 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
*sara = (unsigned long) &kretprobe_trampoline;
}
+#ifdef CONFIG_OPTPROBES
+static int __kprobes setup_detour_execution(struct kprobe *p,
+ struct pt_regs *regs,
+ int reenter);
+#else
+#define setup_detour_execution(p, regs, reenter) (0)
+#endif
+
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
+ struct kprobe_ctlblk *kcb, int reenter)
{
+ if (setup_detour_execution(p, regs, reenter))
+ return;
+
#if !defined(CONFIG_PREEMPT)
if (p->ainsn.boostable == 1 && !p->post_handler) {
/* Boost up -- we can execute copied instructions directly */
- reset_current_kprobe();
+ if (!reenter)
+ reset_current_kprobe();
+ /*
+ * Reentering boosted probe doesn't reset current_kprobe,
+ * nor set current_kprobe, because it doesn't use single
+ * stepping.
+ */
regs->ip = (unsigned long)p->ainsn.insn;
preempt_enable_no_resched();
return;
}
#endif
- prepare_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_HIT_SS;
+ if (reenter) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_REENTER;
+ } else
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ /* Prepare real single stepping */
+ clear_btf();
+ regs->flags |= X86_EFLAGS_TF;
+ regs->flags &= ~X86_EFLAGS_IF;
+ /* single step inline if the instruction is an int3 */
+ if (p->opcode == BREAKPOINT_INSTRUCTION)
+ regs->ip = (unsigned long)p->addr;
+ else
+ regs->ip = (unsigned long)p->ainsn.insn;
}
/*
@@ -456,11 +500,8 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
switch (kcb->kprobe_status) {
case KPROBE_HIT_SSDONE:
case KPROBE_HIT_ACTIVE:
- save_previous_kprobe(kcb);
- set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
- prepare_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_REENTER;
+ setup_singlestep(p, regs, kcb, 1);
break;
case KPROBE_HIT_SS:
/* A probe has been hit in the codepath leading up to, or just
@@ -535,13 +576,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
* more here.
*/
if (!p->pre_handler || !p->pre_handler(p, regs))
- setup_singlestep(p, regs, kcb);
+ setup_singlestep(p, regs, kcb, 0);
return 1;
}
} else if (kprobe_running()) {
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
- setup_singlestep(p, regs, kcb);
+ setup_singlestep(p, regs, kcb, 0);
return 1;
}
} /* else: not a kprobe fault; let the kernel handle it */
@@ -550,6 +591,69 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
return 0;
}
+#ifdef CONFIG_X86_64
+#define SAVE_REGS_STRING \
+ /* Skip cs, ip, orig_ax. */ \
+ " subq $24, %rsp\n" \
+ " pushq %rdi\n" \
+ " pushq %rsi\n" \
+ " pushq %rdx\n" \
+ " pushq %rcx\n" \
+ " pushq %rax\n" \
+ " pushq %r8\n" \
+ " pushq %r9\n" \
+ " pushq %r10\n" \
+ " pushq %r11\n" \
+ " pushq %rbx\n" \
+ " pushq %rbp\n" \
+ " pushq %r12\n" \
+ " pushq %r13\n" \
+ " pushq %r14\n" \
+ " pushq %r15\n"
+#define RESTORE_REGS_STRING \
+ " popq %r15\n" \
+ " popq %r14\n" \
+ " popq %r13\n" \
+ " popq %r12\n" \
+ " popq %rbp\n" \
+ " popq %rbx\n" \
+ " popq %r11\n" \
+ " popq %r10\n" \
+ " popq %r9\n" \
+ " popq %r8\n" \
+ " popq %rax\n" \
+ " popq %rcx\n" \
+ " popq %rdx\n" \
+ " popq %rsi\n" \
+ " popq %rdi\n" \
+ /* Skip orig_ax, ip, cs */ \
+ " addq $24, %rsp\n"
+#else
+#define SAVE_REGS_STRING \
+ /* Skip cs, ip, orig_ax and gs. */ \
+ " subl $16, %esp\n" \
+ " pushl %fs\n" \
+ " pushl %ds\n" \
+ " pushl %es\n" \
+ " pushl %eax\n" \
+ " pushl %ebp\n" \
+ " pushl %edi\n" \
+ " pushl %esi\n" \
+ " pushl %edx\n" \
+ " pushl %ecx\n" \
+ " pushl %ebx\n"
+#define RESTORE_REGS_STRING \
+ " popl %ebx\n" \
+ " popl %ecx\n" \
+ " popl %edx\n" \
+ " popl %esi\n" \
+ " popl %edi\n" \
+ " popl %ebp\n" \
+ " popl %eax\n" \
+ /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here */\
+ " addl $24, %esp\n"
+#endif
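/*
 * For reference: after the trampoline pushes %rsp and the flags,
 * SAVE_REGS_STRING lays the remaining registers out so the stack mirrors
 * struct pt_regs; that is where offsets such as 152(%rsp) below come from.
 * A sketch enumerating the x86-64 layout (offsets from the post-save stack
 * pointer; the 32-bit layout differs):
 */
#include <stdio.h>

int main(void)
{
	const char *slots[] = {
		"r15", "r14", "r13", "r12", "rbp", "rbx", "r11", "r10",
		"r9", "r8", "rax", "rcx", "rdx", "rsi", "rdi",
		"orig_ax (skipped)", "ip (skipped)", "cs (skipped)",
		"flags", "rsp",
	};
	int i;

	for (i = 0; i < 20; i++)
		printf("%3d: %s\n", i * 8, slots[i]);
	/* hence "movq %rax, 152(%rsp)" rewrites the saved rsp slot */
	return 0;
}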
+
/*
* When a retprobed function returns, this code saves registers and
* calls trampoline_handler(), which in turn calls the kretprobe's handler.
@@ -563,65 +667,16 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
/* We don't bother saving the ss register */
" pushq %rsp\n"
" pushfq\n"
- /*
- * Skip cs, ip, orig_ax.
- * trampoline_handler() will plug in these values
- */
- " subq $24, %rsp\n"
- " pushq %rdi\n"
- " pushq %rsi\n"
- " pushq %rdx\n"
- " pushq %rcx\n"
- " pushq %rax\n"
- " pushq %r8\n"
- " pushq %r9\n"
- " pushq %r10\n"
- " pushq %r11\n"
- " pushq %rbx\n"
- " pushq %rbp\n"
- " pushq %r12\n"
- " pushq %r13\n"
- " pushq %r14\n"
- " pushq %r15\n"
+ SAVE_REGS_STRING
" movq %rsp, %rdi\n"
" call trampoline_handler\n"
/* Replace saved sp with true return address. */
" movq %rax, 152(%rsp)\n"
- " popq %r15\n"
- " popq %r14\n"
- " popq %r13\n"
- " popq %r12\n"
- " popq %rbp\n"
- " popq %rbx\n"
- " popq %r11\n"
- " popq %r10\n"
- " popq %r9\n"
- " popq %r8\n"
- " popq %rax\n"
- " popq %rcx\n"
- " popq %rdx\n"
- " popq %rsi\n"
- " popq %rdi\n"
- /* Skip orig_ax, ip, cs */
- " addq $24, %rsp\n"
+ RESTORE_REGS_STRING
" popfq\n"
#else
" pushf\n"
- /*
- * Skip cs, ip, orig_ax and gs.
- * trampoline_handler() will plug in these values
- */
- " subl $16, %esp\n"
- " pushl %fs\n"
- " pushl %es\n"
- " pushl %ds\n"
- " pushl %eax\n"
- " pushl %ebp\n"
- " pushl %edi\n"
- " pushl %esi\n"
- " pushl %edx\n"
- " pushl %ecx\n"
- " pushl %ebx\n"
+ SAVE_REGS_STRING
" movl %esp, %eax\n"
" call trampoline_handler\n"
/* Move flags to cs */
@@ -629,15 +684,7 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
" movl %edx, 52(%esp)\n"
/* Replace saved flags with true return address. */
" movl %eax, 56(%esp)\n"
- " popl %ebx\n"
- " popl %ecx\n"
- " popl %edx\n"
- " popl %esi\n"
- " popl %edi\n"
- " popl %ebp\n"
- " popl %eax\n"
- /* Skip ds, es, fs, gs, orig_ax and ip */
- " addl $24, %esp\n"
+ RESTORE_REGS_STRING
" popf\n"
#endif
" ret\n");
@@ -805,8 +852,8 @@ static void __kprobes resume_execution(struct kprobe *p,
* These instructions can be executed directly if it
* jumps back to correct address.
*/
- set_jmp_op((void *)regs->ip,
- (void *)orig_ip + (regs->ip - copy_ip));
+ synthesize_reljump((void *)regs->ip,
+ (void *)orig_ip + (regs->ip - copy_ip));
p->ainsn.boostable = 1;
} else {
p->ainsn.boostable = -1;
@@ -1033,6 +1080,358 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
return 0;
}
+
+#ifdef CONFIG_OPTPROBES
+
+/* Insert a call instruction at address 'from', which calls address 'to'. */
+static void __kprobes synthesize_relcall(void *from, void *to)
+{
+ __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
+}
+
+/* Insert a move instruction which loads a pointer into eax/rdi (1st arg). */
+static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
+ unsigned long val)
+{
+#ifdef CONFIG_X86_64
+ *addr++ = 0x48;
+ *addr++ = 0xbf;
+#else
+ *addr++ = 0xb8;
+#endif
+ *(unsigned long *)addr = val;
+}
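/*
 * For reference: on x86-64 the two prefix bytes are REX.W (0x48) plus the
 * "mov imm64, %rdi" opcode (0xbf); on 32-bit, 0xb8 is "mov imm32, %eax".
 * Either way the probe pointer lands in the first-argument register of the
 * optimized callback. A sketch of the emitted bytes for a hypothetical
 * immediate, assuming a little-endian 64-bit host:
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	unsigned char buf[10];
	uint64_t val = 0x12345678;	/* hypothetical &op pointer */
	int i, len = 0;

	buf[len++] = 0x48;		/* REX.W           */
	buf[len++] = 0xbf;		/* mov imm64, %rdi */
	memcpy(buf + len, &val, sizeof(val));
	len += sizeof(val);

	for (i = 0; i < len; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}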
+
+void __kprobes kprobes_optinsn_template_holder(void)
+{
+ asm volatile (
+ ".global optprobe_template_entry\n"
+ "optprobe_template_entry: \n"
+#ifdef CONFIG_X86_64
+ /* We don't bother saving the ss register */
+ " pushq %rsp\n"
+ " pushfq\n"
+ SAVE_REGS_STRING
+ " movq %rsp, %rsi\n"
+ ".global optprobe_template_val\n"
+ "optprobe_template_val: \n"
+ ASM_NOP5
+ ASM_NOP5
+ ".global optprobe_template_call\n"
+ "optprobe_template_call: \n"
+ ASM_NOP5
+ /* Move flags to rsp */
+ " movq 144(%rsp), %rdx\n"
+ " movq %rdx, 152(%rsp)\n"
+ RESTORE_REGS_STRING
+ /* Skip flags entry */
+ " addq $8, %rsp\n"
+ " popfq\n"
+#else /* CONFIG_X86_32 */
+ " pushf\n"
+ SAVE_REGS_STRING
+ " movl %esp, %edx\n"
+ ".global optprobe_template_val\n"
+ "optprobe_template_val: \n"
+ ASM_NOP5
+ ".global optprobe_template_call\n"
+ "optprobe_template_call: \n"
+ ASM_NOP5
+ RESTORE_REGS_STRING
+ " addl $4, %esp\n" /* skip cs */
+ " popf\n"
+#endif
+ ".global optprobe_template_end\n"
+ "optprobe_template_end: \n");
+}
+
+#define TMPL_MOVE_IDX \
+ ((long)&optprobe_template_val - (long)&optprobe_template_entry)
+#define TMPL_CALL_IDX \
+ ((long)&optprobe_template_call - (long)&optprobe_template_entry)
+#define TMPL_END_IDX \
+ ((long)&optprobe_template_end - (long)&optprobe_template_entry)
+
+#define INT3_SIZE sizeof(kprobe_opcode_t)
+
+/* Optimized kprobe callback function: called from optinsn */
+static void __kprobes optimized_callback(struct optimized_kprobe *op,
+ struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ preempt_disable();
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(&op->kp);
+ } else {
+ /* Save skipped registers */
+#ifdef CONFIG_X86_64
+ regs->cs = __KERNEL_CS;
+#else
+ regs->cs = __KERNEL_CS | get_kernel_rpl();
+ regs->gs = 0;
+#endif
+ regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
+ regs->orig_ax = ~0UL;
+
+ __get_cpu_var(current_kprobe) = &op->kp;
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ opt_pre_handler(&op->kp, regs);
+ __get_cpu_var(current_kprobe) = NULL;
+ }
+ preempt_enable_no_resched();
+}
+
+static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
+{
+ int len = 0, ret;
+
+ while (len < RELATIVEJUMP_SIZE) {
+ ret = __copy_instruction(dest + len, src + len, 1);
+ if (!ret || !can_boost(dest + len))
+ return -EINVAL;
+ len += ret;
+ }
+ /* Check whether the address range is reserved */
+ if (ftrace_text_reserved(src, src + len - 1) ||
+ alternatives_text_reserved(src, src + len - 1))
+ return -EBUSY;
+
+ return len;
+}
+
+/* Check whether insn is indirect jump */
+static int __kprobes insn_is_indirect_jump(struct insn *insn)
+{
+ return ((insn->opcode.bytes[0] == 0xff &&
+ (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
+ insn->opcode.bytes[0] == 0xea); /* Segment based jump */
+}
+
+/* Check whether insn jumps into specified address range */
+static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
+{
+ unsigned long target = 0;
+
+ switch (insn->opcode.bytes[0]) {
+ case 0xe0: /* loopne */
+ case 0xe1: /* loope */
+ case 0xe2: /* loop */
+ case 0xe3: /* jcxz */
+ case 0xe9: /* near relative jump */
+ case 0xeb: /* short relative jump */
+ break;
+ case 0x0f:
+ if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
+ break;
+ return 0;
+ default:
+ if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
+ break;
+ return 0;
+ }
+ target = (unsigned long)insn->next_byte + insn->immediate.value;
+
+ return (start <= target && target <= start + len);
+}
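/*
 * For reference: relative branches encode their target as
 * next_byte + immediate, so the check above rejects optimizing a probe
 * whose replaced byte range is itself a branch target. A worked sketch for
 * a hypothetical short jump (eb 05):
 */
#include <stdio.h>

int main(void)
{
	unsigned long insn_addr = 0x1000;	/* hypothetical */
	int length = 2;				/* eb 05        */
	long imm = 5;
	unsigned long next_byte = insn_addr + length;
	unsigned long target = next_byte + imm;	/* 0x1007 */
	unsigned long start = 0x1001, len = 4;	/* bytes being replaced */

	printf("jumps into range: %d\n",
	       start <= target && target <= start + len);
	return 0;
}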
+
+/* Decode the whole function to ensure no instruction jumps into the target */
+static int __kprobes can_optimize(unsigned long paddr)
+{
+ int ret;
+ unsigned long addr, size = 0, offset = 0;
+ struct insn insn;
+ kprobe_opcode_t buf[MAX_INSN_SIZE];
+ /* Dummy buffers for lookup_symbol_attrs */
+ static char __dummy_buf[KSYM_NAME_LEN];
+
+ /* Lookup symbol including addr */
+ if (!kallsyms_lookup(paddr, &size, &offset, NULL, __dummy_buf))
+ return 0;
+
+ /* Check there is enough space for a relative jump. */
+ if (size - offset < RELATIVEJUMP_SIZE)
+ return 0;
+
+ /* Decode instructions */
+ addr = paddr - offset;
+ while (addr < paddr - offset + size) { /* Decode until function end */
+ if (search_exception_tables(addr))
+ /*
+ * Since some fixup code jumps into this function,
+ * we cannot optimize a kprobe inside this function.
+ */
+ return 0;
+ kernel_insn_init(&insn, (void *)addr);
+ insn_get_opcode(&insn);
+ if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
+ ret = recover_probed_instruction(buf, addr);
+ if (ret)
+ return 0;
+ kernel_insn_init(&insn, buf);
+ }
+ insn_get_length(&insn);
+ /* Recover address */
+ insn.kaddr = (void *)addr;
+ insn.next_byte = (void *)(addr + insn.length);
+ /* Check that no instruction jumps into the target */
+ if (insn_is_indirect_jump(&insn) ||
+ insn_jump_into_range(&insn, paddr + INT3_SIZE,
+ RELATIVE_ADDR_SIZE))
+ return 0;
+ addr += insn.length;
+ }
+
+ return 1;
+}
+
+/* Check optimized_kprobe can actually be optimized. */
+int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
+{
+ int i;
+ struct kprobe *p;
+
+ for (i = 1; i < op->optinsn.size; i++) {
+ p = get_kprobe(op->kp.addr + i);
+ if (p && !kprobe_disabled(p))
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+/* Check the addr is within the optimized instructions. */
+int __kprobes arch_within_optimized_kprobe(struct optimized_kprobe *op,
+ unsigned long addr)
+{
+ return ((unsigned long)op->kp.addr <= addr &&
+ (unsigned long)op->kp.addr + op->optinsn.size > addr);
+}
+
+/* Free optimized instruction slot */
+static __kprobes
+void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
+{
+ if (op->optinsn.insn) {
+ free_optinsn_slot(op->optinsn.insn, dirty);
+ op->optinsn.insn = NULL;
+ op->optinsn.size = 0;
+ }
+}
+
+void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
+{
+ __arch_remove_optimized_kprobe(op, 1);
+}
+
+/*
+ * Copy the instructions that the jump will replace.
+ * Target instructions MUST be relocatable (checked inside)
+ */
+int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
+{
+ u8 *buf;
+ int ret;
+ long rel;
+
+ if (!can_optimize((unsigned long)op->kp.addr))
+ return -EILSEQ;
+
+ op->optinsn.insn = get_optinsn_slot();
+ if (!op->optinsn.insn)
+ return -ENOMEM;
+
+ /*
+ * Verify that the address gap is within the 2GB range that a
+ * relative jump can cover.
+ */
+ rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
+ if (abs(rel) > 0x7fffffff)
+ return -ERANGE;
+
+ buf = (u8 *)op->optinsn.insn;
+
+ /* Copy instructions into the out-of-line buffer */
+ ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
+ if (ret < 0) {
+ __arch_remove_optimized_kprobe(op, 0);
+ return ret;
+ }
+ op->optinsn.size = ret;
+
+ /* Copy arch-dep-instance from template */
+ memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
+
+ /* Set probe information */
+ synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
+
+ /* Set probe function call */
+ synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
+
+ /* Set returning jmp instruction at the tail of out-of-line buffer */
+ synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
+ (u8 *)op->kp.addr + op->optinsn.size);
+
+ flush_icache_range((unsigned long) buf,
+ (unsigned long) buf + TMPL_END_IDX +
+ op->optinsn.size + RELATIVEJUMP_SIZE);
+ return 0;
+}
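/*
 * For reference: the detour buffer assembled above ends up as the template,
 * then the relocated original instructions, then a reljump back into the
 * original code stream. A sketch of the layout arithmetic with hypothetical
 * sizes:
 */
#include <stdio.h>

int main(void)
{
	int tmpl_end = 96;	/* hypothetical TMPL_END_IDX  */
	int insn_size = 7;	/* hypothetical copied length */
	int reljump_size = 5;	/* opcode e9 + rel32          */

	printf("template:     [0, %d)\n", tmpl_end);
	printf("copied insns: [%d, %d)\n", tmpl_end, tmpl_end + insn_size);
	printf("jump back:    [%d, %d)\n", tmpl_end + insn_size,
	       tmpl_end + insn_size + reljump_size);
	return 0;
}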
+
+/* Replace a breakpoint (int3) with a relative jump. */
+int __kprobes arch_optimize_kprobe(struct optimized_kprobe *op)
+{
+ unsigned char jmp_code[RELATIVEJUMP_SIZE];
+ s32 rel = (s32)((long)op->optinsn.insn -
+ ((long)op->kp.addr + RELATIVEJUMP_SIZE));
+
+ /* Backup instructions which will be replaced by jump address */
+ memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
+ RELATIVE_ADDR_SIZE);
+
+ jmp_code[0] = RELATIVEJUMP_OPCODE;
+ *(s32 *)(&jmp_code[1]) = rel;
+
+ /*
+ * text_poke_smp doesn't support modifying code in NMI/MCE handlers.
+ * However, since kprobes itself also doesn't support probing
+ * NMI/MCE code, this is not a problem.
+ */
+ text_poke_smp(op->kp.addr, jmp_code, RELATIVEJUMP_SIZE);
+ return 0;
+}
+
+/* Replace a relative jump with a breakpoint (int3). */
+void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
+{
+ u8 buf[RELATIVEJUMP_SIZE];
+
+ /* Set int3 to first byte for kprobes */
+ buf[0] = BREAKPOINT_INSTRUCTION;
+ memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
+ text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE);
+}
+
+static int __kprobes setup_detour_execution(struct kprobe *p,
+ struct pt_regs *regs,
+ int reenter)
+{
+ struct optimized_kprobe *op;
+
+ if (p->flags & KPROBE_FLAG_OPTIMIZED) {
+ /* This kprobe is really able to run the optimized path. */
+ op = container_of(p, struct optimized_kprobe, kp);
+ /* Detour through copied instructions */
+ regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
+ if (!reenter)
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ return 1;
+ }
+ return 0;
+}
+#endif
+
int __init arch_init_kprobes(void)
{
return 0;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 712d15f..7182580 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -7,6 +7,8 @@
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dmi.h>
+#include <linux/range.h>
+
#include <asm/pci-direct.h>
#include <linux/sort.h>
#include <asm/io.h>
@@ -30,11 +32,6 @@ static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = {
{ 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
};
-struct range {
- u64 start;
- u64 end;
-};
-
static int __cpuinit cmp_range(const void *x1, const void *x2)
{
const struct range *r1 = x1;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 1b1739d..1db183e 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -428,10 +428,6 @@ struct pv_mmu_ops pv_mmu_ops = {
.ptep_modify_prot_start = __ptep_modify_prot_start,
.ptep_modify_prot_commit = __ptep_modify_prot_commit,
-#ifdef CONFIG_HIGHPTE
- .kmap_atomic_pte = kmap_atomic,
-#endif
-
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
.set_pte_atomic = native_set_pte_atomic,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 75e14e2..1aa966c 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -65,7 +65,7 @@ int dma_set_mask(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(dma_set_mask);
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_64) && !defined(CONFIG_NUMA)
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
@@ -116,14 +116,21 @@ static void __init dma32_free_bootmem(void)
dma32_bootmem_ptr = NULL;
dma32_bootmem_size = 0;
}
+#else
+void __init dma32_reserve_bootmem(void)
+{
+}
+static void __init dma32_free_bootmem(void)
+{
+}
+
#endif
void __init pci_iommu_alloc(void)
{
-#ifdef CONFIG_X86_64
/* free the range so the IOMMU can get a range below 4G */
dma32_free_bootmem();
-#endif
+
if (pci_swiotlb_detect())
goto out;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 704bddc..8e1aac8 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -461,6 +461,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
},
},
+ { /* Handle problems with rebooting on the iMac9,1. */
+ .callback = set_pci_reboot,
+ .ident = "Apple iMac9,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
+ },
+ },
{ }
};
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index cb42109..5d7ba1a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -969,15 +969,11 @@ void __init setup_arch(char **cmdline_p)
#endif
initmem_init(0, max_pfn, acpi, k8);
+#ifndef CONFIG_NO_BOOTMEM
+ early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
+#endif
-#ifdef CONFIG_X86_64
- /*
- * dma32_reserve_bootmem() allocates bootmem which may conflict
- * with the crashkernel command line, so do that after
- * reserve_crashkernel()
- */
dma32_reserve_bootmem();
-#endif
reserve_ibft_region();
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 35abcb8..ef6370b 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -137,7 +137,13 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
static void __init pcpu_fc_free(void *ptr, size_t size)
{
+#ifdef CONFIG_NO_BOOTMEM
+ u64 start = __pa(ptr);
+ u64 end = start + size;
+ free_early_partial(start, end);
+#else
free_bootmem(__pa(ptr), size);
+#endif
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9b44011..a435c76 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -241,6 +241,11 @@ static void __cpuinit smp_callin(void)
map_cpu_to_logical_apicid();
notify_cpu_starting(cpuid);
+
+ /*
+ * Need to setup vector mappings before we enable interrupts.
+ */
+ __setup_vector_irq(smp_processor_id());
/*
* Get our bogomips.
*
@@ -315,7 +320,6 @@ notrace static void __cpuinit start_secondary(void *unused)
*/
ipi_call_lock();
lock_vector_lock();
- __setup_vector_irq(smp_processor_id());
set_cpu_online(smp_processor_id(), true);
unlock_vector_lock();
ipi_call_unlock();
@@ -1212,11 +1216,12 @@ __init void prefill_possible_map(void)
total_cpus = max_t(int, possible, num_processors + disabled_cpus);
- if (possible > CONFIG_NR_CPUS) {
+ /* nr_cpu_ids could be reduced via nr_cpus= */
+ if (possible > nr_cpu_ids) {
printk(KERN_WARNING
"%d Processors exceeds NR_CPUS limit of %d\n",
- possible, CONFIG_NR_CPUS);
- possible = CONFIG_NR_CPUS;
+ possible, nr_cpu_ids);
+ possible = nr_cpu_ids;
}
printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index be25734..fb5cc5e1 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -70,11 +70,11 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
* manually to deassert NMI lines for the watchdog if run
* on an 82489DX-based system.
*/
- spin_lock(&i8259A_lock);
+ raw_spin_lock(&i8259A_lock);
outb(0x0c, PIC_MASTER_OCW3);
/* Ack the IRQ; AEOI will end it automatically. */
inb(PIC_MASTER_POLL);
- spin_unlock(&i8259A_lock);
+ raw_spin_unlock(&i8259A_lock);
}
global_clock_event->event_handler(global_clock_event);
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 34a279a..ab38ce0 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -559,7 +559,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
struct irq_desc *desc;
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
/* Find out what's interrupting in the PIIX4 master 8259 */
outb(0x0c, 0x20); /* OCW3 Poll command */
@@ -596,7 +596,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
outb(0x60 + realirq, 0x20);
}
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
desc = irq_to_desc(realirq);
@@ -614,7 +614,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
return IRQ_HANDLED;
out_unlock:
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return IRQ_NONE;
}
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index d430e4c..7dd599d 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -33,6 +33,7 @@
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
+#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
@@ -266,30 +267,6 @@ static void vmi_nop(void)
{
}
-#ifdef CONFIG_HIGHPTE
-static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
-{
- void *va = kmap_atomic(page, type);
-
- /*
- * Internally, the VMI ROM must map virtual addresses to physical
- * addresses for processing MMU updates. By the time MMU updates
- * are issued, this information is typically already lost.
- * Fortunately, the VMI provides a cache of mapping slots for active
- * page tables.
- *
- * We use slot zero for the linear mapping of physical memory, and
- * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
- *
- * args: SLOT VA COUNT PFN
- */
- BUG_ON(type != KM_PTE0 && type != KM_PTE1);
- vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));
-
- return va;
-}
-#endif
-
static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
{
vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
@@ -640,6 +617,12 @@ static inline int __init activate_vmi(void)
u64 reloc;
const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
+ /*
+ * Prevent page tables from being allocated in highmem, even if
+ * CONFIG_HIGHPTE is enabled.
+ */
+ __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
if (call_vrom_func(vmi_rom, vmi_init) != 0) {
printk(KERN_ERR "VMI ROM failed to initialize!");
return 0;
@@ -778,10 +761,6 @@ static inline int __init activate_vmi(void)
/* Set linear is needed in all cases */
vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
-#ifdef CONFIG_HIGHPTE
- if (vmi_ops.set_linear_mapping)
- pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
-#endif
/*
* These MUST always be patched. Don't support indirect jumps
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 74c92bb..2f1ca56 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -79,11 +79,7 @@ unsigned long vmi_tsc_khz(void)
static inline unsigned int vmi_get_timer_vector(void)
{
-#ifdef CONFIG_X86_IO_APIC
- return FIRST_DEVICE_VECTOR;
-#else
- return FIRST_EXTERNAL_VECTOR;
-#endif
+ return IRQ0_VECTOR;
}
/** vmi clockchip */
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index f92a0da..44879df 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -341,7 +341,7 @@ SECTIONS
* Per-cpu symbols which need to be offset from __per_cpu_load
* for the boot processor.
*/
-#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
+#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
@@ -352,7 +352,7 @@ INIT_PER_CPU(irq_stack_union);
"kernel image bigger than KERNEL_IMAGE_SIZE");
#ifdef CONFIG_SMP
-. = ASSERT((per_cpu__irq_stack_union == 0),
+. = ASSERT((irq_stack_union == 0),
"irq_stack_union is not at start of per-cpu area");
#endif
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 9055e58..1c0c6ab 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -301,7 +301,8 @@ static int __init vsyscall_init(void)
register_sysctl_table(kernel_root_table2);
#endif
on_each_cpu(cpu_vsyscall_init, NULL, 1);
- hotcpu_notifier(cpu_vsyscall_notifier, 0);
+ /* notifier priority > KVM */
+ hotcpu_notifier(cpu_vsyscall_notifier, 30);
return 0;
}
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 3c4d0109..970bbd4 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -29,6 +29,7 @@ config KVM
select HAVE_KVM_EVENTFD
select KVM_APIC_ARCHITECTURE
select USER_RETURN_NOTIFIER
+ select KVM_MMIO
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 7e8faea..4dade6a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -32,7 +32,7 @@
#include <linux/module.h>
#include <asm/kvm_emulate.h>
-#include "mmu.h" /* for is_long_mode() */
+#include "x86.h"
/*
* Opcode effective-address decode tables.
@@ -76,6 +76,8 @@
#define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
#define GroupMask 0xff /* Group number stored in bits 0:7 */
/* Misc flags */
+#define Lock (1<<26) /* lock prefix is allowed for the instruction */
+#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64 (1<<28)
/* Source 2 operand type */
#define Src2None (0<<29)
@@ -88,39 +90,40 @@
enum {
Group1_80, Group1_81, Group1_82, Group1_83,
Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
+ Group8, Group9,
};
static u32 opcode_table[256] = {
/* 0x00 - 0x07 */
- ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+ ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
/* 0x08 - 0x0F */
- ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+ ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
ImplicitOps | Stack | No64, 0,
/* 0x10 - 0x17 */
- ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+ ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
/* 0x18 - 0x1F */
- ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+ ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
/* 0x20 - 0x27 */
- ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+ ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
/* 0x28 - 0x2F */
- ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+ ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
0, 0, 0, 0,
/* 0x30 - 0x37 */
- ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+ ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
0, 0, 0, 0,
/* 0x38 - 0x3F */
@@ -156,7 +159,7 @@ static u32 opcode_table[256] = {
Group | Group1_80, Group | Group1_81,
Group | Group1_82, Group | Group1_83,
ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
- ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+ ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
/* 0x88 - 0x8F */
ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
@@ -210,7 +213,7 @@ static u32 opcode_table[256] = {
SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
/* 0xF0 - 0xF7 */
0, 0, 0, 0,
- ImplicitOps, ImplicitOps, Group | Group3_Byte, Group | Group3,
+ ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
/* 0xF8 - 0xFF */
ImplicitOps, 0, ImplicitOps, ImplicitOps,
ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
@@ -218,16 +221,20 @@ static u32 opcode_table[256] = {
static u32 twobyte_table[256] = {
/* 0x00 - 0x0F */
- 0, Group | GroupDual | Group7, 0, 0, 0, ImplicitOps, ImplicitOps, 0,
- ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
+ 0, Group | GroupDual | Group7, 0, 0,
+ 0, ImplicitOps, ImplicitOps | Priv, 0,
+ ImplicitOps | Priv, ImplicitOps | Priv, 0, 0,
+ 0, ImplicitOps | ModRM, 0, 0,
/* 0x10 - 0x1F */
0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
/* 0x20 - 0x2F */
- ModRM | ImplicitOps, ModRM, ModRM | ImplicitOps, ModRM, 0, 0, 0, 0,
+ ModRM | ImplicitOps | Priv, ModRM | Priv,
+ ModRM | ImplicitOps | Priv, ModRM | Priv,
+ 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
/* 0x30 - 0x3F */
- ImplicitOps, 0, ImplicitOps, 0,
- ImplicitOps, ImplicitOps, 0, 0,
+ ImplicitOps | Priv, 0, ImplicitOps | Priv, 0,
+ ImplicitOps, ImplicitOps | Priv, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
/* 0x40 - 0x47 */
DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
@@ -257,21 +264,23 @@ static u32 twobyte_table[256] = {
DstMem | SrcReg | Src2CL | ModRM, 0, 0,
/* 0xA8 - 0xAF */
ImplicitOps | Stack, ImplicitOps | Stack,
- 0, DstMem | SrcReg | ModRM | BitOp,
+ 0, DstMem | SrcReg | ModRM | BitOp | Lock,
DstMem | SrcReg | Src2ImmByte | ModRM,
DstMem | SrcReg | Src2CL | ModRM,
ModRM, 0,
/* 0xB0 - 0xB7 */
- ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0,
- DstMem | SrcReg | ModRM | BitOp,
+ ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
+ 0, DstMem | SrcReg | ModRM | BitOp | Lock,
0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
DstReg | SrcMem16 | ModRM | Mov,
/* 0xB8 - 0xBF */
- 0, 0, DstMem | SrcImmByte | ModRM, DstMem | SrcReg | ModRM | BitOp,
+ 0, 0,
+ Group | Group8, DstMem | SrcReg | ModRM | BitOp | Lock,
0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
DstReg | SrcMem16 | ModRM | Mov,
/* 0xC0 - 0xCF */
- 0, 0, 0, DstMem | SrcReg | ModRM | Mov, 0, 0, 0, ImplicitOps | ModRM,
+ 0, 0, 0, DstMem | SrcReg | ModRM | Mov,
+ 0, 0, 0, Group | GroupDual | Group9,
0, 0, 0, 0, 0, 0, 0, 0,
/* 0xD0 - 0xDF */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -283,25 +292,41 @@ static u32 twobyte_table[256] = {
static u32 group_table[] = {
[Group1_80*8] =
- ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
- ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
- ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
- ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ ByteOp | DstMem | SrcImm | ModRM | Lock,
+ ByteOp | DstMem | SrcImm | ModRM | Lock,
+ ByteOp | DstMem | SrcImm | ModRM | Lock,
+ ByteOp | DstMem | SrcImm | ModRM | Lock,
+ ByteOp | DstMem | SrcImm | ModRM | Lock,
+ ByteOp | DstMem | SrcImm | ModRM | Lock,
+ ByteOp | DstMem | SrcImm | ModRM | Lock,
+ ByteOp | DstMem | SrcImm | ModRM,
[Group1_81*8] =
- DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
- DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
- DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
- DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
+ DstMem | SrcImm | ModRM | Lock,
+ DstMem | SrcImm | ModRM | Lock,
+ DstMem | SrcImm | ModRM | Lock,
+ DstMem | SrcImm | ModRM | Lock,
+ DstMem | SrcImm | ModRM | Lock,
+ DstMem | SrcImm | ModRM | Lock,
+ DstMem | SrcImm | ModRM | Lock,
+ DstMem | SrcImm | ModRM,
[Group1_82*8] =
- ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
- ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
- ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
- ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
+ ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
+ ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
+ ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
+ ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
+ ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
+ ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
+ ByteOp | DstMem | SrcImm | ModRM | No64,
[Group1_83*8] =
- DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
- DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
- DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
- DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
+ DstMem | SrcImmByte | ModRM | Lock,
+ DstMem | SrcImmByte | ModRM | Lock,
+ DstMem | SrcImmByte | ModRM | Lock,
+ DstMem | SrcImmByte | ModRM | Lock,
+ DstMem | SrcImmByte | ModRM | Lock,
+ DstMem | SrcImmByte | ModRM | Lock,
+ DstMem | SrcImmByte | ModRM | Lock,
+ DstMem | SrcImmByte | ModRM,
[Group1A*8] =
DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
[Group3_Byte*8] =
@@ -320,24 +345,39 @@ static u32 group_table[] = {
SrcMem | ModRM | Stack, 0,
SrcMem | ModRM | Stack, 0, SrcMem | ModRM | Stack, 0,
[Group7*8] =
- 0, 0, ModRM | SrcMem, ModRM | SrcMem,
+ 0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
SrcNone | ModRM | DstMem | Mov, 0,
- SrcMem16 | ModRM | Mov, SrcMem | ModRM | ByteOp,
+ SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv,
+ [Group8*8] =
+ 0, 0, 0, 0,
+ DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
+ DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
+ [Group9*8] =
+ 0, ImplicitOps | ModRM | Lock, 0, 0, 0, 0, 0, 0,
};
static u32 group2_table[] = {
[Group7*8] =
- SrcNone | ModRM, 0, 0, SrcNone | ModRM,
+ SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM,
SrcNone | ModRM | DstMem | Mov, 0,
SrcMem16 | ModRM | Mov, 0,
+ [Group9*8] =
+ 0, 0, 0, 0, 0, 0, 0, 0,
};
/* EFLAGS bit definitions. */
+#define EFLG_ID (1<<21)
+#define EFLG_VIP (1<<20)
+#define EFLG_VIF (1<<19)
+#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
+#define EFLG_IOPL (3<<12)
+#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
+#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
@@ -606,7 +646,7 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
if (linear < fc->start || linear >= fc->end) {
size = min(15UL, PAGE_SIZE - offset_in_page(linear));
- rc = ops->read_std(linear, fc->data, size, ctxt->vcpu);
+ rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
if (rc)
return rc;
fc->start = linear;
@@ -661,11 +701,11 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
op_bytes = 3;
*address = 0;
rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
- ctxt->vcpu);
+ ctxt->vcpu, NULL);
if (rc)
return rc;
rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
- ctxt->vcpu);
+ ctxt->vcpu, NULL);
return rc;
}
@@ -889,6 +929,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
switch (mode) {
case X86EMUL_MODE_REAL:
+ case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
@@ -975,7 +1016,7 @@ done_prefixes:
}
if (mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
- kvm_report_emulation_failure(ctxt->vcpu, "invalid x86/64 instruction");;
+ kvm_report_emulation_failure(ctxt->vcpu, "invalid x86/64 instruction");
return -1;
}
@@ -1196,13 +1237,56 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
rc = ops->read_emulated(register_address(c, ss_base(ctxt),
c->regs[VCPU_REGS_RSP]),
dest, len, ctxt->vcpu);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
return rc;
register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
return rc;
}
+static int emulate_popf(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ void *dest, int len)
+{
+ int rc;
+ unsigned long val, change_mask;
+ int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+ int cpl = kvm_x86_ops->get_cpl(ctxt->vcpu);
+
+ rc = emulate_pop(ctxt, ops, &val, len);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
+ | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
+
+ switch (ctxt->mode) {
+ case X86EMUL_MODE_PROT64:
+ case X86EMUL_MODE_PROT32:
+ case X86EMUL_MODE_PROT16:
+ if (cpl == 0)
+ change_mask |= EFLG_IOPL;
+ if (cpl <= iopl)
+ change_mask |= EFLG_IF;
+ break;
+ case X86EMUL_MODE_VM86:
+ if (iopl < 3) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+ change_mask |= EFLG_IF;
+ break;
+ default: /* real mode */
+ change_mask |= (EFLG_IOPL | EFLG_IF);
+ break;
+ }
+
+ *(unsigned long *)dest =
+ (ctxt->eflags & ~change_mask) | (val & change_mask);
+
+ return rc;
+}
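/*
 * For reference: the final merge lets the popped value override only the
 * bits in change_mask, so e.g. a guest running with CPL > IOPL cannot flip
 * IF via popf. A worked sketch with hypothetical values and a deliberately
 * reduced mask:
 */
#include <stdio.h>

#define EFLG_IF (1 << 9)
#define EFLG_CF (1 << 0)

int main(void)
{
	unsigned long eflags = EFLG_IF;		/* current: IF set, CF clear */
	unsigned long val = EFLG_CF;		/* popped:  IF clear, CF set */
	unsigned long change_mask = EFLG_CF;	/* CPL > IOPL: IF immutable  */
	unsigned long res = (eflags & ~change_mask) | (val & change_mask);

	printf("result = %#lx\n", res);		/* 0x201: IF kept, CF taken */
	return 0;
}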
+
static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
{
struct decode_cache *c = &ctxt->decode;
@@ -1225,7 +1309,7 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
if (rc != 0)
return rc;
- rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, 1, seg);
+ rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, seg);
return rc;
}
@@ -1370,7 +1454,7 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
int rc;
rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
return rc;
if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
@@ -1385,7 +1469,7 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
(u32) c->regs[VCPU_REGS_RBX];
rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->eflags |= EFLG_ZF;
}
@@ -1407,7 +1491,7 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
if (rc)
return rc;
- rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, 1, VCPU_SREG_CS);
+ rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, VCPU_SREG_CS);
return rc;
}
@@ -1451,7 +1535,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
&c->dst.val,
c->dst.bytes,
ctxt->vcpu);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
return rc;
break;
case OP_NONE:
@@ -1514,9 +1598,8 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
u64 msr_data;
/* syscall is not available in real mode */
- if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL
- || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE))
- return -1;
+ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86)
+ return X86EMUL_UNHANDLEABLE;
setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1553,7 +1636,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
}
- return 0;
+ return X86EMUL_CONTINUE;
}
static int
@@ -1563,22 +1646,17 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
struct kvm_segment cs, ss;
u64 msr_data;
- /* inject #UD if LOCK prefix is used */
- if (c->lock_prefix)
- return -1;
-
- /* inject #GP if in real mode or paging is disabled */
- if (ctxt->mode == X86EMUL_MODE_REAL ||
- !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
+ /* inject #GP if in real mode */
+ if (ctxt->mode == X86EMUL_MODE_REAL) {
kvm_inject_gp(ctxt->vcpu, 0);
- return -1;
+ return X86EMUL_UNHANDLEABLE;
}
/* XXX sysenter/sysexit have not been tested in 64-bit mode.
* Therefore, we inject an #UD.
*/
if (ctxt->mode == X86EMUL_MODE_PROT64)
- return -1;
+ return X86EMUL_UNHANDLEABLE;
setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1587,13 +1665,13 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
case X86EMUL_MODE_PROT32:
if ((msr_data & 0xfffc) == 0x0) {
kvm_inject_gp(ctxt->vcpu, 0);
- return -1;
+ return X86EMUL_PROPAGATE_FAULT;
}
break;
case X86EMUL_MODE_PROT64:
if (msr_data == 0x0) {
kvm_inject_gp(ctxt->vcpu, 0);
- return -1;
+ return X86EMUL_PROPAGATE_FAULT;
}
break;
}
@@ -1618,7 +1696,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
c->regs[VCPU_REGS_RSP] = msr_data;
- return 0;
+ return X86EMUL_CONTINUE;
}
static int
@@ -1629,21 +1707,11 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
u64 msr_data;
int usermode;
- /* inject #UD if LOCK prefix is used */
- if (c->lock_prefix)
- return -1;
-
- /* inject #GP if in real mode or paging is disabled */
- if (ctxt->mode == X86EMUL_MODE_REAL
- || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
- kvm_inject_gp(ctxt->vcpu, 0);
- return -1;
- }
-
- /* sysexit must be called from CPL 0 */
- if (kvm_x86_ops->get_cpl(ctxt->vcpu) != 0) {
+ /* inject #GP if in real mode or Virtual 8086 mode */
+ if (ctxt->mode == X86EMUL_MODE_REAL ||
+ ctxt->mode == X86EMUL_MODE_VM86) {
kvm_inject_gp(ctxt->vcpu, 0);
- return -1;
+ return X86EMUL_UNHANDLEABLE;
}
setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1661,7 +1729,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
cs.selector = (u16)(msr_data + 16);
if ((msr_data & 0xfffc) == 0x0) {
kvm_inject_gp(ctxt->vcpu, 0);
- return -1;
+ return X86EMUL_PROPAGATE_FAULT;
}
ss.selector = (u16)(msr_data + 24);
break;
@@ -1669,7 +1737,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
cs.selector = (u16)(msr_data + 32);
if (msr_data == 0x0) {
kvm_inject_gp(ctxt->vcpu, 0);
- return -1;
+ return X86EMUL_PROPAGATE_FAULT;
}
ss.selector = cs.selector + 8;
cs.db = 0;
@@ -1685,7 +1753,58 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
c->eip = ctxt->vcpu->arch.regs[VCPU_REGS_RDX];
c->regs[VCPU_REGS_RSP] = ctxt->vcpu->arch.regs[VCPU_REGS_RCX];
- return 0;
+ return X86EMUL_CONTINUE;
+}
+
+static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
+{
+ int iopl;
+ if (ctxt->mode == X86EMUL_MODE_REAL)
+ return false;
+ if (ctxt->mode == X86EMUL_MODE_VM86)
+ return true;
+ iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+ return kvm_x86_ops->get_cpl(ctxt->vcpu) > iopl;
+}
+
+static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 port, u16 len)
+{
+ struct kvm_segment tr_seg;
+ int r;
+ u16 io_bitmap_ptr;
+ u8 perm, bit_idx = port & 0x7;
+ unsigned mask = (1 << len) - 1;
+
+ kvm_get_segment(ctxt->vcpu, &tr_seg, VCPU_SREG_TR);
+ if (tr_seg.unusable)
+ return false;
+ if (tr_seg.limit < 103)
+ return false;
+ r = ops->read_std(tr_seg.base + 102, &io_bitmap_ptr, 2, ctxt->vcpu,
+ NULL);
+ if (r != X86EMUL_CONTINUE)
+ return false;
+ if (io_bitmap_ptr + port/8 > tr_seg.limit)
+ return false;
+ r = ops->read_std(tr_seg.base + io_bitmap_ptr + port/8, &perm, 1,
+ ctxt->vcpu, NULL);
+ if (r != X86EMUL_CONTINUE)
+ return false;
+ if ((perm >> bit_idx) & mask)
+ return false;
+ return true;
+}
+
+static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 port, u16 len)
+{
+ if (emulator_bad_iopl(ctxt))
+ if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
+ return false;
+ return true;
}
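/*
 * For reference: when the IOPL check fails, every accessed port must have
 * its bit clear in the TSS I/O permission bitmap (one bit per port, tested
 * len bits at a time). A user-space sketch of that test with a hypothetical
 * bitmap:
 */
#include <stdio.h>

int main(void)
{
	unsigned char bitmap[8192] = { 0 };	/* hypothetical: all allowed */
	unsigned short port = 0x70, len = 1;
	unsigned char perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;

	bitmap[0x70 / 8] |= 1 << (0x70 & 7);	/* deny port 0x70 */
	perm = bitmap[port / 8];
	printf("port %#x %s\n", port,
	       ((perm >> bit_idx) & mask) ? "denied" : "allowed");
	return 0;
}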
int
@@ -1709,6 +1828,18 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
saved_eip = c->eip;
+ /* LOCK prefix is allowed only with some instructions */
+ if (c->lock_prefix && !(c->d & Lock)) {
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ goto done;
+ }
+
+ /* Privileged instructions can be executed only at CPL 0 */
+ if ((c->d & Priv) && kvm_x86_ops->get_cpl(ctxt->vcpu)) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ goto done;
+ }
+
if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs))
memop = c->modrm_ea;
@@ -1749,7 +1880,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
&c->src.val,
c->src.bytes,
ctxt->vcpu);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
c->src.orig_val = c->src.val;
}
@@ -1768,12 +1899,15 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
c->dst.ptr = (void *)c->dst.ptr +
(c->src.val & mask) / 8;
}
- if (!(c->d & Mov) &&
- /* optimisation - avoid slow emulated read */
- ((rc = ops->read_emulated((unsigned long)c->dst.ptr,
- &c->dst.val,
- c->dst.bytes, ctxt->vcpu)) != 0))
- goto done;
+ if (!(c->d & Mov)) {
+ /* optimisation - avoid slow emulated read */
+ rc = ops->read_emulated((unsigned long)c->dst.ptr,
+ &c->dst.val,
+ c->dst.bytes,
+ ctxt->vcpu);
+ if (rc != X86EMUL_CONTINUE)
+ goto done;
+ }
}
c->dst.orig_val = c->dst.val;
@@ -1876,7 +2010,12 @@ special_insn:
break;
case 0x6c: /* insb */
case 0x6d: /* insw/insd */
- if (kvm_emulate_pio_string(ctxt->vcpu,
+ if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
+ (c->d & ByteOp) ? 1 : c->op_bytes)) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ goto done;
+ }
+ if (kvm_emulate_pio_string(ctxt->vcpu,
1,
(c->d & ByteOp) ? 1 : c->op_bytes,
c->rep_prefix ?
@@ -1892,6 +2031,11 @@ special_insn:
return 0;
case 0x6e: /* outsb */
case 0x6f: /* outsw/outsd */
+ if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
+ (c->d & ByteOp) ? 1 : c->op_bytes)) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ goto done;
+ }
if (kvm_emulate_pio_string(ctxt->vcpu,
0,
(c->d & ByteOp) ? 1 : c->op_bytes,
@@ -1978,25 +2122,19 @@ special_insn:
break;
case 0x8e: { /* mov seg, r/m16 */
uint16_t sel;
- int type_bits;
- int err;
sel = c->src.val;
- if (c->modrm_reg == VCPU_SREG_SS)
- toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
- if (c->modrm_reg <= 5) {
- type_bits = (c->modrm_reg == 1) ? 9 : 1;
- err = kvm_load_segment_descriptor(ctxt->vcpu, sel,
- type_bits, c->modrm_reg);
- } else {
- printk(KERN_INFO "Invalid segreg in modrm byte 0x%02x\n",
- c->modrm);
- goto cannot_emulate;
+ if (c->modrm_reg == VCPU_SREG_CS ||
+ c->modrm_reg > VCPU_SREG_GS) {
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ goto done;
}
- if (err < 0)
- goto cannot_emulate;
+ if (c->modrm_reg == VCPU_SREG_SS)
+ toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
+
+ rc = kvm_load_segment_descriptor(ctxt->vcpu, sel, c->modrm_reg);
c->dst.type = OP_NONE; /* Disable writeback. */
break;
@@ -2025,7 +2163,10 @@ special_insn:
c->dst.type = OP_REG;
c->dst.ptr = (unsigned long *) &ctxt->eflags;
c->dst.bytes = c->op_bytes;
- goto pop_instruction;
+ rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
+ if (rc != X86EMUL_CONTINUE)
+ goto done;
+ break;
case 0xa0 ... 0xa1: /* mov */
c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
c->dst.val = c->src.val;
@@ -2039,11 +2180,12 @@ special_insn:
c->dst.ptr = (unsigned long *)register_address(c,
es_base(ctxt),
c->regs[VCPU_REGS_RDI]);
- if ((rc = ops->read_emulated(register_address(c,
- seg_override_base(ctxt, c),
- c->regs[VCPU_REGS_RSI]),
+ rc = ops->read_emulated(register_address(c,
+ seg_override_base(ctxt, c),
+ c->regs[VCPU_REGS_RSI]),
&c->dst.val,
- c->dst.bytes, ctxt->vcpu)) != 0)
+ c->dst.bytes, ctxt->vcpu);
+ if (rc != X86EMUL_CONTINUE)
goto done;
register_address_increment(c, &c->regs[VCPU_REGS_RSI],
(ctxt->eflags & EFLG_DF) ? -c->dst.bytes
@@ -2058,10 +2200,11 @@ special_insn:
c->src.ptr = (unsigned long *)register_address(c,
seg_override_base(ctxt, c),
c->regs[VCPU_REGS_RSI]);
- if ((rc = ops->read_emulated((unsigned long)c->src.ptr,
- &c->src.val,
- c->src.bytes,
- ctxt->vcpu)) != 0)
+ rc = ops->read_emulated((unsigned long)c->src.ptr,
+ &c->src.val,
+ c->src.bytes,
+ ctxt->vcpu);
+ if (rc != X86EMUL_CONTINUE)
goto done;
c->dst.type = OP_NONE; /* Disable writeback. */
@@ -2069,10 +2212,11 @@ special_insn:
c->dst.ptr = (unsigned long *)register_address(c,
es_base(ctxt),
c->regs[VCPU_REGS_RDI]);
- if ((rc = ops->read_emulated((unsigned long)c->dst.ptr,
- &c->dst.val,
- c->dst.bytes,
- ctxt->vcpu)) != 0)
+ rc = ops->read_emulated((unsigned long)c->dst.ptr,
+ &c->dst.val,
+ c->dst.bytes,
+ ctxt->vcpu);
+ if (rc != X86EMUL_CONTINUE)
goto done;
DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
@@ -2102,12 +2246,13 @@ special_insn:
c->dst.type = OP_REG;
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
- if ((rc = ops->read_emulated(register_address(c,
- seg_override_base(ctxt, c),
- c->regs[VCPU_REGS_RSI]),
- &c->dst.val,
- c->dst.bytes,
- ctxt->vcpu)) != 0)
+ rc = ops->read_emulated(register_address(c,
+ seg_override_base(ctxt, c),
+ c->regs[VCPU_REGS_RSI]),
+ &c->dst.val,
+ c->dst.bytes,
+ ctxt->vcpu);
+ if (rc != X86EMUL_CONTINUE)
goto done;
register_address_increment(c, &c->regs[VCPU_REGS_RSI],
(ctxt->eflags & EFLG_DF) ? -c->dst.bytes
@@ -2163,11 +2308,9 @@ special_insn:
case 0xe9: /* jmp rel */
goto jmp;
case 0xea: /* jmp far */
- if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val, 9,
- VCPU_SREG_CS) < 0) {
- DPRINTF("jmp far: Failed to load CS descriptor\n");
- goto cannot_emulate;
- }
+ if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val,
+ VCPU_SREG_CS))
+ goto done;
c->eip = c->src.val;
break;
@@ -2185,7 +2328,13 @@ special_insn:
case 0xef: /* out (e/r)ax,dx */
port = c->regs[VCPU_REGS_RDX];
io_dir_in = 0;
- do_io: if (kvm_emulate_pio(ctxt->vcpu, io_dir_in,
+ do_io:
+ if (!emulator_io_permited(ctxt, ops, port,
+ (c->d & ByteOp) ? 1 : c->op_bytes)) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ goto done;
+ }
+ if (kvm_emulate_pio(ctxt->vcpu, io_dir_in,
(c->d & ByteOp) ? 1 : c->op_bytes,
port) != 0) {
c->eip = saved_eip;
@@ -2210,13 +2359,21 @@ special_insn:
c->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xfa: /* cli */
- ctxt->eflags &= ~X86_EFLAGS_IF;
- c->dst.type = OP_NONE; /* Disable writeback. */
+ if (emulator_bad_iopl(ctxt))
+ kvm_inject_gp(ctxt->vcpu, 0);
+ else {
+ ctxt->eflags &= ~X86_EFLAGS_IF;
+ c->dst.type = OP_NONE; /* Disable writeback. */
+ }
break;
case 0xfb: /* sti */
- toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
- ctxt->eflags |= X86_EFLAGS_IF;
- c->dst.type = OP_NONE; /* Disable writeback. */
+ if (emulator_bad_iopl(ctxt))
+ kvm_inject_gp(ctxt->vcpu, 0);
+ else {
+ toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
+ ctxt->eflags |= X86_EFLAGS_IF;
+ c->dst.type = OP_NONE; /* Disable writeback. */
+ }
break;
case 0xfc: /* cld */
ctxt->eflags &= ~EFLG_DF;
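cli and sti are IOPL-sensitive: they execute only when CPL <= IOPL, and always fault in virtual-8086 mode, which is exactly what emulator_bad_iopl() encodes. A compact standalone model of the rule (illustrative only, not the kernel helper):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define X86_EFLAGS_IF   (1u << 9)
    #define IOPL_SHIFT      12
    #define X86_EFLAGS_IOPL (3u << IOPL_SHIFT)

    /* CLI/STI (and direct port I/O) are permitted only when
     * CPL <= IOPL; in virtual-8086 mode they always trap. */
    static bool iopl_forbids(uint32_t eflags, int cpl, bool vm86)
    {
        int iopl = (eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;

        return vm86 || cpl > iopl;
    }

    int main(void)
    {
        uint32_t eflags = X86_EFLAGS_IF;            /* IOPL == 0 */

        if (iopl_forbids(eflags, 3, false))
            printf("CPL3 cli -> #GP(0)\n");         /* taken */
        if (!iopl_forbids(eflags, 0, false))
            printf("CPL0 cli -> IF cleared\n");     /* taken */
        return 0;
    }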
@@ -2319,8 +2476,9 @@ twobyte_insn:
}
break;
case 0x05: /* syscall */
- if (emulate_syscall(ctxt) == -1)
- goto cannot_emulate;
+ rc = emulate_syscall(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ goto done;
else
goto writeback;
break;
@@ -2391,14 +2549,16 @@ twobyte_insn:
c->dst.type = OP_NONE;
break;
case 0x34: /* sysenter */
- if (emulate_sysenter(ctxt) == -1)
- goto cannot_emulate;
+ rc = emulate_sysenter(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ goto done;
else
goto writeback;
break;
case 0x35: /* sysexit */
- if (emulate_sysexit(ctxt) == -1)
- goto cannot_emulate;
+ rc = emulate_sysexit(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ goto done;
else
goto writeback;
break;
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 15578f1..294698b 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -242,11 +242,11 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
irq_ack_notifier);
- spin_lock(&ps->inject_lock);
+ raw_spin_lock(&ps->inject_lock);
if (atomic_dec_return(&ps->pit_timer.pending) < 0)
atomic_inc(&ps->pit_timer.pending);
ps->irq_ack = 1;
- spin_unlock(&ps->inject_lock);
+ raw_spin_unlock(&ps->inject_lock);
}
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -605,7 +605,7 @@ static const struct kvm_io_device_ops speaker_dev_ops = {
.write = speaker_ioport_write,
};
-/* Caller must have writers lock on slots_lock */
+/* Caller must hold slots_lock */
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
struct kvm_pit *pit;
@@ -624,7 +624,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
mutex_init(&pit->pit_state.lock);
mutex_lock(&pit->pit_state.lock);
- spin_lock_init(&pit->pit_state.inject_lock);
+ raw_spin_lock_init(&pit->pit_state.inject_lock);
kvm->arch.vpit = pit;
pit->kvm = kvm;
@@ -645,13 +645,13 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
kvm_iodevice_init(&pit->dev, &pit_dev_ops);
- ret = __kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
+ ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &pit->dev);
if (ret < 0)
goto fail;
if (flags & KVM_PIT_SPEAKER_DUMMY) {
kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
- ret = __kvm_io_bus_register_dev(&kvm->pio_bus,
+ ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS,
&pit->speaker_dev);
if (ret < 0)
goto fail_unregister;
@@ -660,11 +660,12 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
return pit;
fail_unregister:
- __kvm_io_bus_unregister_dev(&kvm->pio_bus, &pit->dev);
+ kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
fail:
- if (pit->irq_source_id >= 0)
- kvm_free_irq_source_id(kvm, pit->irq_source_id);
+ kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
+ kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
+ kvm_free_irq_source_id(kvm, pit->irq_source_id);
kfree(pit);
return NULL;
@@ -723,12 +724,12 @@ void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
/* Try to inject pending interrupts when
* last one has been acked.
*/
- spin_lock(&ps->inject_lock);
+ raw_spin_lock(&ps->inject_lock);
if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) {
ps->irq_ack = 0;
inject = 1;
}
- spin_unlock(&ps->inject_lock);
+ raw_spin_unlock(&ps->inject_lock);
if (inject)
__inject_pit_timer_intr(kvm);
}
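The spin_lock -> raw_spin_lock conversion here matters on PREEMPT_RT, where an ordinary spinlock_t becomes a sleeping lock; paths that can run with interrupts disabled, such as this injection/ack path, must keep a true busy-waiting lock. A minimal kernel-style sketch of the pattern (illustrative module fragment, not part of this patch):

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(inject_lock);
    static int pending;

    /*
     * May run in hard-irq context even on PREEMPT_RT, so the lock must
     * never sleep: raw_spinlock_t always busy-waits, while spinlock_t
     * turns into a sleeping mutex-like lock on -rt kernels.  Keep the
     * critical section short.
     */
    static void ack_one(void)
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&inject_lock, flags);
        if (pending > 0)
            pending--;
        raw_spin_unlock_irqrestore(&inject_lock, flags);
    }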
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index d4c1c7f..900d6b0 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -27,7 +27,7 @@ struct kvm_kpit_state {
u32 speaker_data_on;
struct mutex lock;
struct kvm_pit *pit;
- spinlock_t inject_lock;
+ raw_spinlock_t inject_lock;
unsigned long irq_ack;
struct kvm_irq_ack_notifier irq_ack_notifier;
};
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index d057c0c..07771da 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -44,18 +44,19 @@ static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
* Other interrupt may be delivered to PIC while lock is dropped but
* it should be safe since PIC state is already updated at this stage.
*/
- spin_unlock(&s->pics_state->lock);
+ raw_spin_unlock(&s->pics_state->lock);
kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
- spin_lock(&s->pics_state->lock);
+ raw_spin_lock(&s->pics_state->lock);
}
void kvm_pic_clear_isr_ack(struct kvm *kvm)
{
struct kvm_pic *s = pic_irqchip(kvm);
- spin_lock(&s->lock);
+
+ raw_spin_lock(&s->lock);
s->pics[0].isr_ack = 0xff;
s->pics[1].isr_ack = 0xff;
- spin_unlock(&s->lock);
+ raw_spin_unlock(&s->lock);
}
/*
@@ -156,9 +157,9 @@ static void pic_update_irq(struct kvm_pic *s)
void kvm_pic_update_irq(struct kvm_pic *s)
{
- spin_lock(&s->lock);
+ raw_spin_lock(&s->lock);
pic_update_irq(s);
- spin_unlock(&s->lock);
+ raw_spin_unlock(&s->lock);
}
int kvm_pic_set_irq(void *opaque, int irq, int level)
@@ -166,14 +167,14 @@ int kvm_pic_set_irq(void *opaque, int irq, int level)
struct kvm_pic *s = opaque;
int ret = -1;
- spin_lock(&s->lock);
+ raw_spin_lock(&s->lock);
if (irq >= 0 && irq < PIC_NUM_PINS) {
ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
pic_update_irq(s);
trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
s->pics[irq >> 3].imr, ret == 0);
}
- spin_unlock(&s->lock);
+ raw_spin_unlock(&s->lock);
return ret;
}
@@ -203,7 +204,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
int irq, irq2, intno;
struct kvm_pic *s = pic_irqchip(kvm);
- spin_lock(&s->lock);
+ raw_spin_lock(&s->lock);
irq = pic_get_irq(&s->pics[0]);
if (irq >= 0) {
pic_intack(&s->pics[0], irq);
@@ -228,7 +229,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
intno = s->pics[0].irq_base + irq;
}
pic_update_irq(s);
- spin_unlock(&s->lock);
+ raw_spin_unlock(&s->lock);
return intno;
}
@@ -442,7 +443,7 @@ static int picdev_write(struct kvm_io_device *this,
printk(KERN_ERR "PIC: non byte write\n");
return 0;
}
- spin_lock(&s->lock);
+ raw_spin_lock(&s->lock);
switch (addr) {
case 0x20:
case 0x21:
@@ -455,7 +456,7 @@ static int picdev_write(struct kvm_io_device *this,
elcr_ioport_write(&s->pics[addr & 1], addr, data);
break;
}
- spin_unlock(&s->lock);
+ raw_spin_unlock(&s->lock);
return 0;
}
@@ -472,7 +473,7 @@ static int picdev_read(struct kvm_io_device *this,
printk(KERN_ERR "PIC: non byte read\n");
return 0;
}
- spin_lock(&s->lock);
+ raw_spin_lock(&s->lock);
switch (addr) {
case 0x20:
case 0x21:
@@ -486,7 +487,7 @@ static int picdev_read(struct kvm_io_device *this,
break;
}
*(unsigned char *)val = data;
- spin_unlock(&s->lock);
+ raw_spin_unlock(&s->lock);
return 0;
}
@@ -520,7 +521,7 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
if (!s)
return NULL;
- spin_lock_init(&s->lock);
+ raw_spin_lock_init(&s->lock);
s->kvm = kvm;
s->pics[0].elcr_mask = 0xf8;
s->pics[1].elcr_mask = 0xde;
@@ -533,7 +534,9 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
* Initialize PIO device
*/
kvm_iodevice_init(&s->dev, &picdev_ops);
- ret = kvm_io_bus_register_dev(kvm, &kvm->pio_bus, &s->dev);
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &s->dev);
+ mutex_unlock(&kvm->slots_lock);
if (ret < 0) {
kfree(s);
return NULL;
@@ -541,3 +544,14 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
return s;
}
+
+void kvm_destroy_pic(struct kvm *kvm)
+{
+ struct kvm_pic *vpic = kvm->arch.vpic;
+
+ if (vpic) {
+ kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev);
+ kvm->arch.vpic = NULL;
+ kfree(vpic);
+ }
+}
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index be399e2..34b15915 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -62,7 +62,7 @@ struct kvm_kpic_state {
};
struct kvm_pic {
- spinlock_t lock;
+ raw_spinlock_t lock;
unsigned pending_acks;
struct kvm *kvm;
struct kvm_kpic_state pics[2]; /* 0 is master pic, 1 is slave pic */
@@ -75,6 +75,7 @@ struct kvm_pic {
};
struct kvm_pic *kvm_create_pic(struct kvm *kvm);
+void kvm_destroy_pic(struct kvm *kvm);
int kvm_pic_read_irq(struct kvm *kvm);
void kvm_pic_update_irq(struct kvm_pic *s);
void kvm_pic_clear_isr_ack(struct kvm *kvm);
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 7bcc5b6..cff851c 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -1,6 +1,11 @@
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H
+#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
+#define KVM_POSSIBLE_CR4_GUEST_BITS \
+ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
+ | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)
+
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
enum kvm_reg reg)
{
@@ -38,4 +43,30 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
return vcpu->arch.pdptrs[index];
}
+static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
+{
+ ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
+ if (tmask & vcpu->arch.cr0_guest_owned_bits)
+ kvm_x86_ops->decache_cr0_guest_bits(vcpu);
+ return vcpu->arch.cr0 & mask;
+}
+
+static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
+{
+ return kvm_read_cr0_bits(vcpu, ~0UL);
+}
+
+static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
+{
+ ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
+ if (tmask & vcpu->arch.cr4_guest_owned_bits)
+ kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+ return vcpu->arch.cr4 & mask;
+}
+
+static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
+{
+ return kvm_read_cr4_bits(vcpu, ~0UL);
+}
+
#endif
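These accessors let the guest own selected CR0/CR4 bits: the cached soft copy is refreshed from hardware only when a read touches a guest-owned bit. A userspace model of this lazy decache (all names hypothetical; CR0.TS stands in for a guest-owned bit):

    #include <stdio.h>

    static unsigned long soft_cr0;                  /* cached copy */
    static unsigned long hw_cr0;                    /* vmcs/vmcb stand-in */
    static unsigned long guest_owned = 1ul << 3;    /* e.g. CR0.TS */

    /* Bits the guest may change without exiting can be stale in the
     * soft copy; refresh them from the authoritative register only
     * when the caller actually asks about one of them. */
    static unsigned long read_cr0_bits(unsigned long mask)
    {
        if (mask & guest_owned) {
            soft_cr0 &= ~guest_owned;
            soft_cr0 |= hw_cr0 & guest_owned;
        }
        return soft_cr0 & mask;
    }

    int main(void)
    {
        hw_cr0 = 1ul << 3;      /* guest set TS behind our back */
        printf("TS=%lu\n", read_cr0_bits(1ul << 3) >> 3);   /* 1 */
        return 0;
    }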
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index ba8c045..4b224f9 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1246,3 +1246,34 @@ int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
return 0;
}
+
+int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return 1;
+
+ /* if this is an ICR write, set the vector (ICR2) before the command */
+ if (reg == APIC_ICR)
+ apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
+ return apic_reg_write(apic, reg, (u32)data);
+}
+
+int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ u32 low, high = 0;
+
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return 1;
+
+ if (apic_reg_read(apic, reg, 4, &low))
+ return 1;
+ if (reg == APIC_ICR)
+ apic_reg_read(apic, APIC_ICR2, 4, &high);
+
+ *data = (((u64)high) << 32) | low;
+
+ return 0;
+}
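The Hyper-V APIC-assist path treats ICR as a single 64-bit MSR although the APIC exposes two 32-bit registers; writing ICR2 (the destination) before ICR (vector and command) ensures the destination is in place when the low-half write triggers the IPI. A tiny standalone model of the split:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t data = 0x000000030000403full;      /* dest=3, vector=0x3f */
        uint32_t icr2 = (uint32_t)(data >> 32);     /* written first */
        uint32_t icr  = (uint32_t)data;             /* second, fires IPI */
        uint64_t back = ((uint64_t)icr2 << 32) | icr;

        printf("ICR2=%#x ICR=%#x combined=%#llx\n",
               icr2, icr, (unsigned long long)back);
        return 0;
    }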
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 40010b0..f5fe32c 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -48,4 +48,12 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+
+int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+
+static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE;
+}
#endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 89a49fb..741373e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -18,6 +18,7 @@
*/
#include "mmu.h"
+#include "x86.h"
#include "kvm_cache_regs.h"
#include <linux/kvm_host.h>
@@ -29,6 +30,7 @@
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
+#include <linux/srcu.h>
#include <asm/page.h>
#include <asm/cmpxchg.h>
@@ -136,16 +138,6 @@ module_param(oos_shadow, bool, 0644);
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
| PT64_NX_MASK)
-#define PFERR_PRESENT_MASK (1U << 0)
-#define PFERR_WRITE_MASK (1U << 1)
-#define PFERR_USER_MASK (1U << 2)
-#define PFERR_RSVD_MASK (1U << 3)
-#define PFERR_FETCH_MASK (1U << 4)
-
-#define PT_PDPE_LEVEL 3
-#define PT_DIRECTORY_LEVEL 2
-#define PT_PAGE_TABLE_LEVEL 1
-
#define RMAP_EXT 4
#define ACC_EXEC_MASK 1
@@ -153,6 +145,9 @@ module_param(oos_shadow, bool, 0644);
#define ACC_USER_MASK PT_USER_MASK
#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
+#include <trace/events/kvm.h>
+
+#undef TRACE_INCLUDE_FILE
#define CREATE_TRACE_POINTS
#include "mmutrace.h"
@@ -229,7 +224,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
static int is_write_protection(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.cr0 & X86_CR0_WP;
+ return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}
static int is_cpuid_PSE36(void)
@@ -239,7 +234,7 @@ static int is_cpuid_PSE36(void)
static int is_nx(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.shadow_efer & EFER_NX;
+ return vcpu->arch.efer & EFER_NX;
}
static int is_shadow_present_pte(u64 pte)
@@ -253,7 +248,7 @@ static int is_large_pte(u64 pte)
return pte & PT_PAGE_SIZE_MASK;
}
-static int is_writeble_pte(unsigned long pte)
+static int is_writable_pte(unsigned long pte)
{
return pte & PT_WRITABLE_MASK;
}
@@ -470,24 +465,10 @@ static int has_wrprotected_page(struct kvm *kvm,
static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
- unsigned long page_size = PAGE_SIZE;
- struct vm_area_struct *vma;
- unsigned long addr;
+ unsigned long page_size;
int i, ret = 0;
- addr = gfn_to_hva(kvm, gfn);
- if (kvm_is_error_hva(addr))
- return PT_PAGE_TABLE_LEVEL;
-
- down_read(&current->mm->mmap_sem);
- vma = find_vma(current->mm, addr);
- if (!vma)
- goto out;
-
- page_size = vma_kernel_pagesize(vma);
-
-out:
- up_read(&current->mm->mmap_sem);
+ page_size = kvm_host_page_size(kvm, gfn);
for (i = PT_PAGE_TABLE_LEVEL;
i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
@@ -503,8 +484,7 @@ out:
static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
struct kvm_memory_slot *slot;
- int host_level;
- int level = PT_PAGE_TABLE_LEVEL;
+ int host_level, level, max_level;
slot = gfn_to_memslot(vcpu->kvm, large_gfn);
if (slot && slot->dirty_bitmap)
@@ -515,7 +495,10 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
if (host_level == PT_PAGE_TABLE_LEVEL)
return host_level;
- for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level)
+ max_level = kvm_x86_ops->get_lpage_level() < host_level ?
+ kvm_x86_ops->get_lpage_level() : host_level;
+
+ for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
break;
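mapping_level() now clamps the host-provided page level by the largest level the virtualization hardware supports (get_lpage_level), since for instance shadow paging cannot use 1 GB pages even when the host backing would allow them. A one-line model of the clamp (hypothetical clamp_level helper):

    #include <stdio.h>

    enum { PT_PAGE_TABLE_LEVEL = 1, PT_DIRECTORY_LEVEL = 2,
           PT_PDPE_LEVEL = 3 };

    /* The level used for a guest mapping is bounded both by the host
     * backing and by the largest page size the hardware supports. */
    static int clamp_level(int host_level, int vendor_lpage_level)
    {
        return host_level < vendor_lpage_level ? host_level
                                               : vendor_lpage_level;
    }

    int main(void)
    {
        /* host backs the gfn with a 1G page, but hardware caps at 2M */
        printf("level=%d\n",
               clamp_level(PT_PDPE_LEVEL, PT_DIRECTORY_LEVEL));    /* 2 */
        return 0;
    }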
@@ -633,7 +616,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
pfn = spte_to_pfn(*spte);
if (*spte & shadow_accessed_mask)
kvm_set_pfn_accessed(pfn);
- if (is_writeble_pte(*spte))
+ if (is_writable_pte(*spte))
kvm_set_pfn_dirty(pfn);
rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
if (!*rmapp) {
@@ -662,6 +645,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
prev_desc = desc;
desc = desc->more;
}
+ pr_err("rmap_remove: %p %llx many->many\n", spte, *spte);
BUG();
}
}
@@ -708,7 +692,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
BUG_ON(!spte);
BUG_ON(!(*spte & PT_PRESENT_MASK));
rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
- if (is_writeble_pte(*spte)) {
+ if (is_writable_pte(*spte)) {
__set_spte(spte, *spte & ~PT_WRITABLE_MASK);
write_protected = 1;
}
@@ -732,7 +716,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
BUG_ON(!(*spte & PT_PRESENT_MASK));
BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
- if (is_writeble_pte(*spte)) {
+ if (is_writable_pte(*spte)) {
rmap_remove(kvm, spte);
--kvm->stat.lpages;
__set_spte(spte, shadow_trap_nonpresent_pte);
@@ -787,7 +771,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
new_spte &= ~PT_WRITABLE_MASK;
new_spte &= ~SPTE_HOST_WRITEABLE;
- if (is_writeble_pte(*spte))
+ if (is_writable_pte(*spte))
kvm_set_pfn_dirty(spte_to_pfn(*spte));
__set_spte(spte, new_spte);
spte = rmap_next(kvm, rmapp, spte);
@@ -805,35 +789,32 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
unsigned long data))
{
int i, j;
+ int ret;
int retval = 0;
+ struct kvm_memslots *slots;
- /*
- * If mmap_sem isn't taken, we can look the memslots with only
- * the mmu_lock by skipping over the slots with userspace_addr == 0.
- */
- for (i = 0; i < kvm->nmemslots; i++) {
- struct kvm_memory_slot *memslot = &kvm->memslots[i];
+ slots = rcu_dereference(kvm->memslots);
+
+ for (i = 0; i < slots->nmemslots; i++) {
+ struct kvm_memory_slot *memslot = &slots->memslots[i];
unsigned long start = memslot->userspace_addr;
unsigned long end;
- /* mmu_lock protects userspace_addr */
- if (!start)
- continue;
-
end = start + (memslot->npages << PAGE_SHIFT);
if (hva >= start && hva < end) {
gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
- retval |= handler(kvm, &memslot->rmap[gfn_offset],
- data);
+ ret = handler(kvm, &memslot->rmap[gfn_offset], data);
for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
int idx = gfn_offset;
idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);
- retval |= handler(kvm,
+ ret |= handler(kvm,
&memslot->lpage_info[j][idx].rmap_pde,
data);
}
+ trace_kvm_age_page(hva, memslot, ret);
+ retval |= ret;
}
}
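kvm_handle_hva() walks the memslot array to translate a host virtual address into a slot-relative page index, which selects the rmap chain handed to the per-page handler. A standalone model of that containment-and-offset computation (struct and names illustrative):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct memslot { unsigned long userspace_addr, npages; };

    /* An hva belongs to a slot if it falls inside [userspace_addr,
     * userspace_addr + npages * PAGE_SIZE); its page index inside the
     * slot selects the rmap entry. */
    static long hva_to_slot_index(const struct memslot *s, unsigned long hva)
    {
        unsigned long start = s->userspace_addr;
        unsigned long end = start + (s->npages << PAGE_SHIFT);

        if (hva >= start && hva < end)
            return (hva - start) >> PAGE_SHIFT;
        return -1;      /* not in this slot */
    }

    int main(void)
    {
        struct memslot s = { 0x700000000000ul, 512 };

        printf("%ld\n", hva_to_slot_index(&s, 0x700000003000ul));  /* 3 */
        return 0;
    }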
@@ -856,9 +837,15 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
u64 *spte;
int young = 0;
- /* always return old for EPT */
+ /*
+ * Emulate the accessed bit for EPT, by checking if this page has
+ * an EPT mapping, and clearing it if it does. On the next access,
+ * a new EPT mapping will be established.
+ * This has some overhead, but not as much as the cost of swapping
+ * out actively used pages or breaking up actively used hugepages.
+ */
if (!shadow_accessed_mask)
- return 0;
+ return kvm_unmap_rmapp(kvm, rmapp, data);
spte = rmap_next(kvm, rmapp, NULL);
while (spte) {
@@ -1615,7 +1602,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
- int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
+ int slot = memslot_id(kvm, gfn);
struct kvm_mmu_page *sp = page_header(__pa(pte));
__set_bit(slot, sp->slot_bitmap);
@@ -1639,7 +1626,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
struct page *page;
- gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+ gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
if (gpa == UNMAPPED_GVA)
return NULL;
@@ -1852,7 +1839,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
* is responsibility of mmu_get_page / kvm_sync_page.
* Same reasoning can be applied to dirty page accounting.
*/
- if (!can_unsync && is_writeble_pte(*sptep))
+ if (!can_unsync && is_writable_pte(*sptep))
goto set_pte;
if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
@@ -1860,7 +1847,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
__func__, gfn);
ret = 1;
pte_access &= ~ACC_WRITE_MASK;
- if (is_writeble_pte(spte))
+ if (is_writable_pte(spte))
spte &= ~PT_WRITABLE_MASK;
}
}
@@ -1881,7 +1868,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
bool reset_host_protection)
{
int was_rmapped = 0;
- int was_writeble = is_writeble_pte(*sptep);
+ int was_writable = is_writable_pte(*sptep);
int rmap_count;
pgprintk("%s: spte %llx access %x write_fault %d"
@@ -1932,7 +1919,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (rmap_count > RMAP_RECYCLE_THRESHOLD)
rmap_recycle(vcpu, sptep, gfn);
} else {
- if (was_writeble)
+ if (was_writable)
kvm_release_pfn_dirty(pfn);
else
kvm_release_pfn_clean(pfn);
@@ -2162,8 +2149,11 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
spin_unlock(&vcpu->kvm->mmu_lock);
}
-static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
+static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
+ u32 access, u32 *error)
{
+ if (error)
+ *error = 0;
return vaddr;
}
@@ -2747,7 +2737,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
if (tdp_enabled)
return 0;
- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+ gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
spin_lock(&vcpu->kvm->mmu_lock);
r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -2847,16 +2837,13 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
*/
page = alloc_page(GFP_KERNEL | __GFP_DMA32);
if (!page)
- goto error_1;
+ return -ENOMEM;
+
vcpu->arch.mmu.pae_root = page_address(page);
for (i = 0; i < 4; ++i)
vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
return 0;
-
-error_1:
- free_mmu_pages(vcpu);
- return -ENOMEM;
}
int kvm_mmu_create(struct kvm_vcpu *vcpu)
@@ -2936,10 +2923,9 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
- int npages;
+ int npages, idx;
- if (!down_read_trylock(&kvm->slots_lock))
- continue;
+ idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
npages = kvm->arch.n_alloc_mmu_pages -
kvm->arch.n_free_mmu_pages;
@@ -2952,7 +2938,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
nr_to_scan--;
spin_unlock(&kvm->mmu_lock);
- up_read(&kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
}
if (kvm_freed)
list_move_tail(&kvm_freed->vm_list, &vm_list);
@@ -3019,9 +3005,11 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
int i;
unsigned int nr_mmu_pages;
unsigned int nr_pages = 0;
+ struct kvm_memslots *slots;
- for (i = 0; i < kvm->nmemslots; i++)
- nr_pages += kvm->memslots[i].npages;
+ slots = rcu_dereference(kvm->memslots);
+ for (i = 0; i < slots->nmemslots; i++)
+ nr_pages += slots->memslots[i].npages;
nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
nr_mmu_pages = max(nr_mmu_pages,
@@ -3246,7 +3234,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
audit_mappings_page(vcpu, ent, va, level - 1);
else {
- gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
+ gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
gfn_t gfn = gpa >> PAGE_SHIFT;
pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
@@ -3291,10 +3279,14 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
static int count_rmaps(struct kvm_vcpu *vcpu)
{
int nmaps = 0;
- int i, j, k;
+ int i, j, k, idx;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_memslots *slots;
+ idx = srcu_read_lock(&kvm->srcu);
+ slots = rcu_dereference(kvm->memslots);
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
+ struct kvm_memory_slot *m = &slots->memslots[i];
struct kvm_rmap_desc *d;
for (j = 0; j < m->npages; ++j) {
@@ -3317,6 +3307,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
}
}
}
+ srcu_read_unlock(&kvm->srcu, idx);
return nmaps;
}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 61a1b38..be66759 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -2,6 +2,7 @@
#define __KVM_X86_MMU_H
#include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
@@ -37,6 +38,16 @@
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3
+#define PT_PDPE_LEVEL 3
+#define PT_DIRECTORY_LEVEL 2
+#define PT_PAGE_TABLE_LEVEL 1
+
+#define PFERR_PRESENT_MASK (1U << 0)
+#define PFERR_WRITE_MASK (1U << 1)
+#define PFERR_USER_MASK (1U << 2)
+#define PFERR_RSVD_MASK (1U << 3)
+#define PFERR_FETCH_MASK (1U << 4)
+
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
@@ -53,30 +64,6 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
return kvm_mmu_load(vcpu);
}
-static inline int is_long_mode(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_X86_64
- return vcpu->arch.shadow_efer & EFER_LMA;
-#else
- return 0;
-#endif
-}
-
-static inline int is_pae(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.cr4 & X86_CR4_PAE;
-}
-
-static inline int is_pse(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.cr4 & X86_CR4_PSE;
-}
-
-static inline int is_paging(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.cr0 & X86_CR0_PG;
-}
-
static inline int is_present_gpte(unsigned long pte)
{
return pte & PT_PRESENT_MASK;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ede2131..81eab9a 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -162,7 +162,7 @@ walk:
if (rsvd_fault)
goto access_error;
- if (write_fault && !is_writeble_pte(pte))
+ if (write_fault && !is_writable_pte(pte))
if (user_fault || is_write_protection(vcpu))
goto access_error;
@@ -490,18 +490,23 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
spin_unlock(&vcpu->kvm->mmu_lock);
}
-static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
+static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
+ u32 *error)
{
struct guest_walker walker;
gpa_t gpa = UNMAPPED_GVA;
int r;
- r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
+ r = FNAME(walk_addr)(&walker, vcpu, vaddr,
+ !!(access & PFERR_WRITE_MASK),
+ !!(access & PFERR_USER_MASK),
+ !!(access & PFERR_FETCH_MASK));
if (r) {
gpa = gfn_to_gpa(walker.gfn);
gpa |= vaddr & ~PAGE_MASK;
- }
+ } else if (error)
+ *error = walker.error_code;
return gpa;
}
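gva_to_gpa() now receives the access intent encoded with the same PFERR_* bits a page-fault error code uses, so the page-table walker can enforce write/user/fetch permissions and return a precise error code on failure. A small model of the flag decomposition (walk stands in for walk_addr):

    #include <stdbool.h>
    #include <stdio.h>

    #define PFERR_WRITE_MASK (1u << 1)
    #define PFERR_USER_MASK  (1u << 2)
    #define PFERR_FETCH_MASK (1u << 4)

    /* One access word carries everything the walker needs to apply
     * the architectural permission checks. */
    static void walk(unsigned access)
    {
        bool write = !!(access & PFERR_WRITE_MASK);
        bool user  = !!(access & PFERR_USER_MASK);
        bool fetch = !!(access & PFERR_FETCH_MASK);

        printf("walk: write=%d user=%d fetch=%d\n", write, user, fetch);
    }

    int main(void)
    {
        walk(PFERR_WRITE_MASK | PFERR_USER_MASK);   /* user-mode store */
        return 0;
    }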
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1d9b338..52f78dd 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -231,7 +231,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
efer &= ~EFER_LME;
to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
- vcpu->arch.shadow_efer = efer;
+ vcpu->arch.efer = efer;
}
static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
@@ -540,6 +540,8 @@ static void init_vmcb(struct vcpu_svm *svm)
struct vmcb_control_area *control = &svm->vmcb->control;
struct vmcb_save_area *save = &svm->vmcb->save;
+ svm->vcpu.fpu_active = 1;
+
control->intercept_cr_read = INTERCEPT_CR0_MASK |
INTERCEPT_CR3_MASK |
INTERCEPT_CR4_MASK;
@@ -552,13 +554,19 @@ static void init_vmcb(struct vcpu_svm *svm)
control->intercept_dr_read = INTERCEPT_DR0_MASK |
INTERCEPT_DR1_MASK |
INTERCEPT_DR2_MASK |
- INTERCEPT_DR3_MASK;
+ INTERCEPT_DR3_MASK |
+ INTERCEPT_DR4_MASK |
+ INTERCEPT_DR5_MASK |
+ INTERCEPT_DR6_MASK |
+ INTERCEPT_DR7_MASK;
control->intercept_dr_write = INTERCEPT_DR0_MASK |
INTERCEPT_DR1_MASK |
INTERCEPT_DR2_MASK |
INTERCEPT_DR3_MASK |
+ INTERCEPT_DR4_MASK |
INTERCEPT_DR5_MASK |
+ INTERCEPT_DR6_MASK |
INTERCEPT_DR7_MASK;
control->intercept_exceptions = (1 << PF_VECTOR) |
@@ -569,6 +577,7 @@ static void init_vmcb(struct vcpu_svm *svm)
control->intercept = (1ULL << INTERCEPT_INTR) |
(1ULL << INTERCEPT_NMI) |
(1ULL << INTERCEPT_SMI) |
+ (1ULL << INTERCEPT_SELECTIVE_CR0) |
(1ULL << INTERCEPT_CPUID) |
(1ULL << INTERCEPT_INVD) |
(1ULL << INTERCEPT_HLT) |
@@ -641,10 +650,8 @@ static void init_vmcb(struct vcpu_svm *svm)
control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
(1ULL << INTERCEPT_INVLPG));
control->intercept_exceptions &= ~(1 << PF_VECTOR);
- control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
- INTERCEPT_CR3_MASK);
- control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
- INTERCEPT_CR3_MASK);
+ control->intercept_cr_read &= ~INTERCEPT_CR3_MASK;
+ control->intercept_cr_write &= ~INTERCEPT_CR3_MASK;
save->g_pat = 0x0007040600070406ULL;
save->cr3 = 0;
save->cr4 = 0;
@@ -730,7 +737,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
init_vmcb(svm);
fx_init(&svm->vcpu);
- svm->vcpu.fpu_active = 1;
svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
if (kvm_vcpu_is_bsp(&svm->vcpu))
svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
@@ -765,14 +771,16 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (unlikely(cpu != vcpu->cpu)) {
u64 delta;
- /*
- * Make sure that the guest sees a monotonically
- * increasing TSC.
- */
- delta = vcpu->arch.host_tsc - native_read_tsc();
- svm->vmcb->control.tsc_offset += delta;
- if (is_nested(svm))
- svm->nested.hsave->control.tsc_offset += delta;
+ if (check_tsc_unstable()) {
+ /*
+ * Make sure that the guest sees a monotonically
+ * increasing TSC.
+ */
+ delta = vcpu->arch.host_tsc - native_read_tsc();
+ svm->vmcb->control.tsc_offset += delta;
+ if (is_nested(svm))
+ svm->nested.hsave->control.tsc_offset += delta;
+ }
vcpu->cpu = cpu;
kvm_migrate_timers(vcpu);
svm->asid_generation = 0;
@@ -954,42 +962,59 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
svm->vmcb->save.gdtr.base = dt->base ;
}
+static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
+{
+}
+
static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}
+static void update_cr0_intercept(struct vcpu_svm *svm)
+{
+ ulong gcr0 = svm->vcpu.arch.cr0;
+ u64 *hcr0 = &svm->vmcb->save.cr0;
+
+ if (!svm->vcpu.fpu_active)
+ *hcr0 |= SVM_CR0_SELECTIVE_MASK;
+ else
+ *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
+ | (gcr0 & SVM_CR0_SELECTIVE_MASK);
+
+ if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
+ svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
+ svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+ } else {
+ svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
+ svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
+ }
+}
+
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_svm *svm = to_svm(vcpu);
#ifdef CONFIG_X86_64
- if (vcpu->arch.shadow_efer & EFER_LME) {
+ if (vcpu->arch.efer & EFER_LME) {
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
- vcpu->arch.shadow_efer |= EFER_LMA;
+ vcpu->arch.efer |= EFER_LMA;
svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
}
if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
- vcpu->arch.shadow_efer &= ~EFER_LMA;
+ vcpu->arch.efer &= ~EFER_LMA;
svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
}
}
#endif
- if (npt_enabled)
- goto set;
+ vcpu->arch.cr0 = cr0;
- if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
- svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
- vcpu->fpu_active = 1;
- }
+ if (!npt_enabled)
+ cr0 |= X86_CR0_PG | X86_CR0_WP;
- vcpu->arch.cr0 = cr0;
- cr0 |= X86_CR0_PG | X86_CR0_WP;
- if (!vcpu->fpu_active) {
- svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
+ if (!vcpu->fpu_active)
cr0 |= X86_CR0_TS;
- }
-set:
/*
* re-enable caching here because the QEMU bios
* does not do it - this results in some delay at
@@ -997,6 +1022,7 @@ set:
*/
cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
svm->vmcb->save.cr0 = cr0;
+ update_cr0_intercept(svm);
}
static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -1102,76 +1128,70 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
svm->vmcb->control.asid = sd->next_asid++;
}
-static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
+static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest)
{
struct vcpu_svm *svm = to_svm(vcpu);
- unsigned long val;
switch (dr) {
case 0 ... 3:
- val = vcpu->arch.db[dr];
+ *dest = vcpu->arch.db[dr];
break;
+ case 4:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+ return EMULATE_FAIL; /* will re-inject UD */
+ /* fall through */
case 6:
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
- val = vcpu->arch.dr6;
+ *dest = vcpu->arch.dr6;
else
- val = svm->vmcb->save.dr6;
+ *dest = svm->vmcb->save.dr6;
break;
+ case 5:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+ return EMULATE_FAIL; /* will re-inject UD */
+ /* fall through */
case 7:
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
- val = vcpu->arch.dr7;
+ *dest = vcpu->arch.dr7;
else
- val = svm->vmcb->save.dr7;
+ *dest = svm->vmcb->save.dr7;
break;
- default:
- val = 0;
}
- return val;
+ return EMULATE_DONE;
}
-static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
- int *exception)
+static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value)
{
struct vcpu_svm *svm = to_svm(vcpu);
- *exception = 0;
-
switch (dr) {
case 0 ... 3:
vcpu->arch.db[dr] = value;
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
vcpu->arch.eff_db[dr] = value;
- return;
- case 4 ... 5:
- if (vcpu->arch.cr4 & X86_CR4_DE)
- *exception = UD_VECTOR;
- return;
+ break;
+ case 4:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+ return EMULATE_FAIL; /* will re-inject UD */
+ /* fall through */
case 6:
- if (value & 0xffffffff00000000ULL) {
- *exception = GP_VECTOR;
- return;
- }
vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
- return;
+ break;
+ case 5:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+ return EMULATE_FAIL; /* will re-inject UD */
+ /* fall through */
case 7:
- if (value & 0xffffffff00000000ULL) {
- *exception = GP_VECTOR;
- return;
- }
vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
svm->vmcb->save.dr7 = vcpu->arch.dr7;
vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
}
- return;
- default:
- /* FIXME: Possible case? */
- printk(KERN_DEBUG "%s: unexpected dr %u\n",
- __func__, dr);
- *exception = UD_VECTOR;
- return;
+ break;
}
+
+ return EMULATE_DONE;
}
static int pf_interception(struct vcpu_svm *svm)
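The DR4/DR5 cases implement the architectural aliasing rule: with CR4.DE clear they alias DR6/DR7 (legacy behaviour), while with CR4.DE set any access raises #UD, which the emulator signals by returning EMULATE_FAIL so the #UD is re-injected. A standalone model of the rule:

    #include <stdio.h>

    /* Returns which register an access resolves to, or "#UD" when
     * CR4.DE forbids the legacy DR4/DR5 aliases. */
    static const char *dr_access(int dr, int cr4_de)
    {
        switch (dr) {
        case 4: case 5:
            if (cr4_de)
                return "#UD";
            dr += 2;            /* alias to DR6/DR7 */
            /* fall through */
        case 6: case 7:
            return dr == 6 ? "DR6" : "DR7";
        default:
            return "DR0-DR3";
        }
    }

    int main(void)
    {
        printf("%s %s\n", dr_access(4, 0), dr_access(5, 1)); /* DR6 #UD */
        return 0;
    }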
@@ -1239,13 +1259,17 @@ static int ud_interception(struct vcpu_svm *svm)
return 1;
}
-static int nm_interception(struct vcpu_svm *svm)
+static void svm_fpu_activate(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
- if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
- svm->vmcb->save.cr0 &= ~X86_CR0_TS;
svm->vcpu.fpu_active = 1;
+ update_cr0_intercept(svm);
+}
+static int nm_interception(struct vcpu_svm *svm)
+{
+ svm_fpu_activate(&svm->vcpu);
return 1;
}
@@ -1337,7 +1361,7 @@ static int vmmcall_interception(struct vcpu_svm *svm)
static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
- if (!(svm->vcpu.arch.shadow_efer & EFER_SVME)
+ if (!(svm->vcpu.arch.efer & EFER_SVME)
|| !is_paging(&svm->vcpu)) {
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
@@ -1740,8 +1764,8 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
hsave->save.ds = vmcb->save.ds;
hsave->save.gdtr = vmcb->save.gdtr;
hsave->save.idtr = vmcb->save.idtr;
- hsave->save.efer = svm->vcpu.arch.shadow_efer;
- hsave->save.cr0 = svm->vcpu.arch.cr0;
+ hsave->save.efer = svm->vcpu.arch.efer;
+ hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
hsave->save.cr4 = svm->vcpu.arch.cr4;
hsave->save.rflags = vmcb->save.rflags;
hsave->save.rip = svm->next_rip;
@@ -2153,9 +2177,10 @@ static int rdmsr_interception(struct vcpu_svm *svm)
u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
u64 data;
- if (svm_get_msr(&svm->vcpu, ecx, &data))
+ if (svm_get_msr(&svm->vcpu, ecx, &data)) {
+ trace_kvm_msr_read_ex(ecx);
kvm_inject_gp(&svm->vcpu, 0);
- else {
+ } else {
trace_kvm_msr_read(ecx, data);
svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
@@ -2247,13 +2272,15 @@ static int wrmsr_interception(struct vcpu_svm *svm)
u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
- trace_kvm_msr_write(ecx, data);
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
- if (svm_set_msr(&svm->vcpu, ecx, data))
+ if (svm_set_msr(&svm->vcpu, ecx, data)) {
+ trace_kvm_msr_write_ex(ecx, data);
kvm_inject_gp(&svm->vcpu, 0);
- else
+ } else {
+ trace_kvm_msr_write(ecx, data);
skip_emulated_instruction(&svm->vcpu);
+ }
return 1;
}
@@ -2297,7 +2324,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_READ_CR3] = emulate_on_interception,
[SVM_EXIT_READ_CR4] = emulate_on_interception,
[SVM_EXIT_READ_CR8] = emulate_on_interception,
- /* for now: */
+ [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
[SVM_EXIT_WRITE_CR0] = emulate_on_interception,
[SVM_EXIT_WRITE_CR3] = emulate_on_interception,
[SVM_EXIT_WRITE_CR4] = emulate_on_interception,
@@ -2306,11 +2333,17 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_READ_DR1] = emulate_on_interception,
[SVM_EXIT_READ_DR2] = emulate_on_interception,
[SVM_EXIT_READ_DR3] = emulate_on_interception,
+ [SVM_EXIT_READ_DR4] = emulate_on_interception,
+ [SVM_EXIT_READ_DR5] = emulate_on_interception,
+ [SVM_EXIT_READ_DR6] = emulate_on_interception,
+ [SVM_EXIT_READ_DR7] = emulate_on_interception,
[SVM_EXIT_WRITE_DR0] = emulate_on_interception,
[SVM_EXIT_WRITE_DR1] = emulate_on_interception,
[SVM_EXIT_WRITE_DR2] = emulate_on_interception,
[SVM_EXIT_WRITE_DR3] = emulate_on_interception,
+ [SVM_EXIT_WRITE_DR4] = emulate_on_interception,
[SVM_EXIT_WRITE_DR5] = emulate_on_interception,
+ [SVM_EXIT_WRITE_DR6] = emulate_on_interception,
[SVM_EXIT_WRITE_DR7] = emulate_on_interception,
[SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
[SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
@@ -2383,20 +2416,10 @@ static int handle_exit(struct kvm_vcpu *vcpu)
svm_complete_interrupts(svm);
- if (npt_enabled) {
- int mmu_reload = 0;
- if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
- svm_set_cr0(vcpu, svm->vmcb->save.cr0);
- mmu_reload = 1;
- }
+ if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
vcpu->arch.cr0 = svm->vmcb->save.cr0;
+ if (npt_enabled)
vcpu->arch.cr3 = svm->vmcb->save.cr3;
- if (mmu_reload) {
- kvm_mmu_reset_context(vcpu);
- kvm_mmu_load(vcpu);
- }
- }
-
if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -2798,12 +2821,6 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
svm->vmcb->save.cr3 = root;
force_new_asid(vcpu);
-
- if (vcpu->fpu_active) {
- svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
- svm->vmcb->save.cr0 |= X86_CR0_TS;
- vcpu->fpu_active = 0;
- }
}
static int is_disabled(void)
@@ -2852,6 +2869,10 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
return 0;
}
+static void svm_cpuid_update(struct kvm_vcpu *vcpu)
+{
+}
+
static const struct trace_print_flags svm_exit_reasons_str[] = {
{ SVM_EXIT_READ_CR0, "read_cr0" },
{ SVM_EXIT_READ_CR3, "read_cr3" },
@@ -2905,9 +2926,22 @@ static const struct trace_print_flags svm_exit_reasons_str[] = {
{ -1, NULL }
};
-static bool svm_gb_page_enable(void)
+static int svm_get_lpage_level(void)
{
- return true;
+ return PT_PDPE_LEVEL;
+}
+
+static bool svm_rdtscp_supported(void)
+{
+ return false;
+}
+
+static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ update_cr0_intercept(svm);
+ svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
}
static struct kvm_x86_ops svm_x86_ops = {
@@ -2936,6 +2970,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_segment = svm_set_segment,
.get_cpl = svm_get_cpl,
.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
+ .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
.set_cr0 = svm_set_cr0,
.set_cr3 = svm_set_cr3,
@@ -2950,6 +2985,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.cache_reg = svm_cache_reg,
.get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags,
+ .fpu_activate = svm_fpu_activate,
+ .fpu_deactivate = svm_fpu_deactivate,
.tlb_flush = svm_flush_tlb,
@@ -2975,7 +3012,11 @@ static struct kvm_x86_ops svm_x86_ops = {
.get_mt_mask = svm_get_mt_mask,
.exit_reasons_str = svm_exit_reasons_str,
- .gb_page_enable = svm_gb_page_enable,
+ .get_lpage_level = svm_get_lpage_level,
+
+ .cpuid_update = svm_cpuid_update,
+
+ .rdtscp_supported = svm_rdtscp_supported,
};
static int __init svm_init(void)
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 816e044..6ad30a2 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -56,6 +56,38 @@ TRACE_EVENT(kvm_hypercall,
);
/*
+ * Tracepoint for hypercall.
+ */
+TRACE_EVENT(kvm_hv_hypercall,
+ TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
+ __u64 ingpa, __u64 outgpa),
+ TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),
+
+ TP_STRUCT__entry(
+ __field( __u16, code )
+ __field( bool, fast )
+ __field( __u16, rep_cnt )
+ __field( __u16, rep_idx )
+ __field( __u64, ingpa )
+ __field( __u64, outgpa )
+ ),
+
+ TP_fast_assign(
+ __entry->code = code;
+ __entry->fast = fast;
+ __entry->rep_cnt = rep_cnt;
+ __entry->rep_idx = rep_idx;
+ __entry->ingpa = ingpa;
+ __entry->outgpa = outgpa;
+ ),
+
+ TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
+ __entry->code, __entry->fast ? "fast" : "slow",
+ __entry->rep_cnt, __entry->rep_idx, __entry->ingpa,
+ __entry->outgpa)
+);
+
+/*
* Tracepoint for PIO.
*/
TRACE_EVENT(kvm_pio,
@@ -214,28 +246,33 @@ TRACE_EVENT(kvm_page_fault,
* Tracepoint for guest MSR access.
*/
TRACE_EVENT(kvm_msr,
- TP_PROTO(unsigned int rw, unsigned int ecx, unsigned long data),
- TP_ARGS(rw, ecx, data),
+ TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
+ TP_ARGS(write, ecx, data, exception),
TP_STRUCT__entry(
- __field( unsigned int, rw )
- __field( unsigned int, ecx )
- __field( unsigned long, data )
+ __field( unsigned, write )
+ __field( u32, ecx )
+ __field( u64, data )
+ __field( u8, exception )
),
TP_fast_assign(
- __entry->rw = rw;
+ __entry->write = write;
__entry->ecx = ecx;
__entry->data = data;
+ __entry->exception = exception;
),
- TP_printk("msr_%s %x = 0x%lx",
- __entry->rw ? "write" : "read",
- __entry->ecx, __entry->data)
+ TP_printk("msr_%s %x = 0x%llx%s",
+ __entry->write ? "write" : "read",
+ __entry->ecx, __entry->data,
+ __entry->exception ? " (#GP)" : "")
);
-#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data)
-#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data)
+#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data, false)
+#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data, false)
+#define trace_kvm_msr_read_ex(ecx) trace_kvm_msr(0, ecx, 0, true)
+#define trace_kvm_msr_write_ex(ecx, data) trace_kvm_msr(1, ecx, data, true)
/*
* Tracepoint for guest CR access.
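Call sites now choose among four wrappers so that successful and faulting MSR accesses share one event, with "(#GP)" appended when the access raised an exception. A userspace model of the resulting trace-line format (printf stands in for TP_printk):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the consolidated TP_printk format above. */
    static void trace_msr(int write, uint32_t ecx, uint64_t data,
                          int exception)
    {
        printf("msr_%s %x = 0x%llx%s\n", write ? "write" : "read",
               ecx, (unsigned long long)data,
               exception ? " (#GP)" : "");
    }

    int main(void)
    {
        trace_msr(0, 0xc0000080, 0x500, 0);     /* successful EFER read */
        trace_msr(1, 0x12345678, 0, 1);         /* write to bogus MSR */
        return 0;
    }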
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d4918d6..14873b9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -61,6 +61,21 @@ module_param_named(unrestricted_guest,
static int __read_mostly emulate_invalid_guest_state = 0;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);
+#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
+ (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
+#define KVM_GUEST_CR0_MASK \
+ (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
+#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \
+ (X86_CR0_WP | X86_CR0_NE)
+#define KVM_VM_CR0_ALWAYS_ON \
+ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
+#define KVM_CR4_GUEST_OWNED_BITS \
+ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
+ | X86_CR4_OSXMMEXCPT)
+
+#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
+#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
+
/*
* These 2 parameters are used to config the controls for Pause-Loop Exiting:
* ple_gap: upper bound on the amount of time between two successive
@@ -136,6 +151,8 @@ struct vcpu_vmx {
ktime_t entry_time;
s64 vnmi_blocked_time;
u32 exit_reason;
+
+ bool rdtscp_enabled;
};
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -210,7 +227,7 @@ static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
- MSR_EFER, MSR_K6_STAR,
+ MSR_EFER, MSR_TSC_AUX, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
@@ -301,6 +318,11 @@ static inline bool cpu_has_vmx_ept_2m_page(void)
return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
}
+static inline bool cpu_has_vmx_ept_1g_page(void)
+{
+ return !!(vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT);
+}
+
static inline int cpu_has_vmx_invept_individual_addr(void)
{
return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
@@ -336,9 +358,7 @@ static inline int cpu_has_vmx_ple(void)
static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
- return flexpriority_enabled &&
- (cpu_has_vmx_virtualize_apic_accesses()) &&
- (irqchip_in_kernel(kvm));
+ return flexpriority_enabled && irqchip_in_kernel(kvm);
}
static inline int cpu_has_vmx_vpid(void)
@@ -347,6 +367,12 @@ static inline int cpu_has_vmx_vpid(void)
SECONDARY_EXEC_ENABLE_VPID;
}
+static inline int cpu_has_vmx_rdtscp(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_RDTSCP;
+}
+
static inline int cpu_has_virtual_nmis(void)
{
return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
@@ -551,22 +577,18 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
u32 eb;
- eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
- if (!vcpu->fpu_active)
- eb |= 1u << NM_VECTOR;
- /*
- * Unconditionally intercept #DB so we can maintain dr6 without
- * reading it every exit.
- */
- eb |= 1u << DB_VECTOR;
- if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
- if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
- eb |= 1u << BP_VECTOR;
- }
+ eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
+ (1u << NM_VECTOR) | (1u << DB_VECTOR);
+ if ((vcpu->guest_debug &
+ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
+ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
+ eb |= 1u << BP_VECTOR;
if (to_vmx(vcpu)->rmode.vm86_active)
eb = ~0;
if (enable_ept)
eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
+ if (vcpu->fpu_active)
+ eb &= ~(1u << NM_VECTOR);
vmcs_write32(EXCEPTION_BITMAP, eb);
}
@@ -589,7 +611,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
u64 guest_efer;
u64 ignore_bits;
- guest_efer = vmx->vcpu.arch.shadow_efer;
+ guest_efer = vmx->vcpu.arch.efer;
/*
* NX is emulated; LMA and LME handled by hardware; SCE meaningless
@@ -767,22 +789,30 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
+ ulong cr0;
+
if (vcpu->fpu_active)
return;
vcpu->fpu_active = 1;
- vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
- if (vcpu->arch.cr0 & X86_CR0_TS)
- vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
+ cr0 = vmcs_readl(GUEST_CR0);
+ cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
+ cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
+ vmcs_writel(GUEST_CR0, cr0);
update_exception_bitmap(vcpu);
+ vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+ vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
}
+static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
+
static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
- if (!vcpu->fpu_active)
- return;
- vcpu->fpu_active = 0;
- vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
+ vmx_decache_cr0_guest_bits(vcpu);
+ vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
update_exception_bitmap(vcpu);
+ vcpu->arch.cr0_guest_owned_bits = 0;
+ vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
+ vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
}
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
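With lazy FPU switching, CR0_GUEST_HOST_MASK controls bit ownership: mask bits are host-owned (guest writes trap, guest reads come from the read shadow), clear bits are guest-owned and live in GUEST_CR0. A standalone model of what the guest observes under each ownership setting (illustrative only):

    #include <stdio.h>

    #define CR0_TS (1ul << 3)

    /* Host-owned bits are read from the shadow; guest-owned bits are
     * read from the real guest register. */
    static unsigned long guest_sees_cr0(unsigned long guest_cr0,
                                        unsigned long read_shadow,
                                        unsigned long host_mask)
    {
        return (read_shadow & host_mask) | (guest_cr0 & ~host_mask);
    }

    int main(void)
    {
        /* FPU active: TS guest-owned, the hardware value wins */
        printf("%#lx\n", guest_sees_cr0(CR0_TS, 0, ~CR0_TS));   /* 0x8 */
        /* FPU inactive: TS host-owned, the shadow (0) wins */
        printf("%#lx\n", guest_sees_cr0(CR0_TS, 0, ~0ul));      /* 0 */
        return 0;
    }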
@@ -878,6 +908,11 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
}
+static bool vmx_rdtscp_supported(void)
+{
+ return cpu_has_vmx_rdtscp();
+}
+
/*
* Swap MSR entry in host/guest MSR entry array.
*/
@@ -913,12 +948,15 @@ static void setup_msrs(struct vcpu_vmx *vmx)
index = __find_msr_index(vmx, MSR_CSTAR);
if (index >= 0)
move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_TSC_AUX);
+ if (index >= 0 && vmx->rdtscp_enabled)
+ move_msr_up(vmx, index, save_nmsrs++);
/*
* MSR_K6_STAR is only needed on long mode guests, and only
* if efer.sce is enabled.
*/
index = __find_msr_index(vmx, MSR_K6_STAR);
- if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
+ if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
move_msr_up(vmx, index, save_nmsrs++);
}
#endif
@@ -1002,6 +1040,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
case MSR_IA32_SYSENTER_ESP:
data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
+ case MSR_TSC_AUX:
+ if (!to_vmx(vcpu)->rdtscp_enabled)
+ return 1;
+ /* Otherwise falls through */
default:
vmx_load_host_state(to_vmx(vcpu));
msr = find_msr_entry(to_vmx(vcpu), msr_index);
@@ -1065,7 +1107,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
vcpu->arch.pat = data;
break;
}
- /* Otherwise falls through to kvm_set_msr_common */
+ ret = kvm_set_msr_common(vcpu, msr_index, data);
+ break;
+ case MSR_TSC_AUX:
+ if (!vmx->rdtscp_enabled)
+ return 1;
+ /* Check reserved bit, higher 32 bits should be zero */
+ if ((data >> 32) != 0)
+ return 1;
+ /* Otherwise falls through */
default:
msr = find_msr_entry(vmx, msr_index);
if (msr) {
@@ -1224,6 +1274,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
CPU_BASED_USE_IO_BITMAPS |
CPU_BASED_MOV_DR_EXITING |
CPU_BASED_USE_TSC_OFFSETING |
+ CPU_BASED_MWAIT_EXITING |
+ CPU_BASED_MONITOR_EXITING |
CPU_BASED_INVLPG_EXITING;
opt = CPU_BASED_TPR_SHADOW |
CPU_BASED_USE_MSR_BITMAPS |
@@ -1243,7 +1295,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
SECONDARY_EXEC_ENABLE_VPID |
SECONDARY_EXEC_ENABLE_EPT |
SECONDARY_EXEC_UNRESTRICTED_GUEST |
- SECONDARY_EXEC_PAUSE_LOOP_EXITING;
+ SECONDARY_EXEC_PAUSE_LOOP_EXITING |
+ SECONDARY_EXEC_RDTSCP;
if (adjust_vmx_controls(min2, opt2,
MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
@@ -1457,8 +1510,12 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
static gva_t rmode_tss_base(struct kvm *kvm)
{
if (!kvm->arch.tss_addr) {
- gfn_t base_gfn = kvm->memslots[0].base_gfn +
- kvm->memslots[0].npages - 3;
+ struct kvm_memslots *slots;
+ gfn_t base_gfn;
+
+ slots = rcu_dereference(kvm->memslots);
+ base_gfn = slots->memslots[0].base_gfn +
+ slots->memslots[0].npages - 3;
return base_gfn << PAGE_SHIFT;
}
return kvm->arch.tss_addr;
@@ -1544,9 +1601,7 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
* of this msr depends on is_long_mode().
*/
vmx_load_host_state(to_vmx(vcpu));
- vcpu->arch.shadow_efer = efer;
- if (!msr)
- return;
+ vcpu->arch.efer = efer;
if (efer & EFER_LMA) {
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS) |
@@ -1576,13 +1631,13 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
(guest_tr_ar & ~AR_TYPE_MASK)
| AR_TYPE_BUSY_64_TSS);
}
- vcpu->arch.shadow_efer |= EFER_LMA;
- vmx_set_efer(vcpu, vcpu->arch.shadow_efer);
+ vcpu->arch.efer |= EFER_LMA;
+ vmx_set_efer(vcpu, vcpu->arch.efer);
}
static void exit_lmode(struct kvm_vcpu *vcpu)
{
- vcpu->arch.shadow_efer &= ~EFER_LMA;
+ vcpu->arch.efer &= ~EFER_LMA;
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS)
@@ -1598,10 +1653,20 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
}
+static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
+{
+ ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
+
+ vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
+ vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
+}
+
static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
- vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
- vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
+ ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
+
+ vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
+ vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
}
static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -1646,7 +1711,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
(CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING));
vcpu->arch.cr0 = cr0;
- vmx_set_cr4(vcpu, vcpu->arch.cr4);
+ vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
} else if (!is_paging(vcpu)) {
/* From nonpaging to paging */
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -1654,23 +1719,13 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
~(CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING));
vcpu->arch.cr0 = cr0;
- vmx_set_cr4(vcpu, vcpu->arch.cr4);
+ vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
}
if (!(cr0 & X86_CR0_WP))
*hw_cr0 &= ~X86_CR0_WP;
}
-static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
- struct kvm_vcpu *vcpu)
-{
- if (!is_paging(vcpu)) {
- *hw_cr4 &= ~X86_CR4_PAE;
- *hw_cr4 |= X86_CR4_PSE;
- } else if (!(vcpu->arch.cr4 & X86_CR4_PAE))
- *hw_cr4 &= ~X86_CR4_PAE;
-}
-
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -1682,8 +1737,6 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
else
hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
- vmx_fpu_deactivate(vcpu);
-
if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
enter_pmode(vcpu);
@@ -1691,7 +1744,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
enter_rmode(vcpu);
#ifdef CONFIG_X86_64
- if (vcpu->arch.shadow_efer & EFER_LME) {
+ if (vcpu->arch.efer & EFER_LME) {
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
enter_lmode(vcpu);
if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
@@ -1702,12 +1755,12 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
if (enable_ept)
ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
+ if (!vcpu->fpu_active)
+ hw_cr0 |= X86_CR0_TS | X86_CR0_MP;
+
vmcs_writel(CR0_READ_SHADOW, cr0);
vmcs_writel(GUEST_CR0, hw_cr0);
vcpu->arch.cr0 = cr0;
-
- if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
- vmx_fpu_activate(vcpu);
}
static u64 construct_eptp(unsigned long root_hpa)
@@ -1738,8 +1791,6 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
vmx_flush_tlb(vcpu);
vmcs_writel(GUEST_CR3, guest_cr3);
- if (vcpu->arch.cr0 & X86_CR0_PE)
- vmx_fpu_deactivate(vcpu);
}
static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -1748,8 +1799,14 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
vcpu->arch.cr4 = cr4;
- if (enable_ept)
- ept_update_paging_mode_cr4(&hw_cr4, vcpu);
+ if (enable_ept) {
+ if (!is_paging(vcpu)) {
+ hw_cr4 &= ~X86_CR4_PAE;
+ hw_cr4 |= X86_CR4_PSE;
+ } else if (!(cr4 & X86_CR4_PAE)) {
+ hw_cr4 &= ~X86_CR4_PAE;
+ }
+ }
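With ept_update_paging_mode_cr4() gone, the fixup reads directly off the cr4 argument: a non-paging guest under EPT runs with PSE instead of PAE, and a paging guest only keeps PAE if it asked for it. A compilable sketch of the same computation, with always_on standing in for the KVM_*MODE_VM_CR4_ALWAYS_ON constants:

#include <stdbool.h>
#include <stdio.h>

#define X86_CR4_PSE (1UL << 4)
#define X86_CR4_PAE (1UL << 5)

static unsigned long fixup_hw_cr4(unsigned long cr4, unsigned long always_on,
                                  bool ept, bool paging)
{
        unsigned long hw_cr4 = cr4 | always_on;

        if (ept) {
                if (!paging) {
                        /* identity-mapped real mode: PSE, never PAE */
                        hw_cr4 &= ~X86_CR4_PAE;
                        hw_cr4 |= X86_CR4_PSE;
                } else if (!(cr4 & X86_CR4_PAE)) {
                        hw_cr4 &= ~X86_CR4_PAE;
                }
        }
        return hw_cr4;
}

int main(void)
{
        /* non-paging guest under EPT: PAE forced off, PSE forced on */
        printf("0x%lx\n", fixup_hw_cr4(0, X86_CR4_PAE, true, false));
        return 0;
}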
vmcs_writel(CR4_READ_SHADOW, cr4);
vmcs_writel(GUEST_CR4, hw_cr4);
@@ -1787,7 +1844,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
static int vmx_get_cpl(struct kvm_vcpu *vcpu)
{
- if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
+ if (!is_protmode(vcpu))
return 0;
if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
@@ -2042,7 +2099,7 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
static bool guest_state_valid(struct kvm_vcpu *vcpu)
{
/* real mode guest state checks */
- if (!(vcpu->arch.cr0 & X86_CR0_PE)) {
+ if (!is_protmode(vcpu)) {
if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
return false;
if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
@@ -2175,7 +2232,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
if (kvm->arch.apic_access_page)
goto out;
kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
@@ -2188,7 +2245,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
out:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}
@@ -2197,7 +2254,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
if (kvm->arch.ept_identity_pagetable)
goto out;
kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
@@ -2212,7 +2269,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
out:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}
@@ -2384,14 +2441,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
for (i = 0; i < NR_VMX_MSR; ++i) {
u32 index = vmx_msr_index[i];
u32 data_low, data_high;
- u64 data;
int j = vmx->nmsrs;
if (rdmsr_safe(index, &data_low, &data_high) < 0)
continue;
if (wrmsr_safe(index, data_low, data_high) < 0)
continue;
- data = data_low | ((u64)data_high << 32);
vmx->guest_msrs[j].index = i;
vmx->guest_msrs[j].data = 0;
vmx->guest_msrs[j].mask = -1ull;
@@ -2404,7 +2459,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
- vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
+ vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
+ if (enable_ept)
+ vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+ vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
rdtscll(tsc_this);
@@ -2429,10 +2487,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 msr;
- int ret;
+ int ret, idx;
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
if (!init_rmode(vmx->vcpu.kvm)) {
ret = -ENOMEM;
goto out;
@@ -2526,7 +2584,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
- vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
+ vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
vmx_set_cr4(&vmx->vcpu, 0);
vmx_set_efer(&vmx->vcpu, 0);
vmx_fpu_activate(&vmx->vcpu);
@@ -2540,7 +2598,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmx->emulation_required = 0;
out:
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
return ret;
}
@@ -2717,6 +2775,12 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
kvm_queue_exception(vcpu, vec);
return 1;
case BP_VECTOR:
+ /*
+ * Update instruction length as we may reinject the exception
+ * from user space while in guest debugging mode.
+ */
+ to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
+ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
return 0;
/* fall through */
@@ -2839,6 +2903,13 @@ static int handle_exception(struct kvm_vcpu *vcpu)
kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
/* fall through */
case BP_VECTOR:
+ /*
+ * Update instruction length as we may reinject #BP from
+ * user space while in guest debugging mode. Reading it for
+ * #DB as well does no harm; it is not used in that case.
+ */
+ vmx->vcpu.arch.event_exit_inst_len =
+ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
kvm_run->exit_reason = KVM_EXIT_DEBUG;
kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
kvm_run->debug.arch.exception = ex_no;
@@ -2940,11 +3011,10 @@ static int handle_cr(struct kvm_vcpu *vcpu)
};
break;
case 2: /* clts */
- vmx_fpu_deactivate(vcpu);
- vcpu->arch.cr0 &= ~X86_CR0_TS;
- vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
- vmx_fpu_activate(vcpu);
+ vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+ trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
skip_emulated_instruction(vcpu);
+ vmx_fpu_activate(vcpu);
return 1;
case 1: /*mov from cr*/
switch (cr) {
@@ -2962,7 +3032,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
}
break;
case 3: /* lmsw */
- kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
+ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
+ trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
+ kvm_lmsw(vcpu, val);
skip_emulated_instruction(vcpu);
return 1;
@@ -2975,12 +3047,22 @@ static int handle_cr(struct kvm_vcpu *vcpu)
return 0;
}
+static int check_dr_alias(struct kvm_vcpu *vcpu)
+{
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return -1;
+ }
+ return 0;
+}
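check_dr_alias() encodes the architectural rule the switch statements below rely on: DR4 and DR5 alias DR6 and DR7 while CR4.DE is clear, and raise #UD once it is set. A self-contained sketch of that rule, outside any KVM context:

#include <stdbool.h>
#include <stdio.h>

enum dr_result { DR_OK, DR_UD };

static enum dr_result resolve_dr(int dr, bool cr4_de, int *effective)
{
        if (dr == 4 || dr == 5) {
                if (cr4_de)
                        return DR_UD;   /* debug extensions on: #UD */
                dr += 2;                /* alias onto DR6/DR7 */
        }
        *effective = dr;
        return DR_OK;
}

int main(void)
{
        int eff;

        if (resolve_dr(4, false, &eff) == DR_OK)
                printf("DR4 -> DR%d\n", eff);           /* DR6 */
        if (resolve_dr(5, true, &eff) == DR_UD)
                printf("DR5 with CR4.DE set -> #UD\n");
        return 0;
}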
+
static int handle_dr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
unsigned long val;
int dr, reg;
+ /* Do not handle if CPL > 0; the queued #GP will be delivered on re-entry */
if (!kvm_require_cpl(vcpu, 0))
return 1;
dr = vmcs_readl(GUEST_DR7);
@@ -3016,14 +3098,20 @@ static int handle_dr(struct kvm_vcpu *vcpu)
case 0 ... 3:
val = vcpu->arch.db[dr];
break;
+ case 4:
+ if (check_dr_alias(vcpu) < 0)
+ return 1;
+ /* fall through */
case 6:
val = vcpu->arch.dr6;
break;
- case 7:
+ case 5:
+ if (check_dr_alias(vcpu) < 0)
+ return 1;
+ /* fall through */
+ default: /* 7 */
val = vcpu->arch.dr7;
break;
- default:
- val = 0;
}
kvm_register_write(vcpu, reg, val);
} else {
@@ -3034,21 +3122,25 @@ static int handle_dr(struct kvm_vcpu *vcpu)
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
vcpu->arch.eff_db[dr] = val;
break;
- case 4 ... 5:
- if (vcpu->arch.cr4 & X86_CR4_DE)
- kvm_queue_exception(vcpu, UD_VECTOR);
- break;
+ case 4:
+ if (check_dr_alias(vcpu) < 0)
+ return 1;
+ /* fall through */
case 6:
if (val & 0xffffffff00000000ULL) {
- kvm_queue_exception(vcpu, GP_VECTOR);
- break;
+ kvm_inject_gp(vcpu, 0);
+ return 1;
}
vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
break;
- case 7:
+ case 5:
+ if (check_dr_alias(vcpu) < 0)
+ return 1;
+ /* fall through */
+ default: /* 7 */
if (val & 0xffffffff00000000ULL) {
- kvm_queue_exception(vcpu, GP_VECTOR);
- break;
+ kvm_inject_gp(vcpu, 0);
+ return 1;
}
vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
@@ -3075,6 +3167,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
u64 data;
if (vmx_get_msr(vcpu, ecx, &data)) {
+ trace_kvm_msr_read_ex(ecx);
kvm_inject_gp(vcpu, 0);
return 1;
}
@@ -3094,13 +3187,13 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
- trace_kvm_msr_write(ecx, data);
-
if (vmx_set_msr(vcpu, ecx, data) != 0) {
+ trace_kvm_msr_write_ex(ecx, data);
kvm_inject_gp(vcpu, 0);
return 1;
}
+ trace_kvm_msr_write(ecx, data);
skip_emulated_instruction(vcpu);
return 1;
}
@@ -3385,7 +3478,6 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
}
if (err != EMULATE_DONE) {
- kvm_report_emulation_failure(vcpu, "emulation failure");
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
@@ -3416,6 +3508,12 @@ static int handle_pause(struct kvm_vcpu *vcpu)
return 1;
}
+static int handle_invalid_op(struct kvm_vcpu *vcpu)
+{
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+}
+
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -3453,6 +3551,8 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
[EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
[EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
+ [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
+ [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
};
static const int kvm_vmx_max_exit_handlers =
@@ -3686,9 +3786,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
*/
vmcs_writel(HOST_CR0, read_cr0());
- if (vcpu->arch.switch_db_regs)
- set_debugreg(vcpu->arch.dr6, 6);
-
asm(
/* Store host registers */
"push %%"R"dx; push %%"R"bp;"
@@ -3789,9 +3886,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
| (1 << VCPU_EXREG_PDPTR));
vcpu->arch.regs_dirty = 0;
- if (vcpu->arch.switch_db_regs)
- get_debugreg(vcpu->arch.dr6, 6);
-
vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
if (vmx->rmode.irq.pending)
fixup_rmode_irq(vmx);
@@ -3920,7 +4014,7 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
* b. VT-d with snooping control feature: snooping control feature of
* VT-d engine can guarantee the cache correctness. Just set it
* to WB to keep consistent with host. So the same as item 3.
- * 3. EPT without VT-d: always map as WB and set IGMT=1 to keep
+ * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
* consistent with host MTRR
*/
if (is_mmio)
@@ -3931,37 +4025,88 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
VMX_EPT_MT_EPTE_SHIFT;
else
ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
- | VMX_EPT_IGMT_BIT;
+ | VMX_EPT_IPAT_BIT;
return ret;
}
+#define _ER(x) { EXIT_REASON_##x, #x }
+
static const struct trace_print_flags vmx_exit_reasons_str[] = {
- { EXIT_REASON_EXCEPTION_NMI, "exception" },
- { EXIT_REASON_EXTERNAL_INTERRUPT, "ext_irq" },
- { EXIT_REASON_TRIPLE_FAULT, "triple_fault" },
- { EXIT_REASON_NMI_WINDOW, "nmi_window" },
- { EXIT_REASON_IO_INSTRUCTION, "io_instruction" },
- { EXIT_REASON_CR_ACCESS, "cr_access" },
- { EXIT_REASON_DR_ACCESS, "dr_access" },
- { EXIT_REASON_CPUID, "cpuid" },
- { EXIT_REASON_MSR_READ, "rdmsr" },
- { EXIT_REASON_MSR_WRITE, "wrmsr" },
- { EXIT_REASON_PENDING_INTERRUPT, "interrupt_window" },
- { EXIT_REASON_HLT, "halt" },
- { EXIT_REASON_INVLPG, "invlpg" },
- { EXIT_REASON_VMCALL, "hypercall" },
- { EXIT_REASON_TPR_BELOW_THRESHOLD, "tpr_below_thres" },
- { EXIT_REASON_APIC_ACCESS, "apic_access" },
- { EXIT_REASON_WBINVD, "wbinvd" },
- { EXIT_REASON_TASK_SWITCH, "task_switch" },
- { EXIT_REASON_EPT_VIOLATION, "ept_violation" },
+ _ER(EXCEPTION_NMI),
+ _ER(EXTERNAL_INTERRUPT),
+ _ER(TRIPLE_FAULT),
+ _ER(PENDING_INTERRUPT),
+ _ER(NMI_WINDOW),
+ _ER(TASK_SWITCH),
+ _ER(CPUID),
+ _ER(HLT),
+ _ER(INVLPG),
+ _ER(RDPMC),
+ _ER(RDTSC),
+ _ER(VMCALL),
+ _ER(VMCLEAR),
+ _ER(VMLAUNCH),
+ _ER(VMPTRLD),
+ _ER(VMPTRST),
+ _ER(VMREAD),
+ _ER(VMRESUME),
+ _ER(VMWRITE),
+ _ER(VMOFF),
+ _ER(VMON),
+ _ER(CR_ACCESS),
+ _ER(DR_ACCESS),
+ _ER(IO_INSTRUCTION),
+ _ER(MSR_READ),
+ _ER(MSR_WRITE),
+ _ER(MWAIT_INSTRUCTION),
+ _ER(MONITOR_INSTRUCTION),
+ _ER(PAUSE_INSTRUCTION),
+ _ER(MCE_DURING_VMENTRY),
+ _ER(TPR_BELOW_THRESHOLD),
+ _ER(APIC_ACCESS),
+ _ER(EPT_VIOLATION),
+ _ER(EPT_MISCONFIG),
+ _ER(WBINVD),
{ -1, NULL }
};
-static bool vmx_gb_page_enable(void)
+#undef _ER
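_ER() relies on token pasting plus stringification so each table entry carries the constant and its name from a single spelling, keeping value and string from drifting apart. The same pattern in miniature, with two constants spelled out locally:

#include <stdio.h>

#define EXIT_REASON_CPUID 10    /* illustrative values only */
#define EXIT_REASON_HLT   12

struct flag_str { long val; const char *str; };

#define _ER(x) { EXIT_REASON_##x, #x }
static const struct flag_str reasons[] = {
        _ER(CPUID),
        _ER(HLT),
        { -1, NULL }
};
#undef _ER

int main(void)
{
        for (const struct flag_str *p = reasons; p->str; p++)
                printf("%ld -> %s\n", p->val, p->str);
        return 0;
}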
+
+static int vmx_get_lpage_level(void)
+{
+ if (enable_ept && !cpu_has_vmx_ept_1g_page())
+ return PT_DIRECTORY_LEVEL;
+ else
+ /* for shadow paging, or for EPT with 1GB page support */
+ return PT_PDPE_LEVEL;
+}
+
+static inline u32 bit(int bitno)
+{
+ return 1 << (bitno & 31);
+}
+
+static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
- return false;
+ struct kvm_cpuid_entry2 *best;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 exec_control;
+
+ vmx->rdtscp_enabled = false;
+ if (vmx_rdtscp_supported()) {
+ exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+ if (exec_control & SECONDARY_EXEC_RDTSCP) {
+ best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+ if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
+ vmx->rdtscp_enabled = true;
+ else {
+ exec_control &= ~SECONDARY_EXEC_RDTSCP;
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+ exec_control);
+ }
+ }
+ }
}
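vmx_cpuid_update() keeps SECONDARY_EXEC_RDTSCP only when the guest's CPUID (leaf 0x80000001, EDX bit 27) advertises RDTSCP, and strips the control otherwise so the instruction keeps faulting. A sketch of that gating with the VMCS field reduced to a plain variable:

#include <stdbool.h>
#include <stdio.h>

#define SECONDARY_EXEC_RDTSCP   (1U << 3)
#define RDTSCP_CPUID_BIT        (1U << 27)      /* 0x80000001 EDX */

static bool gate_rdtscp(unsigned *exec_control, unsigned guest_edx)
{
        if (!(*exec_control & SECONDARY_EXEC_RDTSCP))
                return false;                   /* hardware lacks it */
        if (guest_edx & RDTSCP_CPUID_BIT)
                return true;                    /* leave it enabled */
        *exec_control &= ~SECONDARY_EXEC_RDTSCP;        /* hide it */
        return false;
}

int main(void)
{
        unsigned ctl = SECONDARY_EXEC_RDTSCP;

        printf("%d\n", gate_rdtscp(&ctl, RDTSCP_CPUID_BIT));    /* 1 */
        printf("%d\n", gate_rdtscp(&ctl, 0));                   /* 0 */
        return 0;
}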
static struct kvm_x86_ops vmx_x86_ops = {
@@ -3990,6 +4135,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_segment = vmx_set_segment,
.get_cpl = vmx_get_cpl,
.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
+ .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
.set_cr0 = vmx_set_cr0,
.set_cr3 = vmx_set_cr3,
@@ -4002,6 +4148,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.cache_reg = vmx_cache_reg,
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
+ .fpu_activate = vmx_fpu_activate,
+ .fpu_deactivate = vmx_fpu_deactivate,
.tlb_flush = vmx_flush_tlb,
@@ -4027,7 +4175,11 @@ static struct kvm_x86_ops vmx_x86_ops = {
.get_mt_mask = vmx_get_mt_mask,
.exit_reasons_str = vmx_exit_reasons_str,
- .gb_page_enable = vmx_gb_page_enable,
+ .get_lpage_level = vmx_get_lpage_level,
+
+ .cpuid_update = vmx_cpuid_update,
+
+ .rdtscp_supported = vmx_rdtscp_supported,
};
static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a1e1bc9..e46282a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -38,6 +38,7 @@
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
+#include <linux/srcu.h>
#include <trace/events/kvm.h>
#undef TRACE_INCLUDE_FILE
#define CREATE_TRACE_POINTS
@@ -93,16 +94,16 @@ module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
struct kvm_shared_msrs_global {
int nr;
- struct kvm_shared_msr {
- u32 msr;
- u64 value;
- } msrs[KVM_NR_SHARED_MSRS];
+ u32 msrs[KVM_NR_SHARED_MSRS];
};
struct kvm_shared_msrs {
struct user_return_notifier urn;
bool registered;
- u64 current_value[KVM_NR_SHARED_MSRS];
+ struct kvm_shared_msr_values {
+ u64 host;
+ u64 curr;
+ } values[KVM_NR_SHARED_MSRS];
};
static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
@@ -147,53 +148,64 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
static void kvm_on_user_return(struct user_return_notifier *urn)
{
unsigned slot;
- struct kvm_shared_msr *global;
struct kvm_shared_msrs *locals
= container_of(urn, struct kvm_shared_msrs, urn);
+ struct kvm_shared_msr_values *values;
for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
- global = &shared_msrs_global.msrs[slot];
- if (global->value != locals->current_value[slot]) {
- wrmsrl(global->msr, global->value);
- locals->current_value[slot] = global->value;
+ values = &locals->values[slot];
+ if (values->host != values->curr) {
+ wrmsrl(shared_msrs_global.msrs[slot], values->host);
+ values->curr = values->host;
}
}
locals->registered = false;
user_return_notifier_unregister(urn);
}
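The reworked bookkeeping pairs each slot with a host value and the value currently loaded, so the user-return notifier only writes back MSRs that actually diverged. A toy model of that path, with wrmsrl() stubbed to a printf:

#include <stdio.h>

#define NR_SLOTS 4

struct msr_values { unsigned long long host, curr; };

static struct msr_values values[NR_SLOTS];
static unsigned msrs[NR_SLOTS] = { 0xc0000081, 0xc0000082 };

static void wrmsrl(unsigned msr, unsigned long long v)
{
        printf("wrmsr 0x%x <- 0x%llx\n", msr, v);       /* stub */
}

static void on_user_return(int nr)
{
        for (int slot = 0; slot < nr; slot++) {
                struct msr_values *v = &values[slot];

                if (v->host != v->curr) {       /* write back only if dirty */
                        wrmsrl(msrs[slot], v->host);
                        v->curr = v->host;
                }
        }
}

int main(void)
{
        values[0] = (struct msr_values){ .host = 0xd01, .curr = 0xd01 };
        values[1] = (struct msr_values){ .host = 0xd02, .curr = 0xbeef };
        on_user_return(2);      /* only slot 1 is restored */
        return 0;
}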
-void kvm_define_shared_msr(unsigned slot, u32 msr)
+static void shared_msr_update(unsigned slot, u32 msr)
{
- int cpu;
+ struct kvm_shared_msrs *smsr;
u64 value;
+ smsr = &__get_cpu_var(shared_msrs);
+ /* only reads here; nobody should be modifying the table at
+ * this time, so no lock is needed */
+ if (slot >= shared_msrs_global.nr) {
+ printk(KERN_ERR "kvm: invalid MSR slot!");
+ return;
+ }
+ rdmsrl_safe(msr, &value);
+ smsr->values[slot].host = value;
+ smsr->values[slot].curr = value;
+}
+
+void kvm_define_shared_msr(unsigned slot, u32 msr)
+{
if (slot >= shared_msrs_global.nr)
shared_msrs_global.nr = slot + 1;
- shared_msrs_global.msrs[slot].msr = msr;
- rdmsrl_safe(msr, &value);
- shared_msrs_global.msrs[slot].value = value;
- for_each_online_cpu(cpu)
- per_cpu(shared_msrs, cpu).current_value[slot] = value;
+ shared_msrs_global.msrs[slot] = msr;
+ /* make sure shared_msrs_global has been updated before it is read */
+ smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
static void kvm_shared_msr_cpu_online(void)
{
unsigned i;
- struct kvm_shared_msrs *locals = &__get_cpu_var(shared_msrs);
for (i = 0; i < shared_msrs_global.nr; ++i)
- locals->current_value[i] = shared_msrs_global.msrs[i].value;
+ shared_msr_update(i, shared_msrs_global.msrs[i]);
}
void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
- if (((value ^ smsr->current_value[slot]) & mask) == 0)
+ if (((value ^ smsr->values[slot].curr) & mask) == 0)
return;
- smsr->current_value[slot] = value;
- wrmsrl(shared_msrs_global.msrs[slot].msr, value);
+ smsr->values[slot].curr = value;
+ wrmsrl(shared_msrs_global.msrs[slot], value);
if (!smsr->registered) {
smsr->urn.on_user_return = kvm_on_user_return;
user_return_notifier_register(&smsr->urn);
@@ -257,12 +269,68 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
+#define EXCPT_BENIGN 0
+#define EXCPT_CONTRIBUTORY 1
+#define EXCPT_PF 2
+
+static int exception_class(int vector)
+{
+ switch (vector) {
+ case PF_VECTOR:
+ return EXCPT_PF;
+ case DE_VECTOR:
+ case TS_VECTOR:
+ case NP_VECTOR:
+ case SS_VECTOR:
+ case GP_VECTOR:
+ return EXCPT_CONTRIBUTORY;
+ default:
+ break;
+ }
+ return EXCPT_BENIGN;
+}
+
+static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
+ unsigned nr, bool has_error, u32 error_code)
+{
+ u32 prev_nr;
+ int class1, class2;
+
+ if (!vcpu->arch.exception.pending) {
+ queue:
+ vcpu->arch.exception.pending = true;
+ vcpu->arch.exception.has_error_code = has_error;
+ vcpu->arch.exception.nr = nr;
+ vcpu->arch.exception.error_code = error_code;
+ return;
+ }
+
+ /* an exception is already pending; work out how the two combine */
+ prev_nr = vcpu->arch.exception.nr;
+ if (prev_nr == DF_VECTOR) {
+ /* triple fault -> shutdown */
+ set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+ return;
+ }
+ class1 = exception_class(prev_nr);
+ class2 = exception_class(nr);
+ if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
+ || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
+ /* generate double fault per SDM Table 5-5 */
+ vcpu->arch.exception.pending = true;
+ vcpu->arch.exception.has_error_code = true;
+ vcpu->arch.exception.nr = DF_VECTOR;
+ vcpu->arch.exception.error_code = 0;
+ } else
+ /* replace the previous exception with the new one in the hope
+ that re-executing the instruction will regenerate the lost
+ exception */
+ goto queue;
+}
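kvm_multiple_exception() compresses SDM Table 5-5 into three classes: contributory after contributory, or anything non-benign after #PF, escalates to #DF, and anything after a pending #DF becomes a triple fault; every other pair just queues the new exception. A standalone sketch of the combination logic, using raw vector numbers:

#include <stdio.h>

enum { EXCPT_BENIGN, EXCPT_CONTRIBUTORY, EXCPT_PF };

static int classify(int vector)
{
        switch (vector) {
        case 14:                                /* #PF */
                return EXCPT_PF;
        case 0: case 10: case 11: case 12: case 13:
                return EXCPT_CONTRIBUTORY;      /* #DE #TS #NP #SS #GP */
        default:
                return EXCPT_BENIGN;
        }
}

static const char *combine(int prev, int next)
{
        if (prev == 8)                          /* pending #DF */
                return "triple fault -> shutdown";

        int c1 = classify(prev), c2 = classify(next);

        if ((c1 == EXCPT_CONTRIBUTORY && c2 == EXCPT_CONTRIBUTORY) ||
            (c1 == EXCPT_PF && c2 != EXCPT_BENIGN))
                return "#DF";
        return "queue the new exception";
}

int main(void)
{
        printf("#GP then #GP: %s\n", combine(13, 13));  /* #DF */
        printf("#PF then #GP: %s\n", combine(14, 13));  /* #DF */
        printf("#GP then #PF: %s\n", combine(13, 14));  /* handled serially */
        return 0;
}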
+
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
- WARN_ON(vcpu->arch.exception.pending);
- vcpu->arch.exception.pending = true;
- vcpu->arch.exception.has_error_code = false;
- vcpu->arch.exception.nr = nr;
+ kvm_multiple_exception(vcpu, nr, false, 0);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);
@@ -270,25 +338,6 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
u32 error_code)
{
++vcpu->stat.pf_guest;
-
- if (vcpu->arch.exception.pending) {
- switch(vcpu->arch.exception.nr) {
- case DF_VECTOR:
- /* triple fault -> shutdown */
- set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
- return;
- case PF_VECTOR:
- vcpu->arch.exception.nr = DF_VECTOR;
- vcpu->arch.exception.error_code = 0;
- return;
- default:
- /* replace previous exception with a new one in a hope
- that instruction re-execution will regenerate lost
- exception */
- vcpu->arch.exception.pending = false;
- break;
- }
- }
vcpu->arch.cr2 = addr;
kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}
@@ -301,11 +350,7 @@ EXPORT_SYMBOL_GPL(kvm_inject_nmi);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
- WARN_ON(vcpu->arch.exception.pending);
- vcpu->arch.exception.pending = true;
- vcpu->arch.exception.has_error_code = true;
- vcpu->arch.exception.nr = nr;
- vcpu->arch.exception.error_code = error_code;
+ kvm_multiple_exception(vcpu, nr, true, error_code);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
@@ -383,12 +428,18 @@ out:
void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
- if (cr0 & CR0_RESERVED_BITS) {
+ cr0 |= X86_CR0_ET;
+
+#ifdef CONFIG_X86_64
+ if (cr0 & 0xffffffff00000000UL) {
printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
- cr0, vcpu->arch.cr0);
+ cr0, kvm_read_cr0(vcpu));
kvm_inject_gp(vcpu, 0);
return;
}
+#endif
+
+ cr0 &= ~CR0_RESERVED_BITS;
if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
@@ -405,7 +456,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
- if ((vcpu->arch.shadow_efer & EFER_LME)) {
+ if ((vcpu->arch.efer & EFER_LME)) {
int cs_db, cs_l;
if (!is_pae(vcpu)) {
@@ -443,13 +494,13 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
- kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
+ kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
- unsigned long old_cr4 = vcpu->arch.cr4;
+ unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
if (cr4 & CR4_RESERVED_BITS) {
@@ -575,9 +626,11 @@ static inline u32 bit(int bitno)
* kvm-specific. Those are put in the beginning of the list.
*/
-#define KVM_SAVE_MSRS_BEGIN 2
+#define KVM_SAVE_MSRS_BEGIN 5
static u32 msrs_to_save[] = {
MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+ HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
+ HV_X64_MSR_APIC_ASSIST_PAGE,
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
MSR_K6_STAR,
#ifdef CONFIG_X86_64
@@ -602,7 +655,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
}
if (is_paging(vcpu)
- && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
+ && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
kvm_inject_gp(vcpu, 0);
return;
@@ -633,9 +686,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
kvm_x86_ops->set_efer(vcpu, efer);
efer &= ~EFER_LMA;
- efer |= vcpu->arch.shadow_efer & EFER_LMA;
+ efer |= vcpu->arch.efer & EFER_LMA;
- vcpu->arch.shadow_efer = efer;
+ vcpu->arch.efer = efer;
vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
kvm_mmu_reset_context(vcpu);
@@ -957,6 +1010,100 @@ out:
return r;
}
+static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
+{
+ return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
+}
+
+static bool kvm_hv_msr_partition_wide(u32 msr)
+{
+ bool r = false;
+ switch (msr) {
+ case HV_X64_MSR_GUEST_OS_ID:
+ case HV_X64_MSR_HYPERCALL:
+ r = true;
+ break;
+ }
+
+ return r;
+}
+
+static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ switch (msr) {
+ case HV_X64_MSR_GUEST_OS_ID:
+ kvm->arch.hv_guest_os_id = data;
+ /* setting the guest OS ID to zero disables the hypercall page */
+ if (!kvm->arch.hv_guest_os_id)
+ kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
+ break;
+ case HV_X64_MSR_HYPERCALL: {
+ u64 gfn;
+ unsigned long addr;
+ u8 instructions[4];
+
+ /* if the guest OS ID is not set, the hypercall page stays disabled */
+ if (!kvm->arch.hv_guest_os_id)
+ break;
+ if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
+ kvm->arch.hv_hypercall = data;
+ break;
+ }
+ gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
+ addr = gfn_to_hva(kvm, gfn);
+ if (kvm_is_error_hva(addr))
+ return 1;
+ kvm_x86_ops->patch_hypercall(vcpu, instructions);
+ ((unsigned char *)instructions)[3] = 0xc3; /* ret */
+ if (copy_to_user((void __user *)addr, instructions, 4))
+ return 1;
+ kvm->arch.hv_hypercall = data;
+ break;
+ }
+ default:
+ pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
+ "data 0x%llx\n", msr, data);
+ return 1;
+ }
+ return 0;
+}
+
+static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+ switch (msr) {
+ case HV_X64_MSR_APIC_ASSIST_PAGE: {
+ unsigned long addr;
+
+ if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
+ vcpu->arch.hv_vapic = data;
+ break;
+ }
+ addr = gfn_to_hva(vcpu->kvm, data >>
+ HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
+ if (kvm_is_error_hva(addr))
+ return 1;
+ if (clear_user((void __user *)addr, PAGE_SIZE))
+ return 1;
+ vcpu->arch.hv_vapic = data;
+ break;
+ }
+ case HV_X64_MSR_EOI:
+ return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
+ case HV_X64_MSR_ICR:
+ return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
+ case HV_X64_MSR_TPR:
+ return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
+ default:
+ pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
+ "data 0x%llx\n", msr, data);
+ return 1;
+ }
+
+ return 0;
+}
+
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
switch (msr) {
@@ -1071,6 +1218,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
+ case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
+ if (kvm_hv_msr_partition_wide(msr)) {
+ int r;
+ mutex_lock(&vcpu->kvm->lock);
+ r = set_msr_hyperv_pw(vcpu, msr, data);
+ mutex_unlock(&vcpu->kvm->lock);
+ return r;
+ } else
+ return set_msr_hyperv(vcpu, msr, data);
+ break;
default:
if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
return xen_hvm_config(vcpu, data);
@@ -1170,6 +1327,54 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
return 0;
}
+static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+ u64 data = 0;
+ struct kvm *kvm = vcpu->kvm;
+
+ switch (msr) {
+ case HV_X64_MSR_GUEST_OS_ID:
+ data = kvm->arch.hv_guest_os_id;
+ break;
+ case HV_X64_MSR_HYPERCALL:
+ data = kvm->arch.hv_hypercall;
+ break;
+ default:
+ pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+ return 1;
+ }
+
+ *pdata = data;
+ return 0;
+}
+
+static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+ u64 data = 0;
+
+ switch (msr) {
+ case HV_X64_MSR_VP_INDEX: {
+ int r;
+ struct kvm_vcpu *v;
+ kvm_for_each_vcpu(r, v, vcpu->kvm)
+ if (v == vcpu)
+ data = r;
+ break;
+ }
+ case HV_X64_MSR_EOI:
+ return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
+ case HV_X64_MSR_ICR:
+ return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
+ case HV_X64_MSR_TPR:
+ return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
+ default:
+ pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+ return 1;
+ }
+ *pdata = data;
+ return 0;
+}
+
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
u64 data;
@@ -1221,7 +1426,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
data |= (((uint64_t)4ULL) << 40);
break;
case MSR_EFER:
- data = vcpu->arch.shadow_efer;
+ data = vcpu->arch.efer;
break;
case MSR_KVM_WALL_CLOCK:
data = vcpu->kvm->arch.wall_clock;
@@ -1236,6 +1441,16 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
return get_msr_mce(vcpu, msr, pdata);
+ case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
+ if (kvm_hv_msr_partition_wide(msr)) {
+ int r;
+ mutex_lock(&vcpu->kvm->lock);
+ r = get_msr_hyperv_pw(vcpu, msr, pdata);
+ mutex_unlock(&vcpu->kvm->lock);
+ return r;
+ } else
+ return get_msr_hyperv(vcpu, msr, pdata);
+ break;
default:
if (!ignore_msrs) {
pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
@@ -1261,15 +1476,15 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
int (*do_msr)(struct kvm_vcpu *vcpu,
unsigned index, u64 *data))
{
- int i;
+ int i, idx;
vcpu_load(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
for (i = 0; i < msrs->nmsrs; ++i)
if (do_msr(vcpu, entries[i].index, &entries[i].data))
break;
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu_put(vcpu);
@@ -1351,6 +1566,11 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_XEN_HVM:
case KVM_CAP_ADJUST_CLOCK:
case KVM_CAP_VCPU_EVENTS:
+ case KVM_CAP_HYPERV:
+ case KVM_CAP_HYPERV_VAPIC:
+ case KVM_CAP_HYPERV_SPIN:
+ case KVM_CAP_PCI_SEGMENT:
+ case KVM_CAP_X86_ROBUST_SINGLESTEP:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -1464,8 +1684,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
- kvm_x86_ops->vcpu_put(vcpu);
kvm_put_guest_fpu(vcpu);
+ kvm_x86_ops->vcpu_put(vcpu);
}
static int is_efer_nx(void)
@@ -1530,6 +1750,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
cpuid_fix_nx_cap(vcpu);
r = 0;
kvm_apic_set_version(vcpu);
+ kvm_x86_ops->cpuid_update(vcpu);
out_free:
vfree(cpuid_entries);
@@ -1552,6 +1773,7 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
goto out;
vcpu->arch.cpuid_nent = cpuid->nent;
kvm_apic_set_version(vcpu);
+ kvm_x86_ops->cpuid_update(vcpu);
return 0;
out:
@@ -1594,12 +1816,15 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
u32 index, int *nent, int maxnent)
{
unsigned f_nx = is_efer_nx() ? F(NX) : 0;
- unsigned f_gbpages = kvm_x86_ops->gb_page_enable() ? F(GBPAGES) : 0;
#ifdef CONFIG_X86_64
+ unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
+ ? F(GBPAGES) : 0;
unsigned f_lm = F(LM);
#else
+ unsigned f_gbpages = 0;
unsigned f_lm = 0;
#endif
+ unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
/* cpuid 1.edx */
const u32 kvm_supported_word0_x86_features =
@@ -1619,7 +1844,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
F(PAT) | F(PSE36) | 0 /* Reserved */ |
f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
- F(FXSR) | F(FXSR_OPT) | f_gbpages | 0 /* RDTSCP */ |
+ F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
/* cpuid 1.ecx */
const u32 kvm_supported_word4_x86_features =
@@ -1866,7 +2091,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
return 0;
if (mce->status & MCI_STATUS_UC) {
if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
- !(vcpu->arch.cr4 & X86_CR4_MCE)) {
+ !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
printk(KERN_DEBUG "kvm: set_mce: "
"injects mce exception while "
"previous one is in progress!\n");
@@ -2160,14 +2385,14 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
return -EINVAL;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
spin_lock(&kvm->mmu_lock);
kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
spin_unlock(&kvm->mmu_lock);
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return 0;
}
@@ -2176,13 +2401,35 @@ static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
return kvm->arch.n_alloc_mmu_pages;
}
+gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
+{
+ int i;
+ struct kvm_mem_alias *alias;
+ struct kvm_mem_aliases *aliases;
+
+ aliases = rcu_dereference(kvm->arch.aliases);
+
+ for (i = 0; i < aliases->naliases; ++i) {
+ alias = &aliases->aliases[i];
+ if (alias->flags & KVM_ALIAS_INVALID)
+ continue;
+ if (gfn >= alias->base_gfn
+ && gfn < alias->base_gfn + alias->npages)
+ return alias->target_gfn + gfn - alias->base_gfn;
+ }
+ return gfn;
+}
+
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
int i;
struct kvm_mem_alias *alias;
+ struct kvm_mem_aliases *aliases;
- for (i = 0; i < kvm->arch.naliases; ++i) {
- alias = &kvm->arch.aliases[i];
+ aliases = rcu_dereference(kvm->arch.aliases);
+
+ for (i = 0; i < aliases->naliases; ++i) {
+ alias = &aliases->aliases[i];
if (gfn >= alias->base_gfn
&& gfn < alias->base_gfn + alias->npages)
return alias->target_gfn + gfn - alias->base_gfn;
@@ -2200,6 +2447,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
{
int r, n;
struct kvm_mem_alias *p;
+ struct kvm_mem_aliases *aliases, *old_aliases;
r = -EINVAL;
/* General sanity checks */
@@ -2216,26 +2464,48 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
< alias->target_phys_addr)
goto out;
- down_write(&kvm->slots_lock);
- spin_lock(&kvm->mmu_lock);
+ r = -ENOMEM;
+ aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+ if (!aliases)
+ goto out;
+
+ mutex_lock(&kvm->slots_lock);
- p = &kvm->arch.aliases[alias->slot];
+ /* invalidate any gfn reference in case of deletion/shrinking */
+ memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
+ aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
+ old_aliases = kvm->arch.aliases;
+ rcu_assign_pointer(kvm->arch.aliases, aliases);
+ synchronize_srcu_expedited(&kvm->srcu);
+ kvm_mmu_zap_all(kvm);
+ kfree(old_aliases);
+
+ r = -ENOMEM;
+ aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+ if (!aliases)
+ goto out_unlock;
+
+ memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
+
+ p = &aliases->aliases[alias->slot];
p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
p->npages = alias->memory_size >> PAGE_SHIFT;
p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
+ p->flags &= ~(KVM_ALIAS_INVALID);
for (n = KVM_ALIAS_SLOTS; n > 0; --n)
- if (kvm->arch.aliases[n - 1].npages)
+ if (aliases->aliases[n - 1].npages)
break;
- kvm->arch.naliases = n;
+ aliases->naliases = n;
- spin_unlock(&kvm->mmu_lock);
- kvm_mmu_zap_all(kvm);
-
- up_write(&kvm->slots_lock);
-
- return 0;
+ old_aliases = kvm->arch.aliases;
+ rcu_assign_pointer(kvm->arch.aliases, aliases);
+ synchronize_srcu_expedited(&kvm->srcu);
+ kfree(old_aliases);
+ r = 0;
+out_unlock:
+ mutex_unlock(&kvm->slots_lock);
out:
return r;
}
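The alias update now follows the copy/publish/reclaim sequence that the SRCU conversion uses throughout this patch: duplicate the table, modify the copy, swing the pointer, wait out old readers, free the old table. A skeleton of that sequence with the RCU primitives stubbed so it builds standalone:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct table { int naliases; int data[8]; };

static struct table *current_table;

static void synchronize_readers(void)
{
        /* stand-in for synchronize_srcu(); no readers in this toy */
}

static int update_table(int slot, int val)
{
        struct table *fresh = malloc(sizeof(*fresh));
        struct table *old;

        if (!fresh)
                return -1;
        memcpy(fresh, current_table, sizeof(*fresh));   /* copy */
        fresh->data[slot] = val;                        /* modify the copy */
        old = current_table;
        current_table = fresh;          /* publish (rcu_assign_pointer) */
        synchronize_readers();          /* wait for old readers to drain */
        free(old);                      /* reclaim */
        return 0;
}

int main(void)
{
        current_table = calloc(1, sizeof(*current_table));
        if (!current_table)
                return 1;
        update_table(2, 42);
        printf("%d\n", current_table->data[2]);
        free(current_table);
        return 0;
}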
@@ -2273,18 +2543,18 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
r = 0;
switch (chip->chip_id) {
case KVM_IRQCHIP_PIC_MASTER:
- spin_lock(&pic_irqchip(kvm)->lock);
+ raw_spin_lock(&pic_irqchip(kvm)->lock);
memcpy(&pic_irqchip(kvm)->pics[0],
&chip->chip.pic,
sizeof(struct kvm_pic_state));
- spin_unlock(&pic_irqchip(kvm)->lock);
+ raw_spin_unlock(&pic_irqchip(kvm)->lock);
break;
case KVM_IRQCHIP_PIC_SLAVE:
- spin_lock(&pic_irqchip(kvm)->lock);
+ raw_spin_lock(&pic_irqchip(kvm)->lock);
memcpy(&pic_irqchip(kvm)->pics[1],
&chip->chip.pic,
sizeof(struct kvm_pic_state));
- spin_unlock(&pic_irqchip(kvm)->lock);
+ raw_spin_unlock(&pic_irqchip(kvm)->lock);
break;
case KVM_IRQCHIP_IOAPIC:
r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
@@ -2364,29 +2634,62 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{
- int r;
- int n;
+ int r, n, i;
struct kvm_memory_slot *memslot;
- int is_dirty = 0;
+ unsigned long is_dirty = 0;
+ unsigned long *dirty_bitmap = NULL;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
- r = kvm_get_dirty_log(kvm, log, &is_dirty);
- if (r)
+ r = -EINVAL;
+ if (log->slot >= KVM_MEMORY_SLOTS)
+ goto out;
+
+ memslot = &kvm->memslots->memslots[log->slot];
+ r = -ENOENT;
+ if (!memslot->dirty_bitmap)
+ goto out;
+
+ n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+
+ r = -ENOMEM;
+ dirty_bitmap = vmalloc(n);
+ if (!dirty_bitmap)
goto out;
+ memset(dirty_bitmap, 0, n);
+
+ for (i = 0; !is_dirty && i < n/sizeof(long); i++)
+ is_dirty = memslot->dirty_bitmap[i];
/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
+ struct kvm_memslots *slots, *old_slots;
+
spin_lock(&kvm->mmu_lock);
kvm_mmu_slot_remove_write_access(kvm, log->slot);
spin_unlock(&kvm->mmu_lock);
- memslot = &kvm->memslots[log->slot];
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
- memset(memslot->dirty_bitmap, 0, n);
+
+ slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!slots)
+ goto out_free;
+
+ memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+ slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
+
+ old_slots = kvm->memslots;
+ rcu_assign_pointer(kvm->memslots, slots);
+ synchronize_srcu_expedited(&kvm->srcu);
+ dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
+ kfree(old_slots);
}
+
r = 0;
+ if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
+ r = -EFAULT;
+out_free:
+ vfree(dirty_bitmap);
out:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}
@@ -2469,6 +2772,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
if (vpic) {
r = kvm_ioapic_init(kvm);
if (r) {
+ kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
+ &vpic->dev);
kfree(vpic);
goto create_irqchip_unlock;
}
@@ -2480,10 +2785,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_setup_default_irq_routing(kvm);
if (r) {
mutex_lock(&kvm->irq_lock);
- kfree(kvm->arch.vpic);
- kfree(kvm->arch.vioapic);
- kvm->arch.vpic = NULL;
- kvm->arch.vioapic = NULL;
+ kvm_ioapic_destroy(kvm);
+ kvm_destroy_pic(kvm);
mutex_unlock(&kvm->irq_lock);
}
create_irqchip_unlock:
@@ -2499,7 +2802,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
sizeof(struct kvm_pit_config)))
goto out;
create_pit:
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
r = -EEXIST;
if (kvm->arch.vpit)
goto create_pit_unlock;
@@ -2508,7 +2811,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
if (kvm->arch.vpit)
r = 0;
create_pit_unlock:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
break;
case KVM_IRQ_LINE_STATUS:
case KVM_IRQ_LINE: {
@@ -2725,7 +3028,7 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
!kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
return 0;
- return kvm_io_bus_write(&vcpu->kvm->mmio_bus, addr, len, v);
+ return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
}
static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
@@ -2734,17 +3037,44 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
!kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
return 0;
- return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
+ return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
}
-static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
- struct kvm_vcpu *vcpu)
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+}
+
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ access |= PFERR_FETCH_MASK;
+ return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+}
+
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ access |= PFERR_WRITE_MASK;
+ return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+}
+
+/* used to access any of the guest's mapped memory without checking CPL */
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+ return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
+}
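The four helpers above differ only in the access mask handed to the MMU walker: PFERR_USER_MASK when the CPL is 3, plus a write or fetch bit, or nothing at all for system accesses. A tiny sketch of that composition, with the mask values taken from the x86 page-fault error code:

#include <stdio.h>

#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK  (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

static unsigned access_mask(int cpl, int write, int fetch)
{
        unsigned access = (cpl == 3) ? PFERR_USER_MASK : 0;

        if (write)
                access |= PFERR_WRITE_MASK;
        if (fetch)
                access |= PFERR_FETCH_MASK;
        return access;
}

int main(void)
{
        printf("user write:   0x%x\n", access_mask(3, 1, 0));   /* 0x6 */
        printf("kernel fetch: 0x%x\n", access_mask(0, 0, 1));   /* 0x10 */
        return 0;
}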
+
+static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
+ struct kvm_vcpu *vcpu, u32 access,
+ u32 *error)
{
void *data = val;
int r = X86EMUL_CONTINUE;
while (bytes) {
- gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+ gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
@@ -2767,14 +3097,37 @@ out:
return r;
}
+/* used for instruction fetching */
+static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
+ struct kvm_vcpu *vcpu, u32 *error)
+{
+ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
+ access | PFERR_FETCH_MASK, error);
+}
+
+static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
+ struct kvm_vcpu *vcpu, u32 *error)
+{
+ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
+ error);
+}
+
+static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
+ struct kvm_vcpu *vcpu, u32 *error)
+{
+ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
+}
+
static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
- struct kvm_vcpu *vcpu)
+ struct kvm_vcpu *vcpu, u32 *error)
{
void *data = val;
int r = X86EMUL_CONTINUE;
while (bytes) {
- gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+ gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
@@ -2804,6 +3157,7 @@ static int emulator_read_emulated(unsigned long addr,
struct kvm_vcpu *vcpu)
{
gpa_t gpa;
+ u32 error_code;
if (vcpu->mmio_read_completed) {
memcpy(val, vcpu->mmio_data, bytes);
@@ -2813,17 +3167,20 @@ static int emulator_read_emulated(unsigned long addr,
return X86EMUL_CONTINUE;
}
- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+ gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
+
+ if (gpa == UNMAPPED_GVA) {
+ kvm_inject_page_fault(vcpu, addr, error_code);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
/* For APIC access vmexit */
if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
goto mmio;
- if (kvm_read_guest_virt(addr, val, bytes, vcpu)
+ if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
== X86EMUL_CONTINUE)
return X86EMUL_CONTINUE;
- if (gpa == UNMAPPED_GVA)
- return X86EMUL_PROPAGATE_FAULT;
mmio:
/*
@@ -2862,11 +3219,12 @@ static int emulator_write_emulated_onepage(unsigned long addr,
struct kvm_vcpu *vcpu)
{
gpa_t gpa;
+ u32 error_code;
- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+ gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
if (gpa == UNMAPPED_GVA) {
- kvm_inject_page_fault(vcpu, addr, 2);
+ kvm_inject_page_fault(vcpu, addr, error_code);
return X86EMUL_PROPAGATE_FAULT;
}
@@ -2930,7 +3288,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
char *kaddr;
u64 val;
- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+ gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
if (gpa == UNMAPPED_GVA ||
(gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -2967,35 +3325,21 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
int emulate_clts(struct kvm_vcpu *vcpu)
{
- kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
+ kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+ kvm_x86_ops->fpu_activate(vcpu);
return X86EMUL_CONTINUE;
}
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
- struct kvm_vcpu *vcpu = ctxt->vcpu;
-
- switch (dr) {
- case 0 ... 3:
- *dest = kvm_x86_ops->get_dr(vcpu, dr);
- return X86EMUL_CONTINUE;
- default:
- pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
- return X86EMUL_UNHANDLEABLE;
- }
+ return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
}
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
- int exception;
- kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
- if (exception) {
- /* FIXME: better handling */
- return X86EMUL_UNHANDLEABLE;
- }
- return X86EMUL_CONTINUE;
+ return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
}
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
@@ -3009,7 +3353,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
- kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
+ kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL);
printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
@@ -3017,7 +3361,8 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
static struct x86_emulate_ops emulate_ops = {
- .read_std = kvm_read_guest_virt,
+ .read_std = kvm_read_guest_virt_system,
+ .fetch = kvm_fetch_guest_virt,
.read_emulated = emulator_read_emulated,
.write_emulated = emulator_write_emulated,
.cmpxchg_emulated = emulator_cmpxchg_emulated,
@@ -3060,8 +3405,9 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
vcpu->arch.emulate_ctxt.vcpu = vcpu;
vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
vcpu->arch.emulate_ctxt.mode =
+ (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
- ? X86EMUL_MODE_REAL : cs_l
+ ? X86EMUL_MODE_VM86 : cs_l
? X86EMUL_MODE_PROT64 : cs_db
? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
@@ -3153,12 +3499,17 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
gva_t q = vcpu->arch.pio.guest_gva;
unsigned bytes;
int ret;
+ u32 error_code;
bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
if (vcpu->arch.pio.in)
- ret = kvm_write_guest_virt(q, p, bytes, vcpu);
+ ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
else
- ret = kvm_read_guest_virt(q, p, bytes, vcpu);
+ ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
+
+ if (ret == X86EMUL_PROPAGATE_FAULT)
+ kvm_inject_page_fault(vcpu, q, error_code);
+
return ret;
}
@@ -3179,7 +3530,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
if (io->in) {
r = pio_copy_data(vcpu);
if (r)
- return r;
+ goto out;
}
delta = 1;
@@ -3206,7 +3557,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
kvm_register_write(vcpu, VCPU_REGS_RSI, val);
}
}
-
+out:
io->count -= io->cur_count;
io->cur_count = 0;
@@ -3219,11 +3570,12 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
int r;
if (vcpu->arch.pio.in)
- r = kvm_io_bus_read(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
+ r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
vcpu->arch.pio.size, pd);
else
- r = kvm_io_bus_write(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
- vcpu->arch.pio.size, pd);
+ r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
+ vcpu->arch.pio.port, vcpu->arch.pio.size,
+ pd);
return r;
}
@@ -3234,7 +3586,7 @@ static int pio_string_write(struct kvm_vcpu *vcpu)
int i, r = 0;
for (i = 0; i < io->cur_count; i++) {
- if (kvm_io_bus_write(&vcpu->kvm->pio_bus,
+ if (kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
io->port, io->size, pd)) {
r = -EOPNOTSUPP;
break;
@@ -3248,6 +3600,8 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
{
unsigned long val;
+ trace_kvm_pio(!in, port, size, 1);
+
vcpu->run->exit_reason = KVM_EXIT_IO;
vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
vcpu->run->io.size = vcpu->arch.pio.size = size;
@@ -3259,11 +3613,10 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
vcpu->arch.pio.down = 0;
vcpu->arch.pio.rep = 0;
- trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
- size, 1);
-
- val = kvm_register_read(vcpu, VCPU_REGS_RAX);
- memcpy(vcpu->arch.pio_data, &val, 4);
+ if (!vcpu->arch.pio.in) {
+ val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ memcpy(vcpu->arch.pio_data, &val, 4);
+ }
if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
complete_pio(vcpu);
@@ -3280,6 +3633,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
unsigned now, in_page;
int ret = 0;
+ trace_kvm_pio(!in, port, size, count);
+
vcpu->run->exit_reason = KVM_EXIT_IO;
vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
vcpu->run->io.size = vcpu->arch.pio.size = size;
@@ -3291,9 +3646,6 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
vcpu->arch.pio.down = down;
vcpu->arch.pio.rep = rep;
- trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
- size, count);
-
if (!count) {
kvm_x86_ops->skip_emulated_instruction(vcpu);
return 1;
@@ -3325,10 +3677,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
if (!vcpu->arch.pio.in) {
/* string PIO write */
ret = pio_copy_data(vcpu);
- if (ret == X86EMUL_PROPAGATE_FAULT) {
- kvm_inject_gp(vcpu, 0);
+ if (ret == X86EMUL_PROPAGATE_FAULT)
return 1;
- }
if (ret == 0 && !pio_string_write(vcpu)) {
complete_pio(vcpu);
if (vcpu->arch.pio.count == 0)
@@ -3487,11 +3837,76 @@ static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
return a0 | ((gpa_t)a1 << 32);
}
+int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
+{
+ u64 param, ingpa, outgpa, ret;
+ uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
+ bool fast, longmode;
+ int cs_db, cs_l;
+
+ /*
+ * Per the Hyper-V spec, a hypercall raises #UD when issued from
+ * non-zero CPL or from real mode.
+ */
+ if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 0;
+ }
+
+ kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+ longmode = is_long_mode(vcpu) && cs_l == 1;
+
+ if (!longmode) {
+ param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
+ (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
+ ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
+ (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
+ outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
+ (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
+ }
+#ifdef CONFIG_X86_64
+ else {
+ param = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
+ outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
+ }
+#endif
+
+ code = param & 0xffff;
+ fast = (param >> 16) & 0x1;
+ rep_cnt = (param >> 32) & 0xfff;
+ rep_idx = (param >> 48) & 0xfff;
+
+ trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
+
+ switch (code) {
+ case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
+ kvm_vcpu_on_spin(vcpu);
+ break;
+ default:
+ res = HV_STATUS_INVALID_HYPERCALL_CODE;
+ break;
+ }
+
+ ret = res | (((u64)rep_done & 0xfff) << 32);
+ if (longmode) {
+ kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
+ } else {
+ kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
+ kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
+ }
+
+ return 1;
+}
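kvm_hv_hypercall() pulls the whole request out of one 64-bit input value: the call code sits in bits 15:0, the fast-call flag in bit 16, the rep count in bits 43:32 and the rep start index in bits 59:48. A standalone decoder for that layout:

#include <stdint.h>
#include <stdio.h>

struct hv_call {
        uint16_t code;
        int fast;
        uint16_t rep_cnt, rep_idx;
};

static struct hv_call decode(uint64_t param)
{
        struct hv_call c = {
                .code    = param & 0xffff,
                .fast    = (param >> 16) & 0x1,
                .rep_cnt = (param >> 32) & 0xfff,
                .rep_idx = (param >> 48) & 0xfff,
        };
        return c;
}

int main(void)
{
        struct hv_call c = decode(0x0002000300010008ULL);

        printf("code=%u fast=%d rep_cnt=%u rep_idx=%u\n",
               c.code, c.fast, c.rep_cnt, c.rep_idx);
        return 0;
}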
+
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
unsigned long nr, a0, a1, a2, a3, ret;
int r = 1;
+ if (kvm_hv_hypercall_enabled(vcpu->kvm))
+ return kvm_hv_hypercall(vcpu);
+
nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
@@ -3534,10 +3949,8 @@ EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
char instruction[3];
- int ret = 0;
unsigned long rip = kvm_rip_read(vcpu);
-
/*
* Blow out the MMU to ensure that no other VCPU has an active mapping
* to ensure that the updated hypercall appears atomically across all
@@ -3546,11 +3959,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
kvm_mmu_zap_all(vcpu->kvm);
kvm_x86_ops->patch_hypercall(vcpu, instruction);
- if (emulator_write_emulated(rip, instruction, 3, vcpu)
- != X86EMUL_CONTINUE)
- ret = -EFAULT;
- return ret;
+ return emulator_write_emulated(rip, instruction, 3, vcpu);
}
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
@@ -3583,10 +3993,9 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
unsigned long value;
- kvm_x86_ops->decache_cr4_guest_bits(vcpu);
switch (cr) {
case 0:
- value = vcpu->arch.cr0;
+ value = kvm_read_cr0(vcpu);
break;
case 2:
value = vcpu->arch.cr2;
@@ -3595,7 +4004,7 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
value = vcpu->arch.cr3;
break;
case 4:
- value = vcpu->arch.cr4;
+ value = kvm_read_cr4(vcpu);
break;
case 8:
value = kvm_get_cr8(vcpu);
@@ -3613,7 +4022,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
{
switch (cr) {
case 0:
- kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
+ kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
*rflags = kvm_get_rflags(vcpu);
break;
case 2:
@@ -3623,7 +4032,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
kvm_set_cr3(vcpu, val);
break;
case 4:
- kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
+ kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
break;
case 8:
kvm_set_cr8(vcpu, val & 0xfUL);
@@ -3690,6 +4099,7 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
}
return best;
}
+EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
@@ -3773,14 +4183,15 @@ static void vapic_enter(struct kvm_vcpu *vcpu)
static void vapic_exit(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
+ int idx;
if (!apic || !apic->vapic_addr)
return;
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_release_page_dirty(apic->vapic_page);
mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -3876,12 +4287,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
r = 0;
goto out;
}
+ if (test_and_clear_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests)) {
+ vcpu->fpu_active = 0;
+ kvm_x86_ops->fpu_deactivate(vcpu);
+ }
}
preempt_disable();
kvm_x86_ops->prepare_guest_switch(vcpu);
- kvm_load_guest_fpu(vcpu);
+ if (vcpu->fpu_active)
+ kvm_load_guest_fpu(vcpu);
local_irq_disable();
@@ -3909,7 +4325,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_lapic_sync_to_vapic(vcpu);
}
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm_guest_enter();
@@ -3951,7 +4367,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
preempt_enable();
- down_read(&vcpu->kvm->slots_lock);
+ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
/*
* Profile KVM exit RIPs:
@@ -3973,6 +4389,7 @@ out:
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
+ struct kvm *kvm = vcpu->kvm;
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
pr_debug("vcpu %d received sipi with vector # %x\n",
@@ -3984,7 +4401,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
- down_read(&vcpu->kvm->slots_lock);
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
vapic_enter(vcpu);
r = 1;
@@ -3992,9 +4409,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
r = vcpu_enter_guest(vcpu);
else {
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_block(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
{
switch(vcpu->arch.mp_state) {
@@ -4029,13 +4446,13 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
++vcpu->stat.signal_exits;
}
if (need_resched()) {
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_resched(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
}
}
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
post_kvm_run_save(vcpu);
vapic_exit(vcpu);
@@ -4074,10 +4491,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->mmio_read_completed = 1;
vcpu->mmio_needed = 0;
- down_read(&vcpu->kvm->slots_lock);
+ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
EMULTYPE_NO_DECODE);
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (r == EMULATE_DO_MMIO) {
/*
* Read-modify-write. Back to userspace.
@@ -4204,13 +4621,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
sregs->gdt.limit = dt.limit;
sregs->gdt.base = dt.base;
- kvm_x86_ops->decache_cr4_guest_bits(vcpu);
- sregs->cr0 = vcpu->arch.cr0;
+ sregs->cr0 = kvm_read_cr0(vcpu);
sregs->cr2 = vcpu->arch.cr2;
sregs->cr3 = vcpu->arch.cr3;
- sregs->cr4 = vcpu->arch.cr4;
+ sregs->cr4 = kvm_read_cr4(vcpu);
sregs->cr8 = kvm_get_cr8(vcpu);
- sregs->efer = vcpu->arch.shadow_efer;
+ sregs->efer = vcpu->arch.efer;
sregs->apic_base = kvm_get_apic_base(vcpu);
memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
@@ -4298,14 +4714,23 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
{
struct descriptor_table dtable;
u16 index = selector >> 3;
+ int ret;
+ u32 err;
+ gva_t addr;
get_segment_descriptor_dtable(vcpu, selector, &dtable);
if (dtable.limit < index * 8 + 7) {
kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
- return 1;
+ return X86EMUL_PROPAGATE_FAULT;
}
- return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
+ addr = dtable.base + index * 8;
+ ret = kvm_read_guest_virt_system(addr, seg_desc, sizeof(*seg_desc),
+ vcpu, &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT)
+ kvm_inject_page_fault(vcpu, addr, err);
+
+ return ret;
}
/* allowed just for 8 bytes segments */
@@ -4319,15 +4744,23 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
if (dtable.limit < index * 8 + 7)
return 1;
- return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
+ return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
+}
+
+static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
+ struct desc_struct *seg_desc)
+{
+ u32 base_addr = get_desc_base(seg_desc);
+
+ return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
}
-static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
+static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
struct desc_struct *seg_desc)
{
u32 base_addr = get_desc_base(seg_desc);
- return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
+ return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
}
static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
@@ -4338,18 +4771,6 @@ static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
return kvm_seg.selector;
}
-static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
- u16 selector,
- struct kvm_segment *kvm_seg)
-{
- struct desc_struct seg_desc;
-
- if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
- return 1;
- seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
- return 0;
-}
-
static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
{
struct kvm_segment segvar = {
@@ -4367,7 +4788,7 @@ static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int se
.unusable = 0,
};
kvm_x86_ops->set_segment(vcpu, &segvar, seg);
- return 0;
+ return X86EMUL_CONTINUE;
}
static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
@@ -4377,24 +4798,112 @@ static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
(kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
}
-int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
- int type_bits, int seg)
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg)
{
struct kvm_segment kvm_seg;
+ struct desc_struct seg_desc;
+ u8 dpl, rpl, cpl;
+ unsigned err_vec = GP_VECTOR;
+ u32 err_code = 0;
+ bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
+ int ret;
- if (is_vm86_segment(vcpu, seg) || !(vcpu->arch.cr0 & X86_CR0_PE))
+ if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu))
return kvm_load_realmode_segment(vcpu, selector, seg);
- if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
- return 1;
- kvm_seg.type |= type_bits;
- if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
- seg != VCPU_SREG_LDTR)
- if (!kvm_seg.s)
- kvm_seg.unusable = 1;
+ /* NULL selector is not valid for TR, CS and SS */
+ if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
+ && null_selector)
+ goto exception;
+
+ /* TR should be in GDT only */
+ if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
+ goto exception;
+
+ ret = load_guest_segment_descriptor(vcpu, selector, &seg_desc);
+ if (ret)
+ return ret;
+
+ seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg);
+
+ if (null_selector) { /* for NULL selector skip all following checks */
+ kvm_seg.unusable = 1;
+ goto load;
+ }
+
+ err_code = selector & 0xfffc;
+ err_vec = GP_VECTOR;
+ /* can't load a system descriptor into a segment register */
+ if (seg <= VCPU_SREG_GS && !kvm_seg.s)
+ goto exception;
+
+ if (!kvm_seg.present) {
+ err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
+ goto exception;
+ }
+
+ rpl = selector & 3;
+ dpl = kvm_seg.dpl;
+ cpl = kvm_x86_ops->get_cpl(vcpu);
+
+ switch (seg) {
+ case VCPU_SREG_SS:
+ /*
+ * segment is not a writable data segment, or the segment
+ * selector's RPL != CPL, or the descriptor's DPL != CPL
+ */
+ if (rpl != cpl || (kvm_seg.type & 0xa) != 0x2 || dpl != cpl)
+ goto exception;
+ break;
+ case VCPU_SREG_CS:
+ if (!(kvm_seg.type & 8))
+ goto exception;
+
+ if (kvm_seg.type & 4) {
+ /* conforming */
+ if (dpl > cpl)
+ goto exception;
+ } else {
+ /* nonconforming */
+ if (rpl > cpl || dpl != cpl)
+ goto exception;
+ }
+ /* CS(RPL) <- CPL */
+ selector = (selector & 0xfffc) | cpl;
+ break;
+ case VCPU_SREG_TR:
+ if (kvm_seg.s || (kvm_seg.type != 1 && kvm_seg.type != 9))
+ goto exception;
+ break;
+ case VCPU_SREG_LDTR:
+ if (kvm_seg.s || kvm_seg.type != 2)
+ goto exception;
+ break;
+ default: /* DS, ES, FS, or GS */
+ /*
+ * segment is not a data or readable code segment or
+ * ((segment is a data or nonconforming code segment)
+ * and (both RPL and CPL > DPL))
+ */
+ if ((kvm_seg.type & 0xa) == 0x8 ||
+ (((kvm_seg.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl)))
+ goto exception;
+ break;
+ }
+
+ if (!kvm_seg.unusable && kvm_seg.s) {
+ /* mark segment as accessed */
+ kvm_seg.type |= 1;
+ seg_desc.type |= 1;
+ save_guest_segment_descriptor(vcpu, selector, &seg_desc);
+ }
+load:
kvm_set_segment(vcpu, &kvm_seg, seg);
- return 0;
+ return X86EMUL_CONTINUE;
+exception:
+ kvm_queue_exception_e(vcpu, err_vec, err_code);
+ return X86EMUL_PROPAGATE_FAULT;
}
static void save_state_to_tss32(struct kvm_vcpu *vcpu,
@@ -4420,6 +4929,14 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu,
tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
}
+static void kvm_load_segment_selector(struct kvm_vcpu *vcpu, u16 sel, int seg)
+{
+ struct kvm_segment kvm_seg;
+ kvm_get_segment(vcpu, &kvm_seg, seg);
+ kvm_seg.selector = sel;
+ kvm_set_segment(vcpu, &kvm_seg, seg);
+}
+
static int load_state_from_tss32(struct kvm_vcpu *vcpu,
struct tss_segment_32 *tss)
{
@@ -4437,25 +4954,41 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
- if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
+ /*
+ * SDM says that segment selectors are loaded before segment
+ * descriptors
+ */
+ kvm_load_segment_selector(vcpu, tss->ldt_selector, VCPU_SREG_LDTR);
+ kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
+ kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
+ kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
+ kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
+ kvm_load_segment_selector(vcpu, tss->fs, VCPU_SREG_FS);
+ kvm_load_segment_selector(vcpu, tss->gs, VCPU_SREG_GS);
+
+ /*
+ * Now load the segment descriptors. If a fault happens at this
+ * stage it is handled in the context of the new task.
+ */
+ if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, VCPU_SREG_LDTR))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+ if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+ if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+ if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+ if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
+ if (kvm_load_segment_descriptor(vcpu, tss->fs, VCPU_SREG_FS))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
+ if (kvm_load_segment_descriptor(vcpu, tss->gs, VCPU_SREG_GS))
return 1;
return 0;
}
@@ -4495,19 +5028,33 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
- if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
+ /*
+ * SDM says that segment selectors are loaded before segment
+ * descriptors
+ */
+ kvm_load_segment_selector(vcpu, tss->ldt, VCPU_SREG_LDTR);
+ kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
+ kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
+ kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
+ kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
+
+ /*
+ * Now load the segment descriptors. If a fault happens at this
+ * stage it is handled in the context of the new task.
+ */
+ if (kvm_load_segment_descriptor(vcpu, tss->ldt, VCPU_SREG_LDTR))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+ if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+ if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+ if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+ if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
return 1;
return 0;
}
@@ -4529,7 +5076,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
sizeof tss_segment_16))
goto out;
- if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+ if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
&tss_segment_16, sizeof tss_segment_16))
goto out;
@@ -4537,7 +5084,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
tss_segment_16.prev_task_link = old_tss_sel;
if (kvm_write_guest(vcpu->kvm,
- get_tss_base_addr(vcpu, nseg_desc),
+ get_tss_base_addr_write(vcpu, nseg_desc),
&tss_segment_16.prev_task_link,
sizeof tss_segment_16.prev_task_link))
goto out;
@@ -4568,7 +5115,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
sizeof tss_segment_32))
goto out;
- if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+ if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
&tss_segment_32, sizeof tss_segment_32))
goto out;
@@ -4576,7 +5123,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
tss_segment_32.prev_task_link = old_tss_sel;
if (kvm_write_guest(vcpu->kvm,
- get_tss_base_addr(vcpu, nseg_desc),
+ get_tss_base_addr_write(vcpu, nseg_desc),
&tss_segment_32.prev_task_link,
sizeof tss_segment_32.prev_task_link))
goto out;
@@ -4599,7 +5146,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
- old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
+ old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
/* FIXME: Handle errors. Failure to read either TSS or their
* descriptors should generate a pagefault.
@@ -4658,7 +5205,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
&nseg_desc);
}
- kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
+ kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0(vcpu) | X86_CR0_TS);
seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
tr_seg.type = 11;
kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
@@ -4689,17 +5236,15 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
kvm_set_cr8(vcpu, sregs->cr8);
- mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
+ mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
kvm_x86_ops->set_efer(vcpu, sregs->efer);
kvm_set_apic_base(vcpu, sregs->apic_base);
- kvm_x86_ops->decache_cr4_guest_bits(vcpu);
-
- mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
+ mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
vcpu->arch.cr0 = sregs->cr0;
- mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
+ mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
if (!is_long_mode(vcpu) && is_pae(vcpu)) {
load_pdptrs(vcpu, vcpu->arch.cr3);
@@ -4734,7 +5279,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
/* Older userspace won't unhalt the vcpu on reset. */
if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
- !(vcpu->arch.cr0 & X86_CR0_PE))
+ !is_protmode(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
vcpu_put(vcpu);
@@ -4832,11 +5377,12 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
{
unsigned long vaddr = tr->linear_address;
gpa_t gpa;
+ int idx;
vcpu_load(vcpu);
- down_read(&vcpu->kvm->slots_lock);
- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
- up_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
tr->physical_address = gpa;
tr->valid = gpa != UNMAPPED_GVA;
tr->writeable = 1;
@@ -4917,14 +5463,14 @@ EXPORT_SYMBOL_GPL(fx_init);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
- if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
+ if (vcpu->guest_fpu_loaded)
return;
vcpu->guest_fpu_loaded = 1;
kvm_fx_save(&vcpu->arch.host_fx_image);
kvm_fx_restore(&vcpu->arch.guest_fx_image);
+ trace_kvm_fpu(1);
}
-EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
@@ -4935,8 +5481,9 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
kvm_fx_save(&vcpu->arch.guest_fx_image);
kvm_fx_restore(&vcpu->arch.host_fx_image);
++vcpu->stat.fpu_reload;
+ set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
+ trace_kvm_fpu(0);
}
-EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
@@ -5088,11 +5635,13 @@ fail:
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
+ int idx;
+
kfree(vcpu->arch.mce_banks);
kvm_free_lapic(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_mmu_destroy(vcpu);
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
free_page((unsigned long)vcpu->arch.pio_data);
}
@@ -5103,6 +5652,12 @@ struct kvm *kvm_arch_create_vm(void)
if (!kvm)
return ERR_PTR(-ENOMEM);
+ kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+ if (!kvm->arch.aliases) {
+ kfree(kvm);
+ return ERR_PTR(-ENOMEM);
+ }
+
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
@@ -5159,16 +5714,18 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
put_page(kvm->arch.apic_access_page);
if (kvm->arch.ept_identity_pagetable)
put_page(kvm->arch.ept_identity_pagetable);
+ cleanup_srcu_struct(&kvm->srcu);
+ kfree(kvm->arch.aliases);
kfree(kvm);
}
-int kvm_arch_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
int user_alloc)
{
- int npages = mem->memory_size >> PAGE_SHIFT;
- struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+ int npages = memslot->npages;
/* To keep backward compatibility with older userspace,
 * x86 needs to handle the !user_alloc case.
@@ -5188,26 +5745,35 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
if (IS_ERR((void *)userspace_addr))
return PTR_ERR((void *)userspace_addr);
- /* set userspace_addr atomically for kvm_hva_to_rmapp */
- spin_lock(&kvm->mmu_lock);
memslot->userspace_addr = userspace_addr;
- spin_unlock(&kvm->mmu_lock);
- } else {
- if (!old.user_alloc && old.rmap) {
- int ret;
-
- down_write(&current->mm->mmap_sem);
- ret = do_munmap(current->mm, old.userspace_addr,
- old.npages * PAGE_SIZE);
- up_write(&current->mm->mmap_sem);
- if (ret < 0)
- printk(KERN_WARNING
- "kvm_vm_ioctl_set_memory_region: "
- "failed to munmap memory\n");
- }
}
}
+
+ return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+
+ int npages = mem->memory_size >> PAGE_SHIFT;
+
+ if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
+ int ret;
+
+ down_write(&current->mm->mmap_sem);
+ ret = do_munmap(current->mm, old.userspace_addr,
+ old.npages * PAGE_SIZE);
+ up_write(&current->mm->mmap_sem);
+ if (ret < 0)
+ printk(KERN_WARNING
+ "kvm_vm_ioctl_set_memory_region: "
+ "failed to munmap memory\n");
+ }
+
spin_lock(&kvm->mmu_lock);
if (!kvm->arch.n_requested_mmu_pages) {
unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
@@ -5216,8 +5782,6 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
kvm_mmu_slot_remove_write_access(kvm, mem->slot);
spin_unlock(&kvm->mmu_lock);
-
- return 0;
}
void kvm_arch_flush_shadow(struct kvm *kvm)
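Note on the locking conversion in the x86.c hunks above: every down_read(&kvm->slots_lock)/up_read() pair becomes an SRCU read-side critical section, with the index returned by srcu_read_lock() either kept on the stack or stashed in vcpu->srcu_idx when the section spans function boundaries (as in __vcpu_run). A minimal sketch of the pattern, assuming <linux/srcu.h>; kvm_use_memslots() is a made-up placeholder, not a real KVM symbol:

	static void memslot_reader(struct kvm_vcpu *vcpu)
	{
		int idx;

		/* replaces down_read(&vcpu->kvm->slots_lock) */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		kvm_use_memslots(vcpu->kvm);	/* memslots stay valid in here */
		/* replaces up_read(); must pass back the same idx */
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
	}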
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 5eadea5..2d10163 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -2,6 +2,7 @@
#define ARCH_X86_KVM_X86_H
#include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
@@ -35,4 +36,33 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
u32 function, u32 index);
+static inline bool is_protmode(struct kvm_vcpu *vcpu)
+{
+ return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
+}
+
+static inline int is_long_mode(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_X86_64
+ return vcpu->arch.efer & EFER_LMA;
+#else
+ return 0;
+#endif
+}
+
+static inline int is_pae(struct kvm_vcpu *vcpu)
+{
+ return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
+}
+
+static inline int is_pse(struct kvm_vcpu *vcpu)
+{
+ return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
+}
+
+static inline int is_paging(struct kvm_vcpu *vcpu)
+{
+ return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
+}
+
#endif
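These inlines centralize mode tests on top of the cached-CR accessors from kvm_cache_regs.h, which is why the x86.c hunks above stop reading vcpu->arch.cr0/cr4 directly. A hedged usage sketch (the helper name is illustrative, not part of the patch):

	/* illustrative caller: both tests go through the register cache */
	static inline bool guest_uses_paged_protected_mode(struct kvm_vcpu *vcpu)
	{
		return is_protmode(vcpu) && is_paging(vcpu);
	}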
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 2226f2c..5cb3f0f 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -750,6 +750,7 @@ static void __init zone_sizes_init(void)
free_area_init_nodes(max_zone_pfns);
}
+#ifndef CONFIG_NO_BOOTMEM
static unsigned long __init setup_node_bootmem(int nodeid,
unsigned long start_pfn,
unsigned long end_pfn,
@@ -766,13 +767,14 @@ static unsigned long __init setup_node_bootmem(int nodeid,
printk(KERN_INFO " node %d bootmap %08lx - %08lx\n",
nodeid, bootmap, bootmap + bootmap_size);
free_bootmem_with_active_regions(nodeid, end_pfn);
- early_res_to_bootmem(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
return bootmap + bootmap_size;
}
+#endif
void __init setup_bootmem_allocator(void)
{
+#ifndef CONFIG_NO_BOOTMEM
int nodeid;
unsigned long bootmap_size, bootmap;
/*
@@ -784,11 +786,13 @@ void __init setup_bootmem_allocator(void)
if (bootmap == -1L)
panic("Cannot find bootmem map of size %ld\n", bootmap_size);
reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
+#endif
printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
max_pfn_mapped<<PAGE_SHIFT);
printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
+#ifndef CONFIG_NO_BOOTMEM
for_each_online_node(nodeid) {
unsigned long start_pfn, end_pfn;
@@ -806,6 +810,7 @@ void __init setup_bootmem_allocator(void)
bootmap = setup_node_bootmem(nodeid, start_pfn, end_pfn,
bootmap);
}
+#endif
after_bootmem = 1;
}
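The pattern in the init_32.c changes is to fence off each legacy step with #ifndef CONFIG_NO_BOOTMEM rather than delete it, so one source file builds for both allocators. Reduced to its shape (a sketch, not the literal function):

	void __init setup_bootmem_allocator(void)
	{
	#ifndef CONFIG_NO_BOOTMEM
		/* legacy: find, reserve and register the global bootmap */
	#endif
		/* common bookkeeping runs in both configurations */
	#ifndef CONFIG_NO_BOOTMEM
		/* legacy: per-node bootmap setup loop */
	#endif
		after_bootmem = 1;
	}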
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 69ddfbd..e9b040e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -572,6 +572,7 @@ kernel_physical_mapping_init(unsigned long start,
void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
int acpi, int k8)
{
+#ifndef CONFIG_NO_BOOTMEM
unsigned long bootmap_size, bootmap;
bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
@@ -579,13 +580,15 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
PAGE_SIZE);
if (bootmap == -1L)
panic("Cannot find bootmem map of size %ld\n", bootmap_size);
+ reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
/* don't touch min_low_pfn */
bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
0, end_pfn);
e820_register_active_regions(0, start_pfn, end_pfn);
free_bootmem_with_active_regions(0, end_pfn);
- early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
- reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
+#else
+ e820_register_active_regions(0, start_pfn, end_pfn);
+#endif
}
#endif
@@ -974,7 +977,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
if (pmd_none(*pmd)) {
pte_t entry;
- p = vmemmap_alloc_block(PMD_SIZE, node);
+ p = vmemmap_alloc_block_buf(PMD_SIZE, node);
if (!p)
return -ENOMEM;
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index b20760c..809baaa 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -418,7 +418,10 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
for_each_online_node(nid) {
memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+ NODE_DATA(nid)->node_id = nid;
+#ifndef CONFIG_NO_BOOTMEM
NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+#endif
}
setup_bootmem_allocator();
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 3307ea8..8948f47 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -163,30 +163,48 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
unsigned long end, unsigned long size,
unsigned long align)
{
- unsigned long mem = find_e820_area(start, end, size, align);
- void *ptr;
+ unsigned long mem;
+ /*
+ * Place the allocation as high as possible: NODE_DATA and
+ * other per-node data will end up alongside it.
+ */
+ if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
+ start = MAX_DMA_PFN<<PAGE_SHIFT;
+ if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
+ end > (MAX_DMA32_PFN<<PAGE_SHIFT))
+ start = MAX_DMA32_PFN<<PAGE_SHIFT;
+ mem = find_e820_area(start, end, size, align);
+ if (mem != -1L)
+ return __va(mem);
+
+ /* extend the search scope */
+ end = max_pfn_mapped << PAGE_SHIFT;
+ if (end > (MAX_DMA32_PFN<<PAGE_SHIFT))
+ start = MAX_DMA32_PFN<<PAGE_SHIFT;
+ else
+ start = MAX_DMA_PFN<<PAGE_SHIFT;
+ mem = find_e820_area(start, end, size, align);
if (mem != -1L)
return __va(mem);
- ptr = __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
- if (ptr == NULL) {
- printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
+ printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
size, nodeid);
- return NULL;
- }
- return ptr;
+
+ return NULL;
}
/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
- unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size;
+ unsigned long start_pfn, last_pfn, nodedata_phys;
const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
- unsigned long bootmap_start, nodedata_phys;
- void *bootmap;
int nid;
+#ifndef CONFIG_NO_BOOTMEM
+ unsigned long bootmap_start, bootmap_pages, bootmap_size;
+ void *bootmap;
+#endif
if (!end)
return;
@@ -200,7 +218,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
start = roundup(start, ZONE_ALIGN);
- printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
+ printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
start, end);
start_pfn = start >> PAGE_SHIFT;
@@ -211,14 +229,21 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
if (node_data[nodeid] == NULL)
return;
nodedata_phys = __pa(node_data[nodeid]);
+ reserve_early(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
nodedata_phys + pgdat_size - 1);
+ nid = phys_to_nid(nodedata_phys);
+ if (nid != nodeid)
+ printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid);
memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
- NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid];
+ NODE_DATA(nodeid)->node_id = nodeid;
NODE_DATA(nodeid)->node_start_pfn = start_pfn;
NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;
+#ifndef CONFIG_NO_BOOTMEM
+ NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid];
+
/*
* Find a place for the bootmem map
* nodedata_phys could be on other nodes by alloc_bootmem,
@@ -227,11 +252,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
* of alloc_bootmem, that could clash with reserved range
*/
bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
- nid = phys_to_nid(nodedata_phys);
- if (nid == nodeid)
- bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
- else
- bootmap_start = roundup(start, PAGE_SIZE);
+ bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
/*
* SMP_CACHE_BYTES could be enough, but init_bootmem_node likes
* to use that to align to PAGE_SIZE
@@ -239,18 +260,13 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
bootmap = early_node_mem(nodeid, bootmap_start, end,
bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
if (bootmap == NULL) {
- if (nodedata_phys < start || nodedata_phys >= end) {
- /*
- * only need to free it if it is from other node
- * bootmem
- */
- if (nid != nodeid)
- free_bootmem(nodedata_phys, pgdat_size);
- }
+ free_early(nodedata_phys, nodedata_phys + pgdat_size);
node_data[nodeid] = NULL;
return;
}
bootmap_start = __pa(bootmap);
+ reserve_early(bootmap_start, bootmap_start+(bootmap_pages<<PAGE_SHIFT),
+ "BOOTMAP");
bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
bootmap_start >> PAGE_SHIFT,
@@ -259,31 +275,12 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
printk(KERN_INFO " bootmap [%016lx - %016lx] pages %lx\n",
bootmap_start, bootmap_start + bootmap_size - 1,
bootmap_pages);
-
- free_bootmem_with_active_regions(nodeid, end);
-
- /*
- * convert early reserve to bootmem reserve earlier
- * otherwise early_node_mem could use early reserved mem
- * on previous node
- */
- early_res_to_bootmem(start, end);
-
- /*
- * in some case early_node_mem could use alloc_bootmem
- * to get range on other node, don't reserve that again
- */
- if (nid != nodeid)
- printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid);
- else
- reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys,
- pgdat_size, BOOTMEM_DEFAULT);
nid = phys_to_nid(bootmap_start);
if (nid != nodeid)
printk(KERN_INFO " bootmap(%d) on node %d\n", nodeid, nid);
- else
- reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
- bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT);
+
+ free_bootmem_with_active_regions(nodeid, end);
+#endif
node_set_online(nodeid);
}
@@ -709,6 +706,10 @@ unsigned long __init numa_free_all_bootmem(void)
for_each_online_node(i)
pages += free_all_bootmem_node(NODE_DATA(i));
+#ifdef CONFIG_NO_BOOTMEM
+ pages += free_all_memory_core_early(MAX_NUMNODES);
+#endif
+
return pages;
}
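early_node_mem() above now implements a two-pass placement policy: first search only above the DMA/DMA32 boundaries inside the node, then retry with the end widened to max_pfn_mapped instead of falling back to bootmem. A simplified standalone model, with find_area() standing in for find_e820_area() and byte addresses instead of pfn shifts:

	#include <stdint.h>

	#define MAX_DMA_ADDR	(16ULL << 20)	/* models MAX_DMA_PFN << PAGE_SHIFT */
	#define MAX_DMA32_ADDR	(4ULL << 30)	/* models MAX_DMA32_PFN << PAGE_SHIFT */

	/* stand-in for find_e820_area(); returns (uint64_t)-1 on failure */
	uint64_t find_area(uint64_t start, uint64_t end, uint64_t size, uint64_t align);

	uint64_t place_node_mem(uint64_t start, uint64_t end, uint64_t max_mapped,
				uint64_t size, uint64_t align)
	{
		uint64_t mem;

		/* pass 1: keep the allocation above the DMA zones when possible */
		if (start < MAX_DMA_ADDR)
			start = MAX_DMA_ADDR;
		if (start < MAX_DMA32_ADDR && end > MAX_DMA32_ADDR)
			start = MAX_DMA32_ADDR;
		mem = find_area(start, end, size, align);
		if (mem != (uint64_t)-1)
			return mem;

		/* pass 2: widen the search up to the highest mapped address */
		end = max_mapped;
		start = end > MAX_DMA32_ADDR ? MAX_DMA32_ADDR : MAX_DMA_ADDR;
		return find_area(start, end, size, align);
	}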
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index 39fba37..0b7d3e9 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -14,8 +14,7 @@ obj-$(CONFIG_X86_VISWS) += visws.o
obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
obj-y += common.o early.o
-obj-y += amd_bus.o
-obj-$(CONFIG_X86_64) += bus_numa.o
+obj-y += amd_bus.o bus_numa.o
ifeq ($(CONFIG_PCI_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 95ecbd4..fc1e8fe 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -2,11 +2,11 @@
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/cpu.h>
+#include <linux/range.h>
+
#include <asm/pci_x86.h>
-#ifdef CONFIG_X86_64
#include <asm/pci-direct.h>
-#endif
#include "bus_numa.h"
@@ -15,60 +15,6 @@
* also get peer root bus resource for io,mmio
*/
-#ifdef CONFIG_X86_64
-
-#define RANGE_NUM 16
-
-struct res_range {
- size_t start;
- size_t end;
-};
-
-static void __init update_range(struct res_range *range, size_t start,
- size_t end)
-{
- int i;
- int j;
-
- for (j = 0; j < RANGE_NUM; j++) {
- if (!range[j].end)
- continue;
-
- if (start <= range[j].start && end >= range[j].end) {
- range[j].start = 0;
- range[j].end = 0;
- continue;
- }
-
- if (start <= range[j].start && end < range[j].end && range[j].start < end + 1) {
- range[j].start = end + 1;
- continue;
- }
-
-
- if (start > range[j].start && end >= range[j].end && range[j].end > start - 1) {
- range[j].end = start - 1;
- continue;
- }
-
- if (start > range[j].start && end < range[j].end) {
- /* find the new spare */
- for (i = 0; i < RANGE_NUM; i++) {
- if (range[i].end == 0)
- break;
- }
- if (i < RANGE_NUM) {
- range[i].end = range[j].end;
- range[i].start = end + 1;
- } else {
- printk(KERN_ERR "run of slot in ranges\n");
- }
- range[j].end = start - 1;
- continue;
- }
- }
-}
-
struct pci_hostbridge_probe {
u32 bus;
u32 slot;
@@ -111,6 +57,8 @@ static void __init get_pci_mmcfg_amd_fam10h_range(void)
fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
}
+#define RANGE_NUM 16
+
/**
* early_fill_mp_bus_info()
* called before pcibios_scan_root and pci_scan_bus
@@ -130,16 +78,17 @@ static int __init early_fill_mp_bus_info(void)
struct pci_root_info *info;
u32 reg;
struct resource *res;
- size_t start;
- size_t end;
- struct res_range range[RANGE_NUM];
+ u64 start;
+ u64 end;
+ struct range range[RANGE_NUM];
u64 val;
u32 address;
+ bool found;
if (!early_pci_allowed())
return -1;
- found_all_numa_early = 0;
+ found = false;
for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
u32 id;
u16 device;
@@ -153,12 +102,12 @@ static int __init early_fill_mp_bus_info(void)
device = (id>>16) & 0xffff;
if (pci_probes[i].vendor == vendor &&
pci_probes[i].device == device) {
- found_all_numa_early = 1;
+ found = true;
break;
}
}
- if (!found_all_numa_early)
+ if (!found)
return 0;
pci_root_num = 0;
@@ -196,7 +145,7 @@ static int __init early_fill_mp_bus_info(void)
def_link = (reg >> 8) & 0x03;
memset(range, 0, sizeof(range));
- range[0].end = 0xffff;
+ add_range(range, RANGE_NUM, 0, 0, 0xffff + 1);
/* io port resource */
for (i = 0; i < 4; i++) {
reg = read_pci_config(bus, slot, 1, 0xc0 + (i << 3));
@@ -220,13 +169,13 @@ static int __init early_fill_mp_bus_info(void)
info = &pci_root_info[j];
printk(KERN_DEBUG "node %d link %d: io port [%llx, %llx]\n",
- node, link, (u64)start, (u64)end);
+ node, link, start, end);
/* the kernel handles 16-bit io ports only */
if (end > 0xffff)
end = 0xffff;
update_res(info, start, end, IORESOURCE_IO, 1);
- update_range(range, start, end);
+ subtract_range(range, RANGE_NUM, start, end + 1);
}
/* add left over io port range to def node/link, [0, 0xffff] */
/* find the position */
@@ -241,29 +190,32 @@ static int __init early_fill_mp_bus_info(void)
if (!range[i].end)
continue;
- update_res(info, range[i].start, range[i].end,
+ update_res(info, range[i].start, range[i].end - 1,
IORESOURCE_IO, 1);
}
}
memset(range, 0, sizeof(range));
/* 0xfd00000000-0xffffffffff for HT */
- range[0].end = (0xfdULL<<32) - 1;
+ end = cap_resource((0xfdULL<<32) - 1);
+ end++;
+ add_range(range, RANGE_NUM, 0, 0, end);
/* need to take out [0, TOM) for RAM*/
address = MSR_K8_TOP_MEM1;
rdmsrl(address, val);
end = (val & 0xffffff800000ULL);
- printk(KERN_INFO "TOM: %016lx aka %ldM\n", end, end>>20);
+ printk(KERN_INFO "TOM: %016llx aka %lldM\n", end, end>>20);
if (end < (1ULL<<32))
- update_range(range, 0, end - 1);
+ subtract_range(range, RANGE_NUM, 0, end);
/* get mmconfig */
get_pci_mmcfg_amd_fam10h_range();
/* need to take out mmconf range */
if (fam10h_mmconf_end) {
printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end);
- update_range(range, fam10h_mmconf_start, fam10h_mmconf_end);
+ subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
+ fam10h_mmconf_end + 1);
}
/* mmio resource */
@@ -293,7 +245,7 @@ static int __init early_fill_mp_bus_info(void)
info = &pci_root_info[j];
printk(KERN_DEBUG "node %d link %d: mmio [%llx, %llx]",
- node, link, (u64)start, (u64)end);
+ node, link, start, end);
/*
* some sick allocation would have range overlap with fam10h
* mmconf range, so need to update start and end.
@@ -318,14 +270,15 @@ static int __init early_fill_mp_bus_info(void)
/* we got a hole */
endx = fam10h_mmconf_start - 1;
update_res(info, start, endx, IORESOURCE_MEM, 0);
- update_range(range, start, endx);
- printk(KERN_CONT " ==> [%llx, %llx]", (u64)start, endx);
+ subtract_range(range, RANGE_NUM, start,
+ endx + 1);
+ printk(KERN_CONT " ==> [%llx, %llx]", start, endx);
start = fam10h_mmconf_end + 1;
changed = 1;
}
if (changed) {
if (start <= end) {
- printk(KERN_CONT " %s [%llx, %llx]", endx?"and":"==>", (u64)start, (u64)end);
+ printk(KERN_CONT " %s [%llx, %llx]", endx ? "and" : "==>", start, end);
} else {
printk(KERN_CONT "%s\n", endx?"":" ==> none");
continue;
@@ -333,8 +286,9 @@ static int __init early_fill_mp_bus_info(void)
}
}
- update_res(info, start, end, IORESOURCE_MEM, 1);
- update_range(range, start, end);
+ update_res(info, cap_resource(start), cap_resource(end),
+ IORESOURCE_MEM, 1);
+ subtract_range(range, RANGE_NUM, start, end + 1);
printk(KERN_CONT "\n");
}
@@ -348,8 +302,8 @@ static int __init early_fill_mp_bus_info(void)
address = MSR_K8_TOP_MEM2;
rdmsrl(address, val);
end = (val & 0xffffff800000ULL);
- printk(KERN_INFO "TOM2: %016lx aka %ldM\n", end, end>>20);
- update_range(range, 1ULL<<32, end - 1);
+ printk(KERN_INFO "TOM2: %016llx aka %lldM\n", end, end>>20);
+ subtract_range(range, RANGE_NUM, 1ULL<<32, end);
}
/*
@@ -368,7 +322,8 @@ static int __init early_fill_mp_bus_info(void)
if (!range[i].end)
continue;
- update_res(info, range[i].start, range[i].end,
+ update_res(info, cap_resource(range[i].start),
+ cap_resource(range[i].end - 1),
IORESOURCE_MEM, 1);
}
}
@@ -384,24 +339,14 @@ static int __init early_fill_mp_bus_info(void)
info->bus_min, info->bus_max, info->node, info->link);
for (j = 0; j < res_num; j++) {
res = &info->res[j];
- printk(KERN_DEBUG "bus: %02x index %x %s: [%llx, %llx]\n",
- busnum, j,
- (res->flags & IORESOURCE_IO)?"io port":"mmio",
- res->start, res->end);
+ printk(KERN_DEBUG "bus: %02x index %x %pR\n",
+ busnum, j, res);
}
}
return 0;
}
-#else /* !CONFIG_X86_64 */
-
-static int __init early_fill_mp_bus_info(void) { return 0; }
-
-#endif /* !CONFIG_X86_64 */
-
-/* common 32/64 bit code */
-
#define ENABLE_CF8_EXT_CFG (1ULL << 46)
static void enable_pci_io_ecs(void *unused)
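amd_bus.c now uses the shared <linux/range.h> helpers in place of its private update_range(). The convention also changes, which explains the scattered "+ 1" edits above: struct range treats end as exclusive, whereas the old res_range end was inclusive. A minimal userspace model of subtract_range() under that convention (simplified: the real helper also guards its arguments and reports when no spare slot is left):

	#include <stdint.h>

	struct range { uint64_t start, end; };	/* end is exclusive; empty if start >= end */

	static void subtract_range(struct range *r, int az, uint64_t start, uint64_t end)
	{
		int i, j;

		for (j = 0; j < az; j++) {
			if (r[j].start >= r[j].end)
				continue;	/* empty slot */
			if (start <= r[j].start && end >= r[j].end) {
				r[j].start = r[j].end = 0;	/* fully covered: drop */
			} else if (start <= r[j].start && end > r[j].start) {
				r[j].start = end;		/* clip the front */
			} else if (start < r[j].end && end >= r[j].end) {
				r[j].end = start;		/* clip the tail */
			} else if (start > r[j].start && end < r[j].end) {
				/* hole in the middle: split into two ranges */
				for (i = 0; i < az; i++)
					if (r[i].start >= r[i].end)
						break;
				if (i < az) {
					r[i].start = end;
					r[i].end = r[j].end;
				}
				r[j].end = start;
			}
		}
	}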
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index 12d54ff..64a1228 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -1,11 +1,11 @@
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/range.h>
#include "bus_numa.h"
int pci_root_num;
struct pci_root_info pci_root_info[PCI_ROOT_NR];
-int found_all_numa_early;
void x86_pci_root_bus_res_quirks(struct pci_bus *b)
{
@@ -21,10 +21,6 @@ void x86_pci_root_bus_res_quirks(struct pci_bus *b)
if (!pci_root_num)
return;
- /* for amd, if only one root bus, don't need to do anything */
- if (pci_root_num < 2 && found_all_numa_early)
- return;
-
for (i = 0; i < pci_root_num; i++) {
if (pci_root_info[i].bus_min == b->number)
break;
@@ -52,8 +48,8 @@ void x86_pci_root_bus_res_quirks(struct pci_bus *b)
}
}
-void __devinit update_res(struct pci_root_info *info, size_t start,
- size_t end, unsigned long flags, int merge)
+void __devinit update_res(struct pci_root_info *info, resource_size_t start,
+ resource_size_t end, unsigned long flags, int merge)
{
int i;
struct resource *res;
@@ -61,25 +57,28 @@ void __devinit update_res(struct pci_root_info *info, size_t start,
if (start > end)
return;
+ if (start == MAX_RESOURCE)
+ return;
+
if (!merge)
goto addit;
/* try to merge it with old one */
for (i = 0; i < info->res_num; i++) {
- size_t final_start, final_end;
- size_t common_start, common_end;
+ resource_size_t final_start, final_end;
+ resource_size_t common_start, common_end;
res = &info->res[i];
if (res->flags != flags)
continue;
- common_start = max((size_t)res->start, start);
- common_end = min((size_t)res->end, end);
+ common_start = max(res->start, start);
+ common_end = min(res->end, end);
if (common_start > common_end + 1)
continue;
- final_start = min((size_t)res->start, start);
- final_end = max((size_t)res->end, end);
+ final_start = min(res->start, start);
+ final_end = max(res->end, end);
res->start = final_start;
res->end = final_end;
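The merge test above deserves a worked example: update_res() folds a new window into an existing resource whenever they overlap or abut, since the `common_start > common_end + 1` check rejects only a true gap. For [0x1000, 0x1fff] and [0x2000, 0x2fff], common_start is 0x2000 and common_end is 0x1fff, so common_start == common_end + 1, the check passes, and the two merge into [0x1000, 0x2fff].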
diff --git a/arch/x86/pci/bus_numa.h b/arch/x86/pci/bus_numa.h
index 731b64e..804a4b4 100644
--- a/arch/x86/pci/bus_numa.h
+++ b/arch/x86/pci/bus_numa.h
@@ -1,5 +1,5 @@
-#ifdef CONFIG_X86_64
-
+#ifndef __BUS_NUMA_H
+#define __BUS_NUMA_H
/*
* a sub bus (transparent) will use entries from 3 onwards to store extra
* ranges from the root, so make sure we have enough slots there.
@@ -19,8 +19,7 @@ struct pci_root_info {
#define PCI_ROOT_NR 4
extern int pci_root_num;
extern struct pci_root_info pci_root_info[PCI_ROOT_NR];
-extern int found_all_numa_early;
-extern void update_res(struct pci_root_info *info, size_t start,
- size_t end, unsigned long flags, int merge);
+extern void update_res(struct pci_root_info *info, resource_size_t start,
+ resource_size_t end, unsigned long flags, int merge);
#endif
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 5a8fbf8..dece3eb 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -255,10 +255,6 @@ void __init pcibios_resource_survey(void)
*/
fs_initcall(pcibios_assign_resources);
-void __weak x86_pci_root_bus_res_quirks(struct pci_bus *b)
-{
-}
-
/*
* If we set up a device for bus mastering, we need to check the latency
* timer as certain crappy BIOSes forget to set it properly.
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 36daccb..b607239 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -50,6 +50,7 @@
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
+#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
@@ -1094,6 +1095,12 @@ asmlinkage void __init xen_start_kernel(void)
__supported_pte_mask |= _PAGE_IOMAP;
+ /*
+ * Prevent page tables from being allocated in highmem, even
+ * if CONFIG_HIGHPTE is enabled.
+ */
+ __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
/* Work out if we support NX */
x86_configure_nx();
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index bf4cd6b..f9eb7de 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1427,23 +1427,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
#endif
}
-#ifdef CONFIG_HIGHPTE
-static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
-{
- pgprot_t prot = PAGE_KERNEL;
-
- if (PagePinned(page))
- prot = PAGE_KERNEL_RO;
-
- if (0 && PageHighMem(page))
- printk("mapping highpte %lx type %d prot %s\n",
- page_to_pfn(page), type,
- (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
-
- return kmap_atomic_prot(page, type, prot);
-}
-#endif
-
#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
@@ -1902,10 +1885,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
.alloc_pmd_clone = paravirt_nop,
.release_pmd = xen_release_pmd_init,
-#ifdef CONFIG_HIGHPTE
- .kmap_atomic_pte = xen_kmap_atomic_pte,
-#endif
-
#ifdef CONFIG_X86_64
.set_pte = xen_set_pte,
#else
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 88e15de..22a2093 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -90,9 +90,9 @@ ENTRY(xen_iret)
GET_THREAD_INFO(%eax)
movl TI_cpu(%eax), %eax
movl __per_cpu_offset(,%eax,4), %eax
- mov per_cpu__xen_vcpu(%eax), %eax
+ mov xen_vcpu(%eax), %eax
#else
- movl per_cpu__xen_vcpu, %eax
+ movl xen_vcpu, %eax
#endif
/* check IF state we're restoring */
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 33a4ff4..b8c59b8 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -78,7 +78,6 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
walk->data -= walk->offset;
if (nbytes && walk->offset & alignmask && !err) {
- walk->offset += alignmask - 1;
walk->offset = ALIGN(walk->offset, alignmask + 1);
walk->data += walk->offset;
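The dropped `walk->offset += alignmask - 1` was a real bug: with alignmask 3, an offset of 7 first became 9 and was then ALIGN'd to 12, silently skipping four bytes of input, whereas ALIGN(7, 4) alone gives the intended 8. The pre-increment only changed the result when offset mod (alignmask + 1) equalled alignmask, which is exactly when it over-advanced by a full alignment step.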
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 1887090..2bb7348 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -386,11 +386,13 @@ static int crypto_authenc_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct ablkcipher_request *abreq = aead_request_ctx(req);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_ablkcipher *enc = ctx->enc;
struct scatterlist *dst = req->dst;
unsigned int cryptlen = req->cryptlen;
- u8 *iv = (u8 *)(abreq + 1) + crypto_ablkcipher_reqsize(enc);
+ struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+ + ctx->reqoff);
+ u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
int err;
ablkcipher_request_set_tfm(abreq, enc);
@@ -454,7 +456,7 @@ static int crypto_authenc_verify(struct aead_request *req,
unsigned int authsize;
areq_ctx->complete = authenc_verify_ahash_done;
- areq_ctx->complete = authenc_verify_ahash_update_done;
+ areq_ctx->update_complete = authenc_verify_ahash_update_done;
ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP);
if (IS_ERR(ohash))
@@ -546,10 +548,6 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
if (IS_ERR(auth))
return PTR_ERR(auth);
- ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
- crypto_ahash_alignmask(auth),
- crypto_ahash_alignmask(auth) + 1);
-
enc = crypto_spawn_skcipher(&ictx->enc);
err = PTR_ERR(enc);
if (IS_ERR(enc))
@@ -558,13 +556,18 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
ctx->auth = auth;
ctx->enc = enc;
- tfm->crt_aead.reqsize = max_t(unsigned int,
- crypto_ahash_reqsize(auth) + ctx->reqoff +
- sizeof(struct authenc_request_ctx) +
+ ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
+ crypto_ahash_alignmask(auth),
+ crypto_ahash_alignmask(auth) + 1) +
+ crypto_ablkcipher_ivsize(enc);
+
+ tfm->crt_aead.reqsize = sizeof(struct authenc_request_ctx) +
+ ctx->reqoff +
+ max_t(unsigned int,
+ crypto_ahash_reqsize(auth) +
sizeof(struct ahash_request),
sizeof(struct skcipher_givcrypt_request) +
- crypto_ablkcipher_reqsize(enc) +
- crypto_ablkcipher_ivsize(enc));
+ crypto_ablkcipher_reqsize(enc));
return 0;
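The net effect of the reqsize/reqoff reshuffle is a per-request layout where the IV sits immediately below the ablkcipher request, which is what lets crypto_authenc_encrypt() recover it as `(u8 *)abreq - ivsize`. A tiny standalone check of the arithmetic, with example parameters (SHA-1 digest, 4-byte alignment, AES IV) that are assumptions, not values from the patch:

	#include <stdio.h>

	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned digestsize = 20, alignmask = 3, ivsize = 16;
		unsigned reqoff = ALIGN(2 * digestsize + alignmask, alignmask + 1)
				  + ivsize;

		/* hash scratch ends at 44; IV occupies [44, 60); abreq starts at 60 */
		printf("reqoff = %u, iv at offset %u\n", reqoff, reqoff - ivsize);
		return 0;
	}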
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 704c141..ef71318 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -31,7 +31,7 @@ struct cryptd_cpu_queue {
};
struct cryptd_queue {
- struct cryptd_cpu_queue *cpu_queue;
+ struct cryptd_cpu_queue __percpu *cpu_queue;
};
struct cryptd_instance_ctx {
diff --git a/crypto/md5.c b/crypto/md5.c
index 9fda213..30efc7d 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -234,6 +234,7 @@ static struct shash_alg alg = {
.export = md5_export,
.import = md5_import,
.descsize = sizeof(struct md5_state),
+ .statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 7ad48df..b872546 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -279,9 +279,9 @@ int __init acpi_numa_init(void)
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
- acpi_parse_x2apic_affinity, NR_CPUS);
+ acpi_parse_x2apic_affinity, nr_cpu_ids);
acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity, NR_CPUS);
+ acpi_parse_processor_affinity, nr_cpu_ids);
ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
acpi_parse_memory_affinity,
NR_NODE_MEMBLKS);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index a959f6a..d648a98 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -561,7 +561,7 @@ end:
}
int acpi_processor_preregister_performance(
- struct acpi_processor_performance *performance)
+ struct acpi_processor_performance __percpu *performance)
{
int count, count_target;
int retval = 0;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 8a713f1..919a285 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -11,6 +11,9 @@
#include <asm/smp.h>
#include "agp.h"
+int intel_agp_enabled;
+EXPORT_SYMBOL(intel_agp_enabled);
+
/*
* If we have Intel graphics, we're not going to have anything other than
* an Intel IOMMU. So make the correct use of the PCI DMA API contingent
@@ -65,6 +68,10 @@
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
/* cover 915 and 945 variants */
#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -99,7 +106,9 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
extern int agp_memory_reserved;
@@ -148,6 +157,25 @@ extern int agp_memory_reserved;
#define INTEL_I7505_AGPCTRL 0x70
#define INTEL_I7505_MCHCFG 0x50
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
+#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
+
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
{64, 16384, 4},
@@ -294,6 +322,13 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
off_t pg_start, int mask_type)
{
int i, j;
+ u32 cache_bits = 0;
+
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
+ cache_bits = I830_PTE_SYSTEM_CACHED;
+ }
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
@@ -614,7 +649,7 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
static void intel_i830_init_gtt_entries(void)
{
u16 gmch_ctrl;
- int gtt_entries;
+ int gtt_entries = 0;
u8 rdct;
int local = 0;
static const int ddt[4] = { 0, 16, 32, 64 };
@@ -706,6 +741,63 @@ static void intel_i830_init_gtt_entries(void)
gtt_entries = 0;
break;
}
+ } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
+ /*
+ * SandyBridge has a new memory control register at 0x50 (word-wide)
+ */
+ u16 snb_gmch_ctl;
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
+ case SNB_GMCH_GMS_STOLEN_32M:
+ gtt_entries = MB(32) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_64M:
+ gtt_entries = MB(64) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_96M:
+ gtt_entries = MB(96) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_128M:
+ gtt_entries = MB(128) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_160M:
+ gtt_entries = MB(160) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_192M:
+ gtt_entries = MB(192) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_224M:
+ gtt_entries = MB(224) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_256M:
+ gtt_entries = MB(256) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_288M:
+ gtt_entries = MB(288) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_320M:
+ gtt_entries = MB(320) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_352M:
+ gtt_entries = MB(352) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_384M:
+ gtt_entries = MB(384) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_416M:
+ gtt_entries = MB(416) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_448M:
+ gtt_entries = MB(448) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_480M:
+ gtt_entries = MB(480) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_512M:
+ gtt_entries = MB(512) - KB(size);
+ break;
+ }
} else {
switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
@@ -1357,6 +1449,8 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
+ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
+ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
*gtt_offset = *gtt_size = MB(2);
break;
default:
@@ -2338,9 +2432,9 @@ static const struct intel_driver_description {
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "Pineview",
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "Pineview",
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150",
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
"GM45", NULL, &intel_i965_driver },
@@ -2355,13 +2449,17 @@ static const struct intel_driver_description {
{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
"G41", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
- "Ironlake/D", NULL, &intel_i965_driver },
+ "HD Graphics", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
- "Ironlake/M", NULL, &intel_i965_driver },
+ "HD Graphics", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
- "Ironlake/MA", NULL, &intel_i965_driver },
+ "HD Graphics", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
- "Ironlake/MC2", NULL, &intel_i965_driver },
+ "HD Graphics", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0,
+ "Sandybridge", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0,
+ "Sandybridge", NULL, &intel_i965_driver },
{ 0, 0, 0, NULL, NULL, NULL }
};
@@ -2371,7 +2469,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge;
u8 cap_ptr = 0;
struct resource *r;
- int i;
+ int i, err;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
@@ -2463,7 +2561,10 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
}
pci_set_drvdata(pdev, bridge);
- return agp_add_bridge(bridge);
+ err = agp_add_bridge(bridge);
+ if (!err)
+ intel_agp_enabled = 1;
+ return err;
}
static void __devexit agp_intel_remove(struct pci_dev *pdev)
@@ -2568,6 +2669,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
+ ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
+ ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
{ }
};
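The sixteen-case SandyBridge switch decodes a linear field: values 1 through 0x10 of GMCH bits 7:3 select n * 32MB of stolen memory, less the space occupied by the GTT itself. An equivalent computation, assuming the driver's existing MB()/KB() helpers (a possible simplification, not what the patch does):

	unsigned int gms = (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) >> 3;

	if (gms >= 1 && gms <= 0x10)
		gtt_entries = MB(32 * gms) - KB(size);
	else
		gtt_entries = 0;	/* unknown encoding */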
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index 4254457..b861c08 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -158,13 +158,11 @@ static unsigned int cy_isa_addresses[] = {
#define NR_ISA_ADDRS ARRAY_SIZE(cy_isa_addresses)
-#ifdef MODULE
static long maddr[NR_CARDS];
static int irq[NR_CARDS];
module_param_array(maddr, long, NULL, 0);
module_param_array(irq, int, NULL, 0);
-#endif
#endif /* CONFIG_ISA */
@@ -598,12 +596,6 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip,
save_car = readb(base_addr + (CyCAR << index));
cy_writeb(base_addr + (CyCAR << index), save_xir);
- /* validate the port# (as configured and open) */
- if (channel + chip * 4 >= cinfo->nports) {
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr + (CySRER << index)) & ~CyTxRdy);
- goto end;
- }
info = &cinfo->ports[channel + chip * 4];
tty = tty_port_tty_get(&info->port);
if (tty == NULL) {
@@ -3316,13 +3308,10 @@ static int __init cy_detect_isa(void)
unsigned short cy_isa_irq, nboard;
void __iomem *cy_isa_address;
unsigned short i, j, cy_isa_nchan;
-#ifdef MODULE
int isparam = 0;
-#endif
nboard = 0;
-#ifdef MODULE
/* Check for module parameters */
for (i = 0; i < NR_CARDS; i++) {
if (maddr[i] || i) {
@@ -3332,7 +3321,6 @@ static int __init cy_detect_isa(void)
if (!maddr[i])
break;
}
-#endif
/* scan the address table probing for Cyclom-Y/ISA boards */
for (i = 0; i < NR_ISA_ADDRS; i++) {
@@ -3353,11 +3341,10 @@ static int __init cy_detect_isa(void)
iounmap(cy_isa_address);
continue;
}
-#ifdef MODULE
+
if (isparam && i < NR_CARDS && irq[i])
cy_isa_irq = irq[i];
else
-#endif
/* find out the board's irq by probing */
cy_isa_irq = detect_isa_irq(cy_isa_address);
if (cy_isa_irq == 0) {
@@ -4208,3 +4195,4 @@ module_exit(cy_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(CY_VERSION);
MODULE_ALIAS_CHARDEV_MAJOR(CYCLADES_MAJOR);
+MODULE_FIRMWARE("cyzfirm.bin");
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 4c3b59b..465185f 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -146,7 +146,7 @@ static void hvc_console_print(struct console *co, const char *b,
return;
/* This console adapter was removed so it is not usable. */
- if (vtermnos[index] < 0)
+ if (vtermnos[index] == -1)
return;
while (count > 0 || i > 0) {
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 517271c..911e1da 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -208,6 +208,7 @@ static int DumpFifoBuffer( char __user *, int);
static void ip2_init_board(int, const struct firmware *);
static unsigned short find_eisa_board(int);
+static int ip2_setup(char *str);
/***************/
/* Static Data */
@@ -263,7 +264,7 @@ static int tracewrap;
/* Macros */
/**********/
-#if defined(MODULE) && defined(IP2DEBUG_OPEN)
+#ifdef IP2DEBUG_OPEN
#define DBG_CNT(s) printk(KERN_DEBUG "(%s): [%x] ttyc=%d, modc=%x -> %s\n", \
tty->name,(pCh->flags), \
tty->count,/*GET_USE_COUNT(module)*/0,s)
@@ -285,7 +286,10 @@ MODULE_AUTHOR("Doug McNash");
MODULE_DESCRIPTION("Computone IntelliPort Plus Driver");
MODULE_LICENSE("GPL");
+#define MAX_CMD_STR 50
+
static int poll_only;
+static char cmd[MAX_CMD_STR];
static int Eisa_irq;
static int Eisa_slot;
@@ -309,6 +313,8 @@ module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "I/O ports for IntelliPort Cards");
module_param(poll_only, bool, 0);
MODULE_PARM_DESC(poll_only, "Do not use card interrupts");
+module_param_string(ip2, cmd, MAX_CMD_STR, 0);
+MODULE_PARM_DESC(ip2, "Option string in the same format as the 'ip2=' boot parameter");
/* for sysfs class support */
static struct class *ip2_class;
@@ -487,7 +493,6 @@ static const struct firmware *ip2_request_firmware(void)
return fw;
}
-#ifndef MODULE
/******************************************************************************
* ip2_setup:
* str: kernel command line string
@@ -531,7 +536,6 @@ static int __init ip2_setup(char *str)
return 1;
}
__setup("ip2=", ip2_setup);
-#endif /* !MODULE */
static int __init ip2_loadmain(void)
{
@@ -539,14 +543,20 @@ static int __init ip2_loadmain(void)
int err = 0;
i2eBordStrPtr pB = NULL;
int rc = -1;
- struct pci_dev *pdev = NULL;
const struct firmware *fw = NULL;
+ char *str;
+
+ str = cmd;
if (poll_only) {
/* Hard lock the interrupts to zero */
irq[0] = irq[1] = irq[2] = irq[3] = poll_only = 0;
}
+ /* Check whether the option string was passed with an 'ip2=' prefix */
+ if (!poll_only && (!strncmp(str, "ip2=", 4)))
+ ip2_setup(str);
+
ip2trace(ITRC_NO_PORT, ITRC_INIT, ITRC_ENTER, 0);
/* process command line arguments to modprobe or
@@ -612,6 +622,7 @@ static int __init ip2_loadmain(void)
case PCI:
#ifdef CONFIG_PCI
{
+ struct pci_dev *pdev = NULL;
u32 addr;
int status;
@@ -626,7 +637,7 @@ static int __init ip2_loadmain(void)
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "can't enable device\n");
- break;
+ goto out;
}
ip2config.type[i] = PCI;
ip2config.pci_dev[i] = pci_dev_get(pdev);
@@ -638,6 +649,8 @@ static int __init ip2_loadmain(void)
dev_err(&pdev->dev, "I/O address error\n");
ip2config.irq[i] = pdev->irq;
+out:
+ pci_dev_put(pdev);
}
#else
printk(KERN_ERR "IP2: PCI card specified but PCI "
@@ -656,7 +669,6 @@ static int __init ip2_loadmain(void)
break;
} /* switch */
} /* for */
- pci_dev_put(pdev);
for (i = 0; i < IP2_MAX_BOARDS; ++i) {
if (ip2config.addr[i]) {
@@ -3197,3 +3209,5 @@ static struct pci_device_id ip2main_pci_tbl[] __devinitdata = {
};
MODULE_DEVICE_TABLE(pci, ip2main_pci_tbl);
+
+MODULE_FIRMWARE("intelliport2.bin");
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 300d5bd..be2e8f9 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -113,6 +113,8 @@
* 64-bit verification
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
@@ -140,7 +142,6 @@
#define InterruptTheCard(base) outw(0, (base) + 0xc)
#define ClearInterrupt(base) inw((base) + 0x0a)
-#define pr_dbg(str...) pr_debug("ISICOM: " str)
#ifdef DEBUG
#define isicom_paranoia_check(a, b, c) __isicom_paranoia_check((a), (b), (c))
#else
@@ -249,8 +250,7 @@ static int lock_card(struct isi_board *card)
spin_unlock_irqrestore(&card->card_lock, card->flags);
msleep(10);
}
- printk(KERN_WARNING "ISICOM: Failed to lock Card (0x%lx)\n",
- card->base);
+ pr_warning("Failed to lock Card (0x%lx)\n", card->base);
return 0; /* Failed to acquire the card! */
}
@@ -379,13 +379,13 @@ static inline int __isicom_paranoia_check(struct isi_port const *port,
char *name, const char *routine)
{
if (!port) {
- printk(KERN_WARNING "ISICOM: Warning: bad isicom magic for "
- "dev %s in %s.\n", name, routine);
+ pr_warning("Warning: bad isicom magic for dev %s in %s.\n",
+ name, routine);
return 1;
}
if (port->magic != ISICOM_MAGIC) {
- printk(KERN_WARNING "ISICOM: Warning: NULL isicom port for "
- "dev %s in %s.\n", name, routine);
+ pr_warning("Warning: NULL isicom port for dev %s in %s.\n",
+ name, routine);
return 1;
}
@@ -450,8 +450,8 @@ static void isicom_tx(unsigned long _data)
if (!(inw(base + 0x02) & (1 << port->channel)))
continue;
- pr_dbg("txing %d bytes, port%d.\n", txcount,
- port->channel + 1);
+ pr_debug("txing %d bytes, port%d.\n",
+ txcount, port->channel + 1);
outw((port->channel << isi_card[card].shift_count) | txcount,
base);
residue = NO;
@@ -547,8 +547,8 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
byte_count = header & 0xff;
if (channel + 1 > card->port_count) {
- printk(KERN_WARNING "ISICOM: isicom_interrupt(0x%lx): "
- "%d(channel) > port_count.\n", base, channel+1);
+ pr_warning("%s(0x%lx): %d(channel) > port_count.\n",
+ __func__, base, channel+1);
outw(0x0000, base+0x04); /* enable interrupts */
spin_unlock(&card->card_lock);
return IRQ_HANDLED;
@@ -582,14 +582,15 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
if (port->status & ISI_DCD) {
if (!(header & ISI_DCD)) {
/* Carrier has been lost */
- pr_dbg("interrupt: DCD->low.\n"
- );
+ pr_debug("%s: DCD->low.\n",
+ __func__);
port->status &= ~ISI_DCD;
tty_hangup(tty);
}
} else if (header & ISI_DCD) {
/* Carrier has been detected */
- pr_dbg("interrupt: DCD->high.\n");
+ pr_debug("%s: DCD->high.\n",
+ __func__);
port->status |= ISI_DCD;
wake_up_interruptible(&port->port.open_wait);
}
@@ -641,17 +642,19 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
break;
case 2: /* Statistics */
- pr_dbg("isicom_interrupt: stats!!!.\n");
+ pr_debug("%s: stats!!!\n", __func__);
break;
default:
- pr_dbg("Intr: Unknown code in status packet.\n");
+ pr_debug("%s: Unknown code in status packet.\n",
+ __func__);
break;
}
} else { /* Data Packet */
count = tty_prepare_flip_string(tty, &rp, byte_count & ~1);
- pr_dbg("Intr: Can rx %d of %d bytes.\n", count, byte_count);
+ pr_debug("%s: Can rx %d of %d bytes.\n",
+ __func__, count, byte_count);
word_count = count >> 1;
insw(base, rp, word_count);
byte_count -= (word_count << 1);
@@ -661,8 +664,8 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
byte_count -= 2;
}
if (byte_count > 0) {
- pr_dbg("Intr(0x%lx:%d): Flip buffer overflow! dropping "
- "bytes...\n", base, channel + 1);
+ pr_debug("%s(0x%lx:%d): Flip buffer overflow! dropping bytes...\n",
+ __func__, base, channel + 1);
/* drain out unread xtra data */
while (byte_count > 0) {
inw(base);
@@ -888,8 +891,8 @@ static void isicom_shutdown_port(struct isi_port *port)
struct isi_board *card = port->card;
if (--card->count < 0) {
- pr_dbg("isicom_shutdown_port: bad board(0x%lx) count %d.\n",
- card->base, card->count);
+ pr_debug("%s: bad board(0x%lx) count %d.\n",
+ __func__, card->base, card->count);
card->count = 0;
}
/* last port was closed, shutdown that board too */
@@ -1681,13 +1684,13 @@ static int __init isicom_init(void)
retval = tty_register_driver(isicom_normal);
if (retval) {
- pr_dbg("Couldn't register the dialin driver\n");
+ pr_debug("Couldn't register the dialin driver\n");
goto err_puttty;
}
retval = pci_register_driver(&isicom_driver);
if (retval < 0) {
- printk(KERN_ERR "ISICOM: Unable to register pci driver.\n");
+ pr_err("Unable to register pci driver.\n");
goto err_unrtty;
}
@@ -1717,3 +1720,8 @@ module_exit(isicom_exit);
MODULE_AUTHOR("MultiTech");
MODULE_DESCRIPTION("Driver for the ISI series of cards by MultiTech");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("isi608.bin");
+MODULE_FIRMWARE("isi608em.bin");
+MODULE_FIRMWARE("isi616em.bin");
+MODULE_FIRMWARE("isi4608.bin");
+MODULE_FIRMWARE("isi4616.bin");
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 63ee3bb..166495d 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -164,24 +164,25 @@ static unsigned int moxaFuncTout = HZ / 2;
static unsigned int moxaLowWaterChk;
static DEFINE_MUTEX(moxa_openlock);
static DEFINE_SPINLOCK(moxa_lock);
-/* Variables for insmod */
-#ifdef MODULE
+
static unsigned long baseaddr[MAX_BOARDS];
static unsigned int type[MAX_BOARDS];
static unsigned int numports[MAX_BOARDS];
-#endif
MODULE_AUTHOR("William Chen");
MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver");
MODULE_LICENSE("GPL");
-#ifdef MODULE
+MODULE_FIRMWARE("c218tunx.cod");
+MODULE_FIRMWARE("cp204unx.cod");
+MODULE_FIRMWARE("c320tunx.cod");
+
module_param_array(type, uint, NULL, 0);
MODULE_PARM_DESC(type, "card type: C218=2, C320=4");
module_param_array(baseaddr, ulong, NULL, 0);
MODULE_PARM_DESC(baseaddr, "base address");
module_param_array(numports, uint, NULL, 0);
MODULE_PARM_DESC(numports, "numports (ignored for C218)");
-#endif
+
module_param(ttymajor, int, 0);
/*
@@ -1024,6 +1025,8 @@ static int __init moxa_init(void)
{
unsigned int isabrds = 0;
int retval = 0;
+ struct moxa_board_conf *brd = moxa_boards;
+ unsigned int i;
printk(KERN_INFO "MOXA Intellio family driver version %s\n",
MOXA_VERSION);
@@ -1051,10 +1054,7 @@ static int __init moxa_init(void)
}
/* Find the boards defined from module args. */
-#ifdef MODULE
- {
- struct moxa_board_conf *brd = moxa_boards;
- unsigned int i;
+
for (i = 0; i < MAX_BOARDS; i++) {
if (!baseaddr[i])
break;
@@ -1087,8 +1087,6 @@ static int __init moxa_init(void)
isabrds++;
}
}
- }
-#endif
#ifdef CONFIG_PCI
retval = pci_register_driver(&moxa_pci_driver);
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 3d92306..e0c5d2a 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -895,8 +895,7 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
if (inb(info->ioaddr + UART_LSR) == 0xff) {
spin_unlock_irqrestore(&info->slock, flags);
if (capable(CAP_SYS_ADMIN)) {
- if (tty)
- set_bit(TTY_IO_ERROR, &tty->flags);
+ set_bit(TTY_IO_ERROR, &tty->flags);
return 0;
} else
return -ENODEV;
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index 2ad7d37..a3f32a1 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -136,10 +136,6 @@ static int debug;
#define RECEIVE_BUF_MAX 4
-/* Define all types of vendors and devices to support */
-#define VENDOR1 0x1931 /* Vendor Option */
-#define DEVICE1 0x000c /* HSDPA card */
-
#define R_IIR 0x0000 /* Interrupt Identity Register */
#define R_FCR 0x0000 /* Flow Control Register */
#define R_IER 0x0004 /* Interrupt Enable Register */
@@ -371,6 +367,8 @@ struct port {
struct mutex tty_sem;
wait_queue_head_t tty_wait;
struct async_icount tty_icount;
+
+ struct nozomi *dc;
};
/* Private data one for each card in the system */
@@ -405,7 +403,7 @@ struct buffer {
/* Global variables */
static const struct pci_device_id nozomi_pci_tbl[] __devinitconst = {
- {PCI_DEVICE(VENDOR1, DEVICE1)},
+ {PCI_DEVICE(0x1931, 0x000c)}, /* Nozomi HSDPA */
{},
};
@@ -414,6 +412,8 @@ MODULE_DEVICE_TABLE(pci, nozomi_pci_tbl);
static struct nozomi *ndevs[NOZOMI_MAX_CARDS];
static struct tty_driver *ntty_driver;
+static const struct tty_port_operations noz_tty_port_ops;
+
/*
* find card by tty_index
*/
@@ -853,8 +853,6 @@ static int receive_data(enum port_type index, struct nozomi *dc)
goto put;
}
- tty_buffer_request_room(tty, size);
-
while (size > 0) {
read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX);
@@ -1473,9 +1471,11 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
for (i = 0; i < MAX_PORT; i++) {
struct device *tty_dev;
-
- mutex_init(&dc->port[i].tty_sem);
- tty_port_init(&dc->port[i].port);
+ struct port *port = &dc->port[i];
+ port->dc = dc;
+ mutex_init(&port->tty_sem);
+ tty_port_init(&port->port);
+ port->port.ops = &noz_tty_port_ops;
tty_dev = tty_register_device(ntty_driver, dc->index_start + i,
&pdev->dev);
@@ -1600,67 +1600,74 @@ static void set_dtr(const struct tty_struct *tty, int dtr)
* ----------------------------------------------------------------------------
*/
-/* Called when the userspace process opens the tty, /dev/noz*. */
-static int ntty_open(struct tty_struct *tty, struct file *file)
+static int ntty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct port *port = get_port_by_tty(tty);
struct nozomi *dc = get_dc_by_tty(tty);
- unsigned long flags;
-
+ int ret;
if (!port || !dc || dc->state != NOZOMI_STATE_READY)
return -ENODEV;
-
- if (mutex_lock_interruptible(&port->tty_sem))
- return -ERESTARTSYS;
-
- port->port.count++;
- dc->open_ttys++;
-
- /* Enable interrupt downlink for channel */
- if (port->port.count == 1) {
- tty->driver_data = port;
- tty_port_tty_set(&port->port, tty);
- DBG1("open: %d", port->token_dl);
- spin_lock_irqsave(&dc->spin_mutex, flags);
- dc->last_ier = dc->last_ier | port->token_dl;
- writew(dc->last_ier, dc->reg_ier);
- spin_unlock_irqrestore(&dc->spin_mutex, flags);
+ ret = tty_init_termios(tty);
+ if (ret == 0) {
+ tty_driver_kref_get(driver);
+ driver->ttys[tty->index] = tty;
}
- mutex_unlock(&port->tty_sem);
- return 0;
+ return ret;
}
-/* Called when the userspace process close the tty, /dev/noz*. Also
- called immediately if ntty_open fails in which case tty->driver_data
- will be NULL an we exit by the first return */
+static void ntty_cleanup(struct tty_struct *tty)
+{
+ tty->driver_data = NULL;
+}
-static void ntty_close(struct tty_struct *tty, struct file *file)
+static int ntty_activate(struct tty_port *tport, struct tty_struct *tty)
{
- struct nozomi *dc = get_dc_by_tty(tty);
- struct port *nport = tty->driver_data;
- struct tty_port *port = &nport->port;
+ struct port *port = container_of(tport, struct port, port);
+ struct nozomi *dc = port->dc;
unsigned long flags;
- if (!dc || !nport)
- return;
+ DBG1("open: %d", port->token_dl);
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ dc->last_ier = dc->last_ier | port->token_dl;
+ writew(dc->last_ier, dc->reg_ier);
+ dc->open_ttys++;
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+ printk("noz: activated %d: %p\n", tty->index, tport);
+ return 0;
+}
- /* Users cannot interrupt a close */
- mutex_lock(&nport->tty_sem);
+static int ntty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct port *port = get_port_by_tty(tty);
+ return tty_port_open(&port->port, tty, filp);
+}
- WARN_ON(!port->count);
+static void ntty_shutdown(struct tty_port *tport)
+{
+ struct port *port = container_of(tport, struct port, port);
+ struct nozomi *dc = port->dc;
+ unsigned long flags;
+ DBG1("close: %d", port->token_dl);
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ dc->last_ier &= ~(port->token_dl);
+ writew(dc->last_ier, dc->reg_ier);
dc->open_ttys--;
- port->count--;
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+ printk("noz: shutdown %p\n", tport);
+}
- if (port->count == 0) {
- DBG1("close: %d", nport->token_dl);
- tty_port_tty_set(port, NULL);
- spin_lock_irqsave(&dc->spin_mutex, flags);
- dc->last_ier &= ~(nport->token_dl);
- writew(dc->last_ier, dc->reg_ier);
- spin_unlock_irqrestore(&dc->spin_mutex, flags);
- }
- mutex_unlock(&nport->tty_sem);
+static void ntty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct port *port = tty->driver_data;
+ if (port)
+ tty_port_close(&port->port, tty, filp);
+}
+
+static void ntty_hangup(struct tty_struct *tty)
+{
+ struct port *port = tty->driver_data;
+ tty_port_hangup(&port->port);
}
/*
@@ -1680,15 +1687,7 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
if (!dc || !port)
return -ENODEV;
- if (unlikely(!mutex_trylock(&port->tty_sem))) {
- /*
- * must test lock as tty layer wraps calls
- * to this function with BKL
- */
- dev_err(&dc->pdev->dev, "Would have deadlocked - "
- "return EAGAIN\n");
- return -EAGAIN;
- }
+ mutex_lock(&port->tty_sem);
if (unlikely(!port->port.count)) {
DBG1(" ");
@@ -1728,25 +1727,23 @@ exit:
* This method is called by the upper tty layer.
* According to the N_TTY.c sources it expects a value >= 0 and
* does not check for negative values.
+ *
+ * If the port is unplugged report lots of room and let the bits
+ * dribble away so we don't block anything.
*/
static int ntty_write_room(struct tty_struct *tty)
{
struct port *port = tty->driver_data;
- int room = 0;
+ int room = 4096;
const struct nozomi *dc = get_dc_by_tty(tty);
- if (!dc || !port)
- return 0;
- if (!mutex_trylock(&port->tty_sem))
- return 0;
-
- if (!port->port.count)
- goto exit;
-
- room = port->fifo_ul.size - kfifo_len(&port->fifo_ul);
-
-exit:
- mutex_unlock(&port->tty_sem);
+ if (dc) {
+ mutex_lock(&port->tty_sem);
+ if (port->port.count)
+ room = port->fifo_ul.size -
+ kfifo_len(&port->fifo_ul);
+ mutex_unlock(&port->tty_sem);
+ }
return room;
}
@@ -1906,10 +1903,16 @@ exit_in_buffer:
return rval;
}
+static const struct tty_port_operations noz_tty_port_ops = {
+ .activate = ntty_activate,
+ .shutdown = ntty_shutdown,
+};
+
static const struct tty_operations tty_ops = {
.ioctl = ntty_ioctl,
.open = ntty_open,
.close = ntty_close,
+ .hangup = ntty_hangup,
.write = ntty_write,
.write_room = ntty_write_room,
.unthrottle = ntty_unthrottle,
@@ -1917,6 +1920,8 @@ static const struct tty_operations tty_ops = {
.chars_in_buffer = ntty_chars_in_buffer,
.tiocmget = ntty_tiocmget,
.tiocmset = ntty_tiocmset,
+ .install = ntty_install,
+ .cleanup = ntty_cleanup,
};
/* Module initialization */
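
The nozomi rework is a standard tty_port conversion: reference counting, tty binding and the first-open/last-close transitions move into tty_port_open()/tty_port_close(), and the driver only supplies activate/shutdown callbacks for its hardware work. A skeleton of the wiring under this kernel's tty API, with hypothetical names:

#include <linux/tty.h>

struct foo_port {
	struct tty_port port;
	/* hardware state ... */
};

static int foo_activate(struct tty_port *tport, struct tty_struct *tty)
{
	/* first open: enable interrupts, raise modem lines, etc. */
	return 0;
}

static void foo_shutdown(struct tty_port *tport)
{
	/* last close: mask interrupts, drop modem lines */
}

static const struct tty_port_operations foo_port_ops = {
	.activate = foo_activate,
	.shutdown = foo_shutdown,
};

static int foo_open(struct tty_struct *tty, struct file *filp)
{
	struct foo_port *fp = tty->driver_data;	/* set up in ->install() */

	/* counts users and calls ->activate on the 0 -> 1 transition */
	return tty_port_open(&fp->port, tty, filp);
}

static void foo_close(struct tty_struct *tty, struct file *filp)
{
	struct foo_port *fp = tty->driver_data;

	if (fp)	/* calls ->shutdown when the last user goes away */
		tty_port_close(&fp->port, tty, filp);
}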
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 452370a..986aa60 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -658,8 +658,7 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id)
info->mon.char_max = char_count;
info->mon.char_last = char_count;
#endif
- len = tty_buffer_request_room(tty, char_count);
- while (len--) {
+ while (char_count--) {
data = base_addr[CyRDR];
tty_insert_flip_char(tty, data, TTY_NORMAL);
#ifdef CYCLOM_16Y_HACK
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 268e17f..07ac14d 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -646,8 +646,6 @@ static void sx_receive(struct specialix_board *bp)
dprintk(SX_DEBUG_RX, "port: %p: count: %d\n", port, count);
port->hits[count > 8 ? 9 : count]++;
- tty_buffer_request_room(tty, count);
-
while (count--)
tty_insert_flip_char(tty, sx_in(bp, CD186x_RDR), TTY_NORMAL);
tty_flip_buffer_push(tty);
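
serial167 and specialix get the same fix: tty_insert_flip_char() grows the flip buffer on demand, so the tty_buffer_request_room() call before a per-character loop was redundant, and in serial167 it wrongly capped the loop at whatever room happened to be granted. The resulting receive-loop shape; read_rx_fifo() is a hypothetical stand-in for the device register access:

#include <linux/tty.h>
#include <linux/tty_flip.h>

extern unsigned char read_rx_fifo(void *port);	/* hypothetical device read */

static void demo_rx(struct tty_struct *tty, void *port, int char_count)
{
	while (char_count--) {
		unsigned char data = read_rx_fifo(port);

		/* grows the flip buffer as needed; drops only on real overflow */
		tty_insert_flip_char(tty, data, TTY_NORMAL);
	}
	tty_flip_buffer_push(tty);
}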
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 4846b73..0658fc5 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -2031,7 +2031,7 @@ static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
return 0;
- if (!tty || !info->xmit_buf)
+ if (!info->xmit_buf)
return 0;
spin_lock_irqsave(&info->irq_spinlock, flags);
@@ -2121,7 +2121,7 @@ static int mgsl_write(struct tty_struct * tty,
if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
goto cleanup;
- if (!tty || !info->xmit_buf)
+ if (!info->xmit_buf)
goto cleanup;
if ( info->params.mode == MGSL_MODE_HDLC ||
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 8678f0c..4561ce2 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -468,7 +468,7 @@ static unsigned int free_tbuf_count(struct slgt_info *info);
static unsigned int tbuf_bytes(struct slgt_info *info);
static void reset_tbufs(struct slgt_info *info);
static void tdma_reset(struct slgt_info *info);
-static void tx_load(struct slgt_info *info, const char *buf, unsigned int count);
+static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
static void get_signals(struct slgt_info *info);
static void set_signals(struct slgt_info *info);
@@ -813,59 +813,32 @@ static int write(struct tty_struct *tty,
int ret = 0;
struct slgt_info *info = tty->driver_data;
unsigned long flags;
- unsigned int bufs_needed;
if (sanity_check(info, tty->name, "write"))
- goto cleanup;
+ return -EIO;
+
DBGINFO(("%s write count=%d\n", info->device_name, count));
- if (!info->tx_buf)
- goto cleanup;
+ if (!info->tx_buf || (count > info->max_frame_size))
+ return -EIO;
- if (count > info->max_frame_size) {
- ret = -EIO;
- goto cleanup;
- }
+ if (!count || tty->stopped || tty->hw_stopped)
+ return 0;
- if (!count)
- goto cleanup;
+ spin_lock_irqsave(&info->lock, flags);
- if (!info->tx_active && info->tx_count) {
+ if (info->tx_count) {
/* send accumulated data from send_char() */
- tx_load(info, info->tx_buf, info->tx_count);
- goto start;
+ if (!tx_load(info, info->tx_buf, info->tx_count))
+ goto cleanup;
+ info->tx_count = 0;
}
- bufs_needed = (count/DMABUFSIZE);
- if (count % DMABUFSIZE)
- ++bufs_needed;
- if (bufs_needed > free_tbuf_count(info))
- goto cleanup;
- ret = info->tx_count = count;
- tx_load(info, buf, count);
- goto start;
-
-start:
- if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
- spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active)
- tx_start(info);
- else if (!(rd_reg32(info, TDCSR) & BIT0)) {
- /* transmit still active but transmit DMA stopped */
- unsigned int i = info->tbuf_current;
- if (!i)
- i = info->tbuf_count;
- i--;
- /* if DMA buf unsent must try later after tx idle */
- if (desc_count(info->tbufs[i]))
- ret = 0;
- }
- if (ret > 0)
- update_tx_timer(info);
- spin_unlock_irqrestore(&info->lock,flags);
- }
+ if (tx_load(info, buf, count))
+ ret = count;
cleanup:
+ spin_unlock_irqrestore(&info->lock, flags);
DBGINFO(("%s write rc=%d\n", info->device_name, ret));
return ret;
}
@@ -882,7 +855,7 @@ static int put_char(struct tty_struct *tty, unsigned char ch)
if (!info->tx_buf)
return 0;
spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active && (info->tx_count < info->max_frame_size)) {
+ if (info->tx_count < info->max_frame_size) {
info->tx_buf[info->tx_count++] = ch;
ret = 1;
}
@@ -981,10 +954,8 @@ static void flush_chars(struct tty_struct *tty)
DBGINFO(("%s flush_chars start transmit\n", info->device_name));
spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active && info->tx_count) {
- tx_load(info, info->tx_buf,info->tx_count);
- tx_start(info);
- }
+ if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
+ info->tx_count = 0;
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -997,10 +968,9 @@ static void flush_buffer(struct tty_struct *tty)
return;
DBGINFO(("%s flush_buffer\n", info->device_name));
- spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active)
- info->tx_count = 0;
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ info->tx_count = 0;
+ spin_unlock_irqrestore(&info->lock, flags);
tty_wakeup(tty);
}
@@ -1033,12 +1003,10 @@ static void tx_release(struct tty_struct *tty)
if (sanity_check(info, tty->name, "tx_release"))
return;
DBGINFO(("%s tx_release\n", info->device_name));
- spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active && info->tx_count) {
- tx_load(info, info->tx_buf, info->tx_count);
- tx_start(info);
- }
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
+ info->tx_count = 0;
+ spin_unlock_irqrestore(&info->lock, flags);
}
/*
@@ -1506,27 +1474,25 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
DBGINFO(("%s hdlc_xmit\n", dev->name));
+ if (!skb->len)
+ return NETDEV_TX_OK;
+
/* stop sending until this frame completes */
netif_stop_queue(dev);
- /* copy data to device buffers */
- info->tx_count = skb->len;
- tx_load(info, skb->data, skb->len);
-
/* update network statistics */
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- /* done with socket buffer, so free it */
- dev_kfree_skb(skb);
-
/* save start time for transmit timeout detection */
dev->trans_start = jiffies;
- spin_lock_irqsave(&info->lock,flags);
- tx_start(info);
- update_tx_timer(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ tx_load(info, skb->data, skb->len);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ /* done with socket buffer, so free it */
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -2180,7 +2146,7 @@ static void isr_serial(struct slgt_info *info)
if (info->params.mode == MGSL_MODE_ASYNC) {
if (status & IRQ_TXIDLE) {
- if (info->tx_count)
+ if (info->tx_active)
isr_txeom(info, status);
}
if (info->rx_pio && (status & IRQ_RXDATA))
@@ -2276,13 +2242,42 @@ static void isr_tdma(struct slgt_info *info)
}
}
+/*
+ * return true if there are unsent tx DMA buffers, otherwise false
+ *
+ * if there are unsent buffers then info->tbuf_start
+ * is set to index of first unsent buffer
+ */
+static bool unsent_tbufs(struct slgt_info *info)
+{
+ unsigned int i = info->tbuf_current;
+ bool rc = false;
+
+ /*
+ * search backwards from last loaded buffer (precedes tbuf_current)
+ * for first unsent buffer (desc_count > 0)
+ */
+
+ do {
+ if (i)
+ i--;
+ else
+ i = info->tbuf_count - 1;
+ if (!desc_count(info->tbufs[i]))
+ break;
+ info->tbuf_start = i;
+ rc = true;
+ } while (i != info->tbuf_current);
+
+ return rc;
+}
+
static void isr_txeom(struct slgt_info *info, unsigned short status)
{
DBGISR(("%s txeom status=%04x\n", info->device_name, status));
slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
tdma_reset(info);
- reset_tbufs(info);
if (status & IRQ_TXUNDER) {
unsigned short val = rd_reg16(info, TCR);
wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
@@ -2297,8 +2292,12 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
info->icount.txok++;
}
+ if (unsent_tbufs(info)) {
+ tx_start(info);
+ update_tx_timer(info);
+ return;
+ }
info->tx_active = false;
- info->tx_count = 0;
del_timer(&info->tx_timer);
@@ -3949,7 +3948,7 @@ static void tx_start(struct slgt_info *info)
info->tx_enabled = true;
}
- if (info->tx_count) {
+ if (desc_count(info->tbufs[info->tbuf_start])) {
info->drop_rts_on_tx_done = false;
if (info->params.mode != MGSL_MODE_ASYNC) {
@@ -4772,25 +4771,36 @@ static unsigned int tbuf_bytes(struct slgt_info *info)
}
/*
- * load transmit DMA buffer(s) with data
+ * load data into transmit DMA buffer ring and start transmitter if needed
+ * return true if data accepted, otherwise false (buffers full)
*/
-static void tx_load(struct slgt_info *info, const char *buf, unsigned int size)
+static bool tx_load(struct slgt_info *info, const char *buf, unsigned int size)
{
unsigned short count;
unsigned int i;
struct slgt_desc *d;
- if (size == 0)
- return;
+ /* check required buffer space */
+ if (DIV_ROUND_UP(size, DMABUFSIZE) > free_tbuf_count(info))
+ return false;
DBGDATA(info, buf, size, "tx");
+ /*
+ * copy data to one or more DMA buffers in circular ring
+ * tbuf_start = first buffer for this data
+ * tbuf_current = next free buffer
+ *
+ * Copy all data before making data visible to DMA controller by
+ * setting descriptor count of the first buffer.
+ * This prevents an active DMA controller from reading the first DMA
+ * buffers of a frame and stopping before the final buffers are filled.
+ */
+
info->tbuf_start = i = info->tbuf_current;
while (size) {
d = &info->tbufs[i];
- if (++i == info->tbuf_count)
- i = 0;
count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size);
memcpy(d->buf, buf, count);
@@ -4808,11 +4818,27 @@ static void tx_load(struct slgt_info *info, const char *buf, unsigned int size)
else
set_desc_eof(*d, 0);
- set_desc_count(*d, count);
+ /* set descriptor count for all but first buffer */
+ if (i != info->tbuf_start)
+ set_desc_count(*d, count);
d->buf_count = count;
+
+ if (++i == info->tbuf_count)
+ i = 0;
}
info->tbuf_current = i;
+
+ /* set first buffer count to make new data visible to DMA controller */
+ d = &info->tbufs[info->tbuf_start];
+ set_desc_count(*d, d->buf_count);
+
+ /* start transmitter if needed and update transmit timeout */
+ if (!info->tx_active)
+ tx_start(info);
+ update_tx_timer(info);
+
+ return true;
}
static int register_test(struct slgt_info *info)
@@ -4934,9 +4960,7 @@ static int loopback_test(struct slgt_info *info)
spin_lock_irqsave(&info->lock,flags);
async_mode(info);
rx_start(info);
- info->tx_count = count;
tx_load(info, buf, count);
- tx_start(info);
spin_unlock_irqrestore(&info->lock, flags);
/* wait for receive complete */
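
The synclink_gt tx_load() rework is a publish-last scheme: every DMA buffer of a frame is filled first, and only then is the first descriptor's count set, which is the field the DMA engine polls, so it can never observe a half-built frame. A generic userspace sketch of the idea with placeholder types; it elides the free-space check (free_tbuf_count) and the MMIO ordering the real descriptors get:

#include <stdbool.h>
#include <string.h>

struct desc {
	char *buf;
	unsigned short buf_count;	/* staged size, CPU side only */
	unsigned short count;		/* nonzero = visible to the DMA engine */
};

static bool ring_load(struct desc *ring, unsigned int nbufs,
		      unsigned int *cur, const char *data,
		      unsigned int size, unsigned int bufsize)
{
	unsigned int start = *cur, i = start;

	while (size) {
		unsigned int count = size > bufsize ? bufsize : size;

		memcpy(ring[i].buf, data, count);
		ring[i].buf_count = count;
		if (i != start)
			ring[i].count = count;	/* later buffers first */
		data += count;
		size -= count;
		if (++i == nbufs)
			i = 0;
	}
	*cur = i;
	ring[start].count = ring[start].buf_count;	/* publish last */
	return true;
}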
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
index 66fa4e1..af8d977 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ -231,9 +231,10 @@ int tty_buffer_request_room(struct tty_struct *tty, size_t size)
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
/**
- * tty_insert_flip_string - Add characters to the tty buffer
+ * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
* @tty: tty structure
* @chars: characters
+ * @flag: flag value for each character
* @size: size
*
* Queue a series of bytes to the tty buffering. All the characters
@@ -242,18 +243,19 @@ EXPORT_SYMBOL_GPL(tty_buffer_request_room);
* Locking: Called functions may take tty->buf.lock
*/
-int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
- size_t size)
+int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
+ const unsigned char *chars, char flag, size_t size)
{
int copied = 0;
do {
- int space = tty_buffer_request_room(tty, size - copied);
+		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
+ int space = tty_buffer_request_room(tty, goal);
struct tty_buffer *tb = tty->buf.tail;
/* If there is no space then tb may be NULL */
if (unlikely(space == 0))
break;
memcpy(tb->char_buf_ptr + tb->used, chars, space);
- memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
+ memset(tb->flag_buf_ptr + tb->used, flag, space);
tb->used += space;
copied += space;
chars += space;
@@ -262,7 +264,7 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
} while (unlikely(size > copied));
return copied;
}
-EXPORT_SYMBOL(tty_insert_flip_string);
+EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
/**
* tty_insert_flip_string_flags - Add characters to the tty buffer
@@ -283,7 +285,8 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
{
int copied = 0;
do {
- int space = tty_buffer_request_room(tty, size - copied);
+		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
+ int space = tty_buffer_request_room(tty, goal);
struct tty_buffer *tb = tty->buf.tail;
/* If there is no space then tb may be NULL */
if (unlikely(space == 0))
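
With the rename, the old entry point presumably survives as a thin inline over the fixed-flag variant in the header side of this change (not shown here), along the lines of:

static inline int tty_insert_flip_string(struct tty_struct *tty,
		const unsigned char *chars, size_t size)
{
	return tty_insert_flip_string_fixed_flag(tty, chars, TTY_NORMAL, size);
}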
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index 3f653f7..500e740 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -706,12 +706,13 @@ static void tty_reset_termios(struct tty_struct *tty)
/**
* tty_ldisc_reinit - reinitialise the tty ldisc
* @tty: tty to reinit
+ * @ldisc: line discipline to reinitialize
*
- * Switch the tty back to N_TTY line discipline and leave the
- * ldisc state closed
+ * Switch the tty to a line discipline and leave the ldisc
+ * state closed
*/
-static void tty_ldisc_reinit(struct tty_struct *tty)
+static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
{
struct tty_ldisc *ld;
@@ -721,10 +722,10 @@ static void tty_ldisc_reinit(struct tty_struct *tty)
/*
* Switch the line discipline back
*/
- ld = tty_ldisc_get(N_TTY);
+ ld = tty_ldisc_get(ldisc);
BUG_ON(IS_ERR(ld));
tty_ldisc_assign(tty, ld);
- tty_set_termios_ldisc(tty, N_TTY);
+ tty_set_termios_ldisc(tty, ldisc);
}
/**
@@ -745,6 +746,8 @@ static void tty_ldisc_reinit(struct tty_struct *tty)
void tty_ldisc_hangup(struct tty_struct *tty)
{
struct tty_ldisc *ld;
+ int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
+ int err = 0;
/*
* FIXME! What are the locking issues here? This may be overdoing
@@ -772,25 +775,32 @@ void tty_ldisc_hangup(struct tty_struct *tty)
wake_up_interruptible_poll(&tty->read_wait, POLLIN);
/*
* Shutdown the current line discipline, and reset it to
- * N_TTY.
+ * N_TTY if need be.
+ *
+ * Avoid racing set_ldisc or tty_ldisc_release
*/
- if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
- /* Avoid racing set_ldisc or tty_ldisc_release */
- mutex_lock(&tty->ldisc_mutex);
- tty_ldisc_halt(tty);
- if (tty->ldisc) { /* Not yet closed */
- /* Switch back to N_TTY */
- tty_ldisc_reinit(tty);
- /* At this point we have a closed ldisc and we want to
- reopen it. We could defer this to the next open but
- it means auditing a lot of other paths so this is
- a FIXME */
+ mutex_lock(&tty->ldisc_mutex);
+ tty_ldisc_halt(tty);
+ /* At this point we have a closed ldisc and we want to
+ reopen it. We could defer this to the next open but
+ it means auditing a lot of other paths so this is
+ a FIXME */
+ if (tty->ldisc) { /* Not yet closed */
+ if (reset == 0) {
+ tty_ldisc_reinit(tty, tty->termios->c_line);
+ err = tty_ldisc_open(tty, tty->ldisc);
+ }
+ /* If the re-open fails or we reset then go to N_TTY. The
+ N_TTY open cannot fail */
+ if (reset || err) {
+ tty_ldisc_reinit(tty, N_TTY);
WARN_ON(tty_ldisc_open(tty, tty->ldisc));
- tty_ldisc_enable(tty);
}
- mutex_unlock(&tty->ldisc_mutex);
- tty_reset_termios(tty);
+ tty_ldisc_enable(tty);
}
+ mutex_unlock(&tty->ldisc_mutex);
+ if (reset)
+ tty_reset_termios(tty);
}
/**
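
The practical effect of the reworked hangup path: a line discipline selected with TIOCSETD now survives a hangup (it is re-opened) unless the driver set TTY_DRIVER_RESET_TERMIOS or the re-open fails, in which case the tty falls back to N_TTY, whose open cannot fail. Selecting a discipline from userspace, for reference:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
	int ldisc = 2;			/* N_MOUSE, as an example */

	if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0) {
		perror("TIOCSETD");
		return 1;
	}
	return 0;
}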
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 6aa1028..87778dc 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -888,7 +888,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
ret = -EFAULT;
goto out;
}
- if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) {
+ if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS && tmp.mode != VT_PROCESS_AUTO) {
ret = -EINVAL;
goto out;
}
@@ -1622,7 +1622,7 @@ static void complete_change_console(struct vc_data *vc)
* telling it that it has acquired. Also check if it has died and
* clean up (similar to logic employed in change_console())
*/
- if (vc->vt_mode.mode == VT_PROCESS) {
+ if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) {
/*
* Send the signal as privileged - kill_pid() will
* tell us if the process has gone or something else
@@ -1682,7 +1682,7 @@ void change_console(struct vc_data *new_vc)
* vt to auto control.
*/
vc = vc_cons[fg_console].d;
- if (vc->vt_mode.mode == VT_PROCESS) {
+ if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) {
/*
* Send the signal as privileged - kill_pid() will
* tell us if the process has gone or something else
@@ -1693,27 +1693,28 @@ void change_console(struct vc_data *new_vc)
*/
vc->vt_newvt = new_vc->vc_num;
if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
+ if(vc->vt_mode.mode == VT_PROCESS)
+ /*
+ * It worked. Mark the vt to switch to and
+ * return. The process needs to send us a
+ * VT_RELDISP ioctl to complete the switch.
+ */
+ return;
+ } else {
/*
- * It worked. Mark the vt to switch to and
- * return. The process needs to send us a
- * VT_RELDISP ioctl to complete the switch.
+ * The controlling process has died, so we revert back to
+ * normal operation. In this case, we'll also change back
+ * to KD_TEXT mode. I'm not sure if this is strictly correct
+ * but it saves the agony when the X server dies and the screen
+ * remains blanked due to KD_GRAPHICS! It would be nice to do
+ * this outside of VT_PROCESS but there is no single process
+ * to account for and tracking tty count may be undesirable.
*/
- return;
+ reset_vc(vc);
}
/*
- * The controlling process has died, so we revert back to
- * normal operation. In this case, we'll also change back
- * to KD_TEXT mode. I'm not sure if this is strictly correct
- * but it saves the agony when the X server dies and the screen
- * remains blanked due to KD_GRAPHICS! It would be nice to do
- * this outside of VT_PROCESS but there is no single process
- * to account for and tracking tty count may be undesirable.
- */
- reset_vc(vc);
-
- /*
- * Fall through to normal (VT_AUTO) handling of the switch...
+ * Fall through to normal (VT_AUTO and VT_PROCESS_AUTO) handling of the switch...
*/
}
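
VT_PROCESS_AUTO as added here behaves like VT_PROCESS on acquire but lets the kernel complete a console release without waiting for a VT_RELDISP handshake. A userspace sketch, assuming the constant is exported by the linux/vt.h side of this change (the fallback value below is an assumption, not the authoritative definition):

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vt.h>

#ifndef VT_PROCESS_AUTO
#define VT_PROCESS_AUTO 0x02	/* assumed value; the real one comes from linux/vt.h */
#endif

int main(void)
{
	struct vt_mode mode = {
		.mode = VT_PROCESS_AUTO,	/* signalled, no VT_RELDISP needed */
		.relsig = SIGUSR1,
		.acqsig = SIGUSR2,
	};
	int fd = open("/dev/tty0", O_RDWR);

	if (fd < 0 || ioctl(fd, VT_SETMODE, &mode) < 0) {
		perror("VT_SETMODE");
		return 1;
	}
	return 0;
}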
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e02d74b..c27f80e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -13,6 +13,22 @@ menuconfig DMADEVICES
DMA Device drivers supported by the configured arch, it may
be empty in some cases.
+config DMADEVICES_DEBUG
+ bool "DMA Engine debugging"
+ depends on DMADEVICES != n
+ help
+ This is an option for use by developers; most people should
+ say N here. This enables DMA engine core and driver debugging.
+
+config DMADEVICES_VDEBUG
+ bool "DMA Engine verbose debugging"
+ depends on DMADEVICES_DEBUG != n
+ help
+ This is an option for use by developers; most people should
+ say N here. This enables deeper (more verbose) debugging of
+ the DMA engine core and drivers.
+
+
if DMADEVICES
comment "DMA Devices"
@@ -69,6 +85,13 @@ config FSL_DMA
The Elo is the DMA controller on some 82xx and 83xx parts, and the
Elo Plus is the DMA controller on 85xx and 86xx parts.
+config MPC512X_DMA
+ tristate "Freescale MPC512x built-in DMA engine support"
+ depends on PPC_MPC512x
+ select DMA_ENGINE
+ ---help---
+ Enable support for the Freescale MPC512x built-in DMA engine.
+
config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 807053d..22bba3d 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,9 +1,17 @@
+ifeq ($(CONFIG_DMADEVICES_DEBUG),y)
+ EXTRA_CFLAGS += -DDEBUG
+endif
+ifeq ($(CONFIG_DMADEVICES_VDEBUG),y)
+ EXTRA_CFLAGS += -DVERBOSE_DEBUG
+endif
+
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
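
Both Kconfig symbols act purely through the compiler flags above: with -DDEBUG, dev_dbg()/pr_debug() compile to real printk calls, and -DVERBOSE_DEBUG additionally enables dev_vdbg() (dynamic debug aside). What that buys a driver, in miniature:

#include <linux/device.h>
#include <linux/kernel.h>

static void demo(struct device *dev)
{
	/* compiled in only with -DDEBUG (CONFIG_DMADEVICES_DEBUG) */
	dev_dbg(dev, "descriptor submitted\n");

	/* compiled in only with -DVERBOSE_DEBUG (CONFIG_DMADEVICES_VDEBUG) */
	dev_vdbg(dev, "lli chain walk, ctrl 0x%08x\n", 0u);
}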
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 64a9372..1656fdc 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -39,7 +39,6 @@ struct coh901318_desc {
unsigned int sg_len;
struct coh901318_lli *data;
enum dma_data_direction dir;
- int pending_irqs;
unsigned long flags;
};
@@ -72,7 +71,6 @@ struct coh901318_chan {
unsigned long nbr_active_done;
unsigned long busy;
- int pending_irqs;
struct coh901318_base *base;
};
@@ -80,18 +78,16 @@ struct coh901318_chan {
static void coh901318_list_print(struct coh901318_chan *cohc,
struct coh901318_lli *lli)
{
- struct coh901318_lli *l;
- dma_addr_t addr = virt_to_phys(lli);
+ struct coh901318_lli *l = lli;
int i = 0;
- while (addr) {
- l = phys_to_virt(addr);
+ while (l) {
dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
- ", dst 0x%x, link 0x%x link_virt 0x%p\n",
+ ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n",
i, l, l->control, l->src_addr, l->dst_addr,
- l->link_addr, phys_to_virt(l->link_addr));
+ l->link_addr, l->virt_link_addr);
i++;
- addr = l->link_addr;
+ l = l->virt_link_addr;
}
}
@@ -125,7 +121,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
goto err_kmalloc;
tmp = dev_buf;
- tmp += sprintf(tmp, "DMA -- enable dma channels\n");
+ tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
if (started_channels & (1 << i))
@@ -337,16 +333,22 @@ coh901318_desc_get(struct coh901318_chan *cohc)
* TODO: alloc a pile of descs instead of just one,
* avoid many small allocations.
*/
- desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
+ desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
if (desc == NULL)
goto out;
INIT_LIST_HEAD(&desc->node);
+ dma_async_tx_descriptor_init(&desc->desc, &cohc->chan);
} else {
/* Reuse an old desc. */
desc = list_first_entry(&cohc->free,
struct coh901318_desc,
node);
list_del(&desc->node);
+ /* Initialize it a bit so it's not insane */
+ desc->sg = NULL;
+ desc->sg_len = 0;
+ desc->desc.callback = NULL;
+ desc->desc.callback_param = NULL;
}
out:
@@ -364,10 +366,6 @@ static void
coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
{
list_add_tail(&desc->node, &cohc->active);
-
- BUG_ON(cohc->pending_irqs != 0);
-
- cohc->pending_irqs = desc->pending_irqs;
}
static struct coh901318_desc *
@@ -592,6 +590,10 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
return cohd_que;
}
+/*
+ * This tasklet is called from the interrupt handler to
+ * handle each descriptor (DMA job) that is sent to a channel.
+ */
static void dma_tasklet(unsigned long data)
{
struct coh901318_chan *cohc = (struct coh901318_chan *) data;
@@ -600,55 +602,58 @@ static void dma_tasklet(unsigned long data)
dma_async_tx_callback callback;
void *callback_param;
+ dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
+ " nbr_active_done %ld\n", __func__,
+ cohc->id, cohc->nbr_active_done);
+
spin_lock_irqsave(&cohc->lock, flags);
- /* get first active entry from list */
+ /* get first active descriptor entry from list */
cohd_fin = coh901318_first_active_get(cohc);
- BUG_ON(cohd_fin->pending_irqs == 0);
-
if (cohd_fin == NULL)
goto err;
- cohd_fin->pending_irqs--;
- cohc->completed = cohd_fin->desc.cookie;
+ /* locate callback to client */
+ callback = cohd_fin->desc.callback;
+ callback_param = cohd_fin->desc.callback_param;
- if (cohc->nbr_active_done == 0)
- return;
+ /* sign this job as completed on the channel */
+ cohc->completed = cohd_fin->desc.cookie;
- if (!cohd_fin->pending_irqs) {
- /* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
- }
+ /* release the lli allocation and remove the descriptor */
+ coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
- dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d"
- " nbr_active_done %ld\n", __func__,
- cohc->id, cohc->pending_irqs, cohc->nbr_active_done);
+ /* return desc to free-list */
+ coh901318_desc_remove(cohd_fin);
+ coh901318_desc_free(cohc, cohd_fin);
- /* callback to client */
- callback = cohd_fin->desc.callback;
- callback_param = cohd_fin->desc.callback_param;
-
- if (!cohd_fin->pending_irqs) {
- coh901318_desc_remove(cohd_fin);
+ spin_unlock_irqrestore(&cohc->lock, flags);
- /* return desc to free-list */
- coh901318_desc_free(cohc, cohd_fin);
- }
+ /* Call the callback when we're done */
+ if (callback)
+ callback(callback_param);
- if (cohc->nbr_active_done)
- cohc->nbr_active_done--;
+ spin_lock_irqsave(&cohc->lock, flags);
+ /*
+	 * If another interrupt fires while this tasklet is pending, the
+	 * tasklet is not scheduled a second time; instead this counter
+	 * tracks the number of IRQs still expected for this channel.
+	 * If more than one IRQ remains to be acked, we simply schedule
+	 * the tasklet again.
+ */
+ cohc->nbr_active_done--;
if (cohc->nbr_active_done) {
+ dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs "
+ "came in while we were scheduling this tasklet\n");
if (cohc_chan_conf(cohc)->priority_high)
tasklet_hi_schedule(&cohc->tasklet);
else
tasklet_schedule(&cohc->tasklet);
}
- spin_unlock_irqrestore(&cohc->lock, flags);
- if (callback)
- callback(callback_param);
+ spin_unlock_irqrestore(&cohc->lock, flags);
return;
@@ -667,16 +672,17 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
if (!cohc->allocated)
return;
- BUG_ON(cohc->pending_irqs == 0);
+ spin_lock(&cohc->lock);
- cohc->pending_irqs--;
cohc->nbr_active_done++;
- if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL)
+ if (coh901318_queue_start(cohc) == NULL)
cohc->busy = 0;
BUG_ON(list_empty(&cohc->active));
+ spin_unlock(&cohc->lock);
+
if (cohc_chan_conf(cohc)->priority_high)
tasklet_hi_schedule(&cohc->tasklet);
else
@@ -870,6 +876,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
struct coh901318_chan *cohc = to_coh901318_chan(chan);
int lli_len;
u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+ int ret;
spin_lock_irqsave(&cohc->lock, flg);
@@ -890,22 +897,19 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (data == NULL)
goto err;
- cohd = coh901318_desc_get(cohc);
- cohd->sg = NULL;
- cohd->sg_len = 0;
- cohd->data = data;
-
- cohd->pending_irqs =
- coh901318_lli_fill_memcpy(
- &cohc->base->pool, data, src, size, dest,
- cohc_chan_param(cohc)->ctrl_lli_chained,
- ctrl_last);
- cohd->flags = flags;
+ ret = coh901318_lli_fill_memcpy(
+ &cohc->base->pool, data, src, size, dest,
+ cohc_chan_param(cohc)->ctrl_lli_chained,
+ ctrl_last);
+ if (ret)
+ goto err;
COH_DBG(coh901318_list_print(cohc, data));
- dma_async_tx_descriptor_init(&cohd->desc, chan);
-
+ /* Pick a descriptor to handle this transfer */
+ cohd = coh901318_desc_get(cohc);
+ cohd->data = data;
+ cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
spin_unlock_irqrestore(&cohc->lock, flg);
@@ -924,6 +928,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct coh901318_chan *cohc = to_coh901318_chan(chan);
struct coh901318_lli *data;
struct coh901318_desc *cohd;
+ const struct coh901318_params *params;
struct scatterlist *sg;
int len = 0;
int size;
@@ -931,7 +936,9 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+ u32 config;
unsigned long flg;
+ int ret;
if (!sgl)
goto out;
@@ -947,15 +954,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* Trigger interrupt after last lli */
ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
- cohd = coh901318_desc_get(cohc);
- cohd->sg = NULL;
- cohd->sg_len = 0;
- cohd->dir = direction;
+ params = cohc_chan_param(cohc);
+ config = params->config;
if (direction == DMA_TO_DEVICE) {
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
+ config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY;
ctrl_chained |= tx_flags;
ctrl_last |= tx_flags;
ctrl |= tx_flags;
@@ -963,16 +969,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
+ config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY;
ctrl_chained |= rx_flags;
ctrl_last |= rx_flags;
ctrl |= rx_flags;
} else
goto err_direction;
- dma_async_tx_descriptor_init(&cohd->desc, chan);
-
- cohd->desc.tx_submit = coh901318_tx_submit;
-
+ coh901318_set_conf(cohc, config);
/* The dma only supports transmitting packages up to
* MAX_DMA_PACKET_SIZE. Calculate to total number of
@@ -994,32 +998,37 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
len += factor;
}
+ pr_debug("Allocate %d lli:s for this transfer\n", len);
data = coh901318_lli_alloc(&cohc->base->pool, len);
if (data == NULL)
goto err_dma_alloc;
/* initiate allocated data list */
- cohd->pending_irqs =
- coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
- cohc_dev_addr(cohc),
- ctrl_chained,
- ctrl,
- ctrl_last,
- direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
- cohd->data = data;
-
- cohd->flags = flags;
+ ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
+ cohc_dev_addr(cohc),
+ ctrl_chained,
+ ctrl,
+ ctrl_last,
+ direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
+ if (ret)
+ goto err_lli_fill;
COH_DBG(coh901318_list_print(cohc, data));
+ /* Pick a descriptor to handle this transfer */
+ cohd = coh901318_desc_get(cohc);
+ cohd->dir = direction;
+ cohd->flags = flags;
+ cohd->desc.tx_submit = coh901318_tx_submit;
+ cohd->data = data;
+
spin_unlock_irqrestore(&cohc->lock, flg);
return &cohd->desc;
+ err_lli_fill:
err_dma_alloc:
err_direction:
- coh901318_desc_remove(cohd);
- coh901318_desc_free(cohc, cohd);
spin_unlock_irqrestore(&cohc->lock, flg);
out:
return NULL;
@@ -1092,9 +1101,8 @@ coh901318_terminate_all(struct dma_chan *chan)
/* release the lli allocation*/
coh901318_lli_free(&cohc->base->pool, &cohd->data);
- coh901318_desc_remove(cohd);
-
/* return desc to free-list */
+ coh901318_desc_remove(cohd);
coh901318_desc_free(cohc, cohd);
}
@@ -1102,16 +1110,14 @@ coh901318_terminate_all(struct dma_chan *chan)
/* release the lli allocation*/
coh901318_lli_free(&cohc->base->pool, &cohd->data);
- coh901318_desc_remove(cohd);
-
/* return desc to free-list */
+ coh901318_desc_remove(cohd);
coh901318_desc_free(cohc, cohd);
}
cohc->nbr_active_done = 0;
cohc->busy = 0;
- cohc->pending_irqs = 0;
spin_unlock_irqrestore(&cohc->lock, flags);
}
@@ -1138,7 +1144,6 @@ void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
spin_lock_init(&cohc->lock);
- cohc->pending_irqs = 0;
cohc->nbr_active_done = 0;
cohc->busy = 0;
INIT_LIST_HEAD(&cohc->free);
@@ -1254,12 +1259,17 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
base->dma_memcpy.dev = &pdev->dev;
+ /*
+	 * This controller can only access addresses at even 32-bit
+	 * boundaries, i.e. aligned to 2^2 bytes
+ */
+ base->dma_memcpy.copy_align = 2;
err = dma_async_device_register(&base->dma_memcpy);
if (err)
goto err_register_memcpy;
- dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
+ dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
(u32) base->virtbase);
return err;
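
The descriptor handling above settles into a small cache pattern: allocate zeroed and run dma_async_tx_descriptor_init() exactly once per descriptor, and scrub per-job state when one is reused from the free list. The pattern in isolation; the struct names are placeholders:

#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/slab.h>

struct foo_desc {
	struct dma_async_tx_descriptor txd;
	struct list_head node;
};

struct foo_chan {
	struct dma_chan chan;
	struct list_head free;
};

static struct foo_desc *foo_desc_get(struct foo_chan *c)
{
	struct foo_desc *d;

	if (list_empty(&c->free)) {
		d = kzalloc(sizeof(*d), GFP_NOWAIT);	/* zeroed from birth */
		if (!d)
			return NULL;
		INIT_LIST_HEAD(&d->node);
		dma_async_tx_descriptor_init(&d->txd, &c->chan);
	} else {
		d = list_first_entry(&c->free, struct foo_desc, node);
		list_del(&d->node);
		/* scrub whatever the previous job left behind */
		d->txd.callback = NULL;
		d->txd.callback_param = NULL;
	}
	return d;
}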
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index f5120f2..71d58c1 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -74,6 +74,8 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
lli = head;
lli->phy_this = phy;
+ lli->link_addr = 0x00000000;
+ lli->virt_link_addr = 0x00000000U;
for (i = 1; i < len; i++) {
lli_prev = lli;
@@ -85,13 +87,13 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
DEBUGFS_POOL_COUNTER_ADD(pool, 1);
lli->phy_this = phy;
+ lli->link_addr = 0x00000000;
+ lli->virt_link_addr = 0x00000000U;
lli_prev->link_addr = phy;
lli_prev->virt_link_addr = lli;
}
- lli->link_addr = 0x00000000U;
-
spin_unlock(&pool->lock);
return head;
@@ -166,8 +168,7 @@ coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
lli->src_addr = src;
lli->dst_addr = dst;
- /* One irq per single transfer */
- return 1;
+ return 0;
}
int
@@ -223,8 +224,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
lli->src_addr = src;
lli->dst_addr = dst;
- /* One irq per single transfer */
- return 1;
+ return 0;
}
int
@@ -240,7 +240,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
u32 ctrl_sg;
dma_addr_t src = 0;
dma_addr_t dst = 0;
- int nbr_of_irq = 0;
u32 bytes_to_transfer;
u32 elem_size;
@@ -269,15 +268,12 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
ctrl_sg = ctrl ? ctrl : ctrl_last;
- if ((ctrl_sg & ctrl_irq_mask))
- nbr_of_irq++;
-
if (dir == DMA_TO_DEVICE)
/* increment source address */
- src = sg_dma_address(sg);
+ src = sg_phys(sg);
else
/* increment destination address */
- dst = sg_dma_address(sg);
+ dst = sg_phys(sg);
bytes_to_transfer = sg_dma_len(sg);
@@ -310,8 +306,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
}
spin_unlock(&pool->lock);
- /* There can be many IRQs per sg transfer */
- return nbr_of_irq;
+ return 0;
err:
spin_unlock(&pool->lock);
return -EINVAL;
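
With the lli fill routines returning 0/-EINVAL instead of an IRQ count, completion tracking collapses into one per-channel counter: each terminal-count IRQ bumps it and schedules the tasklet, and the tasklet completes exactly one job per run, rescheduling itself while the counter stays nonzero. The shape of that accounting, with placeholder names:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct foo_chan {
	spinlock_t lock;
	unsigned long nbr_active_done;
	struct tasklet_struct tasklet;
};

static void foo_tc_irq(struct foo_chan *c)
{
	spin_lock(&c->lock);
	c->nbr_active_done++;		/* one finished job per TC interrupt */
	spin_unlock(&c->lock);
	tasklet_schedule(&c->tasklet);
}

static void foo_tasklet(unsigned long data)
{
	struct foo_chan *c = (struct foo_chan *)data;
	unsigned long flags;

	/* ... retire one descriptor and run its callback here ... */

	spin_lock_irqsave(&c->lock, flags);
	if (--c->nbr_active_done)	/* more IRQs arrived while we ran */
		tasklet_schedule(&c->tasklet);
	spin_unlock_irqrestore(&c->lock, flags);
}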
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index e7a3230..87399ca 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -284,7 +284,7 @@ struct dma_chan_tbl_ent {
/**
* channel_table - percpu lookup table for memory-to-memory offload providers
*/
-static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
static int __init dma_channel_table_init(void)
{
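
The __percpu annotation on channel_table is a sparse marker: such pointers are opaque cookies from alloc_percpu() and must be translated with per_cpu_ptr() before dereference, and the annotation lets sparse flag any direct mix-up. In miniature:

#include <linux/errno.h>
#include <linux/percpu.h>

struct tbl_ent {
	void *chan;
};

static struct tbl_ent __percpu *entries;

static int tbl_init(void)
{
	entries = alloc_percpu(struct tbl_ent);
	if (!entries)
		return -ENOMEM;

	/* per_cpu_ptr() turns the percpu cookie into a real pointer */
	per_cpu_ptr(entries, 0)->chan = NULL;
	return 0;
}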
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 948d563..6fa55fe 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -237,7 +237,7 @@ static int dmatest_func(void *data)
dma_cookie_t cookie;
enum dma_status status;
enum dma_ctrl_flags flags;
- u8 pq_coefs[pq_sources];
+ u8 pq_coefs[pq_sources + 1];
int ret;
int src_cnt;
int dst_cnt;
@@ -257,7 +257,7 @@ static int dmatest_func(void *data)
} else if (thread->type == DMA_PQ) {
src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
dst_cnt = 2;
- for (i = 0; i < pq_sources; i++)
+ for (i = 0; i < src_cnt; i++)
pq_coefs[i] = 1;
} else
goto err_srcs;
@@ -347,7 +347,7 @@ static int dmatest_func(void *data)
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
dma_dsts[0] + dst_off,
- dma_srcs, xor_sources,
+ dma_srcs, src_cnt,
len, flags);
else if (thread->type == DMA_PQ) {
dma_addr_t dma_pq[dst_cnt];
@@ -355,7 +355,7 @@ static int dmatest_func(void *data)
for (i = 0; i < dst_cnt; i++)
dma_pq[i] = dma_dsts[i] + dst_off;
tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
- pq_sources, pq_coefs,
+ src_cnt, pq_coefs,
len, flags);
}
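
The dmatest fix is pure arithmetic: src_cnt = pq_sources | 1 rounds any even value up by one (4 becomes 5), so a coefficient array sized pq_sources was overrun by one element, and every per-source loop must be bounded by src_cnt. The worst case, spelled out:

#include <assert.h>

int main(void)
{
	unsigned int pq_sources = 4;		/* any even value */
	unsigned int src_cnt = pq_sources | 1;	/* forced odd: 5 */

	/* hence pq_coefs[pq_sources + 1] and loops bounded by src_cnt */
	assert(src_cnt == pq_sources + 1);
	return 0;
}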
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 296f9e7..bbb4be5 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,19 +37,19 @@
#include <asm/fsldma.h>
#include "fsldma.h"
-static void dma_init(struct fsl_dma_chan *fsl_chan)
+static void dma_init(struct fsldma_chan *chan)
{
/* Reset the channel */
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);
+ DMA_OUT(chan, &chan->regs->mr, 0, 32);
- switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
+ switch (chan->feature & FSL_DMA_IP_MASK) {
case FSL_DMA_IP_85XX:
/* Set the channel to below modes:
* EIE - Error interrupt enable
* EOSIE - End of segments interrupt enable (basic mode)
* EOLNIE - End of links interrupt enable
*/
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
+ DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE
| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
break;
case FSL_DMA_IP_83XX:
@@ -57,170 +57,146 @@ static void dma_init(struct fsl_dma_chan *fsl_chan)
* EOTIE - End-of-transfer interrupt enable
* PRC_RM - PCI read multiple
*/
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
+ DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
| FSL_DMA_MR_PRC_RM, 32);
break;
}
-
}
-static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
+static void set_sr(struct fsldma_chan *chan, u32 val)
{
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
+ DMA_OUT(chan, &chan->regs->sr, val, 32);
}
-static u32 get_sr(struct fsl_dma_chan *fsl_chan)
+static u32 get_sr(struct fsldma_chan *chan)
{
- return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
+ return DMA_IN(chan, &chan->regs->sr, 32);
}
-static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
+static void set_desc_cnt(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, u32 count)
{
- hw->count = CPU_TO_DMA(fsl_chan, count, 32);
+ hw->count = CPU_TO_DMA(chan, count, 32);
}
-static void set_desc_src(struct fsl_dma_chan *fsl_chan,
+static void set_desc_src(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
u64 snoop_bits;
- snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
- hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
+ hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}
-static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
- struct fsl_dma_ld_hw *hw, dma_addr_t dest)
+static void set_desc_dst(struct fsldma_chan *chan,
+ struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
u64 snoop_bits;
- snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
- hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
+ hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}
-static void set_desc_next(struct fsl_dma_chan *fsl_chan,
+static void set_desc_next(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
u64 snoop_bits;
- snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
? FSL_DMA_SNEN : 0;
- hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
-}
-
-static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
-{
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
+ hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}
-static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
+static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
- return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
+ DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}
-static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
+static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
+ return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}
-static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
+static dma_addr_t get_ndar(struct fsldma_chan *chan)
{
- return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
+ return DMA_IN(chan, &chan->regs->ndar, 64);
}
-static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
+static u32 get_bcr(struct fsldma_chan *chan)
{
- return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
+ return DMA_IN(chan, &chan->regs->bcr, 32);
}
-static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
+static int dma_is_idle(struct fsldma_chan *chan)
{
- u32 sr = get_sr(fsl_chan);
+ u32 sr = get_sr(chan);
return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}
-static void dma_start(struct fsl_dma_chan *fsl_chan)
+static void dma_start(struct fsldma_chan *chan)
{
- u32 mr_set = 0;
-
- if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
- mr_set |= FSL_DMA_MR_EMP_EN;
- } else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
- & ~FSL_DMA_MR_EMP_EN, 32);
+ u32 mode;
+
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+
+ if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+ if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
+ DMA_OUT(chan, &chan->regs->bcr, 0, 32);
+ mode |= FSL_DMA_MR_EMP_EN;
+ } else {
+ mode &= ~FSL_DMA_MR_EMP_EN;
+ }
}
- if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
- mr_set |= FSL_DMA_MR_EMS_EN;
+ if (chan->feature & FSL_DMA_CHAN_START_EXT)
+ mode |= FSL_DMA_MR_EMS_EN;
else
- mr_set |= FSL_DMA_MR_CS;
+ mode |= FSL_DMA_MR_CS;
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
- | mr_set, 32);
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
-static void dma_halt(struct fsl_dma_chan *fsl_chan)
+static void dma_halt(struct fsldma_chan *chan)
{
+ u32 mode;
int i;
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
- 32);
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
- | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode |= FSL_DMA_MR_CA;
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
+
+ mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
for (i = 0; i < 100; i++) {
- if (dma_is_idle(fsl_chan))
- break;
+ if (dma_is_idle(chan))
+ return;
+
udelay(10);
}
- if (i >= 100 && !dma_is_idle(fsl_chan))
- dev_err(fsl_chan->dev, "DMA halt timeout!\n");
+
+ if (!dma_is_idle(chan))
+ dev_err(chan->dev, "DMA halt timeout!\n");
}
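
The rework above replaces the nested DMA_IN()/DMA_OUT() expressions with an explicit read-modify-write of the mode register: read once into a local mode variable, flip bits, write back. A minimal standalone sketch of that pattern, assuming made-up register storage and bit values (the real FSL_DMA_MR_* definitions live in fsldma.h):

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins for the mode-register bits, not the real values */
#define MR_CS      0x00000001u  /* channel start */
#define MR_EMS_EN  0x00000004u  /* external master start enable */
#define MR_CA      0x00000008u  /* channel abort */

static uint32_t fake_mr;        /* stands in for chan->regs->mr */

static uint32_t reg_read(void)    { return fake_mr; }
static void reg_write(uint32_t v) { fake_mr = v; }

/* halt: set CA to abort, then clear CS/EMS_EN/CA -- one read, two writes */
static void halt_sketch(void)
{
	uint32_t mode = reg_read();

	mode |= MR_CA;
	reg_write(mode);

	mode &= ~(MR_CS | MR_EMS_EN | MR_CA);
	reg_write(mode);
}

int main(void)
{
	fake_mr = MR_CS | MR_EMS_EN;
	halt_sketch();
	printf("mr after halt: 0x%08x\n", (unsigned)fake_mr); /* 0x00000000 */
	return 0;
}

Beyond readability, keeping one local copy makes the two-step abort sequence in dma_halt() explicit instead of hiding it inside repeated register reads.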
-static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
+static void set_ld_eol(struct fsldma_chan *chan,
struct fsl_desc_sw *desc)
{
u64 snoop_bits;
- snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
? FSL_DMA_SNEN : 0;
- desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
- DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+ desc->hw.next_ln_addr = CPU_TO_DMA(chan,
+ DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
| snoop_bits, 64);
}
-static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
- struct fsl_desc_sw *new_desc)
-{
- struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
-
- if (list_empty(&fsl_chan->ld_queue))
- return;
-
- /* Link to the new descriptor physical address and
- * Enable End-of-segment interrupt for
- * the last link descriptor.
- * (the previous node's next link descriptor)
- *
- * For FSL_DMA_IP_83xx, the snoop enable bit need be set.
- */
- queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
- new_desc->async_tx.phys | FSL_DMA_EOSIE |
- (((fsl_chan->feature & FSL_DMA_IP_MASK)
- == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
-}
-
/**
* fsl_chan_set_src_loop_size - Set source address hold transfer size
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
*
 * Set the source address hold transfer size. The source
@@ -229,29 +205,30 @@ static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
* read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
* SA + 1 ... and so on.
*/
-static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
+ u32 mode;
+
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+
switch (size) {
case 0:
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
- (~FSL_DMA_MR_SAHE), 32);
+ mode &= ~FSL_DMA_MR_SAHE;
break;
case 1:
case 2:
case 4:
case 8:
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
- FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
- 32);
+ mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
break;
}
+
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
/**
- * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
- * @fsl_chan : Freescale DMA channel
+ * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
+ * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
*
 * Set the destination address hold transfer size. The destination
@@ -260,29 +237,30 @@ static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
* write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
* TA + 1 ... and so on.
*/
-static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
+ u32 mode;
+
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+
switch (size) {
case 0:
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
- (~FSL_DMA_MR_DAHE), 32);
+ mode &= ~FSL_DMA_MR_DAHE;
break;
case 1:
case 2:
case 4:
case 8:
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
- FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
- 32);
+ mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
break;
}
+
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
/**
* fsl_chan_set_request_count - Set DMA Request Count for external control
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
* @size : Number of bytes to transfer in a single request
*
* The Freescale DMA channel can be controlled by the external signal DREQ#.
@@ -292,35 +270,38 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
*
* A size of 0 disables external pause control. The maximum size is 1024.
*/
-static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
+ u32 mode;
+
BUG_ON(size > 1024);
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
- | ((__ilog2(size) << 24) & 0x0f000000),
- 32);
+
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode |= (__ilog2(size) << 24) & 0x0f000000;
+
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
/**
* fsl_chan_toggle_ext_pause - Toggle channel external pause status
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
* @enable : 0 is disabled, 1 is enabled.
*
* The Freescale DMA channel can be controlled by the external signal DREQ#.
* The DMA Request Count feature should be used in addition to this feature
* to set the number of bytes to transfer before pausing the channel.
*/
-static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
+static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
if (enable)
- fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
+ chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
else
- fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
+ chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}
/**
* fsl_chan_toggle_ext_start - Toggle channel external start status
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
* @enable : 0 is disabled, 1 is enabled.
*
 * If the external start is enabled, the channel can be started by an
@@ -328,141 +309,196 @@ static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
* transfer immediately. The DMA channel will wait for the
 * control pin to be asserted.
*/
-static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
+static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
if (enable)
- fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
+ chan->feature |= FSL_DMA_CHAN_START_EXT;
else
- fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
+ chan->feature &= ~FSL_DMA_CHAN_START_EXT;
+}
+
+static void append_ld_queue(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
+
+ if (list_empty(&chan->ld_pending))
+ goto out_splice;
+
+ /*
+ * Add the hardware descriptor to the chain of hardware descriptors
+ * that already exists in memory.
+ *
+ * This will un-set the EOL bit of the existing transaction, and the
+ * last link in this transaction will become the EOL descriptor.
+ */
+ set_desc_next(chan, &tail->hw, desc->async_tx.phys);
+
+ /*
+ * Add the software descriptor and all children to the list
+ * of pending transactions
+ */
+out_splice:
+ list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}
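
append_ld_queue() now does two distinct jobs: patch the hardware next pointer of the current tail descriptor (which implicitly clears its end-of-list status) and splice the new transaction's software descriptors onto ld_pending. A toy, compile-and-run sketch of the hardware half, folding an EOL flag into the next-pointer word the way FSL_DMA_EOL does (the struct and field names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define EOL 0x1u  /* low bit marks end-of-list, as FSL_DMA_EOL does */

struct hw_desc {
	uint32_t next;  /* phys addr of the next descriptor, EOL in bit 0 */
};

/* link 'tail' to the descriptor at physical address 'next_phys' */
static void set_desc_next_sketch(struct hw_desc *tail, uint32_t next_phys)
{
	tail->next = next_phys;  /* overwrites the old EOL marker */
}

int main(void)
{
	struct hw_desc a = { .next = EOL };  /* current chain tail */
	struct hw_desc b = { .next = EOL };  /* new transaction keeps its own EOL */

	(void)b;
	set_desc_next_sketch(&a, 0x2000);    /* pretend b lives at 0x2000 */
	printf("a.next = 0x%x (EOL cleared, chain continues)\n", (unsigned)a.next);
	return 0;
}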
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
- struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
+ struct fsldma_chan *chan = to_fsl_chan(tx->chan);
struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_desc_sw *child;
unsigned long flags;
dma_cookie_t cookie;
- /* cookie increment and adding to ld_queue must be atomic */
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
- cookie = fsl_chan->common.cookie;
+ /*
+ * assign cookies to all of the software descriptors
+ * that make up this transaction
+ */
+ cookie = chan->common.cookie;
list_for_each_entry(child, &desc->tx_list, node) {
cookie++;
if (cookie < 0)
cookie = 1;
- desc->async_tx.cookie = cookie;
+ child->async_tx.cookie = cookie;
}
- fsl_chan->common.cookie = cookie;
- append_ld_queue(fsl_chan, desc);
- list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);
+ chan->common.cookie = cookie;
+
+ /* put this transaction onto the tail of the pending queue */
+ append_ld_queue(chan, desc);
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
return cookie;
}
/**
* fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
*
 * Return - The allocated descriptor. NULL on failure.
*/
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
- struct fsl_dma_chan *fsl_chan)
+ struct fsldma_chan *chan)
{
+ struct fsl_desc_sw *desc;
dma_addr_t pdesc;
- struct fsl_desc_sw *desc_sw;
-
- desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
- if (desc_sw) {
- memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
- INIT_LIST_HEAD(&desc_sw->tx_list);
- dma_async_tx_descriptor_init(&desc_sw->async_tx,
- &fsl_chan->common);
- desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
- desc_sw->async_tx.phys = pdesc;
+
+ desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
+ if (!desc) {
+ dev_dbg(chan->dev, "out of memory for link desc\n");
+ return NULL;
}
- return desc_sw;
+ memset(desc, 0, sizeof(*desc));
+ INIT_LIST_HEAD(&desc->tx_list);
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = fsl_dma_tx_submit;
+ desc->async_tx.phys = pdesc;
+
+ return desc;
}
/**
* fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
*
* This function will create a dma pool for descriptor allocation.
*
* Return - The number of descriptors allocated.
*/
-static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
+static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
- struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
/* Has this channel already been allocated? */
- if (fsl_chan->desc_pool)
+ if (chan->desc_pool)
return 1;
- /* We need the descriptor to be aligned to 32bytes
+ /*
+	 * We need the descriptor to be aligned to 32 bytes
 * to meet the FSL DMA specification requirement.
*/
- fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
- fsl_chan->dev, sizeof(struct fsl_desc_sw),
- 32, 0);
- if (!fsl_chan->desc_pool) {
- dev_err(fsl_chan->dev, "No memory for channel %d "
- "descriptor dma pool.\n", fsl_chan->id);
- return 0;
+ chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
+ chan->dev,
+ sizeof(struct fsl_desc_sw),
+ __alignof__(struct fsl_desc_sw), 0);
+ if (!chan->desc_pool) {
+ dev_err(chan->dev, "unable to allocate channel %d "
+ "descriptor pool\n", chan->id);
+ return -ENOMEM;
}
+ /* there is at least one descriptor free to be allocated */
return 1;
}
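
The pool alignment changes from a literal 32 to __alignof__(struct fsl_desc_sw); that is equivalent because the struct is declared __attribute__((aligned(32))) in fsldma.h, and it keeps the allocation in sync if that attribute ever changes. A quick standalone check of what __alignof__ reports for such a struct (the struct below is a stand-in, not the real fsl_desc_sw):

#include <stdio.h>

struct fsl_desc_sw_sketch {
	char payload[40];
} __attribute__((aligned(32)));

int main(void)
{
	/* __alignof__ reports the attribute-forced alignment, so the pool
	 * still hands out 32-byte-aligned descriptors after the change */
	printf("alignof = %zu, sizeof = %zu\n",
	       (size_t)__alignof__(struct fsl_desc_sw_sketch),
	       sizeof(struct fsl_desc_sw_sketch)); /* 32, 64 */
	return 0;
}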
/**
- * fsl_dma_free_chan_resources - Free all resources of the channel.
- * @fsl_chan : Freescale DMA channel
+ * fsldma_free_desc_list - Free all descriptors in a queue
+ * @chan: Freescale DMA channel
+ * @list: the list to free
+ *
+ * LOCKING: must hold chan->desc_lock
*/
-static void fsl_dma_free_chan_resources(struct dma_chan *chan)
+static void fsldma_free_desc_list(struct fsldma_chan *chan,
+ struct list_head *list)
{
- struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
struct fsl_desc_sw *desc, *_desc;
- unsigned long flags;
- dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
- list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
-#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(fsl_chan->dev,
- "LD %p will be released.\n", desc);
-#endif
+ list_for_each_entry_safe(desc, _desc, list, node) {
+ list_del(&desc->node);
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+ }
+}
+
+static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
+ struct list_head *list)
+{
+ struct fsl_desc_sw *desc, *_desc;
+
+ list_for_each_entry_safe_reverse(desc, _desc, list, node) {
list_del(&desc->node);
- /* free link descriptor */
- dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
- dma_pool_destroy(fsl_chan->desc_pool);
+}
+
+/**
+ * fsl_dma_free_chan_resources - Free all resources of the channel.
+ * @chan : Freescale DMA channel
+ */
+static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
+ unsigned long flags;
+
+ dev_dbg(chan->dev, "Free all channel resources.\n");
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ fsldma_free_desc_list(chan, &chan->ld_pending);
+ fsldma_free_desc_list(chan, &chan->ld_running);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
- fsl_chan->desc_pool = NULL;
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
}
static struct dma_async_tx_descriptor *
-fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
+fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
- struct fsl_dma_chan *fsl_chan;
+ struct fsldma_chan *chan;
struct fsl_desc_sw *new;
- if (!chan)
+ if (!dchan)
return NULL;
- fsl_chan = to_fsl_chan(chan);
+ chan = to_fsl_chan(dchan);
- new = fsl_dma_alloc_descriptor(fsl_chan);
+ new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
+ dev_err(chan->dev, "No free memory for link descriptor\n");
return NULL;
}
@@ -473,51 +509,50 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
list_add_tail(&new->node, &new->tx_list);
/* Set End-of-link to the last link descriptor of new list*/
- set_ld_eol(fsl_chan, new);
+ set_ld_eol(chan, new);
return &new->async_tx;
}
static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
- struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+ struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
size_t len, unsigned long flags)
{
- struct fsl_dma_chan *fsl_chan;
+ struct fsldma_chan *chan;
struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
- struct list_head *list;
size_t copy;
- if (!chan)
+ if (!dchan)
return NULL;
if (!len)
return NULL;
- fsl_chan = to_fsl_chan(chan);
+ chan = to_fsl_chan(dchan);
do {
/* Allocate the link descriptor from DMA pool */
- new = fsl_dma_alloc_descriptor(fsl_chan);
+ new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(fsl_chan->dev,
+ dev_err(chan->dev,
"No free memory for link descriptor\n");
goto fail;
}
#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+ dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif
copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
- set_desc_cnt(fsl_chan, &new->hw, copy);
- set_desc_src(fsl_chan, &new->hw, dma_src);
- set_desc_dest(fsl_chan, &new->hw, dma_dest);
+ set_desc_cnt(chan, &new->hw, copy);
+ set_desc_src(chan, &new->hw, dma_src);
+ set_desc_dst(chan, &new->hw, dma_dst);
if (!first)
first = new;
else
- set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);
+ set_desc_next(chan, &prev->hw, new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -525,7 +560,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
prev = new;
len -= copy;
dma_src += copy;
- dma_dest += copy;
+ dma_dst += copy;
/* Insert the link descriptor to the LD ring */
list_add_tail(&new->node, &first->tx_list);
@@ -535,7 +570,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
new->async_tx.cookie = -EBUSY;
/* Set End-of-link to the last link descriptor of new list*/
- set_ld_eol(fsl_chan, new);
+ set_ld_eol(chan, new);
return &first->async_tx;
@@ -543,12 +578,7 @@ fail:
if (!first)
return NULL;
- list = &first->tx_list;
- list_for_each_entry_safe_reverse(new, prev, list, node) {
- list_del(&new->node);
- dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
- }
-
+ fsldma_free_desc_list_reverse(chan, &first->tx_list);
return NULL;
}
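
fsl_dma_prep_memcpy() splits one copy request into a chain of hardware descriptors, each moving at most FSL_DMA_BCR_MAX_CNT bytes. The loop arithmetic on its own, as a runnable sketch (BCR_MAX below mirrors the driver's 26-bit byte-count limit, but treat it as illustrative):

#include <stdio.h>
#include <stddef.h>

#define BCR_MAX 0x03ffffffu  /* stand-in for FSL_DMA_BCR_MAX_CNT */

int main(void)
{
	size_t len = (size_t)BCR_MAX * 2 + 100;  /* needs three descriptors */
	unsigned long long src = 0x1000, dst = 0x80000;
	int descs = 0;

	do {
		size_t copy = len < BCR_MAX ? len : BCR_MAX;

		/* one hardware descriptor per chunk: count, src, dst */
		descs++;
		len -= copy;
		src += copy;
		dst += copy;
	} while (len);

	printf("%d descriptors\n", descs);  /* prints 3 */
	return 0;
}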
@@ -565,13 +595,12 @@ fail:
* chan->private variable.
*/
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_data_direction direction, unsigned long flags)
{
- struct fsl_dma_chan *fsl_chan;
+ struct fsldma_chan *chan;
struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
struct fsl_dma_slave *slave;
- struct list_head *tx_list;
size_t copy;
int i;
@@ -581,14 +610,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
struct fsl_dma_hw_addr *hw;
dma_addr_t dma_dst, dma_src;
- if (!chan)
+ if (!dchan)
return NULL;
- if (!chan->private)
+ if (!dchan->private)
return NULL;
- fsl_chan = to_fsl_chan(chan);
- slave = chan->private;
+ chan = to_fsl_chan(dchan);
+ slave = dchan->private;
if (list_empty(&slave->addresses))
return NULL;
@@ -637,14 +666,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
}
/* Allocate the link descriptor from DMA pool */
- new = fsl_dma_alloc_descriptor(fsl_chan);
+ new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(fsl_chan->dev, "No free memory for "
+ dev_err(chan->dev, "No free memory for "
"link descriptor\n");
goto fail;
}
#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+ dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif
/*
@@ -671,9 +700,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
}
/* Fill in the descriptor */
- set_desc_cnt(fsl_chan, &new->hw, copy);
- set_desc_src(fsl_chan, &new->hw, dma_src);
- set_desc_dest(fsl_chan, &new->hw, dma_dst);
+ set_desc_cnt(chan, &new->hw, copy);
+ set_desc_src(chan, &new->hw, dma_src);
+ set_desc_dst(chan, &new->hw, dma_dst);
/*
* If this is not the first descriptor, chain the
@@ -682,7 +711,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
if (!first) {
first = new;
} else {
- set_desc_next(fsl_chan, &prev->hw,
+ set_desc_next(chan, &prev->hw,
new->async_tx.phys);
}
@@ -708,23 +737,23 @@ finished:
new->async_tx.cookie = -EBUSY;
/* Set End-of-link to the last link descriptor of new list */
- set_ld_eol(fsl_chan, new);
+ set_ld_eol(chan, new);
/* Enable extra controller features */
- if (fsl_chan->set_src_loop_size)
- fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size);
+ if (chan->set_src_loop_size)
+ chan->set_src_loop_size(chan, slave->src_loop_size);
- if (fsl_chan->set_dest_loop_size)
- fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size);
+ if (chan->set_dst_loop_size)
+ chan->set_dst_loop_size(chan, slave->dst_loop_size);
- if (fsl_chan->toggle_ext_start)
- fsl_chan->toggle_ext_start(fsl_chan, slave->external_start);
+ if (chan->toggle_ext_start)
+ chan->toggle_ext_start(chan, slave->external_start);
- if (fsl_chan->toggle_ext_pause)
- fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause);
+ if (chan->toggle_ext_pause)
+ chan->toggle_ext_pause(chan, slave->external_pause);
- if (fsl_chan->set_request_count)
- fsl_chan->set_request_count(fsl_chan, slave->request_count);
+ if (chan->set_request_count)
+ chan->set_request_count(chan, slave->request_count);
return &first->async_tx;
@@ -741,215 +770,216 @@ fail:
*
* We're re-using variables for the loop, oh well
*/
- tx_list = &first->tx_list;
- list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
- list_del_init(&new->node);
- dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
- }
-
+ fsldma_free_desc_list_reverse(chan, &first->tx_list);
return NULL;
}
-static void fsl_dma_device_terminate_all(struct dma_chan *chan)
+static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
{
- struct fsl_dma_chan *fsl_chan;
- struct fsl_desc_sw *desc, *tmp;
+ struct fsldma_chan *chan;
unsigned long flags;
- if (!chan)
+ if (!dchan)
return;
- fsl_chan = to_fsl_chan(chan);
+ chan = to_fsl_chan(dchan);
/* Halt the DMA engine */
- dma_halt(fsl_chan);
+ dma_halt(chan);
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
/* Remove and free all of the descriptors in the LD queue */
- list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) {
- list_del(&desc->node);
- dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
- }
+ fsldma_free_desc_list(chan, &chan->ld_pending);
+ fsldma_free_desc_list(chan, &chan->ld_running);
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
}
/**
* fsl_dma_update_completed_cookie - Update the completed cookie.
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
+ *
+ * CONTEXT: hardirq
*/
-static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
+static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
{
- struct fsl_desc_sw *cur_desc, *desc;
- dma_addr_t ld_phy;
+ struct fsl_desc_sw *desc;
+ unsigned long flags;
+ dma_cookie_t cookie;
- ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;
+ spin_lock_irqsave(&chan->desc_lock, flags);
- if (ld_phy) {
- cur_desc = NULL;
- list_for_each_entry(desc, &fsl_chan->ld_queue, node)
- if (desc->async_tx.phys == ld_phy) {
- cur_desc = desc;
- break;
- }
+ if (list_empty(&chan->ld_running)) {
+ dev_dbg(chan->dev, "no running descriptors\n");
+ goto out_unlock;
+ }
- if (cur_desc && cur_desc->async_tx.cookie) {
- if (dma_is_idle(fsl_chan))
- fsl_chan->completed_cookie =
- cur_desc->async_tx.cookie;
- else
- fsl_chan->completed_cookie =
- cur_desc->async_tx.cookie - 1;
- }
+ /* Get the last descriptor, update the cookie to that */
+ desc = to_fsl_desc(chan->ld_running.prev);
+ if (dma_is_idle(chan))
+ cookie = desc->async_tx.cookie;
+ else {
+ cookie = desc->async_tx.cookie - 1;
+ if (unlikely(cookie < DMA_MIN_COOKIE))
+ cookie = DMA_MAX_COOKIE;
}
+
+ chan->completed_cookie = cookie;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+}
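
The updated completed-cookie logic takes the last running descriptor's cookie when the hardware is idle, and the one before it otherwise; the new guard handles the edge where that decrement would fall below the smallest valid cookie. A standalone sketch, assuming DMA_MIN_COOKIE is 1 and DMA_MAX_COOKIE is INT_MAX (which matches the usual dmaengine definitions, but treat the constants as illustrative):

#include <stdio.h>
#include <limits.h>

typedef int dma_cookie_t;

#define MIN_COOKIE 1        /* assumed, mirrors DMA_MIN_COOKIE */
#define MAX_COOKIE INT_MAX  /* assumed, mirrors DMA_MAX_COOKIE */

static dma_cookie_t completed(dma_cookie_t last, int idle)
{
	dma_cookie_t cookie;

	if (idle)
		return last;      /* last descriptor fully done */

	cookie = last - 1;        /* still running: the previous one is done */
	if (cookie < MIN_COOKIE)  /* cookie 1 still in flight: wrap around */
		cookie = MAX_COOKIE;
	return cookie;
}

int main(void)
{
	printf("%d %d\n", completed(5, 0), completed(1, 0)); /* 4 2147483647 */
	return 0;
}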
+
+/**
+ * fsldma_desc_status - Check the status of a descriptor
+ * @chan: Freescale DMA channel
+ * @desc: DMA SW descriptor
+ *
+ * This function will return the status of the given descriptor
+ */
+static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ return dma_async_is_complete(desc->async_tx.cookie,
+ chan->completed_cookie,
+ chan->common.cookie);
}
/**
* fsl_chan_ld_cleanup - Clean up link descriptors
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
*
 * This function cleans up the ld_queue of the DMA channel.
- * If 'in_intr' is set, the function will move the link descriptor to
- * the recycle list. Otherwise, free it directly.
*/
-static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
+static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
struct fsl_desc_sw *desc, *_desc;
unsigned long flags;
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
- dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
- fsl_chan->completed_cookie);
- list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
+ dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
+ list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
dma_async_tx_callback callback;
void *callback_param;
- if (dma_async_is_complete(desc->async_tx.cookie,
- fsl_chan->completed_cookie, fsl_chan->common.cookie)
- == DMA_IN_PROGRESS)
+ if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
break;
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
-
- /* Remove from ld_queue list */
+ /* Remove from the list of running transactions */
list_del(&desc->node);
- dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n",
- desc);
- dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
-
/* Run the link descriptor callback function */
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
if (callback) {
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
- dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
- desc);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ dev_dbg(chan->dev, "LD %p callback\n", desc);
callback(callback_param);
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
}
+
+ /* Run any dependencies, then free the descriptor */
+ dma_run_dependencies(&desc->async_tx);
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
}
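
Note how fsl_chan_ld_cleanup() drops desc_lock around the completion callback: the callback may re-enter the driver (for example, to submit another transaction) and would otherwise deadlock on the same lock. The shape of that pattern, sketched with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void user_callback(void *param)
{
	/* may re-enter code that takes 'lock' itself */
	printf("callback for %s\n", (const char *)param);
}

static void cleanup_sketch(void (*cb)(void *), void *param)
{
	pthread_mutex_lock(&lock);
	/* ... pop a completed descriptor off the running list ... */
	if (cb) {
		pthread_mutex_unlock(&lock);  /* never call out with a lock held */
		cb(param);
		pthread_mutex_lock(&lock);
	}
	/* ... free the descriptor ... */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	cleanup_sketch(user_callback, "desc0");
	return 0;
}

The driver re-takes the lock before touching the list again, which is part of why the loop uses list_for_each_entry_safe().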
/**
- * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
- * @fsl_chan : Freescale DMA channel
+ * fsl_chan_xfer_ld_queue - transfer any pending transactions
+ * @chan : Freescale DMA channel
+ *
+ * This will make sure that any pending transactions will be run.
+ * If the DMA controller is idle, it will be started. Otherwise,
+ * the DMA controller's interrupt handler will start any pending
+ * transactions when it becomes idle.
*/
-static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
+static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
- struct list_head *ld_node;
- dma_addr_t next_dest_addr;
+ struct fsl_desc_sw *desc;
unsigned long flags;
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
- if (!dma_is_idle(fsl_chan))
+ /*
+ * If the list of pending descriptors is empty, then we
+ * don't need to do any work at all
+ */
+ if (list_empty(&chan->ld_pending)) {
+ dev_dbg(chan->dev, "no pending LDs\n");
goto out_unlock;
+ }
- dma_halt(fsl_chan);
+ /*
+ * The DMA controller is not idle, which means the interrupt
+ * handler will start any queued transactions when it runs
+ * at the end of the current transaction
+ */
+ if (!dma_is_idle(chan)) {
+ dev_dbg(chan->dev, "DMA controller still busy\n");
+ goto out_unlock;
+ }
- /* If there are some link descriptors
- * not transfered in queue. We need to start it.
+ /*
+ * TODO:
+ * make sure the dma_halt() function really un-wedges the
+ * controller as much as possible
*/
+ dma_halt(chan);
- /* Find the first un-transfer desciptor */
- for (ld_node = fsl_chan->ld_queue.next;
- (ld_node != &fsl_chan->ld_queue)
- && (dma_async_is_complete(
- to_fsl_desc(ld_node)->async_tx.cookie,
- fsl_chan->completed_cookie,
- fsl_chan->common.cookie) == DMA_SUCCESS);
- ld_node = ld_node->next);
-
- if (ld_node != &fsl_chan->ld_queue) {
- /* Get the ld start address from ld_queue */
- next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
- dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n",
- (unsigned long long)next_dest_addr);
- set_cdar(fsl_chan, next_dest_addr);
- dma_start(fsl_chan);
- } else {
- set_cdar(fsl_chan, 0);
- set_ndar(fsl_chan, 0);
- }
+ /*
+ * If there are some link descriptors which have not been
+ * transferred, we need to start the controller
+ */
+
+ /*
+ * Move all elements from the queue of pending transactions
+ * onto the list of running transactions
+ */
+ desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
+ list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
+
+ /*
+ * Program the descriptor's address into the DMA controller,
+ * then start the DMA transaction
+ */
+ set_cdar(chan, desc->async_tx.phys);
+ dma_start(chan);
out_unlock:
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
}
/**
* fsl_dma_memcpy_issue_pending - Issue the DMA start command
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
*/
-static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
+static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
- struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
-
-#ifdef FSL_DMA_LD_DEBUG
- struct fsl_desc_sw *ld;
- unsigned long flags;
-
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
- if (list_empty(&fsl_chan->ld_queue)) {
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
- return;
- }
-
- dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
- list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
- int i;
- dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
- fsl_chan->id, ld->async_tx.phys);
- for (i = 0; i < 8; i++)
- dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
- i, *(((u32 *)&ld->hw) + i));
- }
- dev_dbg(fsl_chan->dev, "----------------\n");
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
-#endif
-
- fsl_chan_xfer_ld_queue(fsl_chan);
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
+ fsl_chan_xfer_ld_queue(chan);
}
/**
* fsl_dma_is_complete - Determine the DMA status
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
*/
-static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
+static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
dma_cookie_t cookie,
dma_cookie_t *done,
dma_cookie_t *used)
{
- struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
dma_cookie_t last_used;
dma_cookie_t last_complete;
- fsl_chan_ld_cleanup(fsl_chan);
+ fsl_chan_ld_cleanup(chan);
- last_used = chan->cookie;
- last_complete = fsl_chan->completed_cookie;
+ last_used = dchan->cookie;
+ last_complete = chan->completed_cookie;
if (done)
*done = last_complete;
@@ -960,32 +990,37 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
return dma_async_is_complete(cookie, last_complete, last_used);
}
-static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
+/*----------------------------------------------------------------------------*/
+/* Interrupt Handling */
+/*----------------------------------------------------------------------------*/
+
+static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
- struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
- u32 stat;
+ struct fsldma_chan *chan = data;
int update_cookie = 0;
int xfer_ld_q = 0;
+ u32 stat;
- stat = get_sr(fsl_chan);
- dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
- fsl_chan->id, stat);
- set_sr(fsl_chan, stat); /* Clear the event register */
+ /* save and clear the status register */
+ stat = get_sr(chan);
+ set_sr(chan, stat);
+ dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);
stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
if (!stat)
return IRQ_NONE;
if (stat & FSL_DMA_SR_TE)
- dev_err(fsl_chan->dev, "Transfer Error!\n");
+ dev_err(chan->dev, "Transfer Error!\n");
- /* Programming Error
+ /*
+ * Programming Error
* The DMA_INTERRUPT async_tx is a NULL transfer, which will
 * trigger a PE interrupt.
*/
if (stat & FSL_DMA_SR_PE) {
- dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
- if (get_bcr(fsl_chan) == 0) {
+ dev_dbg(chan->dev, "irq: Programming Error INT\n");
+ if (get_bcr(chan) == 0) {
/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
* Now, update the completed cookie, and continue the
* next uncompleted transfer.
@@ -996,208 +1031,296 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
stat &= ~FSL_DMA_SR_PE;
}
- /* If the link descriptor segment transfer finishes,
+ /*
+ * If the link descriptor segment transfer finishes,
* we will recycle the used descriptor.
*/
if (stat & FSL_DMA_SR_EOSI) {
- dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
- dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
- (unsigned long long)get_cdar(fsl_chan),
- (unsigned long long)get_ndar(fsl_chan));
+ dev_dbg(chan->dev, "irq: End-of-segments INT\n");
+ dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
+ (unsigned long long)get_cdar(chan),
+ (unsigned long long)get_ndar(chan));
stat &= ~FSL_DMA_SR_EOSI;
update_cookie = 1;
}
- /* For MPC8349, EOCDI event need to update cookie
+ /*
+	 * For MPC8349, the EOCDI event needs to update the cookie
 * and start the next transfer if one exists.
*/
if (stat & FSL_DMA_SR_EOCDI) {
- dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
+ dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
stat &= ~FSL_DMA_SR_EOCDI;
update_cookie = 1;
xfer_ld_q = 1;
}
- /* If it current transfer is the end-of-transfer,
+ /*
+	 * If the current transfer is the end-of-transfer,
 * we should clear the Channel Start bit to
 * prepare for the next transfer.
*/
if (stat & FSL_DMA_SR_EOLNI) {
- dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
+ dev_dbg(chan->dev, "irq: End-of-link INT\n");
stat &= ~FSL_DMA_SR_EOLNI;
xfer_ld_q = 1;
}
if (update_cookie)
- fsl_dma_update_completed_cookie(fsl_chan);
+ fsl_dma_update_completed_cookie(chan);
if (xfer_ld_q)
- fsl_chan_xfer_ld_queue(fsl_chan);
+ fsl_chan_xfer_ld_queue(chan);
if (stat)
- dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
- stat);
+ dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);
- dev_dbg(fsl_chan->dev, "event: Exit\n");
- tasklet_schedule(&fsl_chan->tasklet);
+ dev_dbg(chan->dev, "irq: Exit\n");
+ tasklet_schedule(&chan->tasklet);
return IRQ_HANDLED;
}
-static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
+static void dma_do_tasklet(unsigned long data)
{
- struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
- u32 gsr;
- int ch_nr;
+ struct fsldma_chan *chan = (struct fsldma_chan *)data;
+ fsl_chan_ld_cleanup(chan);
+}
+
+static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
+{
+ struct fsldma_device *fdev = data;
+ struct fsldma_chan *chan;
+ unsigned int handled = 0;
+ u32 gsr, mask;
+ int i;
- gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
- : in_le32(fdev->reg_base);
- ch_nr = (32 - ffs(gsr)) / 8;
+ gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
+ : in_le32(fdev->regs);
+ mask = 0xff000000;
+ dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ if (gsr & mask) {
+ dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
+ fsldma_chan_irq(irq, chan);
+ handled++;
+ }
- return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
- fdev->chan[ch_nr]) : IRQ_NONE;
+ gsr &= ~mask;
+ mask >>= 8;
+ }
+
+ return IRQ_RETVAL(handled);
}
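
fsldma_ctrl_irq() decodes the controller-level DGSR by walking one byte per channel, with channel 0 in the most significant byte — hence the 0xff000000 mask shifted right by 8 on each pass. A runnable sketch of that decode (the sample gsr value is made up):

#include <stdint.h>
#include <stdio.h>

#define MAX_CHANS 4  /* mirrors FSL_DMA_MAX_CHANS_PER_DEVICE */

int main(void)
{
	uint32_t gsr = 0x00120000;  /* pretend channel 1 raised an event */
	uint32_t mask = 0xff000000;
	int i;

	for (i = 0; i < MAX_CHANS; i++) {
		if (gsr & mask)
			printf("channel %d has status bits 0x%02x\n",
			       i, (unsigned)((gsr & mask) >> (8 * (3 - i))));
		gsr &= ~mask;
		mask >>= 8;
	}
	return 0;
}

In the driver, each hit additionally checks fdev->chan[i] before dispatching, since a channel slot may be unpopulated.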
-static void dma_do_tasklet(unsigned long data)
+static void fsldma_free_irqs(struct fsldma_device *fdev)
{
- struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
- fsl_chan_ld_cleanup(fsl_chan);
+ struct fsldma_chan *chan;
+ int i;
+
+ if (fdev->irq != NO_IRQ) {
+ dev_dbg(fdev->dev, "free per-controller IRQ\n");
+ free_irq(fdev->irq, fdev);
+ return;
+ }
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (chan && chan->irq != NO_IRQ) {
+ dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
+ free_irq(chan->irq, chan);
+ }
+ }
}
-static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
+static int fsldma_request_irqs(struct fsldma_device *fdev)
+{
+ struct fsldma_chan *chan;
+ int ret;
+ int i;
+
+ /* if we have a per-controller IRQ, use that */
+ if (fdev->irq != NO_IRQ) {
+ dev_dbg(fdev->dev, "request per-controller IRQ\n");
+ ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
+ "fsldma-controller", fdev);
+ return ret;
+ }
+
+ /* no per-controller IRQ, use the per-channel IRQs */
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ if (chan->irq == NO_IRQ) {
+ dev_err(fdev->dev, "no interrupts property defined for "
+ "DMA channel %d. Please fix your "
+ "device tree\n", chan->id);
+ ret = -ENODEV;
+ goto out_unwind;
+ }
+
+ dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
+ ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
+ "fsldma-chan", chan);
+ if (ret) {
+ dev_err(fdev->dev, "unable to request IRQ for DMA "
+ "channel %d\n", chan->id);
+ goto out_unwind;
+ }
+ }
+
+ return 0;
+
+out_unwind:
+ for (/* none */; i >= 0; i--) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ if (chan->irq == NO_IRQ)
+ continue;
+
+ free_irq(chan->irq, chan);
+ }
+
+ return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+/* OpenFirmware Subsystem */
+/*----------------------------------------------------------------------------*/
+
+static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
struct device_node *node, u32 feature, const char *compatible)
{
- struct fsl_dma_chan *new_fsl_chan;
+ struct fsldma_chan *chan;
+ struct resource res;
int err;
/* alloc channel */
- new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
- if (!new_fsl_chan) {
- dev_err(fdev->dev, "No free memory for allocating "
- "dma channels!\n");
- return -ENOMEM;
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan) {
+ dev_err(fdev->dev, "no free memory for DMA channels!\n");
+ err = -ENOMEM;
+ goto out_return;
}
- /* get dma channel register base */
- err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
- if (err) {
- dev_err(fdev->dev, "Can't get %s property 'reg'\n",
- node->full_name);
- goto err_no_reg;
+ /* ioremap registers for use */
+ chan->regs = of_iomap(node, 0);
+ if (!chan->regs) {
+ dev_err(fdev->dev, "unable to ioremap registers\n");
+ err = -ENOMEM;
+ goto out_free_chan;
}
- new_fsl_chan->feature = feature;
+ err = of_address_to_resource(node, 0, &res);
+ if (err) {
+ dev_err(fdev->dev, "unable to find 'reg' property\n");
+ goto out_iounmap_regs;
+ }
+ chan->feature = feature;
if (!fdev->feature)
- fdev->feature = new_fsl_chan->feature;
+ fdev->feature = chan->feature;
- /* If the DMA device's feature is different than its channels',
- * report the bug.
+ /*
+ * If the DMA device's feature is different than the feature
+ * of its channels, report the bug
*/
- WARN_ON(fdev->feature != new_fsl_chan->feature);
+ WARN_ON(fdev->feature != chan->feature);
- new_fsl_chan->dev = fdev->dev;
- new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
- new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
-
- new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
- if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
- dev_err(fdev->dev, "There is no %d channel!\n",
- new_fsl_chan->id);
+ chan->dev = fdev->dev;
+ chan->id = ((res.start - 0x100) & 0xfff) >> 7;
+ if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
+ dev_err(fdev->dev, "too many channels for device\n");
err = -EINVAL;
- goto err_no_chan;
+ goto out_iounmap_regs;
}
- fdev->chan[new_fsl_chan->id] = new_fsl_chan;
- tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
- (unsigned long)new_fsl_chan);
- /* Init the channel */
- dma_init(new_fsl_chan);
+ fdev->chan[chan->id] = chan;
+ tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+
+ /* Initialize the channel */
+ dma_init(chan);
/* Clear cdar registers */
- set_cdar(new_fsl_chan, 0);
+ set_cdar(chan, 0);
- switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
+ switch (chan->feature & FSL_DMA_IP_MASK) {
case FSL_DMA_IP_85XX:
- new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
+ chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
case FSL_DMA_IP_83XX:
- new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
- new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
- new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
- new_fsl_chan->set_request_count = fsl_chan_set_request_count;
+ chan->toggle_ext_start = fsl_chan_toggle_ext_start;
+ chan->set_src_loop_size = fsl_chan_set_src_loop_size;
+ chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
+ chan->set_request_count = fsl_chan_set_request_count;
}
- spin_lock_init(&new_fsl_chan->desc_lock);
- INIT_LIST_HEAD(&new_fsl_chan->ld_queue);
+ spin_lock_init(&chan->desc_lock);
+ INIT_LIST_HEAD(&chan->ld_pending);
+ INIT_LIST_HEAD(&chan->ld_running);
+
+ chan->common.device = &fdev->common;
- new_fsl_chan->common.device = &fdev->common;
+ /* find the IRQ line, if it exists in the device tree */
+ chan->irq = irq_of_parse_and_map(node, 0);
/* Add the channel to DMA device channel list */
- list_add_tail(&new_fsl_chan->common.device_node,
- &fdev->common.channels);
+ list_add_tail(&chan->common.device_node, &fdev->common.channels);
fdev->common.chancnt++;
- new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
- if (new_fsl_chan->irq != NO_IRQ) {
- err = request_irq(new_fsl_chan->irq,
- &fsl_dma_chan_do_interrupt, IRQF_SHARED,
- "fsldma-channel", new_fsl_chan);
- if (err) {
- dev_err(fdev->dev, "DMA channel %s request_irq error "
- "with return %d\n", node->full_name, err);
- goto err_no_irq;
- }
- }
-
- dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
- compatible,
- new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);
+ dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
+ chan->irq != NO_IRQ ? chan->irq : fdev->irq);
return 0;
-err_no_irq:
- list_del(&new_fsl_chan->common.device_node);
-err_no_chan:
- iounmap(new_fsl_chan->reg_base);
-err_no_reg:
- kfree(new_fsl_chan);
+out_iounmap_regs:
+ iounmap(chan->regs);
+out_free_chan:
+ kfree(chan);
+out_return:
return err;
}
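
The channel id in the probe comes straight from the register offset: channels sit 0x80 bytes apart starting at offset 0x100 inside the controller block, so ((res.start - 0x100) & 0xfff) >> 7 yields 0..3. A quick check with a typical (hypothetical) 85xx base address:

#include <stdio.h>

int main(void)
{
	/* illustrative layout: channels every 0x80 bytes from offset 0x100 */
	unsigned long starts[] = { 0xffe21100, 0xffe21180, 0xffe21200, 0xffe21280 };
	int i;

	for (i = 0; i < 4; i++)
		printf("start 0x%lx -> id %lu\n", starts[i],
		       ((starts[i] - 0x100) & 0xfff) >> 7);  /* 0, 1, 2, 3 */
	return 0;
}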
-static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
+static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
- if (fchan->irq != NO_IRQ)
- free_irq(fchan->irq, fchan);
- list_del(&fchan->common.device_node);
- iounmap(fchan->reg_base);
- kfree(fchan);
+ irq_dispose_mapping(chan->irq);
+ list_del(&chan->common.device_node);
+ iounmap(chan->regs);
+ kfree(chan);
}
-static int __devinit of_fsl_dma_probe(struct of_device *dev,
+static int __devinit fsldma_of_probe(struct of_device *op,
const struct of_device_id *match)
{
- int err;
- struct fsl_dma_device *fdev;
+ struct fsldma_device *fdev;
struct device_node *child;
+ int err;
- fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
+ fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
if (!fdev) {
- dev_err(&dev->dev, "No enough memory for 'priv'\n");
- return -ENOMEM;
+ dev_err(&op->dev, "No enough memory for 'priv'\n");
+ err = -ENOMEM;
+ goto out_return;
}
- fdev->dev = &dev->dev;
+
+ fdev->dev = &op->dev;
INIT_LIST_HEAD(&fdev->common.channels);
- /* get DMA controller register base */
- err = of_address_to_resource(dev->node, 0, &fdev->reg);
- if (err) {
- dev_err(&dev->dev, "Can't get %s property 'reg'\n",
- dev->node->full_name);
- goto err_no_reg;
+ /* ioremap the registers for use */
+ fdev->regs = of_iomap(op->node, 0);
+ if (!fdev->regs) {
+ dev_err(&op->dev, "unable to ioremap registers\n");
+ err = -ENOMEM;
+ goto out_free_fdev;
}
- dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
- "controller at 0x%llx...\n",
- match->compatible, (unsigned long long)fdev->reg.start);
- fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
- - fdev->reg.start + 1);
+ /* map the channel IRQ if it exists, but don't hookup the handler yet */
+ fdev->irq = irq_of_parse_and_map(op->node, 0);
dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
@@ -1210,103 +1333,111 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
- fdev->common.dev = &dev->dev;
+ fdev->common.dev = &op->dev;
- fdev->irq = irq_of_parse_and_map(dev->node, 0);
- if (fdev->irq != NO_IRQ) {
- err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
- "fsldma-device", fdev);
- if (err) {
- dev_err(&dev->dev, "DMA device request_irq error "
- "with return %d\n", err);
- goto err;
- }
- }
-
- dev_set_drvdata(&(dev->dev), fdev);
+ dev_set_drvdata(&op->dev, fdev);
- /* We cannot use of_platform_bus_probe() because there is no
- * of_platform_bus_remove. Instead, we manually instantiate every DMA
+ /*
+ * We cannot use of_platform_bus_probe() because there is no
+ * of_platform_bus_remove(). Instead, we manually instantiate every DMA
* channel object.
*/
- for_each_child_of_node(dev->node, child) {
- if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
+ for_each_child_of_node(op->node, child) {
+ if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
fsl_dma_chan_probe(fdev, child,
FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
"fsl,eloplus-dma-channel");
- if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
+ }
+
+ if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
fsl_dma_chan_probe(fdev, child,
FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
"fsl,elo-dma-channel");
+ }
+ }
+
+ /*
+ * Hookup the IRQ handler(s)
+ *
+ * If we have a per-controller interrupt, we prefer that to the
+ * per-channel interrupts to reduce the number of shared interrupt
+ * handlers on the same IRQ line
+ */
+ err = fsldma_request_irqs(fdev);
+ if (err) {
+ dev_err(fdev->dev, "unable to request IRQs\n");
+ goto out_free_fdev;
}
dma_async_device_register(&fdev->common);
return 0;
-err:
- iounmap(fdev->reg_base);
-err_no_reg:
+out_free_fdev:
+ irq_dispose_mapping(fdev->irq);
kfree(fdev);
+out_return:
return err;
}
-static int of_fsl_dma_remove(struct of_device *of_dev)
+static int fsldma_of_remove(struct of_device *op)
{
- struct fsl_dma_device *fdev;
+ struct fsldma_device *fdev;
unsigned int i;
- fdev = dev_get_drvdata(&of_dev->dev);
-
+ fdev = dev_get_drvdata(&op->dev);
dma_async_device_unregister(&fdev->common);
- for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
+ fsldma_free_irqs(fdev);
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
if (fdev->chan[i])
fsl_dma_chan_remove(fdev->chan[i]);
+ }
- if (fdev->irq != NO_IRQ)
- free_irq(fdev->irq, fdev);
-
- iounmap(fdev->reg_base);
-
+ iounmap(fdev->regs);
+ dev_set_drvdata(&op->dev, NULL);
kfree(fdev);
- dev_set_drvdata(&of_dev->dev, NULL);
return 0;
}
-static struct of_device_id of_fsl_dma_ids[] = {
+static const struct of_device_id fsldma_of_ids[] = {
{ .compatible = "fsl,eloplus-dma", },
{ .compatible = "fsl,elo-dma", },
{}
};
-static struct of_platform_driver of_fsl_dma_driver = {
- .name = "fsl-elo-dma",
- .match_table = of_fsl_dma_ids,
- .probe = of_fsl_dma_probe,
- .remove = of_fsl_dma_remove,
+static struct of_platform_driver fsldma_of_driver = {
+ .name = "fsl-elo-dma",
+ .match_table = fsldma_of_ids,
+ .probe = fsldma_of_probe,
+ .remove = fsldma_of_remove,
};
-static __init int of_fsl_dma_init(void)
+/*----------------------------------------------------------------------------*/
+/* Module Init / Exit */
+/*----------------------------------------------------------------------------*/
+
+static __init int fsldma_init(void)
{
int ret;
pr_info("Freescale Elo / Elo Plus DMA driver\n");
- ret = of_register_platform_driver(&of_fsl_dma_driver);
+ ret = of_register_platform_driver(&fsldma_of_driver);
if (ret)
pr_err("fsldma: failed to register platform driver\n");
return ret;
}
-static void __exit of_fsl_dma_exit(void)
+static void __exit fsldma_exit(void)
{
- of_unregister_platform_driver(&of_fsl_dma_driver);
+ of_unregister_platform_driver(&fsldma_of_driver);
}
-subsys_initcall(of_fsl_dma_init);
-module_exit(of_fsl_dma_exit);
+subsys_initcall(fsldma_init);
+module_exit(fsldma_exit);
MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 0df14cb..cb4d6ff 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -92,11 +92,9 @@ struct fsl_desc_sw {
struct list_head node;
struct list_head tx_list;
struct dma_async_tx_descriptor async_tx;
- struct list_head *ld;
- void *priv;
} __attribute__((aligned(32)));
-struct fsl_dma_chan_regs {
+struct fsldma_chan_regs {
u32 mr; /* 0x00 - Mode Register */
u32 sr; /* 0x04 - Status Register */
u64 cdar; /* 0x08 - Current descriptor address register */
@@ -106,20 +104,19 @@ struct fsl_dma_chan_regs {
u64 ndar; /* 0x24 - Next Descriptor Address Register */
};
-struct fsl_dma_chan;
+struct fsldma_chan;
#define FSL_DMA_MAX_CHANS_PER_DEVICE 4
-struct fsl_dma_device {
- void __iomem *reg_base; /* DGSR register base */
- struct resource reg; /* Resource for register */
+struct fsldma_device {
+ void __iomem *regs; /* DGSR register base */
struct device *dev;
struct dma_device common;
- struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
+ struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
u32 feature; /* The same as DMA channels */
 int irq; /* Controller IRQ */
};
-/* Define macros for fsl_dma_chan->feature property */
+/* Define macros for fsldma_chan->feature property */
#define FSL_DMA_LITTLE_ENDIAN 0x00000000
#define FSL_DMA_BIG_ENDIAN 0x00000001
@@ -130,28 +127,28 @@ struct fsl_dma_device {
#define FSL_DMA_CHAN_PAUSE_EXT 0x00001000
#define FSL_DMA_CHAN_START_EXT 0x00002000
-struct fsl_dma_chan {
- struct fsl_dma_chan_regs __iomem *reg_base;
+struct fsldma_chan {
+ struct fsldma_chan_regs __iomem *regs;
dma_cookie_t completed_cookie; /* The maximum cookie completed */
spinlock_t desc_lock; /* Descriptor operation lock */
- struct list_head ld_queue; /* Link descriptors queue */
+	struct list_head ld_pending;	/* pending link descriptors */
+	struct list_head ld_running;	/* running link descriptors */
struct dma_chan common; /* DMA common channel */
struct dma_pool *desc_pool; /* Descriptors pool */
struct device *dev; /* Channel device */
- struct resource reg; /* Resource for register */
int irq; /* Channel IRQ */
int id; /* Raw id of this channel */
struct tasklet_struct tasklet;
u32 feature;
- void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable);
- void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable);
- void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
- void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
- void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size);
+ void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
+ void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
+ void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
+ void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size);
+ void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
};
-#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common)
+#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common)
#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
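
The to_fsl_chan()/to_fsl_desc()/tx_to_fsl_desc() macros all rely on the container_of pattern: given a pointer to an embedded member, subtract the member's offset to recover the enclosing driver structure. A self-contained userspace demonstration (the struct names are stand-ins):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_chan_sketch { int cookie; };

struct fsldma_chan_sketch {
	int id;
	struct dma_chan_sketch common;  /* embedded, like chan->common */
};

int main(void)
{
	struct fsldma_chan_sketch chan = { .id = 2 };
	struct dma_chan_sketch *c = &chan.common;  /* what the core passes around */

	/* to_fsl_chan(): walk back from the member to the enclosing struct */
	struct fsldma_chan_sketch *back =
		container_of(c, struct fsldma_chan_sketch, common);

	printf("recovered id = %d\n", back->id);  /* prints 2 */
	return 0;
}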
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index dcc4ab7..5d0e42b 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -94,16 +94,12 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
return IRQ_HANDLED;
}
-static void ioat1_cleanup_tasklet(unsigned long data);
-
/* common channel initialization */
-void ioat_init_channel(struct ioatdma_device *device,
- struct ioat_chan_common *chan, int idx,
- void (*timer_fn)(unsigned long),
- void (*tasklet)(unsigned long),
- unsigned long ioat)
+void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
{
struct dma_device *dma = &device->common;
+ struct dma_chan *c = &chan->common;
+ unsigned long data = (unsigned long) c;
chan->device = device;
chan->reg_base = device->reg_base + (0x80 * (idx + 1));
@@ -112,14 +108,12 @@ void ioat_init_channel(struct ioatdma_device *device,
list_add_tail(&chan->common.device_node, &dma->channels);
device->idx[idx] = chan;
init_timer(&chan->timer);
- chan->timer.function = timer_fn;
- chan->timer.data = ioat;
- tasklet_init(&chan->cleanup_task, tasklet, ioat);
+ chan->timer.function = device->timer_fn;
+ chan->timer.data = data;
+ tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
tasklet_disable(&chan->cleanup_task);
}
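
After this change the timer and tasklet cookies are no longer the version-specific ioat channel pointer but the embedded struct dma_chan, cast through unsigned long; each callback then uses its own to_ioat_chan()/to_ioat2_chan() container_of helper to recover the right type. The pointer round-trip on its own, assuming (as the kernel does) that unsigned long can hold a pointer:

#include <stdio.h>

struct chan { int idx; };

static void callback(unsigned long data)
{
	/* cast back, as ioat1_cleanup_event()/ioat2_timer_event() now do */
	struct chan *c = (struct chan *)data;
	printf("callback on channel %d\n", c->idx);
}

int main(void)
{
	struct chan ch = { .idx = 3 };

	/* pointer squeezed through the kernel-style unsigned long cookie */
	unsigned long data = (unsigned long)&ch;
	callback(data);
	return 0;
}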
-static void ioat1_timer_event(unsigned long data);
-
/**
* ioat1_dma_enumerate_channels - find and initialize the device's channels
* @device: the device to be enumerated
@@ -155,10 +149,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device)
if (!ioat)
break;
- ioat_init_channel(device, &ioat->base, i,
- ioat1_timer_event,
- ioat1_cleanup_tasklet,
- (unsigned long) ioat);
+ ioat_init_channel(device, &ioat->base, i);
ioat->xfercap = xfercap;
spin_lock_init(&ioat->desc_lock);
INIT_LIST_HEAD(&ioat->free_desc);
@@ -532,12 +523,12 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
return &desc->txd;
}
-static void ioat1_cleanup_tasklet(unsigned long data)
+static void ioat1_cleanup_event(unsigned long data)
{
- struct ioat_dma_chan *chan = (void *)data;
+ struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
- ioat1_cleanup(chan);
- writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
+ ioat1_cleanup(ioat);
+ writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
@@ -687,7 +678,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
static void ioat1_timer_event(unsigned long data)
{
- struct ioat_dma_chan *ioat = (void *) data;
+ struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
@@ -734,16 +725,17 @@ static void ioat1_timer_event(unsigned long data)
spin_unlock_bh(&chan->cleanup_lock);
}
-static enum dma_status
-ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+enum dma_status
+ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
dma_cookie_t *done, dma_cookie_t *used)
{
- struct ioat_dma_chan *ioat = to_ioat_chan(c);
+ struct ioat_chan_common *chan = to_chan_common(c);
+ struct ioatdma_device *device = chan->device;
if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
return DMA_SUCCESS;
- ioat1_cleanup(ioat);
+ device->cleanup_fn((unsigned long) c);
return ioat_is_complete(c, cookie, done, used);
}
@@ -1199,12 +1191,14 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
device->intr_quirk = ioat1_intr_quirk;
device->enumerate_channels = ioat1_enumerate_channels;
device->self_test = ioat_dma_self_test;
+ device->timer_fn = ioat1_timer_event;
+ device->cleanup_fn = ioat1_cleanup_event;
dma = &device->common;
dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
- dma->device_is_tx_complete = ioat1_dma_is_complete;
+ dma->device_is_tx_complete = ioat_is_dma_complete;
err = ioat_probe(device);
if (err)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index bbc3e78..4f747a2 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -61,7 +61,7 @@
* @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
* @enumerate_channels: hw version specific channel enumeration
* @reset_hw: hw version specific channel (re)initialization
- * @cleanup_tasklet: select between the v2 and v3 cleanup routines
+ * @cleanup_fn: select between the v2 and v3 cleanup routines
* @timer_fn: select between the v2 and v3 timer watchdog routines
* @self_test: hardware version specific self test for each supported op type
*
@@ -80,7 +80,7 @@ struct ioatdma_device {
void (*intr_quirk)(struct ioatdma_device *device);
int (*enumerate_channels)(struct ioatdma_device *device);
int (*reset_hw)(struct ioat_chan_common *chan);
- void (*cleanup_tasklet)(unsigned long data);
+ void (*cleanup_fn)(unsigned long data);
void (*timer_fn)(unsigned long data);
int (*self_test)(struct ioatdma_device *device);
};
@@ -337,10 +337,9 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
void __iomem *iobase);
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
- struct ioat_chan_common *chan, int idx,
- void (*timer_fn)(unsigned long),
- void (*tasklet)(unsigned long),
- unsigned long ioat);
+ struct ioat_chan_common *chan, int idx);
+enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
+ dma_cookie_t *done, dma_cookie_t *used);
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5cc37af..1ed5d66 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -51,48 +51,40 @@ MODULE_PARM_DESC(ioat_ring_max_alloc_order,
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
- void * __iomem reg_base = ioat->base.reg_base;
+ struct ioat_chan_common *chan = &ioat->base;
- ioat->pending = 0;
ioat->dmacount += ioat2_ring_pending(ioat);
ioat->issued = ioat->head;
/* make descriptor updates globally visible before notifying channel */
wmb();
- writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
- dev_dbg(to_dev(&ioat->base),
+ writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+ dev_dbg(to_dev(chan),
"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}
-void ioat2_issue_pending(struct dma_chan *chan)
+void ioat2_issue_pending(struct dma_chan *c)
{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- spin_lock_bh(&ioat->ring_lock);
- if (ioat->pending == 1)
+ if (ioat2_ring_pending(ioat)) {
+ spin_lock_bh(&ioat->ring_lock);
__ioat2_issue_pending(ioat);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->ring_lock);
+ }
}
/**
* ioat2_update_pending - log pending descriptors
* @ioat: ioat2+ channel
*
- * set pending to '1' unless pending is already set to '2', pending == 2
- * indicates that submission is temporarily blocked due to an in-flight
- * reset. If we are already above the ioat_pending_level threshold then
- * just issue pending.
- *
- * called with ring_lock held
+ * Check if the number of unsubmitted descriptors has exceeded the
+ * watermark. Called with ring_lock held.
*/
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
- if (unlikely(ioat->pending == 2))
- return;
- else if (ioat2_ring_pending(ioat) > ioat_pending_level)
+ if (ioat2_ring_pending(ioat) > ioat_pending_level)
__ioat2_issue_pending(ioat);
- else
- ioat->pending = 1;
}
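
With the lock-free ->pending flag gone, the watermark test is evaluated under ring_lock on every submission. A minimal sketch of the submit-side usage, assuming the usual ioat2 tx_submit shape (not shown in this hunk; ioat_pending_level is the existing module parameter in dma.c):

    spin_lock_bh(&ioat->ring_lock);
    /* ... fill descriptor(s), advance ioat->head ... */
    ioat2_update_pending(ioat); /* flushes once pending > ioat_pending_level */
    spin_unlock_bh(&ioat->ring_lock);
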
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
@@ -166,7 +158,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
seen_current = true;
}
ioat->tail += i;
- BUG_ON(!seen_current); /* no active descs have written a completion? */
+ BUG_ON(active && !seen_current); /* no active descs have written a completion? */
chan->last_completion = phys_complete;
if (ioat->head == ioat->tail) {
@@ -207,9 +199,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
spin_unlock_bh(&chan->cleanup_lock);
}
-void ioat2_cleanup_tasklet(unsigned long data)
+void ioat2_cleanup_event(unsigned long data)
{
- struct ioat2_dma_chan *ioat = (void *) data;
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
ioat2_cleanup(ioat);
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
@@ -291,7 +283,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
void ioat2_timer_event(unsigned long data)
{
- struct ioat2_dma_chan *ioat = (void *) data;
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
spin_lock_bh(&chan->cleanup_lock);
@@ -397,10 +389,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
if (!ioat)
break;
- ioat_init_channel(device, &ioat->base, i,
- device->timer_fn,
- device->cleanup_tasklet,
- (unsigned long) ioat);
+ ioat_init_channel(device, &ioat->base, i);
ioat->xfercap_log = xfercap_log;
spin_lock_init(&ioat->ring_lock);
if (device->reset_hw(&ioat->base)) {
@@ -546,7 +535,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
ioat->head = 0;
ioat->issued = 0;
ioat->tail = 0;
- ioat->pending = 0;
ioat->alloc_order = order;
spin_unlock_bh(&ioat->ring_lock);
@@ -701,7 +689,7 @@ int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
spin_unlock_bh(&chan->cleanup_lock);
- device->timer_fn((unsigned long) ioat);
+ device->timer_fn((unsigned long) &chan->common);
} else
spin_unlock_bh(&chan->cleanup_lock);
return -ENOMEM;
@@ -785,7 +773,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
tasklet_disable(&chan->cleanup_task);
del_timer_sync(&chan->timer);
- device->cleanup_tasklet((unsigned long) ioat);
+ device->cleanup_fn((unsigned long) c);
device->reset_hw(chan);
spin_lock_bh(&ioat->ring_lock);
@@ -815,25 +803,9 @@ void ioat2_free_chan_resources(struct dma_chan *c)
chan->last_completion = 0;
chan->completion_dma = 0;
- ioat->pending = 0;
ioat->dmacount = 0;
}
-enum dma_status
-ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- struct ioatdma_device *device = ioat->base.device;
-
- if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
- return DMA_SUCCESS;
-
- device->cleanup_tasklet((unsigned long) ioat);
-
- return ioat_is_complete(c, cookie, done, used);
-}
-
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
@@ -874,7 +846,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
device->enumerate_channels = ioat2_enumerate_channels;
device->reset_hw = ioat2_reset_hw;
- device->cleanup_tasklet = ioat2_cleanup_tasklet;
+ device->cleanup_fn = ioat2_cleanup_event;
device->timer_fn = ioat2_timer_event;
device->self_test = ioat_dma_self_test;
dma = &device->common;
@@ -882,7 +854,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
dma->device_issue_pending = ioat2_issue_pending;
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- dma->device_is_tx_complete = ioat2_is_complete;
+ dma->device_is_tx_complete = ioat_is_dma_complete;
err = ioat_probe(device);
if (err)
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 3afad8d..ef2871f 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -47,7 +47,6 @@ extern int ioat_ring_alloc_order;
* @head: allocated index
* @issued: hardware notification point
* @tail: cleanup index
- * @pending: lock free indicator for issued != head
* @dmacount: identical to 'head' except for occasionally resetting to zero
* @alloc_order: log2 of the number of allocated descriptors
* @ring: software ring buffer implementation of hardware ring
@@ -61,7 +60,6 @@ struct ioat2_dma_chan {
u16 tail;
u16 dmacount;
u16 alloc_order;
- int pending;
struct ioat_ring_ent **ring;
spinlock_t ring_lock;
};
@@ -178,12 +176,10 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
void ioat2_issue_pending(struct dma_chan *chan);
int ioat2_alloc_chan_resources(struct dma_chan *c);
void ioat2_free_chan_resources(struct dma_chan *c);
-enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used);
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
-void ioat2_cleanup_tasklet(unsigned long data);
+void ioat2_cleanup_event(unsigned long data);
void ioat2_timer_event(unsigned long data);
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 9908c9e..26febc5 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -293,17 +293,25 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
}
}
ioat->tail += i;
- BUG_ON(!seen_current); /* no active descs have written a completion? */
+ BUG_ON(active && !seen_current); /* no active descs have written a completion? */
chan->last_completion = phys_complete;
- if (ioat->head == ioat->tail) {
+
+ active = ioat2_ring_active(ioat);
+ if (active == 0) {
dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
__func__);
clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
+ /* 5 microsecond delay per pending descriptor */
+ writew(min((5 * active), IOAT_INTRDELAY_MASK),
+ chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}
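
The writew() above adds dynamic interrupt coalescing: the more descriptors still in flight, the longer completion interrupts are held off. Assuming the INTRDELAY field counts microseconds, as the 5 us/descriptor comment implies, the 14-bit mask gives the saturation point:

    /* delay = min(5 * active, 0x3FFF) microseconds, e.g.: */
    /*   active =  100  -> 500    -> ~0.5 ms coalesce window */
    /*   active >= 3277 -> 0x3FFF -> saturates at ~16.4 ms   */
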
-static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
+/* try to clean up, but yield (via spin_trylock) to incoming submissions
+ * with the expectation that we will immediately poll again shortly
+ */
+static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
unsigned long phys_complete;
@@ -329,29 +337,41 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
spin_unlock_bh(&chan->cleanup_lock);
}
-static void ioat3_cleanup_tasklet(unsigned long data)
+/* run cleanup now because we already delayed the interrupt via INTRDELAY */
+static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ unsigned long phys_complete;
+
+ prefetch(chan->completion);
+
+ spin_lock_bh(&chan->cleanup_lock);
+ if (!ioat_cleanup_preamble(chan, &phys_complete)) {
+ spin_unlock_bh(&chan->cleanup_lock);
+ return;
+ }
+ spin_lock_bh(&ioat->ring_lock);
+
+ __cleanup(ioat, phys_complete);
+
+ spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static void ioat3_cleanup_event(unsigned long data)
{
- struct ioat2_dma_chan *ioat = (void *) data;
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
- ioat3_cleanup(ioat);
- writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN,
- ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+ ioat3_cleanup_sync(ioat);
+ writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
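
The body of ioat3_cleanup_poll() falls outside these hunks; given the comment, a plausible reconstruction is the sync variant with both lock acquisitions swapped for spin_trylock_bh() (a sketch, not the verbatim source):

    static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
    {
        struct ioat_chan_common *chan = &ioat->base;
        unsigned long phys_complete;

        prefetch(chan->completion);

        if (!spin_trylock_bh(&chan->cleanup_lock))
            return;

        if (!ioat_cleanup_preamble(chan, &phys_complete)) {
            spin_unlock_bh(&chan->cleanup_lock);
            return;
        }

        if (!spin_trylock_bh(&ioat->ring_lock)) {
            spin_unlock_bh(&chan->cleanup_lock);
            return;
        }

        __cleanup(ioat, phys_complete);

        spin_unlock_bh(&ioat->ring_lock);
        spin_unlock_bh(&chan->cleanup_lock);
    }
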
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
unsigned long phys_complete;
- u32 status;
-
- status = ioat_chansts(chan);
- if (is_ioat_active(status) || is_ioat_idle(status))
- ioat_suspend(chan);
- while (is_ioat_active(status) || is_ioat_idle(status)) {
- status = ioat_chansts(chan);
- cpu_relax();
- }
+ ioat2_quiesce(chan, 0);
if (ioat_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
@@ -360,7 +380,7 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
static void ioat3_timer_event(unsigned long data)
{
- struct ioat2_dma_chan *ioat = (void *) data;
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
spin_lock_bh(&chan->cleanup_lock);
@@ -426,7 +446,7 @@ ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
return DMA_SUCCESS;
- ioat3_cleanup(ioat);
+ ioat3_cleanup_poll(ioat);
return ioat_is_complete(c, cookie, done, used);
}
@@ -1239,11 +1259,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (is_raid_device) {
dma->device_is_tx_complete = ioat3_is_complete;
- device->cleanup_tasklet = ioat3_cleanup_tasklet;
+ device->cleanup_fn = ioat3_cleanup_event;
device->timer_fn = ioat3_timer_event;
} else {
- dma->device_is_tx_complete = ioat2_is_complete;
- device->cleanup_tasklet = ioat2_cleanup_tasklet;
+ dma->device_is_tx_complete = ioat_is_dma_complete;
+ device->cleanup_fn = ioat2_cleanup_event;
device->timer_fn = ioat2_timer_event;
}
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index e8ae63b..1391798 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -60,7 +60,7 @@
#define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */
#define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */
-#define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */
+#define IOAT_INTRDELAY_MASK 0x3FFF /* Interrupt Delay Time */
#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */
#define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index e80bae1..2a44639 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -348,6 +348,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
break;
case IPU_PIX_FMT_BGRA32:
case IPU_PIX_FMT_BGR32:
+ case IPU_PIX_FMT_ABGR32:
params->ip.bpp = 0;
params->ip.pfs = 4;
params->ip.npb = 7;
@@ -376,20 +377,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
params->ip.wid2 = 7; /* Blue bit width - 1 */
params->ip.wid3 = 7; /* Alpha bit width - 1 */
break;
- case IPU_PIX_FMT_ABGR32:
- params->ip.bpp = 0;
- params->ip.pfs = 4;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 8; /* Red bit offset */
- params->ip.ofs1 = 16; /* Green bit offset */
- params->ip.ofs2 = 24; /* Blue bit offset */
- params->ip.ofs3 = 0; /* Alpha bit offset */
- params->ip.wid0 = 7; /* Red bit width - 1 */
- params->ip.wid1 = 7; /* Green bit width - 1 */
- params->ip.wid2 = 7; /* Blue bit width - 1 */
- params->ip.wid3 = 7; /* Alpha bit width - 1 */
- break;
case IPU_PIX_FMT_UYVY:
params->ip.bpp = 2;
params->ip.pfs = 6;
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
new file mode 100644
index 0000000..3fdf1f4
--- /dev/null
+++ b/drivers/dma/mpc512x_dma.c
@@ -0,0 +1,800 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
+ * Copyright (C) Semihalf 2009
+ *
+ * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
+ * (defines, structures and comments) was taken from the MPC5121 DMA driver
+ * written by Hongjun Chen <hong-jun.chen@freescale.com>.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/*
+ * This is the initial version of the MPC5121 DMA driver. Only memory-to-memory
+ * transfers are supported (tested using the dmatest module).
+ */
+
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+#include <linux/random.h>
+
+/* Number of DMA Transfer descriptors allocated per channel */
+#define MPC_DMA_DESCRIPTORS 64
+
+/* Macro definitions */
+#define MPC_DMA_CHANNELS 64
+#define MPC_DMA_TCD_OFFSET 0x1000
+
+/* Arbitration mode of group and channel */
+#define MPC_DMA_DMACR_EDCG (1 << 31)
+#define MPC_DMA_DMACR_ERGA (1 << 3)
+#define MPC_DMA_DMACR_ERCA (1 << 2)
+
+/* Error codes */
+#define MPC_DMA_DMAES_VLD (1 << 31)
+#define MPC_DMA_DMAES_GPE (1 << 15)
+#define MPC_DMA_DMAES_CPE (1 << 14)
+#define MPC_DMA_DMAES_ERRCHN(err) \
+ (((err) >> 8) & 0x3f)
+#define MPC_DMA_DMAES_SAE (1 << 7)
+#define MPC_DMA_DMAES_SOE (1 << 6)
+#define MPC_DMA_DMAES_DAE (1 << 5)
+#define MPC_DMA_DMAES_DOE (1 << 4)
+#define MPC_DMA_DMAES_NCE (1 << 3)
+#define MPC_DMA_DMAES_SGE (1 << 2)
+#define MPC_DMA_DMAES_SBE (1 << 1)
+#define MPC_DMA_DMAES_DBE (1 << 0)
+
+#define MPC_DMA_TSIZE_1 0x00
+#define MPC_DMA_TSIZE_2 0x01
+#define MPC_DMA_TSIZE_4 0x02
+#define MPC_DMA_TSIZE_16 0x04
+#define MPC_DMA_TSIZE_32 0x05
+
+/* MPC5121 DMA engine registers */
+struct __attribute__ ((__packed__)) mpc_dma_regs {
+ /* 0x00 */
+ u32 dmacr; /* DMA control register */
+ u32 dmaes; /* DMA error status */
+ /* 0x08 */
+ u32 dmaerqh; /* DMA enable request high (channels 63~32) */
+ u32 dmaerql; /* DMA enable request low (channels 31~0) */
+ u32 dmaeeih; /* DMA enable error interrupt high (ch 63~32) */
+ u32 dmaeeil; /* DMA enable error interrupt low (ch 31~0) */
+ /* 0x18 */
+ u8 dmaserq; /* DMA set enable request */
+ u8 dmacerq; /* DMA clear enable request */
+ u8 dmaseei; /* DMA set enable error interrupt */
+ u8 dmaceei; /* DMA clear enable error interrupt */
+ /* 0x1c */
+ u8 dmacint; /* DMA clear interrupt request */
+ u8 dmacerr; /* DMA clear error */
+ u8 dmassrt; /* DMA set start bit */
+ u8 dmacdne; /* DMA clear DONE status bit */
+ /* 0x20 */
+ u32 dmainth; /* DMA interrupt request high (ch 63~32) */
+ u32 dmaintl; /* DMA interrupt request low (ch 31~0) */
+ u32 dmaerrh; /* DMA error high (ch 63~32) */
+ u32 dmaerrl; /* DMA error low (ch 31~0) */
+ /* 0x30 */
+ u32 dmahrsh; /* DMA hw request status high (ch 63~32) */
+ u32 dmahrsl; /* DMA hardware request status low (ch 31~0) */
+ u32 dmaihsa; /* DMA interrupt high select AXE (ch 63~32) */
+ u32 dmailsa; /* DMA interrupt low select AXE (ch 31~0) */
+ /* 0x40 ~ 0xff */
+ u32 reserve0[48]; /* Reserved */
+ /* 0x100 */
+ u8 dchpri[MPC_DMA_CHANNELS];
+ /* DMA channels (0~63) priority */
+};
+
+struct __attribute__ ((__packed__)) mpc_dma_tcd {
+ /* 0x00 */
+ u32 saddr; /* Source address */
+
+ u32 smod:5; /* Source address modulo */
+ u32 ssize:3; /* Source data transfer size */
+ u32 dmod:5; /* Destination address modulo */
+ u32 dsize:3; /* Destination data transfer size */
+ u32 soff:16; /* Signed source address offset */
+
+ /* 0x08 */
+ u32 nbytes; /* Inner "minor" byte count */
+ u32 slast; /* Last source address adjustment */
+ u32 daddr; /* Destination address */
+
+ /* 0x14 */
+ u32 citer_elink:1; /* Enable channel-to-channel linking on
+ * minor loop complete
+ */
+ u32 citer_linkch:6; /* Link channel for minor loop complete */
+ u32 citer:9; /* Current "major" iteration count */
+ u32 doff:16; /* Signed destination address offset */
+
+ /* 0x18 */
+ u32 dlast_sga; /* Last Destination address adjustment/scatter
+ * gather address
+ */
+
+ /* 0x1c */
+ u32 biter_elink:1; /* Enable channel-to-channel linking on major
+ * loop complete
+ */
+ u32 biter_linkch:6;
+ u32 biter:9; /* Beginning "major" iteration count */
+ u32 bwc:2; /* Bandwidth control */
+ u32 major_linkch:6; /* Link channel number */
+ u32 done:1; /* Channel done */
+ u32 active:1; /* Channel active */
+ u32 major_elink:1; /* Enable channel-to-channel linking on major
+ * loop complete
+ */
+ u32 e_sg:1; /* Enable scatter/gather processing */
+ u32 d_req:1; /* Disable request */
+ u32 int_half:1; /* Enable an interrupt when major counter is
+ * half complete
+ */
+ u32 int_maj:1; /* Enable an interrupt when major iteration
+ * count completes
+ */
+ u32 start:1; /* Channel start */
+};
+
+struct mpc_dma_desc {
+ struct dma_async_tx_descriptor desc;
+ struct mpc_dma_tcd *tcd;
+ dma_addr_t tcd_paddr;
+ int error;
+ struct list_head node;
+};
+
+struct mpc_dma_chan {
+ struct dma_chan chan;
+ struct list_head free;
+ struct list_head prepared;
+ struct list_head queued;
+ struct list_head active;
+ struct list_head completed;
+ struct mpc_dma_tcd *tcd;
+ dma_addr_t tcd_paddr;
+ dma_cookie_t completed_cookie;
+
+ /* Lock for this structure */
+ spinlock_t lock;
+};
+
+struct mpc_dma {
+ struct dma_device dma;
+ struct tasklet_struct tasklet;
+ struct mpc_dma_chan channels[MPC_DMA_CHANNELS];
+ struct mpc_dma_regs __iomem *regs;
+ struct mpc_dma_tcd __iomem *tcd;
+ int irq;
+ uint error_status;
+
+ /* Lock for error_status field in this structure */
+ spinlock_t error_status_lock;
+};
+
+#define DRV_NAME "mpc512x_dma"
+
+/* Convert struct dma_chan to struct mpc_dma_chan */
+static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct mpc_dma_chan, chan);
+}
+
+/* Convert struct dma_chan to struct mpc_dma */
+static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
+{
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
+ return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
+}
+
+/*
+ * Execute all queued DMA descriptors.
+ *
+ * The following requirements must be met when calling mpc_dma_execute():
+ * a) mchan->lock is acquired,
+ * b) mchan->active list is empty,
+ * c) mchan->queued list contains at least one entry.
+ */
+static void mpc_dma_execute(struct mpc_dma_chan *mchan)
+{
+ struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
+ struct mpc_dma_desc *first = NULL;
+ struct mpc_dma_desc *prev = NULL;
+ struct mpc_dma_desc *mdesc;
+ int cid = mchan->chan.chan_id;
+
+ /* Move all queued descriptors to active list */
+ list_splice_tail_init(&mchan->queued, &mchan->active);
+
+ /* Chain descriptors into one transaction */
+ list_for_each_entry(mdesc, &mchan->active, node) {
+ if (!first)
+ first = mdesc;
+
+ if (!prev) {
+ prev = mdesc;
+ continue;
+ }
+
+ prev->tcd->dlast_sga = mdesc->tcd_paddr;
+ prev->tcd->e_sg = 1;
+ mdesc->tcd->start = 1;
+
+ prev = mdesc;
+ }
+
+ prev->tcd->start = 0;
+ prev->tcd->int_maj = 1;
+
+ /* Send first descriptor in chain into hardware */
+ memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
+ out_8(&mdma->regs->dmassrt, cid);
+}
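
The three preconditions above are exactly what mpc_dma_tx_submit() (later in this file) establishes. For illustration, a conforming call site, with the variable names taken from that function:

    spin_lock_irqsave(&mchan->lock, flags);       /* (a) lock held          */
    list_move_tail(&mdesc->node, &mchan->queued); /* (c) queued non-empty   */
    if (list_empty(&mchan->active))               /* (b) active list empty  */
        mpc_dma_execute(mchan);
    spin_unlock_irqrestore(&mchan->lock, flags);
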
+
+/* Handle interrupt on one half of DMA controller (32 channels) */
+static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
+{
+ struct mpc_dma_chan *mchan;
+ struct mpc_dma_desc *mdesc;
+ u32 status = is | es;
+ int ch;
+
+ while ((ch = fls(status) - 1) >= 0) {
+ status &= ~(1 << ch);
+ mchan = &mdma->channels[ch + off];
+
+ spin_lock(&mchan->lock);
+
+ /* Check error status */
+ if (es & (1 << ch))
+ list_for_each_entry(mdesc, &mchan->active, node)
+ mdesc->error = -EIO;
+
+ /* Execute queued descriptors */
+ list_splice_tail_init(&mchan->active, &mchan->completed);
+ if (!list_empty(&mchan->queued))
+ mpc_dma_execute(mchan);
+
+ spin_unlock(&mchan->lock);
+ }
+}
+
+/* Interrupt handler */
+static irqreturn_t mpc_dma_irq(int irq, void *data)
+{
+ struct mpc_dma *mdma = data;
+ uint es;
+
+ /* Save error status register */
+ es = in_be32(&mdma->regs->dmaes);
+ spin_lock(&mdma->error_status_lock);
+ if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
+ mdma->error_status = es;
+ spin_unlock(&mdma->error_status_lock);
+
+ /* Handle interrupt on each channel */
+ mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
+ in_be32(&mdma->regs->dmaerrh), 32);
+ mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
+ in_be32(&mdma->regs->dmaerrl), 0);
+
+ /* Ack interrupt on all channels */
+ out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
+
+ /* Schedule tasklet */
+ tasklet_schedule(&mdma->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/* DMA Tasklet */
+static void mpc_dma_tasklet(unsigned long data)
+{
+ struct mpc_dma *mdma = (void *)data;
+ dma_cookie_t last_cookie = 0;
+ struct mpc_dma_chan *mchan;
+ struct mpc_dma_desc *mdesc;
+ struct dma_async_tx_descriptor *desc;
+ unsigned long flags;
+ LIST_HEAD(list);
+ uint es;
+ int i;
+
+ spin_lock_irqsave(&mdma->error_status_lock, flags);
+ es = mdma->error_status;
+ mdma->error_status = 0;
+ spin_unlock_irqrestore(&mdma->error_status_lock, flags);
+
+ /* Print nice error report */
+ if (es) {
+ dev_err(mdma->dma.dev,
+ "Hardware reported following error(s) on channel %u:\n",
+ MPC_DMA_DMAES_ERRCHN(es));
+
+ if (es & MPC_DMA_DMAES_GPE)
+ dev_err(mdma->dma.dev, "- Group Priority Error\n");
+ if (es & MPC_DMA_DMAES_CPE)
+ dev_err(mdma->dma.dev, "- Channel Priority Error\n");
+ if (es & MPC_DMA_DMAES_SAE)
+ dev_err(mdma->dma.dev, "- Source Address Error\n");
+ if (es & MPC_DMA_DMAES_SOE)
+ dev_err(mdma->dma.dev, "- Source Offset"
+ " Configuration Error\n");
+ if (es & MPC_DMA_DMAES_DAE)
+ dev_err(mdma->dma.dev, "- Destination Address"
+ " Error\n");
+ if (es & MPC_DMA_DMAES_DOE)
+ dev_err(mdma->dma.dev, "- Destination Offset"
+ " Configuration Error\n");
+ if (es & MPC_DMA_DMAES_NCE)
+ dev_err(mdma->dma.dev, "- NBytes/Citter"
+ " Configuration Error\n");
+ if (es & MPC_DMA_DMAES_SGE)
+ dev_err(mdma->dma.dev, "- Scatter/Gather"
+ " Configuration Error\n");
+ if (es & MPC_DMA_DMAES_SBE)
+ dev_err(mdma->dma.dev, "- Source Bus Error\n");
+ if (es & MPC_DMA_DMAES_DBE)
+ dev_err(mdma->dma.dev, "- Destination Bus Error\n");
+ }
+
+ for (i = 0; i < mdma->dma.chancnt; i++) {
+ mchan = &mdma->channels[i];
+
+ /* Get all completed descriptors */
+ spin_lock_irqsave(&mchan->lock, flags);
+ if (!list_empty(&mchan->completed))
+ list_splice_tail_init(&mchan->completed, &list);
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ if (list_empty(&list))
+ continue;
+
+ /* Execute callbacks and run dependencies */
+ list_for_each_entry(mdesc, &list, node) {
+ desc = &mdesc->desc;
+
+ if (desc->callback)
+ desc->callback(desc->callback_param);
+
+ last_cookie = desc->cookie;
+ dma_run_dependencies(desc);
+ }
+
+ /* Free descriptors */
+ spin_lock_irqsave(&mchan->lock, flags);
+ list_splice_tail_init(&list, &mchan->free);
+ mchan->completed_cookie = last_cookie;
+ spin_unlock_irqrestore(&mchan->lock, flags);
+ }
+}
+
+/* Submit descriptor to hardware */
+static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
+ struct mpc_dma_desc *mdesc;
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ mdesc = container_of(txd, struct mpc_dma_desc, desc);
+
+ spin_lock_irqsave(&mchan->lock, flags);
+
+ /* Move descriptor to queue */
+ list_move_tail(&mdesc->node, &mchan->queued);
+
+ /* If channel is idle, execute all queued descriptors */
+ if (list_empty(&mchan->active))
+ mpc_dma_execute(mchan);
+
+ /* Update cookie */
+ cookie = mchan->chan.cookie + 1;
+ if (cookie <= 0)
+ cookie = 1;
+
+ mchan->chan.cookie = cookie;
+ mdesc->desc.cookie = cookie;
+
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ return cookie;
+}
+
+/* Alloc channel resources */
+static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ struct mpc_dma_desc *mdesc;
+ struct mpc_dma_tcd *tcd;
+ dma_addr_t tcd_paddr;
+ unsigned long flags;
+ LIST_HEAD(descs);
+ int i;
+
+ /* Alloc DMA memory for Transfer Control Descriptors */
+ tcd = dma_alloc_coherent(mdma->dma.dev,
+ MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
+ &tcd_paddr, GFP_KERNEL);
+ if (!tcd)
+ return -ENOMEM;
+
+ /* Alloc descriptors for this channel */
+ for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
+ mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
+ if (!mdesc) {
+ dev_notice(mdma->dma.dev, "Memory allocation error. "
+ "Allocated only %u descriptors\n", i);
+ break;
+ }
+
+ dma_async_tx_descriptor_init(&mdesc->desc, chan);
+ mdesc->desc.flags = DMA_CTRL_ACK;
+ mdesc->desc.tx_submit = mpc_dma_tx_submit;
+
+ mdesc->tcd = &tcd[i];
+ mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));
+
+ list_add_tail(&mdesc->node, &descs);
+ }
+
+ /* Return error only if no descriptors were allocated */
+ if (i == 0) {
+ dma_free_coherent(mdma->dma.dev,
+ MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
+ tcd, tcd_paddr);
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&mchan->lock, flags);
+ mchan->tcd = tcd;
+ mchan->tcd_paddr = tcd_paddr;
+ list_splice_tail_init(&descs, &mchan->free);
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ /* Enable Error Interrupt */
+ out_8(&mdma->regs->dmaseei, chan->chan_id);
+
+ return 0;
+}
+
+/* Free channel resources */
+static void mpc_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ struct mpc_dma_desc *mdesc, *tmp;
+ struct mpc_dma_tcd *tcd;
+ dma_addr_t tcd_paddr;
+ unsigned long flags;
+ LIST_HEAD(descs);
+
+ spin_lock_irqsave(&mchan->lock, flags);
+
+ /* Channel must be idle */
+ BUG_ON(!list_empty(&mchan->prepared));
+ BUG_ON(!list_empty(&mchan->queued));
+ BUG_ON(!list_empty(&mchan->active));
+ BUG_ON(!list_empty(&mchan->completed));
+
+ /* Move data */
+ list_splice_tail_init(&mchan->free, &descs);
+ tcd = mchan->tcd;
+ tcd_paddr = mchan->tcd_paddr;
+
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ /* Free DMA memory used by descriptors */
+ dma_free_coherent(mdma->dma.dev,
+ MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
+ tcd, tcd_paddr);
+
+ /* Free descriptors */
+ list_for_each_entry_safe(mdesc, tmp, &descs, node)
+ kfree(mdesc);
+
+ /* Disable Error Interrupt */
+ out_8(&mdma->regs->dmaceei, chan->chan_id);
+}
+
+/* Send all pending descriptors to hardware */
+static void mpc_dma_issue_pending(struct dma_chan *chan)
+{
+ /*
+ * We are posting descriptors to the hardware as soon as
+ * they are ready, so this function does nothing.
+ */
+}
+
+/* Check request completion status */
+static enum dma_status
+mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
+ dma_cookie_t *done, dma_cookie_t *used)
+{
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ unsigned long flags;
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+
+ spin_lock_irqsave(&mchan->lock, flags);
+ last_used = mchan->chan.cookie;
+ last_complete = mchan->completed_cookie;
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ if (done)
+ *done = last_complete;
+
+ if (used)
+ *used = last_used;
+
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+/* Prepare descriptor for memory to memory copy */
+static struct dma_async_tx_descriptor *
+mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ struct mpc_dma_desc *mdesc = NULL;
+ struct mpc_dma_tcd *tcd;
+ unsigned long iflags;
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&mchan->lock, iflags);
+ if (!list_empty(&mchan->free)) {
+ mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
+ node);
+ list_del(&mdesc->node);
+ }
+ spin_unlock_irqrestore(&mchan->lock, iflags);
+
+ if (!mdesc)
+ return NULL;
+
+ mdesc->error = 0;
+ tcd = mdesc->tcd;
+
+ /* Prepare Transfer Control Descriptor for this transaction */
+ memset(tcd, 0, sizeof(struct mpc_dma_tcd));
+
+ if (IS_ALIGNED(src | dst | len, 32)) {
+ tcd->ssize = MPC_DMA_TSIZE_32;
+ tcd->dsize = MPC_DMA_TSIZE_32;
+ tcd->soff = 32;
+ tcd->doff = 32;
+ } else if (IS_ALIGNED(src | dst | len, 16)) {
+ tcd->ssize = MPC_DMA_TSIZE_16;
+ tcd->dsize = MPC_DMA_TSIZE_16;
+ tcd->soff = 16;
+ tcd->doff = 16;
+ } else if (IS_ALIGNED(src | dst | len, 4)) {
+ tcd->ssize = MPC_DMA_TSIZE_4;
+ tcd->dsize = MPC_DMA_TSIZE_4;
+ tcd->soff = 4;
+ tcd->doff = 4;
+ } else if (IS_ALIGNED(src | dst | len, 2)) {
+ tcd->ssize = MPC_DMA_TSIZE_2;
+ tcd->dsize = MPC_DMA_TSIZE_2;
+ tcd->soff = 2;
+ tcd->doff = 2;
+ } else {
+ tcd->ssize = MPC_DMA_TSIZE_1;
+ tcd->dsize = MPC_DMA_TSIZE_1;
+ tcd->soff = 1;
+ tcd->doff = 1;
+ }
+
+ tcd->saddr = src;
+ tcd->daddr = dst;
+ tcd->nbytes = len;
+ tcd->biter = 1;
+ tcd->citer = 1;
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&mchan->lock, iflags);
+ list_add_tail(&mdesc->node, &mchan->prepared);
+ spin_unlock_irqrestore(&mchan->lock, iflags);
+
+ return &mdesc->desc;
+}
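
Since only DMA_MEMCPY capability is advertised, any generic dmaengine client can exercise the driver without knowing its internals. A hypothetical, self-contained sketch against the dmaengine API of this kernel (the function and variable names here are illustrative, not part of the patch):

    #include <linux/dmaengine.h>

    static int mpc_dma_copy_example(dma_addr_t dst, dma_addr_t src, size_t len)
    {
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan;
        dma_cookie_t cookie;
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        chan = dma_request_channel(mask, NULL, NULL);
        if (!chan)
            return -ENODEV;

        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                  DMA_CTRL_ACK);
        if (!tx) {
            dma_release_channel(chan);
            return -ENOMEM;
        }

        cookie = tx->tx_submit(tx);
        dma_async_issue_pending(chan); /* a no-op here, kept for portability */

        while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
               DMA_IN_PROGRESS)
            cpu_relax();

        dma_release_channel(chan);
        return 0;
    }
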
+
+static int __devinit mpc_dma_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct device_node *dn = op->node;
+ struct device *dev = &op->dev;
+ struct dma_device *dma;
+ struct mpc_dma *mdma;
+ struct mpc_dma_chan *mchan;
+ struct resource res;
+ ulong regs_start, regs_size;
+ int retval, i;
+
+ mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
+ if (!mdma) {
+ dev_err(dev, "Memory exhausted!\n");
+ return -ENOMEM;
+ }
+
+ mdma->irq = irq_of_parse_and_map(dn, 0);
+ if (mdma->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ return -EINVAL;
+ }
+
+ retval = of_address_to_resource(dn, 0, &res);
+ if (retval) {
+ dev_err(dev, "Error parsing memory region!\n");
+ return retval;
+ }
+
+ regs_start = res.start;
+ regs_size = res.end - res.start + 1;
+
+ if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
+ dev_err(dev, "Error requesting memory region!\n");
+ return -EBUSY;
+ }
+
+ mdma->regs = devm_ioremap(dev, regs_start, regs_size);
+ if (!mdma->regs) {
+ dev_err(dev, "Error mapping memory region!\n");
+ return -ENOMEM;
+ }
+
+ mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
+ + MPC_DMA_TCD_OFFSET);
+
+ retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
+ mdma);
+ if (retval) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ return -EINVAL;
+ }
+
+ spin_lock_init(&mdma->error_status_lock);
+
+ dma = &mdma->dma;
+ dma->dev = dev;
+ dma->chancnt = MPC_DMA_CHANNELS;
+ dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = mpc_dma_free_chan_resources;
+ dma->device_issue_pending = mpc_dma_issue_pending;
+ dma->device_is_tx_complete = mpc_dma_is_tx_complete;
+ dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
+
+ INIT_LIST_HEAD(&dma->channels);
+ dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+
+ for (i = 0; i < dma->chancnt; i++) {
+ mchan = &mdma->channels[i];
+
+ mchan->chan.device = dma;
+ mchan->chan.chan_id = i;
+ mchan->chan.cookie = 1;
+ mchan->completed_cookie = mchan->chan.cookie;
+
+ INIT_LIST_HEAD(&mchan->free);
+ INIT_LIST_HEAD(&mchan->prepared);
+ INIT_LIST_HEAD(&mchan->queued);
+ INIT_LIST_HEAD(&mchan->active);
+ INIT_LIST_HEAD(&mchan->completed);
+
+ spin_lock_init(&mchan->lock);
+ list_add_tail(&mchan->chan.device_node, &dma->channels);
+ }
+
+ tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);
+
+ /*
+ * Configure DMA Engine:
+ * - Dynamic clock,
+ * - Round-robin group arbitration,
+ * - Round-robin channel arbitration.
+ */
+ out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
+ MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
+
+ /* Disable hardware DMA requests */
+ out_be32(&mdma->regs->dmaerqh, 0);
+ out_be32(&mdma->regs->dmaerql, 0);
+
+ /* Disable error interrupts */
+ out_be32(&mdma->regs->dmaeeih, 0);
+ out_be32(&mdma->regs->dmaeeil, 0);
+
+ /* Clear interrupts status */
+ out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
+
+ /* Route interrupts to IPIC */
+ out_be32(&mdma->regs->dmaihsa, 0);
+ out_be32(&mdma->regs->dmailsa, 0);
+
+ /* Register DMA engine */
+ dev_set_drvdata(dev, mdma);
+ retval = dma_async_device_register(dma);
+ if (retval) {
+ devm_free_irq(dev, mdma->irq, mdma);
+ irq_dispose_mapping(mdma->irq);
+ }
+
+ return retval;
+}
+
+static int __devexit mpc_dma_remove(struct of_device *op)
+{
+ struct device *dev = &op->dev;
+ struct mpc_dma *mdma = dev_get_drvdata(dev);
+
+ dma_async_device_unregister(&mdma->dma);
+ devm_free_irq(dev, mdma->irq, mdma);
+ irq_dispose_mapping(mdma->irq);
+
+ return 0;
+}
+
+static struct of_device_id mpc_dma_match[] = {
+ { .compatible = "fsl,mpc5121-dma", },
+ {},
+};
+
+static struct of_platform_driver mpc_dma_driver = {
+ .match_table = mpc_dma_match,
+ .probe = mpc_dma_probe,
+ .remove = __devexit_p(mpc_dma_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mpc_dma_init(void)
+{
+ return of_register_platform_driver(&mpc_dma_driver);
+}
+module_init(mpc_dma_init);
+
+static void __exit mpc_dma_exit(void)
+{
+ of_unregister_platform_driver(&mpc_dma_driver);
+}
+module_exit(mpc_dma_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 0a3478e..e69d87f 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4940,7 +4940,7 @@ out_free:
return ret;
}
-static struct of_device_id __devinitdata ppc440spe_adma_of_match[] = {
+static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
{ .compatible = "ibm,dma-440spe", },
{ .compatible = "amcc,xor-accelerator", },
{},
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 3391e67..cf17dbb 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -13,7 +13,7 @@ module_param(report_gart_errors, int, 0644);
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);
-static struct msr *msrs;
+static struct msr __percpu *msrs;
/* Lookup table for all possible MC control instances */
struct amd64_pvt;
@@ -2553,14 +2553,14 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
if (on) {
if (reg->l & K8_MSR_MCGCTL_NBE)
- pvt->flags.ecc_report = 1;
+ pvt->flags.nb_mce_enable = 1;
reg->l |= K8_MSR_MCGCTL_NBE;
} else {
/*
- * Turn off ECC reporting only when it was off before
+ * Turn off NB MCE reporting only when it was off before
*/
- if (!pvt->flags.ecc_report)
+ if (!pvt->flags.nb_mce_enable)
reg->l &= ~K8_MSR_MCGCTL_NBE;
}
}
@@ -2571,22 +2571,11 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
return 0;
}
-/*
- * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
- * enable it.
- */
static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
- if (!ecc_enable_override)
- return;
-
- amd64_printk(KERN_WARNING,
- "'ecc_enable_override' parameter is active, "
- "Enabling AMD ECC hardware now: CAUTION\n");
-
amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
/* turn on UECCn and CECCEn bits */
@@ -2611,6 +2600,8 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
"This node reports that DRAM ECC is "
"currently Disabled; ENABLING now\n");
+ pvt->flags.nb_ecc_prev = 0;
+
/* Attempt to turn on DRAM ECC Enable */
value |= K8_NBCFG_ECC_ENABLE;
pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
@@ -2625,7 +2616,10 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
amd64_printk(KERN_DEBUG,
"Hardware accepted DRAM ECC Enable\n");
}
+ } else {
+ pvt->flags.nb_ecc_prev = 1;
}
+
debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
@@ -2644,12 +2638,18 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
value &= ~mask;
value |= pvt->old_nbctl;
- /* restore the NB Enable MCGCTL bit */
pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+ /* restore the BIOS' DRAM ECC "off" setting if we had force-enabled it */
+ if (!pvt->flags.nb_ecc_prev) {
+ amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
+ value &= ~K8_NBCFG_ECC_ENABLE;
+ pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
+ }
+
+ /* restore the NB Enable MCGCTL bit */
if (amd64_toggle_ecc_err_reporting(pvt, OFF))
- amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
- "MCGCTL!\n");
+ amd64_printk(KERN_WARNING, "Error restoring NB MCGCTL settings!\n");
}
/*
@@ -2690,8 +2690,9 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
if (!ecc_enable_override) {
amd64_printk(KERN_NOTICE, "%s", ecc_msg);
return -ENODEV;
+ } else {
+ amd64_printk(KERN_WARNING, "Forcing ECC checking on!\n");
}
- ecc_enable_override = 0;
}
return 0;
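
Net effect of the flag rename and the new nb_ecc_prev bit: the driver now remembers whether the BIOS had DRAM ECC off, so teardown can undo a forced enable. In outline, a sketch of the pairing implied by these hunks, not verbatim code:

    /* probe path: */
    amd64_enable_ecc_error_reporting(mci);  /* records the BIOS state in
                                             * flags.nb_ecc_prev, then forces
                                             * K8_NBCFG_ECC_ENABLE on */
    /* teardown path: */
    amd64_restore_ecc_error_reporting(pvt); /* clears K8_NBCFG_ECC_ENABLE
                                             * again only if nb_ecc_prev == 0 */
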
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 41bc561..0d4bf56 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -487,7 +487,8 @@ struct amd64_pvt {
/* misc settings */
struct flags {
unsigned long cf8_extcfg:1;
- unsigned long ecc_report:1;
+ unsigned long nb_mce_enable:1;
+ unsigned long nb_ecc_prev:1;
} flags;
};
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 4eeaed5..8be720b 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -25,6 +25,7 @@
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
+#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
@@ -32,7 +33,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
-#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -368,39 +368,56 @@ void fw_device_cdev_remove(struct fw_device *device)
for_each_client(device, wake_up_client);
}
-static int ioctl_get_info(struct client *client, void *buffer)
+union ioctl_arg {
+ struct fw_cdev_get_info get_info;
+ struct fw_cdev_send_request send_request;
+ struct fw_cdev_allocate allocate;
+ struct fw_cdev_deallocate deallocate;
+ struct fw_cdev_send_response send_response;
+ struct fw_cdev_initiate_bus_reset initiate_bus_reset;
+ struct fw_cdev_add_descriptor add_descriptor;
+ struct fw_cdev_remove_descriptor remove_descriptor;
+ struct fw_cdev_create_iso_context create_iso_context;
+ struct fw_cdev_queue_iso queue_iso;
+ struct fw_cdev_start_iso start_iso;
+ struct fw_cdev_stop_iso stop_iso;
+ struct fw_cdev_get_cycle_timer get_cycle_timer;
+ struct fw_cdev_allocate_iso_resource allocate_iso_resource;
+ struct fw_cdev_send_stream_packet send_stream_packet;
+ struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
+};
+
+static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_get_info *get_info = buffer;
+ struct fw_cdev_get_info *a = &arg->get_info;
struct fw_cdev_event_bus_reset bus_reset;
unsigned long ret = 0;
- client->version = get_info->version;
- get_info->version = FW_CDEV_VERSION;
- get_info->card = client->device->card->index;
+ client->version = a->version;
+ a->version = FW_CDEV_VERSION;
+ a->card = client->device->card->index;
down_read(&fw_device_rwsem);
- if (get_info->rom != 0) {
- void __user *uptr = u64_to_uptr(get_info->rom);
- size_t want = get_info->rom_length;
+ if (a->rom != 0) {
+ size_t want = a->rom_length;
size_t have = client->device->config_rom_length * 4;
- ret = copy_to_user(uptr, client->device->config_rom,
- min(want, have));
+ ret = copy_to_user(u64_to_uptr(a->rom),
+ client->device->config_rom, min(want, have));
}
- get_info->rom_length = client->device->config_rom_length * 4;
+ a->rom_length = client->device->config_rom_length * 4;
up_read(&fw_device_rwsem);
if (ret != 0)
return -EFAULT;
- client->bus_reset_closure = get_info->bus_reset_closure;
- if (get_info->bus_reset != 0) {
- void __user *uptr = u64_to_uptr(get_info->bus_reset);
-
+ client->bus_reset_closure = a->bus_reset_closure;
+ if (a->bus_reset != 0) {
fill_bus_reset_event(&bus_reset, client);
- if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
+ if (copy_to_user(u64_to_uptr(a->bus_reset),
+ &bus_reset, sizeof(bus_reset)))
return -EFAULT;
}
@@ -571,11 +588,9 @@ static int init_request(struct client *client,
return ret;
}
-static int ioctl_send_request(struct client *client, void *buffer)
+static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_send_request *request = buffer;
-
- switch (request->tcode) {
+ switch (arg->send_request.tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
case TCODE_READ_QUADLET_REQUEST:
@@ -592,7 +607,7 @@ static int ioctl_send_request(struct client *client, void *buffer)
return -EINVAL;
}
- return init_request(client, request, client->device->node_id,
+ return init_request(client, &arg->send_request, client->device->node_id,
client->device->max_speed);
}
@@ -683,9 +698,9 @@ static void release_address_handler(struct client *client,
kfree(r);
}
-static int ioctl_allocate(struct client *client, void *buffer)
+static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_allocate *request = buffer;
+ struct fw_cdev_allocate *a = &arg->allocate;
struct address_handler_resource *r;
struct fw_address_region region;
int ret;
@@ -694,13 +709,13 @@ static int ioctl_allocate(struct client *client, void *buffer)
if (r == NULL)
return -ENOMEM;
- region.start = request->offset;
- region.end = request->offset + request->length;
- r->handler.length = request->length;
+ region.start = a->offset;
+ region.end = a->offset + a->length;
+ r->handler.length = a->length;
r->handler.address_callback = handle_request;
- r->handler.callback_data = r;
- r->closure = request->closure;
- r->client = client;
+ r->handler.callback_data = r;
+ r->closure = a->closure;
+ r->client = client;
ret = fw_core_add_address_handler(&r->handler, &region);
if (ret < 0) {
@@ -714,27 +729,25 @@ static int ioctl_allocate(struct client *client, void *buffer)
release_address_handler(client, &r->resource);
return ret;
}
- request->handle = r->resource.handle;
+ a->handle = r->resource.handle;
return 0;
}
-static int ioctl_deallocate(struct client *client, void *buffer)
+static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_deallocate *request = buffer;
-
- return release_client_resource(client, request->handle,
+ return release_client_resource(client, arg->deallocate.handle,
release_address_handler, NULL);
}
-static int ioctl_send_response(struct client *client, void *buffer)
+static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_send_response *request = buffer;
+ struct fw_cdev_send_response *a = &arg->send_response;
struct client_resource *resource;
struct inbound_transaction_resource *r;
int ret = 0;
- if (release_client_resource(client, request->handle,
+ if (release_client_resource(client, a->handle,
release_request, &resource) < 0)
return -EINVAL;
@@ -743,28 +756,24 @@ static int ioctl_send_response(struct client *client, void *buffer)
if (is_fcp_request(r->request))
goto out;
- if (request->length < r->length)
- r->length = request->length;
- if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) {
+ if (a->length < r->length)
+ r->length = a->length;
+ if (copy_from_user(r->data, u64_to_uptr(a->data), r->length)) {
ret = -EFAULT;
kfree(r->request);
goto out;
}
- fw_send_response(client->device->card, r->request, request->rcode);
+ fw_send_response(client->device->card, r->request, a->rcode);
out:
kfree(r);
return ret;
}
-static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
+static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_initiate_bus_reset *request = buffer;
- int short_reset;
-
- short_reset = (request->type == FW_CDEV_SHORT_RESET);
-
- return fw_core_initiate_bus_reset(client->device->card, short_reset);
+ return fw_core_initiate_bus_reset(client->device->card,
+ arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
}
static void release_descriptor(struct client *client,
@@ -777,9 +786,9 @@ static void release_descriptor(struct client *client,
kfree(r);
}
-static int ioctl_add_descriptor(struct client *client, void *buffer)
+static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_add_descriptor *request = buffer;
+ struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
struct descriptor_resource *r;
int ret;
@@ -787,22 +796,21 @@ static int ioctl_add_descriptor(struct client *client, void *buffer)
if (!client->device->is_local)
return -ENOSYS;
- if (request->length > 256)
+ if (a->length > 256)
return -EINVAL;
- r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
+ r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
if (r == NULL)
return -ENOMEM;
- if (copy_from_user(r->data,
- u64_to_uptr(request->data), request->length * 4)) {
+ if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
ret = -EFAULT;
goto failed;
}
- r->descriptor.length = request->length;
- r->descriptor.immediate = request->immediate;
- r->descriptor.key = request->key;
+ r->descriptor.length = a->length;
+ r->descriptor.immediate = a->immediate;
+ r->descriptor.key = a->key;
r->descriptor.data = r->data;
ret = fw_core_add_descriptor(&r->descriptor);
@@ -815,7 +823,7 @@ static int ioctl_add_descriptor(struct client *client, void *buffer)
fw_core_remove_descriptor(&r->descriptor);
goto failed;
}
- request->handle = r->resource.handle;
+ a->handle = r->resource.handle;
return 0;
failed:
@@ -824,11 +832,9 @@ static int ioctl_add_descriptor(struct client *client, void *buffer)
return ret;
}
-static int ioctl_remove_descriptor(struct client *client, void *buffer)
+static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_remove_descriptor *request = buffer;
-
- return release_client_resource(client, request->handle,
+ return release_client_resource(client, arg->remove_descriptor.handle,
release_descriptor, NULL);
}
@@ -851,49 +857,44 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
sizeof(e->interrupt) + header_length, NULL, 0);
}
-static int ioctl_create_iso_context(struct client *client, void *buffer)
+static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_create_iso_context *request = buffer;
+ struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
struct fw_iso_context *context;
/* We only support one context at this time. */
if (client->iso_context != NULL)
return -EBUSY;
- if (request->channel > 63)
+ if (a->channel > 63)
return -EINVAL;
- switch (request->type) {
+ switch (a->type) {
case FW_ISO_CONTEXT_RECEIVE:
- if (request->header_size < 4 || (request->header_size & 3))
+ if (a->header_size < 4 || (a->header_size & 3))
return -EINVAL;
-
break;
case FW_ISO_CONTEXT_TRANSMIT:
- if (request->speed > SCODE_3200)
+ if (a->speed > SCODE_3200)
return -EINVAL;
-
break;
default:
return -EINVAL;
}
- context = fw_iso_context_create(client->device->card,
- request->type,
- request->channel,
- request->speed,
- request->header_size,
- iso_callback, client);
+ context = fw_iso_context_create(client->device->card, a->type,
+ a->channel, a->speed, a->header_size,
+ iso_callback, client);
if (IS_ERR(context))
return PTR_ERR(context);
- client->iso_closure = request->closure;
+ client->iso_closure = a->closure;
client->iso_context = context;
/* We only support one context at this time. */
- request->handle = 0;
+ a->handle = 0;
return 0;
}
@@ -906,9 +907,9 @@ static int ioctl_create_iso_context(struct client *client, void *buffer)
#define GET_SY(v) (((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff)
-static int ioctl_queue_iso(struct client *client, void *buffer)
+static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_queue_iso *request = buffer;
+ struct fw_cdev_queue_iso *a = &arg->queue_iso;
struct fw_cdev_iso_packet __user *p, *end, *next;
struct fw_iso_context *ctx = client->iso_context;
unsigned long payload, buffer_end, header_length;
@@ -919,7 +920,7 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
u8 header[256];
} u;
- if (ctx == NULL || request->handle != 0)
+ if (ctx == NULL || a->handle != 0)
return -EINVAL;
/*
@@ -929,23 +930,23 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
* set them both to 0, which will still let packets with
* payload_length == 0 through. In other words, if no packets
* use the indirect payload, the iso buffer need not be mapped
- * and the request->data pointer is ignored.
+ * and the a->data pointer is ignored.
*/
- payload = (unsigned long)request->data - client->vm_start;
+ payload = (unsigned long)a->data - client->vm_start;
buffer_end = client->buffer.page_count << PAGE_SHIFT;
- if (request->data == 0 || client->buffer.pages == NULL ||
+ if (a->data == 0 || client->buffer.pages == NULL ||
payload >= buffer_end) {
payload = 0;
buffer_end = 0;
}
- p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
+ p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
- if (!access_ok(VERIFY_READ, p, request->size))
+ if (!access_ok(VERIFY_READ, p, a->size))
return -EFAULT;
- end = (void __user *)p + request->size;
+ end = (void __user *)p + a->size;
count = 0;
while (p < end) {
if (get_user(control, &p->control))
@@ -995,61 +996,78 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
count++;
}
- request->size -= uptr_to_u64(p) - request->packets;
- request->packets = uptr_to_u64(p);
- request->data = client->vm_start + payload;
+ a->size -= uptr_to_u64(p) - a->packets;
+ a->packets = uptr_to_u64(p);
+ a->data = client->vm_start + payload;
return count;
}
-static int ioctl_start_iso(struct client *client, void *buffer)
+static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_start_iso *request = buffer;
+ struct fw_cdev_start_iso *a = &arg->start_iso;
- if (client->iso_context == NULL || request->handle != 0)
+ if (client->iso_context == NULL || a->handle != 0)
return -EINVAL;
- if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
- if (request->tags == 0 || request->tags > 15)
- return -EINVAL;
-
- if (request->sync > 15)
- return -EINVAL;
- }
+ if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
+ (a->tags == 0 || a->tags > 15 || a->sync > 15))
+ return -EINVAL;
- return fw_iso_context_start(client->iso_context, request->cycle,
- request->sync, request->tags);
+ return fw_iso_context_start(client->iso_context,
+ a->cycle, a->sync, a->tags);
}
-static int ioctl_stop_iso(struct client *client, void *buffer)
+static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_stop_iso *request = buffer;
+ struct fw_cdev_stop_iso *a = &arg->stop_iso;
- if (client->iso_context == NULL || request->handle != 0)
+ if (client->iso_context == NULL || a->handle != 0)
return -EINVAL;
return fw_iso_context_stop(client->iso_context);
}
-static int ioctl_get_cycle_timer(struct client *client, void *buffer)
+static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_get_cycle_timer *request = buffer;
+ struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
struct fw_card *card = client->device->card;
- unsigned long long bus_time;
- struct timeval tv;
- unsigned long flags;
+ struct timespec ts = {0, 0};
+ u32 cycle_time;
+ int ret = 0;
+
+ local_irq_disable();
+
+ cycle_time = card->driver->get_cycle_time(card);
- preempt_disable();
- local_irq_save(flags);
+ switch (a->clk_id) {
+ case CLOCK_REALTIME: getnstimeofday(&ts); break;
+ case CLOCK_MONOTONIC: do_posix_clock_monotonic_gettime(&ts); break;
+ case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break;
+ default:
+ ret = -EINVAL;
+ }
- bus_time = card->driver->get_bus_time(card);
- do_gettimeofday(&tv);
+ local_irq_enable();
- local_irq_restore(flags);
- preempt_enable();
+ a->tv_sec = ts.tv_sec;
+ a->tv_nsec = ts.tv_nsec;
+ a->cycle_timer = cycle_time;
+
+ return ret;
+}
+
+static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
+{
+ struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
+ struct fw_cdev_get_cycle_timer2 ct2;
+
+ ct2.clk_id = CLOCK_REALTIME;
+ ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);
+
+ a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
+ a->cycle_timer = ct2.cycle_timer;
- request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
- request->cycle_timer = bus_time & 0xffffffff;
return 0;
}
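
A hypothetical userspace caller of the new ioctl, assuming the matching FW_CDEV_IOC_GET_CYCLE_TIMER2 definition is added to firewire-cdev.h alongside this change (the header diff is not shown here; CLOCK_MONOTONIC_RAW additionally needs 2.6.28+ kernel headers):

    #include <stdio.h>
    #include <time.h>
    #include <sys/ioctl.h>
    #include <linux/firewire-cdev.h>

    static void read_cycle_timer(int fd)
    {
        struct fw_cdev_get_cycle_timer2 ct = { .clk_id = CLOCK_MONOTONIC_RAW };

        if (ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER2, &ct) == 0)
            printf("%lld.%09d s, cycle timer 0x%08x\n",
                   (long long) ct.tv_sec, (int) ct.tv_nsec, ct.cycle_timer);
    }
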
@@ -1220,33 +1238,32 @@ static int init_iso_resource(struct client *client,
return ret;
}
-static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
+static int ioctl_allocate_iso_resource(struct client *client,
+ union ioctl_arg *arg)
{
- struct fw_cdev_allocate_iso_resource *request = buffer;
-
- return init_iso_resource(client, request, ISO_RES_ALLOC);
+ return init_iso_resource(client,
+ &arg->allocate_iso_resource, ISO_RES_ALLOC);
}
-static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
+static int ioctl_deallocate_iso_resource(struct client *client,
+ union ioctl_arg *arg)
{
- struct fw_cdev_deallocate *request = buffer;
-
- return release_client_resource(client, request->handle,
- release_iso_resource, NULL);
+ return release_client_resource(client,
+ arg->deallocate.handle, release_iso_resource, NULL);
}
-static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
+static int ioctl_allocate_iso_resource_once(struct client *client,
+ union ioctl_arg *arg)
{
- struct fw_cdev_allocate_iso_resource *request = buffer;
-
- return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
+ return init_iso_resource(client,
+ &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}
-static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
+static int ioctl_deallocate_iso_resource_once(struct client *client,
+ union ioctl_arg *arg)
{
- struct fw_cdev_allocate_iso_resource *request = buffer;
-
- return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
+ return init_iso_resource(client,
+ &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}
/*
@@ -1254,16 +1271,17 @@ static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffe
* limited by the device's link speed, the local node's link speed,
* and all PHY port speeds between the two links.
*/
-static int ioctl_get_speed(struct client *client, void *buffer)
+static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
return client->device->max_speed;
}
-static int ioctl_send_broadcast_request(struct client *client, void *buffer)
+static int ioctl_send_broadcast_request(struct client *client,
+ union ioctl_arg *arg)
{
- struct fw_cdev_send_request *request = buffer;
+ struct fw_cdev_send_request *a = &arg->send_request;
- switch (request->tcode) {
+ switch (a->tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
break;
@@ -1272,36 +1290,36 @@ static int ioctl_send_broadcast_request(struct client *client, void *buffer)
}
/* Security policy: Only allow accesses to Units Space. */
- if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
+ if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
return -EACCES;
- return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
+ return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}
-static int ioctl_send_stream_packet(struct client *client, void *buffer)
+static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_send_stream_packet *p = buffer;
+ struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
struct fw_cdev_send_request request;
int dest;
- if (p->speed > client->device->card->link_speed ||
- p->length > 1024 << p->speed)
+ if (a->speed > client->device->card->link_speed ||
+ a->length > 1024 << a->speed)
return -EIO;
- if (p->tag > 3 || p->channel > 63 || p->sy > 15)
+ if (a->tag > 3 || a->channel > 63 || a->sy > 15)
return -EINVAL;
- dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
+ dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
request.tcode = TCODE_STREAM_DATA;
- request.length = p->length;
- request.closure = p->closure;
- request.data = p->data;
- request.generation = p->generation;
+ request.length = a->length;
+ request.closure = a->closure;
+ request.data = a->data;
+ request.generation = a->generation;
- return init_request(client, &request, dest, p->speed);
+ return init_request(client, &request, dest, a->speed);
}
-static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
+static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
ioctl_get_info,
ioctl_send_request,
ioctl_allocate,
@@ -1322,47 +1340,35 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
ioctl_get_speed,
ioctl_send_broadcast_request,
ioctl_send_stream_packet,
+ ioctl_get_cycle_timer2,
};
static int dispatch_ioctl(struct client *client,
unsigned int cmd, void __user *arg)
{
- char buffer[sizeof(union {
- struct fw_cdev_get_info _00;
- struct fw_cdev_send_request _01;
- struct fw_cdev_allocate _02;
- struct fw_cdev_deallocate _03;
- struct fw_cdev_send_response _04;
- struct fw_cdev_initiate_bus_reset _05;
- struct fw_cdev_add_descriptor _06;
- struct fw_cdev_remove_descriptor _07;
- struct fw_cdev_create_iso_context _08;
- struct fw_cdev_queue_iso _09;
- struct fw_cdev_start_iso _0a;
- struct fw_cdev_stop_iso _0b;
- struct fw_cdev_get_cycle_timer _0c;
- struct fw_cdev_allocate_iso_resource _0d;
- struct fw_cdev_send_stream_packet _13;
- })];
+ union ioctl_arg buffer;
int ret;
+ if (fw_device_is_shutdown(client->device))
+ return -ENODEV;
+
if (_IOC_TYPE(cmd) != '#' ||
_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
return -EINVAL;
if (_IOC_DIR(cmd) & _IOC_WRITE) {
if (_IOC_SIZE(cmd) > sizeof(buffer) ||
- copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
+ copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
return -EFAULT;
}
- ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
+ ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
if (ret < 0)
return ret;
if (_IOC_DIR(cmd) & _IOC_READ) {
if (_IOC_SIZE(cmd) > sizeof(buffer) ||
- copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
+ copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
return -EFAULT;
}
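
dispatch_ioctl() trusts the fields packed into the command number itself: the type byte must be '#', _IOC_NR() indexes ioctl_handlers[], and the size and direction bits drive the copy_from_user()/copy_to_user() pair around the handler call. A small user-space sketch of that decoding, using only the standard ioctl macros and the existing FW_CDEV_IOC_GET_INFO command:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/firewire-cdev.h>

    int main(void)
    {
        unsigned int cmd = FW_CDEV_IOC_GET_INFO;

        /* type selects the '#' ioctl space, nr the handler slot, and
         * size/dir tell the dispatcher how much to copy in and out. */
        printf("type='%c' nr=%u size=%u dir=%u\n",
               _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd), _IOC_DIR(cmd));
        return 0;
    }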
@@ -1372,24 +1378,14 @@ static int dispatch_ioctl(struct client *client,
static long fw_device_op_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct client *client = file->private_data;
-
- if (fw_device_is_shutdown(client->device))
- return -ENODEV;
-
- return dispatch_ioctl(client, cmd, (void __user *) arg);
+ return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}
#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct client *client = file->private_data;
-
- if (fw_device_is_shutdown(client->device))
- return -ENODEV;
-
- return dispatch_ioctl(client, cmd, compat_ptr(arg));
+ return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 9d0dfcb..014cabd 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -18,6 +18,7 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -43,7 +44,7 @@
#include "core.h"
-void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p)
{
ci->p = p + 1;
ci->end = ci->p + (p[0] >> 16);
@@ -59,9 +60,76 @@ int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
}
EXPORT_SYMBOL(fw_csr_iterator_next);
+static const u32 *search_leaf(const u32 *directory, int search_key)
+{
+ struct fw_csr_iterator ci;
+ int last_key = 0, key, value;
+
+ fw_csr_iterator_init(&ci, directory);
+ while (fw_csr_iterator_next(&ci, &key, &value)) {
+ if (last_key == search_key &&
+ key == (CSR_DESCRIPTOR | CSR_LEAF))
+ return ci.p - 1 + value;
+
+ last_key = key;
+ }
+
+ return NULL;
+}
+
+static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
+{
+ unsigned int quadlets, i;
+ char c;
+
+ if (!size || !buf)
+ return -EINVAL;
+
+ quadlets = min(block[0] >> 16, 256U);
+ if (quadlets < 2)
+ return -ENODATA;
+
+ if (block[1] != 0 || block[2] != 0)
+ /* unknown language/character set */
+ return -ENODATA;
+
+ block += 3;
+ quadlets -= 2;
+ for (i = 0; i < quadlets * 4 && i < size - 1; i++) {
+ c = block[i / 4] >> (24 - 8 * (i % 4));
+ if (c == '\0')
+ break;
+ buf[i] = c;
+ }
+ buf[i] = '\0';
+
+ return i;
+}
+
+/**
+ * fw_csr_string - reads a string from the configuration ROM
+ * @directory: e.g. root directory or unit directory
+ * @key: the key of the preceding directory entry
+ * @buf: where to put the string
+ * @size: size of @buf, in bytes
+ *
+ * The string is taken from a minimal ASCII text descriptor leaf after
+ * the immediate entry with @key. The string is zero-terminated.
+ * Returns strlen(buf) or a negative error code.
+ */
+int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
+{
+ const u32 *leaf = search_leaf(directory, key);
+ if (!leaf)
+ return -ENOENT;
+
+ return textual_leaf_to_string(leaf, buf, size);
+}
+EXPORT_SYMBOL(fw_csr_string);
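
textual_leaf_to_string() pulls the string bytes out of host-order quadlets in big-endian byte order: byte i of the text lives in bits 31-24, 23-16, 15-8 or 7-0 of quadlet i/4, which is exactly what the shift 24 - 8 * (i % 4) selects. A standalone sketch of that extraction over made-up sample data:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t block[] = { 0x4c696e75, 0x78204657 };  /* "Linux FW" */
        char buf[9];
        unsigned int i;

        for (i = 0; i < sizeof(buf) - 1; i++)
            buf[i] = block[i / 4] >> (24 - 8 * (i % 4));
        buf[i] = '\0';
        printf("%s\n", buf);    /* prints "Linux FW" */
        return 0;
    }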
+
static bool is_fw_unit(struct device *dev);
-static int match_unit_directory(u32 *directory, u32 match_flags,
+static int match_unit_directory(const u32 *directory, u32 match_flags,
const struct ieee1394_device_id *id)
{
struct fw_csr_iterator ci;
@@ -195,7 +263,7 @@ static ssize_t show_immediate(struct device *dev,
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
struct fw_csr_iterator ci;
- u32 *dir;
+ const u32 *dir;
int key, value, ret = -ENOENT;
down_read(&fw_device_rwsem);
@@ -226,10 +294,10 @@ static ssize_t show_text_leaf(struct device *dev,
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
- struct fw_csr_iterator ci;
- u32 *dir, *block = NULL, *p, *end;
- int length, key, value, last_key = 0, ret = -ENOENT;
- char *b;
+ const u32 *dir;
+ size_t bufsize;
+ char dummy_buf[2];
+ int ret;
down_read(&fw_device_rwsem);
@@ -238,40 +306,23 @@ static ssize_t show_text_leaf(struct device *dev,
else
dir = fw_device(dev)->config_rom + 5;
- fw_csr_iterator_init(&ci, dir);
- while (fw_csr_iterator_next(&ci, &key, &value)) {
- if (attr->key == last_key &&
- key == (CSR_DESCRIPTOR | CSR_LEAF))
- block = ci.p - 1 + value;
- last_key = key;
+ if (buf) {
+ bufsize = PAGE_SIZE - 1;
+ } else {
+ buf = dummy_buf;
+ bufsize = 1;
}
- if (block == NULL)
- goto out;
-
- length = min(block[0] >> 16, 256U);
- if (length < 3)
- goto out;
-
- if (block[1] != 0 || block[2] != 0)
- /* Unknown encoding. */
- goto out;
+ ret = fw_csr_string(dir, attr->key, buf, bufsize);
- if (buf == NULL) {
- ret = length * 4;
- goto out;
+ if (ret >= 0) {
+ /* Strip trailing whitespace and add newline. */
+ while (ret > 0 && isspace(buf[ret - 1]))
+ ret--;
+ strcpy(buf + ret, "\n");
+ ret++;
}
- b = buf;
- end = &block[length + 1];
- for (p = &block[3]; p < end; p++, b += 4)
- * (u32 *) b = (__force u32) __cpu_to_be32(*p);
-
- /* Strip trailing whitespace and add newline. */
- while (b--, (isspace(*b) || *b == '\0') && b > buf);
- strcpy(b + 1, "\n");
- ret = b + 2 - buf;
- out:
up_read(&fw_device_rwsem);
return ret;
@@ -371,7 +422,7 @@ static ssize_t guid_show(struct device *dev,
return ret;
}
-static int units_sprintf(char *buf, u32 *directory)
+static int units_sprintf(char *buf, const u32 *directory)
{
struct fw_csr_iterator ci;
int key, value;
@@ -441,28 +492,29 @@ static int read_rom(struct fw_device *device,
return rcode;
}
-#define READ_BIB_ROM_SIZE 256
-#define READ_BIB_STACK_SIZE 16
+#define MAX_CONFIG_ROM_SIZE 256
/*
* Read the bus info block, perform a speed probe, and read all of the rest of
* the config ROM. We do all this with a cached bus generation. If the bus
- * generation changes under us, read_bus_info_block will fail and get retried.
+ * generation changes under us, read_config_rom will fail and get retried.
* It's better to start all over in this case because the node from which we
* are reading the ROM may have changed the ROM during the reset.
*/
-static int read_bus_info_block(struct fw_device *device, int generation)
+static int read_config_rom(struct fw_device *device, int generation)
{
- u32 *rom, *stack, *old_rom, *new_rom;
+ const u32 *old_rom, *new_rom;
+ u32 *rom, *stack;
u32 sp, key;
int i, end, length, ret = -1;
- rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE +
- sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL);
+ rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
+ sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
if (rom == NULL)
return -ENOMEM;
- stack = &rom[READ_BIB_ROM_SIZE];
+ stack = &rom[MAX_CONFIG_ROM_SIZE];
+ memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE);
device->max_speed = SCODE_100;
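
read_config_rom() depends on the CSR directory entry layout: the top byte of a quadlet is the key, the two most significant bits of which give the entry type (3 means the value is an offset to another directory), and the low 24 bits hold an immediate value or a quadlet offset. That is why the code masks with 0xffffff and pushes i + rom[i] onto the stack. A minimal decoding sketch with a made-up entry value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t entry = 0xd1000002;    /* key 0xd1: unit directory at +2 */

        unsigned int key_type = entry >> 30;          /* 3 == directory */
        unsigned int offset   = entry & 0xffffff;     /* quadlets ahead */

        printf("type=%u offset=%u\n", key_type, offset);
        return 0;
    }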
@@ -529,40 +581,54 @@ static int read_bus_info_block(struct fw_device *device, int generation)
*/
key = stack[--sp];
i = key & 0xffffff;
- if (i >= READ_BIB_ROM_SIZE)
- /*
- * The reference points outside the standard
- * config rom area, something's fishy.
- */
+ if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE))
goto out;
/* Read header quadlet for the block to get the length. */
if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
goto out;
end = i + (rom[i] >> 16) + 1;
- i++;
- if (end > READ_BIB_ROM_SIZE)
+ if (end > MAX_CONFIG_ROM_SIZE) {
/*
- * This block extends outside standard config
- * area (and the array we're reading it
- * into). That's broken, so ignore this
- * device.
+ * This block extends outside the config ROM which is
+ * a firmware bug. Ignore this whole block, i.e.
+ * simply set a fake block length of 0.
*/
- goto out;
+ fw_error("skipped invalid ROM block %x at %llx\n",
+ rom[i],
+ i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
+ rom[i] = 0;
+ end = i;
+ }
+ i++;
/*
* Now read in the block. If this is a directory
* block, check the entries as we read them to see if
* it references another block, and push it in that case.
*/
- while (i < end) {
+ for (; i < end; i++) {
if (read_rom(device, generation, i, &rom[i]) !=
RCODE_COMPLETE)
goto out;
- if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
- sp < READ_BIB_STACK_SIZE)
- stack[sp++] = i + rom[i];
- i++;
+
+ if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
+ continue;
+ /*
+ * Offset points outside the ROM. May be a firmware
+ * bug or an Extended ROM entry (IEEE 1212-2001 clause
+ * 7.7.18). Simply overwrite this pointer here with a
+ * fake immediate entry so that later iterators over
+ * the ROM don't have to check offsets all the time.
+ */
+ if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) {
+ fw_error("skipped unsupported ROM entry %x at %llx\n",
+ rom[i],
+ i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
+ rom[i] = 0;
+ continue;
+ }
+ stack[sp++] = i + rom[i];
}
if (length < i)
length = i;
@@ -905,7 +971,7 @@ static void fw_device_init(struct work_struct *work)
* device.
*/
- if (read_bus_info_block(device, device->generation) < 0) {
+ if (read_config_rom(device, device->generation) < 0) {
if (device->config_rom_retries < MAX_RETRIES &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
@@ -1022,7 +1088,7 @@ enum {
};
/* Reread and compare bus info block and header of root directory */
-static int reread_bus_info_block(struct fw_device *device, int generation)
+static int reread_config_rom(struct fw_device *device, int generation)
{
u32 q;
int i;
@@ -1048,7 +1114,7 @@ static void fw_device_refresh(struct work_struct *work)
struct fw_card *card = device->card;
int node_id = device->node_id;
- switch (reread_bus_info_block(device, device->generation)) {
+ switch (reread_config_rom(device, device->generation)) {
case REREAD_BIB_ERROR:
if (device->config_rom_retries < MAX_RETRIES / 2 &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
@@ -1082,7 +1148,7 @@ static void fw_device_refresh(struct work_struct *work)
*/
device_for_each_child(&device->device, NULL, shutdown_unit);
- if (read_bus_info_block(device, device->generation) < 0) {
+ if (read_config_rom(device, device->generation) < 0) {
if (device->config_rom_retries < MAX_RETRIES &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 495849e..673b03f 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -921,23 +921,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
void *payload, size_t length, void *callback_data)
{
int reg = offset & ~CSR_REGISTER_BASE;
- unsigned long long bus_time;
__be32 *data = payload;
int rcode = RCODE_COMPLETE;
switch (reg) {
case CSR_CYCLE_TIME:
- case CSR_BUS_TIME:
- if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) {
- rcode = RCODE_TYPE_ERROR;
- break;
- }
-
- bus_time = card->driver->get_bus_time(card);
- if (reg == CSR_CYCLE_TIME)
- *data = cpu_to_be32(bus_time);
+ if (TCODE_IS_READ_REQUEST(tcode) && length == 4)
+ *data = cpu_to_be32(card->driver->get_cycle_time(card));
else
- *data = cpu_to_be32(bus_time >> 25);
+ rcode = RCODE_TYPE_ERROR;
break;
case CSR_BROADCAST_CHANNEL:
@@ -968,6 +960,9 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
case CSR_BUSY_TIMEOUT:
/* FIXME: Implement this. */
+ case CSR_BUS_TIME:
+ /* Useless without initialization by the bus manager. */
+
default:
rcode = RCODE_ADDRESS_ERROR;
break;
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index ed3b1a7..fb03213 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -70,7 +70,7 @@ struct fw_card_driver {
int (*enable_phys_dma)(struct fw_card *card,
int node_id, int generation);
- u64 (*get_bus_time)(struct fw_card *card);
+ u32 (*get_cycle_time)(struct fw_card *card);
struct fw_iso_context *
(*allocate_iso_context)(struct fw_card *card,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 43ebf33..75dc698 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -38,7 +38,6 @@
#include <linux/spinlock.h>
#include <linux/string.h>
-#include <asm/atomic.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/system.h>
@@ -73,20 +72,6 @@ struct descriptor {
__le16 transfer_status;
} __attribute__((aligned(16)));
-struct db_descriptor {
- __le16 first_size;
- __le16 control;
- __le16 second_req_count;
- __le16 first_req_count;
- __le32 branch_address;
- __le16 second_res_count;
- __le16 first_res_count;
- __le32 reserved0;
- __le32 first_buffer;
- __le32 second_buffer;
- __le32 reserved1;
-} __attribute__((aligned(16)));
-
#define CONTROL_SET(regs) (regs)
#define CONTROL_CLEAR(regs) ((regs) + 4)
#define COMMAND_PTR(regs) ((regs) + 12)
@@ -181,31 +166,16 @@ struct fw_ohci {
struct fw_card card;
__iomem char *registers;
- dma_addr_t self_id_bus;
- __le32 *self_id_cpu;
- struct tasklet_struct bus_reset_tasklet;
int node_id;
int generation;
int request_generation; /* for timestamping incoming requests */
- atomic_t bus_seconds;
-
- bool use_dualbuffer;
- bool old_uninorth;
- bool bus_reset_packet_quirk;
+ unsigned quirks;
/*
* Spinlock for accessing fw_ohci data. Never call out of
* this driver with this lock held.
*/
spinlock_t lock;
- u32 self_id_buffer[512];
-
- /* Config rom buffers */
- __be32 *config_rom;
- dma_addr_t config_rom_bus;
- __be32 *next_config_rom;
- dma_addr_t next_config_rom_bus;
- __be32 next_header;
struct ar_context ar_request_ctx;
struct ar_context ar_response_ctx;
@@ -217,6 +187,18 @@ struct fw_ohci {
u64 ir_context_channels;
u32 ir_context_mask;
struct iso_context *ir_context_list;
+
+ __be32 *config_rom;
+ dma_addr_t config_rom_bus;
+ __be32 *next_config_rom;
+ dma_addr_t next_config_rom_bus;
+ __be32 next_header;
+
+ __le32 *self_id_cpu;
+ dma_addr_t self_id_bus;
+ struct tasklet_struct bus_reset_tasklet;
+
+ u32 self_id_buffer[512];
};
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
@@ -249,6 +231,30 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
static char ohci_driver_name[] = KBUILD_MODNAME;
+#define QUIRK_CYCLE_TIMER 1
+#define QUIRK_RESET_PACKET 2
+#define QUIRK_BE_HEADERS 4
+
+/* In case of multiple matches in ohci_quirks[], only the first one is used. */
+static const struct {
+ unsigned short vendor, device, flags;
+} ohci_quirks[] = {
+ {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
+ {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
+};
+
+/* This overrides anything that was found in ohci_quirks[]. */
+static int param_quirks;
+module_param_named(quirks, param_quirks, int, 0644);
+MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
+ ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
+ ", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
+ ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
+ ")");
+
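
pci_probe() below takes the first table match, with PCI_ANY_ID acting as a device wildcard, and param_quirks replaces the looked-up value wholesale (so e.g. modprobe firewire-ohci quirks=1 forces just the cycle timer workaround). A minimal sketch of that matching rule; the table contents here are abbreviated illustrations, not the full list:

    #include <stdio.h>

    #define PCI_ANY_ID (~0)

    struct quirk { unsigned short vendor, device, flags; };

    static const struct quirk table[] = {
        { 0x104c /* TI  */, (unsigned short)PCI_ANY_ID, 2 },
        { 0x10b9 /* ALi */, (unsigned short)PCI_ANY_ID, 1 },
    };

    static unsigned int lookup(unsigned short vendor, unsigned short device)
    {
        unsigned int i;

        /* First match wins, exactly like the loop in pci_probe(). */
        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (table[i].vendor == vendor &&
                (table[i].device == device ||
                 table[i].device == (unsigned short)PCI_ANY_ID))
                return table[i].flags;
        return 0;
    }

    int main(void)
    {
        printf("quirks=%u\n", lookup(0x104c, 0x8023)); /* prints quirks=2 */
        return 0;
    }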
#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -275,7 +281,7 @@ static void log_irqs(u32 evt)
!(evt & OHCI1394_busReset))
return;
- fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+ fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
evt & OHCI1394_selfIDComplete ? " selfID" : "",
evt & OHCI1394_RQPkt ? " AR_req" : "",
evt & OHCI1394_RSPkt ? " AR_resp" : "",
@@ -285,7 +291,6 @@ static void log_irqs(u32 evt)
evt & OHCI1394_isochTx ? " IT" : "",
evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
- evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
evt & OHCI1394_busReset ? " busReset" : "",
@@ -293,8 +298,7 @@ static void log_irqs(u32 evt)
OHCI1394_RSPkt | OHCI1394_reqTxComplete |
OHCI1394_respTxComplete | OHCI1394_isochRx |
OHCI1394_isochTx | OHCI1394_postedWriteErr |
- OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
- OHCI1394_cycleInconsistent |
+ OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent |
OHCI1394_regAccessFail | OHCI1394_busReset)
? " ?" : "");
}
@@ -524,7 +528,7 @@ static void ar_context_release(struct ar_context *ctx)
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
- (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
+ (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif
@@ -605,7 +609,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
* at a slightly incorrect time (in bus_reset_tasklet).
*/
if (evt == OHCI1394_evt_bus_reset) {
- if (!ohci->bus_reset_packet_quirk)
+ if (!(ohci->quirks & QUIRK_RESET_PACKET))
ohci->request_generation = (p.header[2] >> 16) & 0xff;
} else if (ctx == &ohci->ar_request_ctx) {
fw_core_handle_request(&ohci->card, &p);
@@ -1329,7 +1333,7 @@ static void bus_reset_tasklet(unsigned long data)
context_stop(&ohci->at_response_ctx);
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
- if (ohci->bus_reset_packet_quirk)
+ if (ohci->quirks & QUIRK_RESET_PACKET)
ohci->request_generation = generation;
/*
@@ -1384,7 +1388,7 @@ static void bus_reset_tasklet(unsigned long data)
static irqreturn_t irq_handler(int irq, void *data)
{
struct fw_ohci *ohci = data;
- u32 event, iso_event, cycle_time;
+ u32 event, iso_event;
int i;
event = reg_read(ohci, OHCI1394_IntEventClear);
@@ -1454,12 +1458,6 @@ static irqreturn_t irq_handler(int irq, void *data)
fw_notify("isochronous cycle inconsistent\n");
}
- if (event & OHCI1394_cycle64Seconds) {
- cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
- if ((cycle_time & 0x80000000) == 0)
- atomic_inc(&ohci->bus_seconds);
- }
-
return IRQ_HANDLED;
}
@@ -1553,8 +1551,7 @@ static int ohci_enable(struct fw_card *card,
OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
OHCI1394_isochRx | OHCI1394_isochTx |
OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
- OHCI1394_cycleInconsistent |
- OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
+ OHCI1394_cycleInconsistent | OHCI1394_regAccessFail |
OHCI1394_masterIntEnable);
if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
@@ -1794,16 +1791,61 @@ static int ohci_enable_phys_dma(struct fw_card *card,
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
-static u64 ohci_get_bus_time(struct fw_card *card)
+static u32 cycle_timer_ticks(u32 cycle_timer)
{
- struct fw_ohci *ohci = fw_ohci(card);
- u32 cycle_time;
- u64 bus_time;
+ u32 ticks;
- cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
- bus_time = ((u64)atomic_read(&ohci->bus_seconds) << 32) | cycle_time;
+ ticks = cycle_timer & 0xfff;
+ ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
+ ticks += (3072 * 8000) * (cycle_timer >> 25);
+
+ return ticks;
+}
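
cycle_timer_ticks() flattens the register's three fields (7-bit seconds, 13-bit cycleCount at 8000 cycles per second, 12-bit cycleOffset at 3072 ticks of the 24.576 MHz clock per cycle) into a single tick count that still fits in 32 bits. A quick standalone check of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t cycle_timer_ticks(uint32_t cycle_timer)
    {
        uint32_t ticks = cycle_timer & 0xfff;

        ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
        ticks += (3072 * 8000u) * (cycle_timer >> 25);
        return ticks;
    }

    int main(void)
    {
        /* 1 second, 1 cycle, 1 tick */
        uint32_t reg = (1u << 25) | (1u << 12) | 1u;

        printf("%u\n", cycle_timer_ticks(reg)); /* 24576000+3072+1 = 24579073 */
        return 0;
    }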
+
+/*
+ * Some controllers exhibit one or more of the following bugs when updating the
+ * iso cycle timer register:
+ * - When the lowest six bits are wrapping around to zero, a read that happens
+ * at the same time will return garbage in the lowest ten bits.
+ * - When the cycleOffset field wraps around to zero, the cycleCount field is
+ * not incremented for about 60 ns.
+ * - Occasionally, the entire register reads zero.
+ *
+ * To catch these, we read the register three times and ensure that the
+ * two differences between consecutive reads are approximately the same,
+ * i.e. neither is at least twice the other. Furthermore, any negative
+ * difference indicates an error. (A PCI read should take at least 20 ticks
+ * of the 24.576 MHz timer to execute, so the differences have enough
+ * precision for this ratio test.)
+ */
+static u32 ohci_get_cycle_time(struct fw_card *card)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ u32 c0, c1, c2;
+ u32 t0, t1, t2;
+ s32 diff01, diff12;
+ int i;
- return bus_time;
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+
+ if (ohci->quirks & QUIRK_CYCLE_TIMER) {
+ i = 0;
+ c1 = c2;
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ do {
+ c0 = c1;
+ c1 = c2;
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ t0 = cycle_timer_ticks(c0);
+ t1 = cycle_timer_ticks(c1);
+ t2 = cycle_timer_ticks(c2);
+ diff01 = t1 - t0;
+ diff12 = t2 - t1;
+ } while ((diff01 <= 0 || diff12 <= 0 ||
+ diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
+ && i++ < 20);
+ }
+
+ return c2;
}
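
Spelled out on its own, the acceptance test the retry loop applies to two consecutive deltas is the following predicate (a minimal sketch, not kernel API):

    #include <stdint.h>

    /* Accept two consecutive cycle-timer deltas only if both are positive
     * and neither is at least twice the other; for positive integers,
     * diff01 / diff12 >= 2 is equivalent to diff01 >= 2 * diff12. */
    static int deltas_plausible(int32_t diff01, int32_t diff12)
    {
        return diff01 > 0 && diff12 > 0 &&
               diff01 / diff12 < 2 && diff12 / diff01 < 2;
    }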
static void copy_iso_headers(struct iso_context *ctx, void *p)
@@ -1828,52 +1870,6 @@ static void copy_iso_headers(struct iso_context *ctx, void *p)
ctx->header_length += ctx->base.header_size;
}
-static int handle_ir_dualbuffer_packet(struct context *context,
- struct descriptor *d,
- struct descriptor *last)
-{
- struct iso_context *ctx =
- container_of(context, struct iso_context, context);
- struct db_descriptor *db = (struct db_descriptor *) d;
- __le32 *ir_header;
- size_t header_length;
- void *p, *end;
-
- if (db->first_res_count != 0 && db->second_res_count != 0) {
- if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
- /* This descriptor isn't done yet, stop iteration. */
- return 0;
- }
- ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
- }
-
- header_length = le16_to_cpu(db->first_req_count) -
- le16_to_cpu(db->first_res_count);
-
- p = db + 1;
- end = p + header_length;
- while (p < end) {
- copy_iso_headers(ctx, p);
- ctx->excess_bytes +=
- (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
- p += max(ctx->base.header_size, (size_t)8);
- }
-
- ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
- le16_to_cpu(db->second_res_count);
-
- if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
- ir_header = (__le32 *) (db + 1);
- ctx->base.callback(&ctx->base,
- le32_to_cpu(ir_header[0]) & 0xffff,
- ctx->header_length, ctx->header,
- ctx->base.callback_data);
- ctx->header_length = 0;
- }
-
- return 1;
-}
-
static int handle_ir_packet_per_buffer(struct context *context,
struct descriptor *d,
struct descriptor *last)
@@ -1960,10 +1956,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
channels = &ohci->ir_context_channels;
mask = &ohci->ir_context_mask;
list = ohci->ir_context_list;
- if (ohci->use_dualbuffer)
- callback = handle_ir_dualbuffer_packet;
- else
- callback = handle_ir_packet_per_buffer;
+ callback = handle_ir_packet_per_buffer;
}
spin_lock_irqsave(&ohci->lock, flags);
@@ -2026,8 +2019,6 @@ static int ohci_start_iso(struct fw_iso_context *base,
} else {
index = ctx - ohci->ir_context_list;
control = IR_CONTEXT_ISOCH_HEADER;
- if (ohci->use_dualbuffer)
- control |= IR_CONTEXT_DUAL_BUFFER_MODE;
match = (tags << 28) | (sync << 8) | ctx->base.channel;
if (cycle >= 0) {
match |= (cycle & 0x07fff) << 12;
@@ -2188,92 +2179,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
return 0;
}
-static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload)
-{
- struct iso_context *ctx = container_of(base, struct iso_context, base);
- struct db_descriptor *db = NULL;
- struct descriptor *d;
- struct fw_iso_packet *p;
- dma_addr_t d_bus, page_bus;
- u32 z, header_z, length, rest;
- int page, offset, packet_count, header_size;
-
- /*
- * FIXME: Cycle lost behavior should be configurable: lose
- * packet, retransmit or terminate..
- */
-
- p = packet;
- z = 2;
-
- /*
- * The OHCI controller puts the isochronous header and trailer in the
- * buffer, so we need at least 8 bytes.
- */
- packet_count = p->header_length / ctx->base.header_size;
- header_size = packet_count * max(ctx->base.header_size, (size_t)8);
-
- /* Get header size in number of descriptors. */
- header_z = DIV_ROUND_UP(header_size, sizeof(*d));
- page = payload >> PAGE_SHIFT;
- offset = payload & ~PAGE_MASK;
- rest = p->payload_length;
- /*
- * The controllers I've tested have not worked correctly when
- * second_req_count is zero. Rather than do something we know won't
- * work, return an error
- */
- if (rest == 0)
- return -EINVAL;
-
- while (rest > 0) {
- d = context_get_descriptors(&ctx->context,
- z + header_z, &d_bus);
- if (d == NULL)
- return -ENOMEM;
-
- db = (struct db_descriptor *) d;
- db->control = cpu_to_le16(DESCRIPTOR_STATUS |
- DESCRIPTOR_BRANCH_ALWAYS);
- db->first_size =
- cpu_to_le16(max(ctx->base.header_size, (size_t)8));
- if (p->skip && rest == p->payload_length) {
- db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
- db->first_req_count = db->first_size;
- } else {
- db->first_req_count = cpu_to_le16(header_size);
- }
- db->first_res_count = db->first_req_count;
- db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
-
- if (p->skip && rest == p->payload_length)
- length = 4;
- else if (offset + rest < PAGE_SIZE)
- length = rest;
- else
- length = PAGE_SIZE - offset;
-
- db->second_req_count = cpu_to_le16(length);
- db->second_res_count = db->second_req_count;
- page_bus = page_private(buffer->pages[page]);
- db->second_buffer = cpu_to_le32(page_bus + offset);
-
- if (p->interrupt && length == rest)
- db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
-
- context_append(&ctx->context, d, z, header_z);
- offset = (offset + length) & ~PAGE_MASK;
- rest -= length;
- if (offset == 0)
- page++;
- }
-
- return 0;
-}
-
static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
@@ -2364,9 +2269,6 @@ static int ohci_queue_iso(struct fw_iso_context *base,
spin_lock_irqsave(&ctx->context.ohci->lock, flags);
if (base->type == FW_ISO_CONTEXT_TRANSMIT)
ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
- else if (ctx->context.ohci->use_dualbuffer)
- ret = ohci_queue_iso_receive_dualbuffer(base, packet,
- buffer, payload);
else
ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
buffer, payload);
@@ -2383,7 +2285,7 @@ static const struct fw_card_driver ohci_driver = {
.send_response = ohci_send_response,
.cancel_packet = ohci_cancel_packet,
.enable_phys_dma = ohci_enable_phys_dma,
- .get_bus_time = ohci_get_bus_time,
+ .get_cycle_time = ohci_get_cycle_time,
.allocate_iso_context = ohci_allocate_iso_context,
.free_iso_context = ohci_free_iso_context,
@@ -2421,17 +2323,13 @@ static void ohci_pmac_off(struct pci_dev *dev)
#define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */
-#define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
-#define PCI_DEVICE_ID_AGERE_FW643 0x5901
-#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024
-
static int __devinit pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
u32 bus_options, max_receive, link_speed, version;
u64 guid;
- int err;
+ int i, err, n_ir, n_it;
size_t size;
ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
@@ -2472,36 +2370,15 @@ static int __devinit pci_probe(struct pci_dev *dev,
goto fail_iomem;
}
- version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
-#if 0
- /* FIXME: make it a context option or remove dual-buffer mode */
- ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
-#endif
-
- /* dual-buffer mode is broken if more than one IR context is active */
- if (dev->vendor == PCI_VENDOR_ID_AGERE &&
- dev->device == PCI_DEVICE_ID_AGERE_FW643)
- ohci->use_dualbuffer = false;
-
- /* dual-buffer mode is broken */
- if (dev->vendor == PCI_VENDOR_ID_RICOH &&
- dev->device == PCI_DEVICE_ID_RICOH_R5C832)
- ohci->use_dualbuffer = false;
-
-/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
-#if !defined(CONFIG_X86_32)
- /* dual-buffer mode is broken with descriptor addresses above 2G */
- if (dev->vendor == PCI_VENDOR_ID_TI &&
- (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 ||
- dev->device == PCI_DEVICE_ID_TI_TSB43AB23))
- ohci->use_dualbuffer = false;
-#endif
-
-#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
- ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
- dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
-#endif
- ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
+ for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
+ if (ohci_quirks[i].vendor == dev->vendor &&
+ (ohci_quirks[i].device == dev->device ||
+ ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) {
+ ohci->quirks = ohci_quirks[i].flags;
+ break;
+ }
+ if (param_quirks)
+ ohci->quirks = param_quirks;
ar_context_init(&ohci->ar_request_ctx, ohci,
OHCI1394_AsReqRcvContextControlSet);
@@ -2516,17 +2393,19 @@ static int __devinit pci_probe(struct pci_dev *dev,
OHCI1394_AsRspTrContextControlSet, handle_at_packet);
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
- ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
+ ohci->ir_context_channels = ~0ULL;
+ ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
- size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
- ohci->it_context_list = kzalloc(size, GFP_KERNEL);
+ n_ir = hweight32(ohci->ir_context_mask);
+ size = sizeof(struct iso_context) * n_ir;
+ ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
- ohci->ir_context_channels = ~0ULL;
- ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+ ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
- size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
- ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
+ n_it = hweight32(ohci->it_context_mask);
+ size = sizeof(struct iso_context) * n_it;
+ ohci->it_context_list = kzalloc(size, GFP_KERNEL);
if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
err = -ENOMEM;
@@ -2553,8 +2432,11 @@ static int __devinit pci_probe(struct pci_dev *dev,
if (err)
goto fail_self_id;
- fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
- dev_name(&dev->dev), version >> 16, version & 0xff);
+ version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
+ fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
+ "%d IR + %d IT contexts, quirks 0x%x\n",
+ dev_name(&dev->dev), version >> 16, version & 0xff,
+ n_ir, n_it, ohci->quirks);
return 0;
@@ -2662,7 +2544,7 @@ static int pci_resume(struct pci_dev *dev)
}
#endif
-static struct pci_device_id pci_table[] = {
+static const struct pci_device_id pci_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
{ }
};
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 70fef40..ca264f2 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1014,7 +1014,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
return 0;
}
-static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
+static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt,
+ const u32 *directory)
{
struct fw_csr_iterator ci;
int key, value;
@@ -1027,7 +1028,7 @@ static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
return 0;
}
-static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
+static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory,
u32 *model, u32 *firmware_revision)
{
struct fw_csr_iterator ci;
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 39c5aa7..abe3f44 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -4,7 +4,7 @@
ccflags-y := -Iinclude/drm
-drm-y := drm_auth.o drm_bufs.o drm_cache.o \
+drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o drm_drawable.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
new file mode 100644
index 0000000..55d03ed
--- /dev/null
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -0,0 +1,184 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Pauli Nieminen.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Multipart buffer for copying data which is larger than the page size.
+ *
+ * Authors:
+ * Pauli Nieminen <suokkos-at-gmail-dot-com>
+ */
+
+#include "drm_buffer.h"
+
+/**
+ * Allocate the drm buffer object.
+ *
+ * buf: Pointer to a pointer where the object is stored.
+ * size: The number of bytes to allocate.
+ */
+int drm_buffer_alloc(struct drm_buffer **buf, int size)
+{
+ int nr_pages = size / PAGE_SIZE + 1;
+ int idx;
+
+ /* Allocating the pointer table at the end of the structure makes
+ * drm_buffer variable sized. */
+ *buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
+ GFP_KERNEL);
+
+ if (*buf == NULL) {
+ DRM_ERROR("Failed to allocate drm buffer object to hold"
+ " %d bytes in %d pages.\n",
+ size, nr_pages);
+ return -ENOMEM;
+ }
+
+ (*buf)->size = size;
+
+ for (idx = 0; idx < nr_pages; ++idx) {
+
+ (*buf)->data[idx] =
+ kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
+ GFP_KERNEL);
+
+
+ if ((*buf)->data[idx] == NULL) {
+ DRM_ERROR("Failed to allocate %dth page for drm"
+ " buffer with %d bytes and %d pages.\n",
+ idx + 1, size, nr_pages);
+ goto error_out;
+ }
+
+ }
+
+ return 0;
+
+error_out:
+
+ /* Only the last element can be a null pointer, so check for it first. */
+ if ((*buf)->data[idx])
+ kfree((*buf)->data[idx]);
+
+ for (--idx; idx >= 0; --idx)
+ kfree((*buf)->data[idx]);
+
+ kfree(*buf);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_buffer_alloc);
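
A hedged in-kernel usage fragment for the allocator (assumes drm_buffer.h; error handling abbreviated; drm_buffer_free() is defined further below in this file):

    struct drm_buffer *buf;
    int ret;

    ret = drm_buffer_alloc(&buf, 3 * PAGE_SIZE + 100);  /* spans 4 pages */
    if (ret)
        return ret;
    /* ... fill via drm_buffer_copy_from_user(), walk it, then release: */
    drm_buffer_free(buf);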
+
+/**
+ * Copy the user data to the beginning of the buffer and reset the
+ * processing iterator.
+ *
+ * user_data: A pointer to the data that is copied to the buffer.
+ * size: The number of bytes to copy.
+ */
+int drm_buffer_copy_from_user(struct drm_buffer *buf,
+ void __user *user_data, int size)
+{
+ int nr_pages = size / PAGE_SIZE + 1;
+ int idx;
+
+ if (size > buf->size) {
+ DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
+ " %d bytes space\n",
+ size, buf->size);
+ return -EFAULT;
+ }
+
+ for (idx = 0; idx < nr_pages; ++idx) {
+
+ if (DRM_COPY_FROM_USER(buf->data[idx],
+ user_data + idx * PAGE_SIZE,
+ min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
+ DRM_ERROR("Failed to copy user data (%p) to drm buffer"
+ " (%p) %dth page.\n",
+ user_data, buf, idx);
+ return -EFAULT;
+
+ }
+ }
+ buf->iterator = 0;
+ return 0;
+}
+EXPORT_SYMBOL(drm_buffer_copy_from_user);
+
+/**
+ * Free the drm buffer object
+ */
+void drm_buffer_free(struct drm_buffer *buf)
+{
+
+ if (buf != NULL) {
+
+ int nr_pages = buf->size / PAGE_SIZE + 1;
+ int idx;
+ for (idx = 0; idx < nr_pages; ++idx)
+ kfree(buf->data[idx]);
+
+ kfree(buf);
+ }
+}
+EXPORT_SYMBOL(drm_buffer_free);
+
+/**
+ * Read an object from a buffer that may be split into multiple parts. If
+ * the object is not split, the function simply returns a pointer to the
+ * object inside the buffer. If the object is split, its data is copied
+ * into the stack object supplied by the caller.
+ *
+ * The processing location of the buffer is also advanced to the next byte
+ * after the object.
+ *
+ * objsize: The size of the object in bytes.
+ * stack_obj: A pointer to a memory location where the object can be copied.
+ */
+void *drm_buffer_read_object(struct drm_buffer *buf,
+ int objsize, void *stack_obj)
+{
+ int idx = drm_buffer_index(buf);
+ int page = drm_buffer_page(buf);
+ void *obj = NULL;
+
+ if (idx + objsize <= PAGE_SIZE) {
+ obj = &buf->data[page][idx];
+ } else {
+ /* The object is split, which forces a copy into the temporary object. */
+ int beginsz = PAGE_SIZE - idx;
+ memcpy(stack_obj, &buf->data[page][idx], beginsz);
+
+ memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
+ objsize - beginsz);
+
+ obj = stack_obj;
+ }
+
+ drm_buffer_advance(buf, objsize);
+ return obj;
+}
+EXPORT_SYMBOL(drm_buffer_read_object);
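
Usage sketch for the reader above; the caller-side names are hypothetical, and the stack copy only happens when the object straddles a page boundary:

    u32 tmp;
    u32 *val;

    /* Returns a pointer either into a buffer page or to &tmp, and in
     * both cases advances the iterator by sizeof(u32). */
    val = drm_buffer_read_object(buf, sizeof(*val), &tmp);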
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7d0f00a..f2aaf39 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -836,11 +836,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
} else if (set->fb == NULL) {
mode_changed = true;
- } else if ((set->fb->bits_per_pixel !=
- set->crtc->fb->bits_per_pixel) ||
- set->fb->depth != set->crtc->fb->depth)
- fb_changed = true;
- else
+ } else
fb_changed = true;
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 766c468..f3c58e2 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -125,28 +125,28 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ab6c973..f97e7c4 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -60,8 +60,7 @@
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
-/* define the number of Extension EDID block */
-#define MAX_EDID_EXT_NUM 4
+
#define LEVEL_DMT 0
#define LEVEL_GTF 1
@@ -114,14 +113,14 @@ static const u8 edid_header[] = {
};
/**
- * edid_is_valid - sanity check EDID data
+ * drm_edid_is_valid - sanity check EDID data
* @edid: EDID data
*
* Sanity check the EDID block by looking at the header, the version number
* and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
* valid.
*/
-static bool edid_is_valid(struct edid *edid)
+bool drm_edid_is_valid(struct edid *edid)
{
int i, score = 0;
u8 csum = 0;
@@ -163,6 +162,7 @@ bad:
}
return 0;
}
+EXPORT_SYMBOL(drm_edid_is_valid);
/**
* edid_vendor - match a string against EDID's obfuscated vendor field
@@ -1112,8 +1112,8 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
}
 /* Choose the real EDID extension number */
- edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
- MAX_EDID_EXT_NUM : edid->extensions;
+ edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
+ DRM_MAX_EDID_EXT_NUM : edid->extensions;
/* Find CEA extension */
for (i = 0; i < edid_ext_num; i++) {
@@ -1195,7 +1195,7 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter, buf, len))
return -1;
- if (edid_is_valid((struct edid *)buf))
+ if (drm_edid_is_valid((struct edid *)buf))
return 0;
}
@@ -1220,7 +1220,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,
int ret;
struct edid *edid;
- edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1),
+ edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
GFP_KERNEL);
if (edid == NULL) {
dev_warn(&connector->dev->pdev->dev,
@@ -1238,14 +1238,14 @@ struct edid *drm_get_edid(struct drm_connector *connector,
if (edid->extensions != 0) {
int edid_ext_num = edid->extensions;
- if (edid_ext_num > MAX_EDID_EXT_NUM) {
+ if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
dev_warn(&connector->dev->pdev->dev,
"The number of extension(%d) is "
"over max (%d), actually read number (%d)\n",
- edid_ext_num, MAX_EDID_EXT_NUM,
- MAX_EDID_EXT_NUM);
+ edid_ext_num, DRM_MAX_EDID_EXT_NUM,
+ DRM_MAX_EDID_EXT_NUM);
/* Reset EDID extension number to be read */
- edid_ext_num = MAX_EDID_EXT_NUM;
+ edid_ext_num = DRM_MAX_EDID_EXT_NUM;
}
/* Read EDID including extensions too */
ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
@@ -1288,8 +1288,8 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
goto end;
 /* Choose the real EDID extension number */
- edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
- MAX_EDID_EXT_NUM : edid->extensions;
+ edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
+ DRM_MAX_EDID_EXT_NUM : edid->extensions;
/* Find CEA extension */
for (i = 0; i < edid_ext_num; i++) {
@@ -1346,7 +1346,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
if (edid == NULL) {
return 0;
}
- if (!edid_is_valid(edid)) {
+ if (!drm_edid_is_valid(edid)) {
dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
return 0;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0f9e9055..5054970 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -27,6 +27,7 @@
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
+#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/fb.h>
#include "drmP.h"
@@ -50,21 +51,6 @@ int drm_fb_helper_add_connector(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_fb_helper_add_connector);
-static int my_atoi(const char *name)
-{
- int val = 0;
-
- for (;; name++) {
- switch (*name) {
- case '0' ... '9':
- val = 10*val+(*name-'0');
- break;
- default:
- return val;
- }
- }
-}
-
/**
* drm_fb_helper_connector_parse_command_line - parse command line for connector
* @connector - connector to parse line for
@@ -111,7 +97,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
namelen = i;
if (!refresh_specified && !bpp_specified &&
!yres_specified) {
- refresh = my_atoi(&name[i+1]);
+ refresh = simple_strtol(&name[i+1], NULL, 10);
refresh_specified = 1;
if (cvt || rb)
cvt = 0;
@@ -121,7 +107,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
case '-':
namelen = i;
if (!bpp_specified && !yres_specified) {
- bpp = my_atoi(&name[i+1]);
+ bpp = simple_strtol(&name[i+1], NULL, 10);
bpp_specified = 1;
if (cvt || rb)
cvt = 0;
@@ -130,7 +116,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
break;
case 'x':
if (!yres_specified) {
- yres = my_atoi(&name[i+1]);
+ yres = simple_strtol(&name[i+1], NULL, 10);
yres_specified = 1;
} else
goto done;
@@ -170,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
}
}
if (i < 0 && yres_specified) {
- xres = my_atoi(name);
+ xres = simple_strtol(name, NULL, 10);
res_specified = 1;
}
done:
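
simple_strtol() consumes a leading digit run and stops at the first non-digit, which is the same behavior the removed my_atoi() had, only using the stock kernel helper. The user-space strtol() shows the semantics the mode-line parser relies on:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *mode = "1024x768-16@60";
        char *end;
        long xres = strtol(mode, &end, 10);

        printf("xres=%ld rest=%s\n", xres, end); /* xres=1024 rest=x768-16@60 */
        return 0;
    }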
@@ -694,7 +680,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
int i;
if (var->pixclock != 0) {
- DRM_ERROR("PIXEL CLCOK SET\n");
+ DRM_ERROR("PIXEL CLOCK SET\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8bf3770..aa89d4b 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -192,9 +192,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference_unlocked(obj);
return 0;
}
@@ -325,9 +323,7 @@ again:
}
err:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -358,9 +354,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
if (ret)
return ret;
@@ -390,7 +384,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
- drm_gem_object_handle_unreference(obj);
+ drm_gem_object_handle_unreference_unlocked(obj);
return 0;
}
@@ -403,16 +397,25 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
- mutex_lock(&dev->struct_mutex);
idr_for_each(&file_private->object_idr,
&drm_gem_object_release_handle, NULL);
idr_destroy(&file_private->object_idr);
- mutex_unlock(&dev->struct_mutex);
+}
+
+static void
+drm_gem_object_free_common(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ fput(obj->filp);
+ atomic_dec(&dev->object_count);
+ atomic_sub(obj->size, &dev->object_memory);
+ kfree(obj);
}
/**
* Called after the last reference to the object has been lost.
+ * Must be called holding struct_mutex.
*
* Frees the object
*/
@@ -427,14 +430,40 @@ drm_gem_object_free(struct kref *kref)
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
- fput(obj->filp);
- atomic_dec(&dev->object_count);
- atomic_sub(obj->size, &dev->object_memory);
- kfree(obj);
+ drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
/**
+ * Called after the last reference to the object has been lost.
+ * Must be called without holding struct_mutex
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free_unlocked(struct kref *kref)
+{
+ struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+ struct drm_device *dev = obj->dev;
+
+ if (dev->driver->gem_free_object_unlocked != NULL)
+ dev->driver->gem_free_object_unlocked(obj);
+ else if (dev->driver->gem_free_object != NULL) {
+ mutex_lock(&dev->struct_mutex);
+ dev->driver->gem_free_object(obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ drm_gem_object_free_common(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free_unlocked);
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+ BUG();
+}
+
+/**
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
@@ -458,8 +487,10 @@ drm_gem_object_handle_free(struct kref *kref)
/*
* The object name held a reference to this object, drop
* that now.
+ *
+ * This cannot be the last reference, since the handle holds one too.
*/
- drm_gem_object_unreference(obj);
+ kref_put(&obj->refcount, drm_gem_object_ref_bug);
} else
spin_unlock(&dev->object_name_lock);
@@ -477,11 +508,8 @@ EXPORT_SYMBOL(drm_gem_vm_open);
void drm_gem_vm_close(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a894ade..1376dfe 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -162,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- if (!IS_IRONLAKE(dev)) {
+ if (!HAS_PCH_SPLIT(dev)) {
seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
seq_printf(m, "Interrupt identity: %08x\n",
@@ -350,6 +350,36 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
return 0;
}
+static const char *pin_flag(int pinned)
+{
+ if (pinned > 0)
+ return " P";
+ else if (pinned < 0)
+ return " p";
+ else
+ return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+ switch (tiling) {
+ default:
+ case I915_TILING_NONE: return "";
+ case I915_TILING_X: return " X";
+ case I915_TILING_Y: return " Y";
+ }
+}
+
+static const char *dirty_flag(int dirty)
+{
+ return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+ return purgeable ? " purgeable" : "";
+}
+
static int i915_error_state(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -357,6 +387,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
+ int i, page, offset, elt;
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (!dev_priv->first_error) {
@@ -368,6 +399,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
+ seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
@@ -379,6 +411,59 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
}
+ seq_printf(m, "seqno: 0x%08x\n", error->seqno);
+
+ if (error->active_bo_count) {
+ seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
+
+ for (i = 0; i < error->active_bo_count; i++) {
+ seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
+ error->active_bo[i].gtt_offset,
+ error->active_bo[i].size,
+ error->active_bo[i].read_domains,
+ error->active_bo[i].write_domain,
+ error->active_bo[i].seqno,
+ pin_flag(error->active_bo[i].pinned),
+ tiling_flag(error->active_bo[i].tiling),
+ dirty_flag(error->active_bo[i].dirty),
+ purgeable_flag(error->active_bo[i].purgeable));
+
+ if (error->active_bo[i].name)
+ seq_printf(m, " (name: %d)", error->active_bo[i].name);
+ if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
+ seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
+
+ seq_printf(m, "\n");
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
+ if (error->batchbuffer[i]) {
+ struct drm_i915_error_object *obj = error->batchbuffer[i];
+
+ seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+ }
+ }
+
+ if (error->ringbuffer) {
+ struct drm_i915_error_object *obj = error->ringbuffer;
+
+ seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+ }
out:
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -386,6 +471,165 @@ out:
return 0;
}
+static int i915_rstdby_delays(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 crstanddelay = I915_READ16(CRSTANDVID);
+
+ seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
+
+ return 0;
+}
+
+static int i915_cur_delayinfo(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
+
+ seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
+ seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
+ seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
+ rgvswctl & 0x3f);
+
+ return 0;
+}
+
+static int i915_delayfreq_table(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 delayfreq;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
+ seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
+ }
+
+ return 0;
+}
+
+static inline int MAP_TO_MV(int map)
+{
+ return 1250 - (map * 25);
+}
+
+static int i915_inttoext_table(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 inttoext;
+ int i;
+
+ for (i = 1; i <= 32; i++) {
+ inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
+ seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
+ }
+
+ return 0;
+}
+
+static int i915_drpc_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
+
+ seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
+ "yes" : "no");
+ seq_printf(m, "Boost freq: %d\n",
+ (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+ MEMMODE_BOOST_FREQ_SHIFT);
+ seq_printf(m, "HW control enabled: %s\n",
+ rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
+ seq_printf(m, "SW control enabled: %s\n",
+ rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
+ seq_printf(m, "Gated voltage change: %s\n",
+ rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
+ seq_printf(m, "Starting frequency: P%d\n",
+ (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+ seq_printf(m, "Max frequency: P%d\n",
+ (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+ seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+
+ return 0;
+}
+
+static int i915_fbc_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_crtc *crtc;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ bool fbc_enabled = false;
+
+ if (!dev_priv->display.fbc_enabled) {
+ seq_printf(m, "FBC unsupported on this chipset\n");
+ return 0;
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (!crtc->enabled)
+ continue;
+ if (dev_priv->display.fbc_enabled(crtc))
+ fbc_enabled = true;
+ }
+
+ if (fbc_enabled) {
+ seq_printf(m, "FBC enabled\n");
+ } else {
+ seq_printf(m, "FBC disabled: ");
+ switch (dev_priv->no_fbc_reason) {
+ case FBC_STOLEN_TOO_SMALL:
+ seq_printf(m, "not enough stolen memory");
+ break;
+ case FBC_UNSUPPORTED_MODE:
+ seq_printf(m, "mode not supported");
+ break;
+ case FBC_MODE_TOO_LARGE:
+ seq_printf(m, "mode too large");
+ break;
+ case FBC_BAD_PLANE:
+ seq_printf(m, "FBC unsupported on plane");
+ break;
+ case FBC_NOT_TILED:
+ seq_printf(m, "scanout buffer not tiled");
+ break;
+ default:
+ seq_printf(m, "unknown reason");
+ }
+ seq_printf(m, "\n");
+ }
+ return 0;
+}
+
+static int i915_sr_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ bool sr_enabled = false;
+
+ if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev))
+ sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ else if (IS_I915GM(dev))
+ sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+ else if (IS_PINEVIEW(dev))
+ sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+
+ seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
+ "disabled");
+
+ return 0;
+}
+
static int
i915_wedged_open(struct inode *inode,
struct file *filp)
@@ -503,6 +747,13 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_ringbuffer_info", i915_ringbuffer_info, 0},
{"i915_batchbuffers", i915_batchbuffer_info, 0},
{"i915_error_state", i915_error_state, 0},
+ {"i915_rstdby_delays", i915_rstdby_delays, 0},
+ {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
+ {"i915_delayfreq_table", i915_delayfreq_table, 0},
+ {"i915_inttoext_table", i915_inttoext_table, 0},
+ {"i915_drpc_info", i915_drpc_info, 0},
+ {"i915_fbc_status", i915_fbc_status, 0},
+ {"i915_sr_status", i915_sr_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
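All of the new read-only entries above follow the same seq_file pattern that the drm_debugfs machinery wraps for the driver. For reference, a minimal sketch of that pattern using the stock debugfs/seq_file helpers of this kernel era; the file name and the value printed are illustrative:

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

/* The show callback only emits text; seq_file handles buffering. */
static int demo_show(struct seq_file *m, void *unused)
{
	seq_printf(m, "demo value: %d\n", 42);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* Registered with, e.g.:
 * debugfs_create_file("demo", S_IRUGO, parent_dir, NULL, &demo_fops); */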
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2307f98..8bfc0bb 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -35,6 +35,9 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/vgaarb.h>
+#include <linux/acpi.h>
+#include <linux/pnp.h>
+#include <linux/vga_switcheroo.h>
/* Really want an OS-independent resettable timer. Would like to have
* this loop run for (eg) 3 sec, but have the timer reset every time
@@ -933,6 +936,120 @@ static int i915_get_bridge_dev(struct drm_device *dev)
return 0;
}
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4*4096)
+
+#define DEVEN_REG 0x54
+#define DEVEN_MCHBAR_EN (1 << 28)
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp_lo, temp_hi = 0;
+ u64 mchbar_addr;
+ int ret = 0;
+
+ if (IS_I965G(dev))
+ pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+ pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+ mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+ /* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+ if (mchbar_addr &&
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
+ ret = 0;
+ goto out;
+ }
+#endif
+
+ /* Get some space for it */
+ ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
+ MCHBAR_SIZE, MCHBAR_SIZE,
+ PCIBIOS_MIN_MEM,
+ 0, pcibios_align_resource,
+ dev_priv->bridge_dev);
+ if (ret) {
+ DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+ dev_priv->mch_res.start = 0;
+ goto out;
+ }
+
+ if (IS_I965G(dev))
+ pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+ upper_32_bits(dev_priv->mch_res.start));
+
+ pci_write_config_dword(dev_priv->bridge_dev, reg,
+ lower_32_bits(dev_priv->mch_res.start));
+out:
+ return ret;
+}
+
+/* Setup MCHBAR if possible; note in dev_priv whether we must disable it again */
+static void
+intel_setup_mchbar(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+ bool enabled;
+
+ dev_priv->mchbar_need_disable = false;
+
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+ enabled = !!(temp & DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ enabled = temp & 1;
+ }
+
+ /* If it's already enabled, don't have to do anything */
+ if (enabled)
+ return;
+
+ if (intel_alloc_mchbar_resource(dev))
+ return;
+
+ dev_priv->mchbar_need_disable = true;
+
+ /* Space is allocated or reserved, so enable it. */
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
+ temp | DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
+ }
+}
+
+static void
+intel_teardown_mchbar(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+
+ if (dev_priv->mchbar_need_disable) {
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+ temp &= ~DEVEN_MCHBAR_EN;
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ temp &= ~1;
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
+ }
+ }
+
+ if (dev_priv->mch_res.start)
+ release_resource(&dev_priv->mch_res);
+}
+
/**
* i915_probe_agp - get AGP bootup configuration
* @pdev: PCI device
@@ -978,59 +1095,123 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
* Some of the preallocated space is taken by the GTT
* and popup. GTT is 1K per MB of aperture size, and popup is 4K.
*/
- if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
+ if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
overhead = 4096;
else
overhead = (*aperture_size / 1024) + 4096;
- switch (tmp & INTEL_GMCH_GMS_MASK) {
- case INTEL_855_GMCH_GMS_DISABLED:
- DRM_ERROR("video memory is disabled\n");
- return -1;
- case INTEL_855_GMCH_GMS_STOLEN_1M:
- stolen = 1 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_4M:
- stolen = 4 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_8M:
- stolen = 8 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_16M:
- stolen = 16 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_32M:
- stolen = 32 * 1024 * 1024;
- break;
- case INTEL_915G_GMCH_GMS_STOLEN_48M:
- stolen = 48 * 1024 * 1024;
- break;
- case INTEL_915G_GMCH_GMS_STOLEN_64M:
- stolen = 64 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_128M:
- stolen = 128 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_256M:
- stolen = 256 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_96M:
- stolen = 96 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_160M:
- stolen = 160 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_224M:
- stolen = 224 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_352M:
- stolen = 352 * 1024 * 1024;
- break;
- default:
- DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
- tmp & INTEL_GMCH_GMS_MASK);
- return -1;
+ if (IS_GEN6(dev)) {
+ /* SNB has memory control reg at 0x50.w */
+ pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
+
+ switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
+ case INTEL_855_GMCH_GMS_DISABLED:
+ DRM_ERROR("video memory is disabled\n");
+ return -1;
+ case SNB_GMCH_GMS_STOLEN_32M:
+ stolen = 32 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_64M:
+ stolen = 64 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_96M:
+ stolen = 96 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_128M:
+ stolen = 128 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_160M:
+ stolen = 160 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_192M:
+ stolen = 192 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_224M:
+ stolen = 224 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_256M:
+ stolen = 256 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_288M:
+ stolen = 288 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_320M:
+ stolen = 320 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_352M:
+ stolen = 352 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_384M:
+ stolen = 384 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_416M:
+ stolen = 416 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_448M:
+ stolen = 448 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_480M:
+ stolen = 480 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_512M:
+ stolen = 512 * 1024 * 1024;
+ break;
+ default:
+ DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+ tmp & SNB_GMCH_GMS_STOLEN_MASK);
+ return -1;
+ }
+ } else {
+ switch (tmp & INTEL_GMCH_GMS_MASK) {
+ case INTEL_855_GMCH_GMS_DISABLED:
+ DRM_ERROR("video memory is disabled\n");
+ return -1;
+ case INTEL_855_GMCH_GMS_STOLEN_1M:
+ stolen = 1 * 1024 * 1024;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_4M:
+ stolen = 4 * 1024 * 1024;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_8M:
+ stolen = 8 * 1024 * 1024;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_16M:
+ stolen = 16 * 1024 * 1024;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_32M:
+ stolen = 32 * 1024 * 1024;
+ break;
+ case INTEL_915G_GMCH_GMS_STOLEN_48M:
+ stolen = 48 * 1024 * 1024;
+ break;
+ case INTEL_915G_GMCH_GMS_STOLEN_64M:
+ stolen = 64 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_128M:
+ stolen = 128 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_256M:
+ stolen = 256 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_96M:
+ stolen = 96 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_160M:
+ stolen = 160 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_224M:
+ stolen = 224 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_352M:
+ stolen = 352 * 1024 * 1024;
+ break;
+ default:
+ DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+ tmp & INTEL_GMCH_GMS_MASK);
+ return -1;
+ }
}
+
*preallocated_size = stolen - overhead;
*start = overhead;
@@ -1064,7 +1245,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
int gtt_offset, gtt_size;
if (IS_I965G(dev)) {
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
gtt_offset = 2*1024*1024;
gtt_size = 2*1024*1024;
} else {
@@ -1133,6 +1314,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
/* Leave 1M for line length buffer & misc. */
compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
if (!compressed_fb) {
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
i915_warn_stolen(dev);
return;
}
@@ -1140,6 +1322,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
if (!compressed_fb) {
i915_warn_stolen(dev);
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
return;
}
@@ -1199,6 +1382,32 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
+static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+ if (state == VGA_SWITCHEROO_ON) {
+ printk(KERN_INFO "i915: switched on\n");
+ /* i915 resume handler doesn't set to D0 */
+ pci_set_power_state(dev->pdev, PCI_D0);
+ i915_resume(dev);
+ } else {
+ printk(KERN_ERR "i915: switched off\n");
+ i915_suspend(dev, pmm);
+ }
+}
+
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+ can_switch = (dev->open_count == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+}
+
static int i915_load_modeset_init(struct drm_device *dev,
unsigned long prealloc_start,
unsigned long prealloc_size,
@@ -1260,6 +1469,12 @@ static int i915_load_modeset_init(struct drm_device *dev,
if (ret)
goto destroy_ringbuffer;
+ ret = vga_switcheroo_register_client(dev->pdev,
+ i915_switcheroo_set_state,
+ i915_switcheroo_can_switch);
+ if (ret)
+ goto destroy_ringbuffer;
+
intel_modeset_init(dev);
ret = drm_irq_install(dev);
@@ -1281,7 +1496,9 @@ static int i915_load_modeset_init(struct drm_device *dev,
return 0;
destroy_ringbuffer:
+ mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
+ mutex_unlock(&dev->struct_mutex);
out:
return ret;
}
@@ -1445,11 +1662,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ intel_setup_mchbar(dev);
+
i915_gem_load(dev);
/* Init HWS */
@@ -1523,6 +1743,8 @@ int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ i915_destroy_error_state(dev);
+
destroy_workqueue(dev_priv->wq);
del_timer_sync(&dev_priv->hangcheck_timer);
@@ -1544,6 +1766,7 @@ int i915_driver_unload(struct drm_device *dev)
dev_priv->child_dev_num = 0;
}
drm_irq_uninstall(dev);
+ vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
@@ -1569,6 +1792,8 @@ int i915_driver_unload(struct drm_device *dev)
intel_cleanup_overlay(dev);
}
+ intel_teardown_mchbar(dev);
+
pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);
@@ -1611,6 +1836,7 @@ void i915_driver_lastclose(struct drm_device * dev)
if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
drm_fb_helper_restore();
+ vga_switcheroo_process_delayed_switch();
return;
}
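The Sandybridge branch of the stolen-memory decode above enumerates sixteen 32MB steps, so the GMS field effectively encodes a multiple of 32MB. A table-free sketch of the same decode; the field position is an assumption here, not the driver's actual SNB_GMCH_* defines:

#include <stdint.h>

/* Assumed layout: GMS in bits 7:3 of the SNB GMCH control word,
 * value N meaning N * 32MB of stolen memory (0 = disabled). */
static uint64_t snb_decode_stolen_bytes(uint16_t gmch_ctrl)
{
	unsigned int gms = (gmch_ctrl >> 3) & 0x1f;

	return (uint64_t)gms * 32 * 1024 * 1024;
}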
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cf4cb3e..1b2e954 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,7 @@ unsigned int i915_lvds_downclock = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
static struct drm_driver driver;
+extern int intel_agp_enabled;
#define INTEL_VGA_DEVICE(id, info) { \
.class = PCI_CLASS_DISPLAY_VGA << 8, \
@@ -136,6 +137,16 @@ const static struct intel_device_info intel_ironlake_m_info = {
.has_hotplug = 1,
};
+const static struct intel_device_info intel_sandybridge_d_info = {
+ .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_sandybridge_m_info = {
+ .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_hotplug = 1,
+};
+
const static struct pci_device_id pciidlist[] = {
INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
@@ -167,6 +178,8 @@ const static struct pci_device_id pciidlist[] = {
INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+ INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
{0, 0, 0}
};
@@ -201,7 +214,7 @@ static int i915_drm_freeze(struct drm_device *dev)
return 0;
}
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+int i915_suspend(struct drm_device *dev, pm_message_t state)
{
int error;
@@ -255,7 +268,7 @@ static int i915_drm_thaw(struct drm_device *dev)
return error;
}
-static int i915_resume(struct drm_device *dev)
+int i915_resume(struct drm_device *dev)
{
if (pci_enable_device(dev->pdev))
return -EIO;
@@ -546,6 +559,11 @@ static struct drm_driver driver = {
static int __init i915_init(void)
{
+ if (!intel_agp_enabled) {
+ DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
+ return -ENODEV;
+ }
+
driver.num_ioctls = i915_max_ioctl;
i915_gem_shrinker_init();
@@ -571,6 +589,11 @@ static int __init i915_init(void)
driver.driver_features &= ~DRIVER_MODESET;
#endif
+ if (!(driver.driver_features & DRIVER_MODESET)) {
+ driver.suspend = i915_suspend;
+ driver.resume = i915_resume;
+ }
+
return drm_init(&driver);
}
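The two INTEL_VGA_DEVICE() additions above ride on the usual PCI id-table idiom: each entry's driver_data carries a pointer to a per-chipset info struct, which probe code can dereference for feature flags. A compact sketch with illustrative names (not the driver's macro or structs):

#include <linux/pci.h>

struct chip_info {
	unsigned int gen;
	unsigned int is_mobile:1;
};

static const struct chip_info demo_snb_d = { .gen = 6 };
static const struct chip_info demo_snb_m = { .gen = 6, .is_mobile = 1 };

#define DEMO_VGA_DEVICE(id, info) \
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, (id)), \
	  .driver_data = (kernel_ulong_t)(info) }

static const struct pci_device_id demo_ids[] = {
	DEMO_VGA_DEVICE(0x0102, &demo_snb_d),	/* desktop part */
	DEMO_VGA_DEVICE(0x0106, &demo_snb_m),	/* mobile part */
	{ }
};

/* In probe: const struct chip_info *info =
 *	(const struct chip_info *)id->driver_data; */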
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b99b6a8..979439c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -150,7 +150,27 @@ struct drm_i915_error_state {
u32 instps;
u32 instdone1;
u32 seqno;
+ u64 bbaddr;
struct timeval time;
+ struct drm_i915_error_object {
+ int page_count;
+ u32 gtt_offset;
+ u32 *pages[0];
+ } *ringbuffer, *batchbuffer[2];
+ struct drm_i915_error_buffer {
+ size_t size;
+ u32 name;
+ u32 seqno;
+ u32 gtt_offset;
+ u32 read_domains;
+ u32 write_domain;
+ u32 fence_reg;
+ s32 pinned:2;
+ u32 tiling:2;
+ u32 dirty:1;
+ u32 purgeable:1;
+ } *active_bo;
+ u32 active_bo_count;
};
struct drm_i915_display_funcs {
@@ -192,6 +212,14 @@ struct intel_device_info {
u8 cursor_needs_physical : 1;
};
+enum no_fbc_reason {
+ FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
+ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+ FBC_MODE_TOO_LARGE, /* mode too large for compression */
+ FBC_BAD_PLANE, /* fbc not supported on plane */
+ FBC_NOT_TILED, /* buffer not tiled */
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -452,6 +480,7 @@ typedef struct drm_i915_private {
u32 savePIPEB_DATA_N1;
u32 savePIPEB_LINK_M1;
u32 savePIPEB_LINK_N1;
+ u32 saveMCHBAR_RENDER_STANDBY;
struct {
struct drm_mm gtt_space;
@@ -590,6 +619,14 @@ typedef struct drm_i915_private {
int child_dev_num;
struct child_device_config *child_dev;
struct drm_connector *int_lvds_connector;
+
+ bool mchbar_need_disable;
+
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+
+ enum no_fbc_reason no_fbc_reason;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
@@ -736,6 +773,8 @@ extern unsigned int i915_fbpercrtc;
extern unsigned int i915_powersave;
extern unsigned int i915_lvds_downclock;
+extern int i915_suspend(struct drm_device *dev, pm_message_t state);
+extern int i915_resume(struct drm_device *dev);
extern void i915_save_display(struct drm_device *dev);
extern void i915_restore_display(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
@@ -761,6 +800,7 @@ extern int i965_reset(struct drm_device *dev, u8 flags);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
+void i915_destroy_error_state(struct drm_device *dev);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -897,7 +937,8 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
int tiling_mode);
-bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
+bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
+ int tiling_mode);
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -1026,7 +1067,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
+#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
@@ -1045,8 +1086,29 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define IS_GEN3(dev) (IS_I915G(dev) || \
+ IS_I915GM(dev) || \
+ IS_I945G(dev) || \
+ IS_I945GM(dev) || \
+ IS_G33(dev) || \
+ IS_PINEVIEW(dev))
+#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
+ (dev)->pci_device == 0x2982 || \
+ (dev)->pci_device == 0x2992 || \
+ (dev)->pci_device == 0x29A2 || \
+ (dev)->pci_device == 0x2A02 || \
+ (dev)->pci_device == 0x2A12 || \
+ (dev)->pci_device == 0x2E02 || \
+ (dev)->pci_device == 0x2E12 || \
+ (dev)->pci_device == 0x2E22 || \
+ (dev)->pci_device == 0x2E32 || \
+ (dev)->pci_device == 0x2A42 || \
+ (dev)->pci_device == 0x2E42)
+
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+#define IS_GEN6(dev) ((dev)->pci_device == 0x0102)
+
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
@@ -1067,6 +1129,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
+#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
+ IS_GEN6(dev))
+
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#endif
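The error-object layout added above uses a trailing array of page pointers so a single allocation covers the header plus page_count entries. A standalone sketch of that allocation pattern, with toy names and the C99 flexible array member rather than the older [0] idiom:

#include <stdlib.h>

struct err_obj {
	int page_count;
	unsigned int gtt_offset;
	unsigned int *pages[];	/* flexible array member */
};

static struct err_obj *err_obj_alloc(int page_count)
{
	/* One allocation: header followed by page_count pointers. */
	struct err_obj *o = calloc(1, sizeof(*o) +
				      (size_t)page_count * sizeof(o->pages[0]));

	if (o)
		o->page_count = page_count;
	return o;
}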
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ec8a0d7..fba37e9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -128,9 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference_unlocked(obj);
if (ret)
return ret;
@@ -488,7 +486,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
*/
if (args->offset > obj->size || args->size > obj->size ||
args->offset + args->size > obj->size) {
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
}
@@ -501,7 +499,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
file_priv);
}
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -961,7 +959,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
*/
if (args->offset > obj->size || args->size > obj->size ||
args->offset + args->size > obj->size) {
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
}
@@ -995,7 +993,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
DRM_INFO("pwrite failed %d\n", ret);
#endif
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -1138,9 +1136,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
up_write(&current->mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
if (IS_ERR((void *)addr))
return addr;
@@ -1562,6 +1558,38 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
i915_verify_inactive(dev, __FILE__, __LINE__);
}
+static void
+i915_gem_process_flushing_list(struct drm_device *dev,
+ uint32_t flush_domains, uint32_t seqno)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv, *next;
+
+ list_for_each_entry_safe(obj_priv, next,
+ &dev_priv->mm.gpu_write_list,
+ gpu_write_list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ if ((obj->write_domain & flush_domains) ==
+ obj->write_domain) {
+ uint32_t old_write_domain = obj->write_domain;
+
+ obj->write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
+ i915_gem_object_move_to_active(obj, seqno);
+
+ /* update the fence lru list */
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ list_move_tail(&obj_priv->fence_list,
+ &dev_priv->mm.fence_list);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
+ }
+ }
+}
+
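/* An aside on the helper above, sketched with toy types: the _safe
 * iterator is required because list_del_init() unlinks entries while
 * the walk is in progress, so the next pointer must be captured first.
 * The struct below is illustrative, not the driver's object type. */
#include <linux/list.h>

struct demo_item {
	struct list_head link;
	int flushed;
};

static void demo_process(struct list_head *head)
{
	struct demo_item *it, *next;

	list_for_each_entry_safe(it, next, head, link) {
		if (it->flushed)
			list_del_init(&it->link); /* safe: next was saved */
	}
}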
/**
* Creates a new sequence number, emitting a write of it to the status page
* plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1620,29 +1648,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
/* Associate any objects on the flushing list matching the write
* domain we're flushing with our flush.
*/
- if (flush_domains != 0) {
- struct drm_i915_gem_object *obj_priv, *next;
-
- list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.gpu_write_list,
- gpu_write_list) {
- struct drm_gem_object *obj = obj_priv->obj;
-
- if ((obj->write_domain & flush_domains) ==
- obj->write_domain) {
- uint32_t old_write_domain = obj->write_domain;
-
- obj->write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_active(obj, seqno);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
- }
-
- }
+ if (flush_domains != 0)
+ i915_gem_process_flushing_list(dev, flush_domains, seqno);
if (!dev_priv->mm.suspended) {
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1822,7 +1829,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
return -EIO;
if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
ier = I915_READ(IER);
@@ -1991,6 +1998,7 @@ int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret = 0;
@@ -2046,8 +2054,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
}
/* Remove ourselves from the LRU list if present. */
+ spin_lock(&dev_priv->mm.active_list_lock);
if (!list_empty(&obj_priv->list))
list_del_init(&obj_priv->list);
+ spin_unlock(&dev_priv->mm.active_list_lock);
if (i915_gem_object_is_purgeable(obj_priv))
i915_gem_object_truncate(obj);
@@ -2085,11 +2095,34 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
}
static int
+i915_gpu_idle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ bool lists_empty;
+ uint32_t seqno;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list);
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ if (lists_empty)
+ return 0;
+
+ /* Flush everything onto the inactive list. */
+ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+ if (seqno == 0)
+ return -ENOMEM;
+
+ return i915_wait_request(dev, seqno);
+}
+
+static int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- uint32_t seqno;
bool lists_empty;
spin_lock(&dev_priv->mm.active_list_lock);
@@ -2102,12 +2135,7 @@ i915_gem_evict_everything(struct drm_device *dev)
return -ENOSPC;
/* Flush everything (on to the inactive lists) and evict */
- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
- if (seqno == 0)
- return -ENOMEM;
-
- ret = i915_wait_request(dev, seqno);
+ ret = i915_gpu_idle(dev);
if (ret)
return ret;
@@ -2265,6 +2293,28 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
return 0;
}
+static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+ struct drm_gem_object *obj = reg->obj;
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int regnum = obj_priv->fence_reg;
+ uint64_t val;
+
+ val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj_priv->gtt_offset & 0xfffff000;
+ val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
+ SANDYBRIDGE_FENCE_PITCH_SHIFT;
+
+ if (obj_priv->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
+
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
+}
+
static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
struct drm_gem_object *obj = reg->obj;
@@ -2361,6 +2411,58 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
+static int i915_find_fence_reg(struct drm_device *dev)
+{
+ struct drm_i915_fence_reg *reg = NULL;
+ struct drm_i915_gem_object *obj_priv = NULL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj = NULL;
+ int i, avail, ret;
+
+ /* First try to find a free reg */
+ avail = 0;
+ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+ reg = &dev_priv->fence_regs[i];
+ if (!reg->obj)
+ return i;
+
+ obj_priv = reg->obj->driver_private;
+ if (!obj_priv->pin_count)
+ avail++;
+ }
+
+ if (avail == 0)
+ return -ENOSPC;
+
+ /* None available, try to steal one or wait for a user to finish */
+ i = I915_FENCE_REG_NONE;
+ list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
+ fence_list) {
+ obj = obj_priv->obj;
+
+ if (obj_priv->pin_count)
+ continue;
+
+ /* found one! */
+ i = obj_priv->fence_reg;
+ break;
+ }
+
+ BUG_ON(i == I915_FENCE_REG_NONE);
+
+ /* We only have a reference on obj from the active list. put_fence_reg
+ * might drop that one, causing a use-after-free in it. So hold a
+ * private reference to obj like the other callers of put_fence_reg
+ * (set_tiling ioctl) do. */
+ drm_gem_object_reference(obj);
+ ret = i915_gem_object_put_fence_reg(obj);
+ drm_gem_object_unreference(obj);
+ if (ret != 0)
+ return ret;
+
+ return i;
+}
+
/**
* i915_gem_object_get_fence_reg - set up a fence reg for an object
* @obj: object to map through a fence reg
@@ -2381,8 +2483,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
struct drm_i915_fence_reg *reg = NULL;
- struct drm_i915_gem_object *old_obj_priv = NULL;
- int i, ret, avail;
+ int ret;
/* Just update our place in the LRU if our fence is getting used. */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -2410,86 +2511,27 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
break;
}
- /* First try to find a free reg */
- avail = 0;
- for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
- reg = &dev_priv->fence_regs[i];
- if (!reg->obj)
- break;
-
- old_obj_priv = reg->obj->driver_private;
- if (!old_obj_priv->pin_count)
- avail++;
- }
-
- /* None available, try to steal one or wait for a user to finish */
- if (i == dev_priv->num_fence_regs) {
- struct drm_gem_object *old_obj = NULL;
-
- if (avail == 0)
- return -ENOSPC;
-
- list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
- fence_list) {
- old_obj = old_obj_priv->obj;
-
- if (old_obj_priv->pin_count)
- continue;
-
- /* Take a reference, as otherwise the wait_rendering
- * below may cause the object to get freed out from
- * under us.
- */
- drm_gem_object_reference(old_obj);
-
- /* i915 uses fences for GPU access to tiled buffers */
- if (IS_I965G(dev) || !old_obj_priv->active)
- break;
-
- /* This brings the object to the head of the LRU if it
- * had been written to. The only way this should
- * result in us waiting longer than the expected
- * optimal amount of time is if there was a
- * fence-using buffer later that was read-only.
- */
- i915_gem_object_flush_gpu_write_domain(old_obj);
- ret = i915_gem_object_wait_rendering(old_obj);
- if (ret != 0) {
- drm_gem_object_unreference(old_obj);
- return ret;
- }
-
- break;
- }
-
- /*
- * Zap this virtual mapping so we can set up a fence again
- * for this object next time we need it.
- */
- i915_gem_release_mmap(old_obj);
-
- i = old_obj_priv->fence_reg;
- reg = &dev_priv->fence_regs[i];
-
- old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
- list_del_init(&old_obj_priv->fence_list);
-
- drm_gem_object_unreference(old_obj);
- }
+ ret = i915_find_fence_reg(dev);
+ if (ret < 0)
+ return ret;
- obj_priv->fence_reg = i;
+ obj_priv->fence_reg = ret;
+ reg = &dev_priv->fence_regs[obj_priv->fence_reg];
list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
reg->obj = obj;
- if (IS_I965G(dev))
+ if (IS_GEN6(dev))
+ sandybridge_write_fence_reg(reg);
+ else if (IS_I965G(dev))
i965_write_fence_reg(reg);
else if (IS_I9XX(dev))
i915_write_fence_reg(reg);
else
i830_write_fence_reg(reg);
- trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+ trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
+ obj_priv->tiling_mode);
return 0;
}
@@ -2508,9 +2550,12 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
- if (IS_I965G(dev))
+ if (IS_GEN6(dev)) {
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
+ (obj_priv->fence_reg * 8), 0);
+ } else if (IS_I965G(dev)) {
I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
- else {
+ } else {
uint32_t fence_reg;
if (obj_priv->fence_reg < 8)
@@ -2544,6 +2589,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
return 0;
+ /* If we've changed tiling, GTT-mappings of the object
+ * need to re-fault to ensure that the correct fence register
+ * setup is in place.
+ */
+ i915_gem_release_mmap(obj);
+
/* On the i915, GPU access to tiled buffers is via a fence,
* therefore we must wait for any outstanding access to complete
* before clearing the fence.
@@ -2552,12 +2603,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
int ret;
i915_gem_object_flush_gpu_write_domain(obj);
- i915_gem_object_flush_gtt_write_domain(obj);
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
return ret;
}
+ i915_gem_object_flush_gtt_write_domain(obj);
i915_gem_clear_fence_reg (obj);
return 0;
@@ -2697,7 +2748,6 @@ static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- uint32_t seqno;
uint32_t old_write_domain;
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
@@ -2706,9 +2756,8 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
/* Queue the GPU write cache flushing we need. */
old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
- seqno = i915_add_request(dev, NULL, obj->write_domain);
+ (void) i915_add_request(dev, NULL, obj->write_domain);
BUG_ON(obj->write_domain);
- i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
@@ -3247,7 +3296,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
obj_priv->tiling_mode != I915_TILING_NONE;
/* Check fence reg constraints and rebind if necessary */
- if (need_fence && !i915_obj_fenceable(dev, obj))
+ if (need_fence && !i915_gem_object_fence_offset_ok(obj,
+ obj_priv->tiling_mode))
i915_gem_object_unbind(obj);
/* Choose the GTT offset for our buffer and put it there. */
@@ -3317,6 +3367,16 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
}
/* Validate that the target is in a valid r/w GPU domain */
+ if (reloc->write_domain & (reloc->write_domain - 1)) {
+ DRM_ERROR("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return -EINVAL;
+ }
if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
reloc->read_domains & I915_GEM_DOMAIN_CPU) {
DRM_ERROR("reloc with read/write CPU domains: "
@@ -4445,8 +4505,7 @@ int
i915_gem_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t seqno, cur_seqno, last_seqno;
- int stuck, ret;
+ int ret;
mutex_lock(&dev->struct_mutex);
@@ -4455,115 +4514,36 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- */
- dev_priv->mm.suspended = 1;
- del_timer(&dev_priv->hangcheck_timer);
-
- /* Cancel the retire work handler, wait for it to finish if running
- */
- mutex_unlock(&dev->struct_mutex);
- cancel_delayed_work_sync(&dev_priv->mm.retire_work);
- mutex_lock(&dev->struct_mutex);
-
- i915_kernel_lost_context(dev);
-
- /* Flush the GPU along with all non-CPU write domains
- */
- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-
- if (seqno == 0) {
+ ret = i915_gpu_idle(dev);
+ if (ret) {
mutex_unlock(&dev->struct_mutex);
- return -ENOMEM;
+ return ret;
}
- dev_priv->mm.waiting_gem_seqno = seqno;
- last_seqno = 0;
- stuck = 0;
- for (;;) {
- cur_seqno = i915_get_gem_seqno(dev);
- if (i915_seqno_passed(cur_seqno, seqno))
- break;
- if (last_seqno == cur_seqno) {
- if (stuck++ > 100) {
- DRM_ERROR("hardware wedged\n");
- atomic_set(&dev_priv->mm.wedged, 1);
- DRM_WAKEUP(&dev_priv->irq_queue);
- break;
- }
+ /* Under UMS, be paranoid and evict. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = i915_gem_evict_from_inactive_list(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
- msleep(10);
- last_seqno = cur_seqno;
- }
- dev_priv->mm.waiting_gem_seqno = 0;
-
- i915_gem_retire_requests(dev);
-
- spin_lock(&dev_priv->mm.active_list_lock);
- if (!atomic_read(&dev_priv->mm.wedged)) {
- /* Active and flushing should now be empty as we've
- * waited for a sequence higher than any pending execbuffer
- */
- WARN_ON(!list_empty(&dev_priv->mm.active_list));
- WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
- /* Request should now be empty as we've also waited
- * for the last request in the list
- */
- WARN_ON(!list_empty(&dev_priv->mm.request_list));
}
- /* Empty the active and flushing lists to inactive. If there's
- * anything left at this point, it means that we're wedged and
- * nothing good's going to happen by leaving them there. So strip
- * the GPU domains and just stuff them onto inactive.
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
+ * And not confound mm.suspended!
*/
- while (!list_empty(&dev_priv->mm.active_list)) {
- struct drm_gem_object *obj;
- uint32_t old_write_domain;
-
- obj = list_first_entry(&dev_priv->mm.active_list,
- struct drm_i915_gem_object,
- list)->obj;
- old_write_domain = obj->write_domain;
- obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
- spin_unlock(&dev_priv->mm.active_list_lock);
-
- while (!list_empty(&dev_priv->mm.flushing_list)) {
- struct drm_gem_object *obj;
- uint32_t old_write_domain;
-
- obj = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list)->obj;
- old_write_domain = obj->write_domain;
- obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
-
-
- /* Move all inactive buffers out of the GTT. */
- ret = i915_gem_evict_from_inactive_list(dev);
- WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ dev_priv->mm.suspended = 1;
+ del_timer(&dev_priv->hangcheck_timer);
+ i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
+
mutex_unlock(&dev->struct_mutex);
+ /* Cancel the retire work handler, which should be idle now. */
+ cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+
return 0;
}
@@ -4607,8 +4587,13 @@ i915_gem_init_hws(struct drm_device *dev)
}
dev_priv->hws_obj = obj;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- I915_READ(HWS_PGA); /* posting read */
+ if (IS_GEN6(dev)) {
+ I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
+ I915_READ(HWS_PGA_GEN6); /* posting read */
+ } else {
+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ I915_READ(HWS_PGA); /* posting read */
+ }
DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
return 0;
@@ -4850,7 +4835,8 @@ i915_gem_load(struct drm_device *dev)
spin_unlock(&shrink_list_lock);
/* Old X drivers will take 0-2 for front, back, depth buffers */
- dev_priv->fence_reg_start = 3;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev_priv->fence_reg_start = 3;
if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dev_priv->num_fence_regs = 16;
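i915_find_fence_reg() above factors the allocation policy into two passes: take the first free register, otherwise steal the oldest unpinned one from the fence LRU. The same policy in a self-contained sketch, with toy structures rather than the driver's types:

#include <stddef.h>

struct demo_slot {
	const void *owner;	/* NULL when free */
	int pinned;
};

/* lru[] lists slot indices oldest-first, mirroring the fence LRU. */
static int demo_find_slot(const struct demo_slot *s, const int *lru, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!s[i].owner)
			return i;		/* first free slot */

	for (i = 0; i < n; i++)
		if (!s[lru[i]].pinned)
			return lru[i];		/* steal oldest unpinned */

	return -1;				/* everything pinned: ENOSPC */
}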
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index df278b2..b5c55d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,8 +25,6 @@
*
*/
-#include <linux/acpi.h>
-#include <linux/pnp.h>
#include "linux/string.h"
#include "linux/bitops.h"
#include "drmP.h"
@@ -83,120 +81,6 @@
* to match what the GPU expects.
*/
-#define MCHBAR_I915 0x44
-#define MCHBAR_I965 0x48
-#define MCHBAR_SIZE (4*4096)
-
-#define DEVEN_REG 0x54
-#define DEVEN_MCHBAR_EN (1 << 28)
-
-/* Allocate space for the MCH regs if needed, return nonzero on error */
-static int
-intel_alloc_mchbar_resource(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp_lo, temp_hi = 0;
- u64 mchbar_addr;
- int ret = 0;
-
- if (IS_I965G(dev))
- pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
- pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
- mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
-
- /* If ACPI doesn't have it, assume we need to allocate it ourselves */
-#ifdef CONFIG_PNP
- if (mchbar_addr &&
- pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
- ret = 0;
- goto out;
- }
-#endif
-
- /* Get some space for it */
- ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
- MCHBAR_SIZE, MCHBAR_SIZE,
- PCIBIOS_MIN_MEM,
- 0, pcibios_align_resource,
- dev_priv->bridge_dev);
- if (ret) {
- DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
- dev_priv->mch_res.start = 0;
- goto out;
- }
-
- if (IS_I965G(dev))
- pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
- upper_32_bits(dev_priv->mch_res.start));
-
- pci_write_config_dword(dev_priv->bridge_dev, reg,
- lower_32_bits(dev_priv->mch_res.start));
-out:
- return ret;
-}
-
-/* Setup MCHBAR if possible, return true if we should disable it again */
-static bool
-intel_setup_mchbar(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
- bool need_disable = false, enabled;
-
- if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
- enabled = !!(temp & DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- enabled = temp & 1;
- }
-
- /* If it's already enabled, don't have to do anything */
- if (enabled)
- goto out;
-
- if (intel_alloc_mchbar_resource(dev))
- goto out;
-
- need_disable = true;
-
- /* Space is allocated or reserved, so enable it. */
- if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
- temp | DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
- }
-out:
- return need_disable;
-}
-
-static void
-intel_teardown_mchbar(struct drm_device *dev, bool disable)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
-
- if (disable) {
- if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
- temp &= ~DEVEN_MCHBAR_EN;
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- temp &= ~1;
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
- }
- }
-
- if (dev_priv->mch_res.start)
- release_resource(&dev_priv->mch_res);
-}
-
/**
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
@@ -207,9 +91,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- bool need_disable;
- if (IS_IRONLAKE(dev)) {
+ if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
/* On Ironlake, whatever the DRAM configuration, the GPU always
* uses the same swizzling setup.
*/
@@ -224,9 +107,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
} else if (IS_MOBILE(dev)) {
uint32_t dcc;
- /* Try to make sure MCHBAR is enabled before poking at it */
- need_disable = intel_setup_mchbar(dev);
-
/* On mobile 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
@@ -266,8 +146,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
-
- intel_teardown_mchbar(dev, need_disable);
} else {
/* The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
@@ -302,39 +180,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
-
-/**
- * Returns whether an object is currently fenceable. If not, it may need
- * to be unbound and have its pitch adjusted.
- */
-bool
-i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- if (IS_I965G(dev)) {
- /* The 965 can have fences at any page boundary. */
- if (obj->size & 4095)
- return false;
- return true;
- } else if (IS_I9XX(dev)) {
- if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
- return false;
- } else {
- if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
- return false;
- }
-
- /* Power of two sized... */
- if (obj->size & (obj->size - 1))
- return false;
-
- /* Objects must be size aligned as well */
- if (obj_priv->gtt_offset & (obj->size - 1))
- return false;
- return true;
-}
-
/* Check pitch constriants for all chips & tiling formats */
bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
@@ -391,7 +236,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
return true;
}
-static bool
+bool
i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
{
struct drm_device *dev = obj->dev;
@@ -438,9 +283,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj_priv = obj->driver_private;
if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
}
@@ -493,12 +336,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
goto err;
}
- /* If we've changed tiling, GTT-mappings of the object
- * need to re-fault to ensure that the correct fence register
- * setup is in place.
- */
- i915_gem_release_mmap(obj);
-
obj_priv->tiling_mode = args->tiling_mode;
obj_priv->stride = args->stride;
}
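The removed i915_obj_fenceable() and the now-exported i915_gem_object_fence_offset_ok() both encode the classic pre-965 fence constraints: the region must sit inside the fenceable range, be power-of-two sized, and be aligned to its own size. A sketch of those three checks; the start mask is a plain parameter here, not a hardware define:

#include <stdbool.h>
#include <stdint.h>

static bool demo_fence_ok(uint32_t offset, uint32_t size, uint32_t start_mask)
{
	if (size == 0 || (size & (size - 1)))
		return false;			/* not a power of two */
	if (offset & ~start_mask)
		return false;			/* outside fenceable range */
	return (offset & (size - 1)) == 0;	/* aligned to its own size */
}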
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a17d6bd..5388354 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -166,7 +166,7 @@ void intel_enable_asle (struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
else
i915_enable_pipestat(dev_priv, 1,
@@ -269,6 +269,57 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_sysfs_hotplug_event(dev);
}
+static void i915_handle_rps_change(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 busy_up, busy_down, max_avg, min_avg;
+ u16 rgvswctl;
+ u8 new_delay = dev_priv->cur_delay;
+
+ I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
+ busy_up = I915_READ(RCPREVBSYTUPAVG);
+ busy_down = I915_READ(RCPREVBSYTDNAVG);
+ max_avg = I915_READ(RCBMAXAVG);
+ min_avg = I915_READ(RCBMINAVG);
+
+ /* Handle RCS change request from hw */
+ if (busy_up > max_avg) {
+ if (dev_priv->cur_delay != dev_priv->max_delay)
+ new_delay = dev_priv->cur_delay - 1;
+ if (new_delay < dev_priv->max_delay)
+ new_delay = dev_priv->max_delay;
+ } else if (busy_down < min_avg) {
+ if (dev_priv->cur_delay != dev_priv->min_delay)
+ new_delay = dev_priv->cur_delay + 1;
+ if (new_delay > dev_priv->min_delay)
+ new_delay = dev_priv->min_delay;
+ }
+
+ DRM_DEBUG("rps change requested: %d -> %d\n",
+ dev_priv->cur_delay, new_delay);
+
+ rgvswctl = I915_READ(MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_ERROR("gpu busy, RCS change rejected\n");
+ return; /* still busy with another command */
+ }
+
+ /* Program the new state */
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ POSTING_READ(MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+
+ dev_priv->cur_delay = new_delay;
+
+ DRM_DEBUG("rps changed\n");
+
+ return;
+}
+
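/* Note on the comparisons above: the delay values are inverted, so a
 * numerically smaller delay means a higher frequency and
 * max_delay <= cur_delay <= min_delay holds. The same step-and-clamp
 * policy with illustrative names: */
static unsigned char demo_step_delay(unsigned char cur,
				     unsigned char max_delay,  /* fastest */
				     unsigned char min_delay,  /* slowest */
				     int busier)
{
	unsigned char next = cur;

	if (busier && cur != max_delay)
		next = cur - 1;			/* step toward fastest */
	else if (!busier && cur != min_delay)
		next = cur + 1;			/* step toward slowest */

	if (next < max_delay)
		next = max_delay;
	if (next > min_delay)
		next = min_delay;
	return next;
}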
irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -331,6 +382,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
}
+ if (de_iir & DE_PCU_EVENT) {
+ I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
+ i915_handle_rps_change(dev);
+ }
+
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
@@ -376,6 +432,121 @@ static void i915_error_work_func(struct work_struct *work)
}
}
+static struct drm_i915_error_object *
+i915_error_object_create(struct drm_device *dev,
+ struct drm_gem_object *src)
+{
+ struct drm_i915_error_object *dst;
+ struct drm_i915_gem_object *src_priv;
+ int page, page_count;
+
+ if (src == NULL)
+ return NULL;
+
+ src_priv = src->driver_private;
+ if (src_priv->pages == NULL)
+ return NULL;
+
+ page_count = src->size / PAGE_SIZE;
+
+ dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
+ if (dst == NULL)
+ return NULL;
+
+ for (page = 0; page < page_count; page++) {
+ void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (d == NULL)
+ goto unwind;
+ s = kmap_atomic(src_priv->pages[page], KM_USER0);
+ memcpy(d, s, PAGE_SIZE);
+ kunmap_atomic(s, KM_USER0);
+ dst->pages[page] = d;
+ }
+ dst->page_count = page_count;
+ dst->gtt_offset = src_priv->gtt_offset;
+
+ return dst;
+
+unwind:
+ while (page--)
+ kfree(dst->pages[page]);
+ kfree(dst);
+ return NULL;
+}
+
+static void
+i915_error_object_free(struct drm_i915_error_object *obj)
+{
+ int page;
+
+ if (obj == NULL)
+ return;
+
+ for (page = 0; page < obj->page_count; page++)
+ kfree(obj->pages[page]);
+
+ kfree(obj);
+}
+
+static void
+i915_error_state_free(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ i915_error_object_free(error->batchbuffer[0]);
+ i915_error_object_free(error->batchbuffer[1]);
+ i915_error_object_free(error->ringbuffer);
+ kfree(error->active_bo);
+ kfree(error);
+}
+
+static u32
+i915_get_bbaddr(struct drm_device *dev, u32 *ring)
+{
+ u32 cmd;
+
+ if (IS_I830(dev) || IS_845G(dev))
+ cmd = MI_BATCH_BUFFER;
+ else if (IS_I965G(dev))
+ cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ else
+ cmd = (MI_BATCH_BUFFER_START | (2 << 6));
+
+ return ring[0] == cmd ? ring[1] : 0;
+}
+
+static u32
+i915_ringbuffer_last_batch(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 head, bbaddr;
+ u32 *ring;
+
+ /* Locate the current position in the ringbuffer and walk back
+ * to find the most recently dispatched batch buffer.
+ */
+ bbaddr = 0;
+ head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ ring = (u32 *)(dev_priv->ring.virtual_start + head);
+
+ while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+ bbaddr = i915_get_bbaddr(dev, ring);
+ if (bbaddr)
+ break;
+ }
+
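+	/* Nothing found between head and the start of the ring; the last
+	 * batch may sit behind a wrap, so rescan from the top of the ring.
+	 */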
+ if (bbaddr == 0) {
+ ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
+ while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+ bbaddr = i915_get_bbaddr(dev, ring);
+ if (bbaddr)
+ break;
+ }
+ }
+
+ return bbaddr;
+}
+
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
@@ -388,19 +559,26 @@ static void i915_error_work_func(struct work_struct *work)
static void i915_capture_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
struct drm_i915_error_state *error;
+ struct drm_gem_object *batchbuffer[2];
unsigned long flags;
+ u32 bbaddr;
+ int count;
spin_lock_irqsave(&dev_priv->error_lock, flags);
- if (dev_priv->first_error)
- goto out;
+ error = dev_priv->first_error;
+ spin_unlock_irqrestore(&dev_priv->error_lock, flags);
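+	/* The unlocked check above is only an optimistic probe; first_error
+	 * is re-checked under the lock below before the capture is published.
+	 */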
+ if (error)
+ return;
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
- DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n");
- goto out;
+ DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+ return;
}
+ error->seqno = i915_get_gem_seqno(dev);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
error->pipeastat = I915_READ(PIPEASTAT);
@@ -411,6 +589,7 @@ static void i915_capture_error_state(struct drm_device *dev)
error->ipehr = I915_READ(IPEHR);
error->instdone = I915_READ(INSTDONE);
error->acthd = I915_READ(ACTHD);
+ error->bbaddr = 0;
} else {
error->ipeir = I915_READ(IPEIR_I965);
error->ipehr = I915_READ(IPEHR_I965);
@@ -418,14 +597,101 @@ static void i915_capture_error_state(struct drm_device *dev)
error->instps = I915_READ(INSTPS);
error->instdone1 = I915_READ(INSTDONE1);
error->acthd = I915_READ(ACTHD_I965);
+ error->bbaddr = I915_READ64(BB_ADDR);
}
- do_gettimeofday(&error->time);
+ bbaddr = i915_ringbuffer_last_batch(dev);
+
+ /* Grab the current batchbuffer, most likely to have crashed. */
+ batchbuffer[0] = NULL;
+ batchbuffer[1] = NULL;
+ count = 0;
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ if (batchbuffer[0] == NULL &&
+ bbaddr >= obj_priv->gtt_offset &&
+ bbaddr < obj_priv->gtt_offset + obj->size)
+ batchbuffer[0] = obj;
+
+ if (batchbuffer[1] == NULL &&
+ error->acthd >= obj_priv->gtt_offset &&
+ error->acthd < obj_priv->gtt_offset + obj->size &&
+ batchbuffer[0] != obj)
+ batchbuffer[1] = obj;
+
+ count++;
+ }
- dev_priv->first_error = error;
+ /* We need to copy these to an anonymous buffer as the simplest
+	 * method to avoid being overwritten by userspace.
+ */
+ error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
+ error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
+
+ /* Record the ringbuffer */
+ error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);
+
+ /* Record buffers on the active list. */
+ error->active_bo = NULL;
+ error->active_bo_count = 0;
+
+ if (count)
+ error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
+ GFP_ATOMIC);
+
+ if (error->active_bo) {
+ int i = 0;
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ error->active_bo[i].size = obj->size;
+ error->active_bo[i].name = obj->name;
+ error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
+ error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
+ error->active_bo[i].read_domains = obj->read_domains;
+ error->active_bo[i].write_domain = obj->write_domain;
+ error->active_bo[i].fence_reg = obj_priv->fence_reg;
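+			/* Pin state encoding: 0 unpinned, 1 pinned by the
+			 * kernel, -1 pinned by userspace as well.
+			 */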
+ error->active_bo[i].pinned = 0;
+ if (obj_priv->pin_count > 0)
+ error->active_bo[i].pinned = 1;
+ if (obj_priv->user_pin_count > 0)
+ error->active_bo[i].pinned = -1;
+ error->active_bo[i].tiling = obj_priv->tiling_mode;
+ error->active_bo[i].dirty = obj_priv->dirty;
+ error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
+
+ if (++i == count)
+ break;
+ }
+ error->active_bo_count = i;
+ }
+
+ do_gettimeofday(&error->time);
-out:
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
+ if (dev_priv->first_error == NULL) {
+ dev_priv->first_error = error;
+ error = NULL;
+ }
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+
+ if (error)
+ i915_error_state_free(dev, error);
+}
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+
+ spin_lock(&dev_priv->error_lock);
+ error = dev_priv->first_error;
+ dev_priv->first_error = NULL;
+ spin_unlock(&dev_priv->error_lock);
+
+ if (error)
+ i915_error_state_free(dev, error);
}
/**
@@ -576,7 +842,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
atomic_inc(&dev_priv->irq_received);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return ironlake_irq_handler(dev);
iir = I915_READ(IIR);
@@ -737,7 +1003,7 @@ void i915_user_irq_get(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -753,7 +1019,7 @@ void i915_user_irq_put(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -861,7 +1127,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
return -EINVAL;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else if (IS_I965G(dev))
@@ -883,7 +1149,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else
@@ -897,7 +1163,7 @@ void i915_enable_interrupt (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
opregion_enable_asle(dev);
dev_priv->irq_enabled = 1;
}
@@ -973,7 +1239,11 @@ void i915_hangcheck_elapsed(unsigned long data)
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t acthd;
-
+
+ /* No reset support on this chip yet. */
+ if (IS_GEN6(dev))
+ return;
+
if (!IS_I965G(dev))
acthd = I915_READ(ACTHD);
else
@@ -1064,6 +1334,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
(void) I915_READ(SDEIER);
+ if (IS_IRONLAKE_M(dev)) {
+ /* Clear & enable PCU event interrupts */
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
+ ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+ }
+
return 0;
}
@@ -1076,7 +1353,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
ironlake_irq_preinstall(dev);
return;
}
@@ -1108,7 +1385,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return ironlake_irq_postinstall(dev);
/* Unmask the interrupts that we always want on. */
@@ -1196,7 +1473,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
dev_priv->vblank_pipe = 0;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
ironlake_irq_uninstall(dev);
return;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ab1bd2d..3d59862 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -53,6 +53,25 @@
#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
+#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
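+/* Decode sketch (not a helper added by this patch): for a non-zero field,
+ * stolen size in MB is ((gmch_ctrl & SNB_GMCH_GMS_STOLEN_MASK) >> 3) * 32.
+ */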
+
/* PCI config space */
#define HPLLCC 0xc0 /* 855 only */
@@ -61,6 +80,7 @@
#define GC_CLOCK_100_200 (1 << 0)
#define GC_CLOCK_100_133 (2 << 0)
#define GC_CLOCK_166_250 (3 << 0)
+#define GCFGC2 0xda
#define GCFGC 0xf0 /* 915+ only */
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
@@ -234,6 +254,9 @@
#define I965_FENCE_REG_VALID (1<<0)
#define I965_FENCE_MAX_PITCH_VAL 0x0400
+#define FENCE_REG_SANDYBRIDGE_0 0x100000
+#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
+
/*
* Instruction and interrupt control regs
*/
@@ -265,6 +288,7 @@
#define INSTDONE1 0x0207c /* 965+ only */
#define ACTHD_I965 0x02074
#define HWS_PGA 0x02080
+#define HWS_PGA_GEN6 0x04080
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
#define PWRCTXA 0x2088 /* 965GM+ only */
@@ -282,7 +306,7 @@
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
#define I915_HWB_OOM_INTERRUPT (1<<13)
#define I915_SYNC_STATUS_INTERRUPT (1<<12)
#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
@@ -306,11 +330,14 @@
#define I915_ERROR_MEMORY_REFRESH (1<<1)
#define I915_ERROR_INSTRUCTION (1<<0)
#define INSTPM 0x020c0
+#define INSTPM_SELF_EN (1<<12) /* 915GM only */
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
#define FW_BLC_SELF 0x020e0 /* 915+ only */
-#define FW_BLC_SELF_EN (1<<15)
+#define FW_BLC_SELF_EN_MASK (1<<31)
+#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
+#define FW_BLC_SELF_EN (1<<15) /* 945 only */
#define MM_BURST_LENGTH 0x00700000
#define MM_FIFO_WATERMARK 0x0001F000
#define LM_BURST_LENGTH 0x00000700
@@ -324,6 +351,7 @@
#define CM0_COLOR_EVICT_DISABLE (1<<3)
#define CM0_DEPTH_WRITE_DISABLE (1<<1)
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
@@ -784,10 +812,144 @@
#define CLKCFG_MEM_800 (3 << 4)
#define CLKCFG_MEM_MASK (7 << 4)
-/** GM965 GM45 render standby register */
-#define MCHBAR_RENDER_STANDBY 0x111B8
+#define CRSTANDVID 0x11100
+#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
+#define PXVFREQ_PX_MASK 0x7f000000
+#define PXVFREQ_PX_SHIFT 24
+#define VIDFREQ_BASE 0x11110
+#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */
+#define VIDFREQ2 0x11114
+#define VIDFREQ3 0x11118
+#define VIDFREQ4 0x1111c
+#define VIDFREQ_P0_MASK 0x1f000000
+#define VIDFREQ_P0_SHIFT 24
+#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
+#define VIDFREQ_P0_CSCLK_SHIFT 20
+#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
+#define VIDFREQ_P0_CRCLK_SHIFT 16
+#define VIDFREQ_P1_MASK 0x00001f00
+#define VIDFREQ_P1_SHIFT 8
+#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
+#define VIDFREQ_P1_CSCLK_SHIFT 4
+#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
+#define INTTOEXT_BASE_ILK 0x11300
+#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */
+#define INTTOEXT_MAP3_SHIFT 24
+#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
+#define INTTOEXT_MAP2_SHIFT 16
+#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
+#define INTTOEXT_MAP1_SHIFT 8
+#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
+#define INTTOEXT_MAP0_SHIFT 0
+#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
+#define MEMSWCTL 0x11170 /* Ironlake only */
+#define MEMCTL_CMD_MASK 0xe000
+#define MEMCTL_CMD_SHIFT 13
+#define MEMCTL_CMD_RCLK_OFF 0
+#define MEMCTL_CMD_RCLK_ON 1
+#define MEMCTL_CMD_CHFREQ 2
+#define MEMCTL_CMD_CHVID 3
+#define MEMCTL_CMD_VMMOFF 4
+#define MEMCTL_CMD_VMMON 5
+#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears
+ when command complete */
+#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
+#define MEMCTL_FREQ_SHIFT 8
+#define MEMCTL_SFCAVM (1<<7)
+#define MEMCTL_TGT_VID_MASK 0x007f
+#define MEMIHYST 0x1117c
+#define MEMINTREN 0x11180 /* 16 bits */
+#define MEMINT_RSEXIT_EN (1<<8)
+#define MEMINT_CX_SUPR_EN (1<<7)
+#define MEMINT_CONT_BUSY_EN (1<<6)
+#define MEMINT_AVG_BUSY_EN (1<<5)
+#define MEMINT_EVAL_CHG_EN (1<<4)
+#define MEMINT_MON_IDLE_EN (1<<3)
+#define MEMINT_UP_EVAL_EN (1<<2)
+#define MEMINT_DOWN_EVAL_EN (1<<1)
+#define MEMINT_SW_CMD_EN (1<<0)
+#define MEMINTRSTR 0x11182 /* 16 bits */
+#define MEM_RSEXIT_MASK 0xc000
+#define MEM_RSEXIT_SHIFT 14
+#define MEM_CONT_BUSY_MASK 0x3000
+#define MEM_CONT_BUSY_SHIFT 12
+#define MEM_AVG_BUSY_MASK 0x0c00
+#define MEM_AVG_BUSY_SHIFT 10
+#define MEM_EVAL_CHG_MASK 0x0300
+#define MEM_EVAL_BUSY_SHIFT 8
+#define MEM_MON_IDLE_MASK 0x00c0
+#define MEM_MON_IDLE_SHIFT 6
+#define MEM_UP_EVAL_MASK 0x0030
+#define MEM_UP_EVAL_SHIFT 4
+#define MEM_DOWN_EVAL_MASK 0x000c
+#define MEM_DOWN_EVAL_SHIFT 2
+#define MEM_SW_CMD_MASK 0x0003
+#define MEM_INT_STEER_GFX 0
+#define MEM_INT_STEER_CMR 1
+#define MEM_INT_STEER_SMI 2
+#define MEM_INT_STEER_SCI 3
+#define MEMINTRSTS 0x11184
+#define MEMINT_RSEXIT (1<<7)
+#define MEMINT_CONT_BUSY (1<<6)
+#define MEMINT_AVG_BUSY (1<<5)
+#define MEMINT_EVAL_CHG (1<<4)
+#define MEMINT_MON_IDLE (1<<3)
+#define MEMINT_UP_EVAL (1<<2)
+#define MEMINT_DOWN_EVAL (1<<1)
+#define MEMINT_SW_CMD (1<<0)
+#define MEMMODECTL 0x11190
+#define MEMMODE_BOOST_EN (1<<31)
+#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
+#define MEMMODE_BOOST_FREQ_SHIFT 24
+#define MEMMODE_IDLE_MODE_MASK 0x00030000
+#define MEMMODE_IDLE_MODE_SHIFT 16
+#define MEMMODE_IDLE_MODE_EVAL 0
+#define MEMMODE_IDLE_MODE_CONT 1
+#define MEMMODE_HWIDLE_EN (1<<15)
+#define MEMMODE_SWMODE_EN (1<<14)
+#define MEMMODE_RCLK_GATE (1<<13)
+#define MEMMODE_HW_UPDATE (1<<12)
+#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
+#define MEMMODE_FSTART_SHIFT 8
+#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
+#define MEMMODE_FMAX_SHIFT 4
+#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
+#define RCBMAXAVG 0x1119c
+#define MEMSWCTL2 0x1119e /* Cantiga only */
+#define SWMEMCMD_RENDER_OFF (0 << 13)
+#define SWMEMCMD_RENDER_ON (1 << 13)
+#define SWMEMCMD_SWFREQ (2 << 13)
+#define SWMEMCMD_TARVID (3 << 13)
+#define SWMEMCMD_VRM_OFF (4 << 13)
+#define SWMEMCMD_VRM_ON (5 << 13)
+#define CMDSTS (1<<12)
+#define SFCAVM (1<<11)
+#define SWFREQ_MASK 0x0380 /* P0-7 */
+#define SWFREQ_SHIFT 7
+#define TARVID_MASK 0x001f
+#define MEMSTAT_CTG 0x111a0
+#define RCBMINAVG 0x111a0
+#define RCUPEI 0x111b0
+#define RCDNEI 0x111b4
+#define MCHBAR_RENDER_STANDBY 0x111b8
#define RCX_SW_EXIT (1<<23)
#define RSX_STATUS_MASK 0x00700000
+#define VIDCTL 0x111c0
+#define VIDSTS 0x111c8
+#define VIDSTART 0x111cc /* 8 bits */
+#define MEMSTAT_ILK 0x111f8
+#define MEMSTAT_VID_MASK 0x7f00
+#define MEMSTAT_VID_SHIFT 8
+#define MEMSTAT_PSTATE_MASK 0x00f8
+#define MEMSTAT_PSTATE_SHIFT 3
+#define MEMSTAT_MON_ACTV (1<<2)
+#define MEMSTAT_SRC_CTL_MASK 0x0003
+#define MEMSTAT_SRC_CTL_CORE 0
+#define MEMSTAT_SRC_CTL_TRB 1
+#define MEMSTAT_SRC_CTL_THM 2
+#define MEMSTAT_SRC_CTL_STDBY 3
+#define RCPREVBSYTUPAVG 0x113b8
+#define RCPREVBSYTDNAVG 0x113bc
#define PEG_BAND_GAP_DATA 0x14d68
/*
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a3b90c9..ac0d1a7 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -682,6 +682,8 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
+ I915_WRITE(MCHBAR_RENDER_STANDBY,
+ dev_priv->saveMCHBAR_RENDER_STANDBY);
} else {
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
@@ -745,11 +747,16 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveGTIMR = I915_READ(GTIMR);
dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
+ dev_priv->saveMCHBAR_RENDER_STANDBY =
+ I915_READ(MCHBAR_RENDER_STANDBY);
} else {
dev_priv->saveIER = I915_READ(IER);
dev_priv->saveIMR = I915_READ(IMR);
}
+ if (IS_IRONLAKE_M(dev))
+ ironlake_disable_drps(dev);
+
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -820,6 +827,9 @@ int i915_restore_state(struct drm_device *dev)
/* Clock gating state */
intel_init_clock_gating(dev);
+ if (IS_IRONLAKE_M(dev))
+ ironlake_enable_drps(dev);
+
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 15fbc1b..70c9d4b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -247,6 +247,7 @@ static void
parse_general_features(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
+ struct drm_device *dev = dev_priv->dev;
struct bdb_general_features *general;
/* Set sensible defaults in case we can't find the general block */
@@ -263,7 +264,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
if (IS_I85X(dev_priv->dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 66 : 48;
- else if (IS_IRONLAKE(dev_priv->dev))
+ else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 100 : 120;
else
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 79dd402..fccf074 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp, reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = PCH_ADPA;
else
reg = ADPA;
@@ -113,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
else
dpll_md_reg = DPLL_B_MD;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
adpa_reg = PCH_ADPA;
else
adpa_reg = ADPA;
@@ -122,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
*/
- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
dpll_md = I915_READ(dpll_md_reg);
I915_WRITE(dpll_md_reg,
dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -136,11 +136,11 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (intel_crtc->pipe == 0) {
adpa |= ADPA_PIPE_A_SELECT;
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_A, 0);
} else {
adpa |= ADPA_PIPE_B_SELECT;
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_B, 0);
}
@@ -202,7 +202,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
u32 hotplug_en;
int i, tries = 0;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return intel_ironlake_crt_detect_hotplug(connector);
/*
@@ -524,7 +524,7 @@ void intel_crt_init(struct drm_device *dev)
&intel_output->enc);
/* Set up the DDC bus. */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
i2c_reg = PCH_GPIOA;
else {
i2c_reg = GPIOA;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b27202d..9cd6de5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -232,7 +232,7 @@ struct intel_limit {
#define G4X_P2_DISPLAY_PORT_FAST 10
#define G4X_P2_DISPLAY_PORT_LIMIT 0
-/* Ironlake */
+/* Ironlake / Sandybridge */
/* We calculate the clock using (register_value + 2) for
 N/M1/M2, so the range values here are (actual_value - 2).
*/
@@ -690,7 +690,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
limit = intel_ironlake_limit(crtc);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
@@ -886,7 +886,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
int lvds_reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
@@ -1188,25 +1188,30 @@ static void intel_update_fbc(struct drm_crtc *crtc,
if (intel_fb->obj->size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
"compression\n");
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
goto out_disable;
}
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
+ dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
goto out_disable;
}
if ((mode->hdisplay > 2048) ||
(mode->vdisplay > 1536)) {
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
if (obj_priv->tiling_mode != I915_TILING_X) {
DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
}
@@ -1366,7 +1371,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dspcntr &= ~DISPPLANE_TILED;
}
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
/* must disable */
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -1427,7 +1432,7 @@ static void i915_disable_vga (struct drm_device *dev)
u8 sr1;
u32 vga_reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
vga_reg = CPU_VGACNTRL;
else
vga_reg = VGACNTRL;
@@ -2111,7 +2116,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
/* FDI link clock is fixed at 2.7G */
if (mode->clock * 3 > 27000 * 4)
return MODE_CLOCK_HIGH;
@@ -2757,11 +2762,22 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
srwm = total_size - sr_entries;
if (srwm < 0)
srwm = 1;
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
+
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else if (IS_I915GM(dev)) {
+ /* 915M has a smaller SRWM field */
+ I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+ I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+ }
} else {
/* Turn off self refresh if both pipes are enabled */
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
+ if (IS_I945G(dev) || IS_I945GM(dev)) {
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
+ } else if (IS_I915GM(dev)) {
+ I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+ }
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -2967,7 +2983,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
refclk / 1000);
} else if (IS_I9XX(dev)) {
refclk = 96000;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
 refclk = 120000; /* 120 MHz refclk */
} else {
refclk = 48000;
@@ -3025,7 +3041,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
/* FDI link */
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
int lane, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
according to current link config */
@@ -3102,7 +3118,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 * PCH B stepping; previous chipset steppings should ignore
 * this setting.
*/
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
temp = I915_READ(PCH_DREF_CONTROL);
/* Always enable nonspread source */
temp &= ~DREF_NONSPREAD_SOURCE_MASK;
@@ -3149,7 +3165,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
reduced_clock.m2;
}
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
dpll = DPLL_VGA_MODE_DIS;
if (IS_I9XX(dev)) {
@@ -3162,7 +3178,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- else if (IS_IRONLAKE(dev))
+ else if (HAS_PCH_SPLIT(dev))
dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
if (is_dp)
@@ -3174,7 +3190,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
else {
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
if (IS_G4X(dev) && has_reduced_clock)
dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
@@ -3193,7 +3209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
} else {
if (is_lvds) {
@@ -3227,7 +3243,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 /* Ironlake's plane is tied to its pipe; bit 24 enables
 color space conversion */
- if (!IS_IRONLAKE(dev)) {
+ if (!HAS_PCH_SPLIT(dev)) {
if (pipe == 0)
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
@@ -3254,14 +3270,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Disable the panel fitter if it was on our pipe */
- if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe)
+ if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
I915_WRITE(PFIT_CONTROL, 0);
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
/* assign to Ironlake registers */
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
fp_reg = pch_fp_reg;
dpll_reg = pch_dpll_reg;
}
@@ -3282,7 +3298,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds) {
u32 lvds;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
lvds = I915_READ(lvds_reg);
@@ -3304,12 +3320,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* set the dithering flag */
if (IS_I965G(dev)) {
if (dev_priv->lvds_dither) {
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
pipeconf |= PIPE_ENABLE_DITHER;
else
lvds |= LVDS_ENABLE_DITHER;
} else {
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
pipeconf &= ~PIPE_ENABLE_DITHER;
else
lvds &= ~LVDS_ENABLE_DITHER;
@@ -3328,7 +3344,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Wait for the clocks to stabilize. */
udelay(150);
- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
if (is_sdvo) {
sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
@@ -3375,14 +3391,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* pipesrc and dspsize control the size that is scaled from, which should
* always be the user's requested size.
*/
- if (!IS_IRONLAKE(dev)) {
+ if (!HAS_PCH_SPLIT(dev)) {
I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
(mode->hdisplay - 1));
I915_WRITE(dsppos_reg, 0);
}
I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
I915_WRITE(link_m1_reg, m_n.link_m);
@@ -3438,7 +3454,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
return;
/* use legacy palette for Ironlake */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
LGC_PALETTE_B;
@@ -3553,11 +3569,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc->cursor_bo = bo;
return 0;
-fail:
- mutex_lock(&dev->struct_mutex);
fail_locked:
- drm_gem_object_unreference(bo);
mutex_unlock(&dev->struct_mutex);
+fail:
+ drm_gem_object_unreference_unlocked(bo);
return ret;
}
@@ -3922,7 +3937,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll = I915_READ(dpll_reg);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -3961,7 +3976,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll = I915_READ(dpll_reg);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -4011,6 +4026,11 @@ static void intel_idle_update(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
+ if (IS_I945G(dev) || IS_I945GM(dev)) {
+ DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+ }
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
@@ -4044,9 +4064,17 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (!dev_priv->busy)
+ if (!dev_priv->busy) {
+ if (IS_I945G(dev) || IS_I945GM(dev)) {
+ u32 fw_blc_self;
+
+ DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
+ fw_blc_self = I915_READ(FW_BLC_SELF);
+ fw_blc_self &= ~FW_BLC_SELF_EN;
+ I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
+ }
dev_priv->busy = true;
- else
+ } else
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
@@ -4058,6 +4086,14 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
intel_fb = to_intel_framebuffer(crtc->fb);
if (intel_fb->obj == obj) {
if (!intel_crtc->busy) {
+ if (IS_I945G(dev) || IS_I945GM(dev)) {
+ u32 fw_blc_self;
+
+ DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
+ fw_blc_self = I915_READ(FW_BLC_SELF);
+ fw_blc_self &= ~FW_BLC_SELF_EN;
+ I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
+ }
/* Non-busy -> busy, upclock */
intel_increase_pllclock(crtc, true);
intel_crtc->busy = true;
@@ -4382,7 +4418,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (IS_MOBILE(dev) && !IS_I830(dev))
intel_lvds_init(dev);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
int found;
if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
@@ -4451,7 +4487,7 @@ static void intel_setup_outputs(struct drm_device *dev)
DRM_DEBUG_KMS("probing DP_D\n");
intel_dp_init(dev, DP_D);
}
- } else if (IS_I8XX(dev))
+ } else if (IS_GEN2(dev))
intel_dvo_init(dev);
if (SUPPORTS_TV(dev))
@@ -4476,9 +4512,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
intelfb_remove(dev, fb);
drm_framebuffer_cleanup(fb);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(intel_fb->obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(intel_fb->obj);
kfree(intel_fb);
}
@@ -4541,9 +4575,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
if (ret) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
return NULL;
}
@@ -4591,6 +4623,91 @@ err_unref:
return NULL;
}
+void ironlake_enable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl;
+ u8 fmax, fmin, fstart, vstart;
+ int i = 0;
+
+ /* 100ms RC evaluation intervals */
+ I915_WRITE(RCUPEI, 100000);
+ I915_WRITE(RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ I915_WRITE(RCBMAXAVG, 90000);
+ I915_WRITE(RCBMINAVG, 80000);
+
+ I915_WRITE(MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
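+	/* Each P-state has its own PXVFREQ word; index by fstart to read
+	 * back the voltage ID programmed for the boot frequency.
+	 */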
+ vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+
+ dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */
+ dev_priv->min_delay = fmin;
+ dev_priv->cur_delay = fstart;
+
+ I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ /*
+ * Interrupts will be enabled in ironlake_irq_postinstall
+ */
+
+ I915_WRITE(VIDSTART, vstart);
+ POSTING_READ(VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ I915_WRITE(MEMMODECTL, rgvmodectl);
+
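+	/* Wait up to ~100ms for any previously issued software command to
+	 * complete before requesting our own frequency change.
+	 */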
+ while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
+ if (i++ > 100) {
+ DRM_ERROR("stuck trying to change perf mode\n");
+ break;
+ }
+ msleep(1);
+ }
+ msleep(1);
+
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ POSTING_READ(MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+}
+
+void ironlake_disable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rgvswctl;
+ u8 fstart;
+
+ /* Ack interrupts, disable EFC interrupt */
+ I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+ I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+ I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+ /* Go back to the starting frequency */
+ fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ msleep(1);
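+	/* Same two-step sequence as in ironlake_enable_drps(): latch the
+	 * state, then set MEMCTL_CMD_STS to kick off the command.
+	 */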
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ msleep(1);
+}
+
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4599,7 +4716,7 @@ void intel_init_clock_gating(struct drm_device *dev)
* Disable clock gating reported to work incorrectly according to the
* specs, but enable as much else as we can.
*/
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
@@ -4672,7 +4789,7 @@ static void intel_init_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* We always want a DPMS function */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
dev_priv->display.dpms = ironlake_crtc_dpms;
else
dev_priv->display.dpms = i9xx_crtc_dpms;
@@ -4715,7 +4832,7 @@ static void intel_init_display(struct drm_device *dev)
i830_get_display_clock_speed;
/* For FIFO watermark updates */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
dev_priv->display.update_wm = NULL;
else if (IS_G4X(dev))
dev_priv->display.update_wm = g4x_update_wm;
@@ -4774,11 +4891,6 @@ void intel_modeset_init(struct drm_device *dev)
DRM_DEBUG_KMS("%d display pipe%s available.\n",
num_pipe, num_pipe > 1 ? "s" : "");
- if (IS_I85X(dev))
- pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock);
- else if (IS_I9XX(dev) || IS_G4X(dev))
- pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock);
-
for (i = 0; i < num_pipe; i++) {
intel_crtc_init(dev, i);
}
@@ -4787,6 +4899,9 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_clock_gating(dev);
+ if (IS_IRONLAKE_M(dev))
+ ironlake_enable_drps(dev);
+
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
@@ -4834,6 +4949,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
drm_gem_object_unreference(dev_priv->pwrctx);
}
+ if (IS_IRONLAKE_M(dev))
+ ironlake_disable_drps(dev);
+
mutex_unlock(&dev->struct_mutex);
drm_mode_config_cleanup(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 439506c..3ef3a0d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -231,7 +231,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
*/
if (IS_eDP(intel_output))
 aux_clock_divider = 225; /* eDP input clock at 450 MHz */
- else if (IS_IRONLAKE(dev))
+ else if (HAS_PCH_SPLIT(dev))
 aux_clock_divider = 62; /* ILK input clock fixed at 125 MHz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
@@ -584,7 +584,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
intel_dp_compute_m_n(3, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
if (intel_crtc->pipe == 0) {
I915_WRITE(TRANSA_DATA_M1,
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
@@ -1176,7 +1176,7 @@ intel_dp_detect(struct drm_connector *connector)
dp_priv->has_audio = false;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return ironlake_dp_detect(connector);
temp = I915_READ(PORT_HOTPLUG_EN);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a51573d..3a467ca 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -209,6 +209,8 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_init_clock_gating(struct drm_device *dev);
+extern void ironlake_enable_drps(struct drm_device *dev);
+extern void ironlake_disable_drps(struct drm_device *dev);
extern int intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index aaabbcb..8cd791d 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
+#include <linux/vga_switcheroo.h>
#include "drmP.h"
#include "drm.h"
@@ -235,6 +236,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
obj_priv->gtt_offset, fbo);
mutex_unlock(&dev->struct_mutex);
+ vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unpin:
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0e268de..a30f8bf 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -82,7 +82,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
 /* HW workaround: the enable bit must be toggled off and on for 12bpc,
 * but we do this unconditionally, which proved more stable in testing.
*/
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
POSTING_READ(hdmi_priv->sdvox_reg);
}
@@ -99,7 +99,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
 /* HW workaround: write this twice, since the first write can
 * otherwise be masked.
*/
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp);
POSTING_READ(hdmi_priv->sdvox_reg);
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 8673c73..fcc753c 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -128,7 +128,7 @@ intel_i2c_reset_gmbus(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_GMBUS0, 0);
} else {
I915_WRITE(GMBUS0, 0);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c2e8a45..14e516f 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 blc_pwm_ctl, reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = BLC_PWM_CPU_CTL;
else
reg = BLC_PWM_CTL;
@@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = BLC_PWM_PCH_CTL2;
else
reg = BLC_PWM_CTL;
@@ -89,17 +89,22 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
static void intel_lvds_set_power(struct drm_device *dev, bool on)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_status, ctl_reg, status_reg;
+ u32 pp_status, ctl_reg, status_reg, lvds_reg;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
status_reg = PCH_PP_STATUS;
+ lvds_reg = PCH_LVDS;
} else {
ctl_reg = PP_CONTROL;
status_reg = PP_STATUS;
+ lvds_reg = LVDS;
}
if (on) {
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
+ POSTING_READ(lvds_reg);
+
I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
POWER_TARGET_ON);
do {
@@ -115,6 +120,9 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
do {
pp_status = I915_READ(status_reg);
} while (pp_status & PP_ON);
+
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
+ POSTING_READ(lvds_reg);
}
}
@@ -137,7 +145,7 @@ static void intel_lvds_save(struct drm_connector *connector)
u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
u32 pwm_ctl_reg;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_ctl_reg = PCH_PP_CONTROL;
@@ -174,7 +182,7 @@ static void intel_lvds_restore(struct drm_connector *connector)
u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
u32 pwm_ctl_reg;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_ctl_reg = PCH_PP_CONTROL;
@@ -297,7 +305,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
/* full screen scale for now */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
goto out;
/* 965+ wants fuzzy fitting */
@@ -327,7 +335,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
- if (!IS_IRONLAKE(dev)) {
+ if (!HAS_PCH_SPLIT(dev)) {
I915_WRITE(BCLRPAT_A, 0);
I915_WRITE(BCLRPAT_B, 0);
}
@@ -548,7 +556,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = BLC_PWM_CPU_CTL;
else
reg = BLC_PWM_CTL;
@@ -587,7 +595,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* settings.
*/
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return;
/*
@@ -655,8 +663,15 @@ static const struct dmi_system_id bad_lid_status[] = {
*/
static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
enum drm_connector_status status = connector_status_connected;
+ /* ACPI lid methods were generally unreliable in this generation, so
+ * don't even bother.
+ */
+ if (IS_GEN2(dev))
+ return connector_status_connected;
+
if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
status = connector_status_disconnected;
@@ -1020,7 +1035,7 @@ void intel_lvds_init(struct drm_device *dev)
return;
}
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
return;
if (dev_priv->edp_support) {
@@ -1123,7 +1138,7 @@ void intel_lvds_init(struct drm_device *dev)
*/
 /* Ironlake: FIXME: if this still fails, do not try pipe mode for now */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
goto failed;
lvds = I915_READ(LVDS);
@@ -1144,7 +1159,7 @@ void intel_lvds_init(struct drm_device *dev)
goto failed;
out:
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
u32 pwm;
/* make sure PWM is enabled */
pwm = I915_READ(BLC_PWM_CPU_CTL2);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 2639591..d355d1d 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -172,7 +172,7 @@ struct overlay_registers {
#define OFC_UPDATE 0x1
#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
-#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev))
+#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
@@ -199,16 +199,11 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
if (OVERLAY_NONPHYSICAL(overlay->dev))
io_mapping_unmap_atomic(overlay->virt_addr);
overlay->virt_addr = NULL;
- I915_READ(OVADD); /* flush wc cashes */
-
return;
}
@@ -225,9 +220,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
overlay->active = 1;
overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
+ BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
OUT_RING(overlay->flip_addr | OFC_UPDATE);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -267,9 +260,7 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- BEGIN_LP_RING(4);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
+ BEGIN_LP_RING(2);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
ADVANCE_LP_RING();
@@ -338,9 +329,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
/* wait for overlay to go idle */
overlay->hw_wedged = SWITCH_OFF_STAGE_1;
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
+ BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -358,9 +347,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
/* turn overlay off */
overlay->hw_wedged = SWITCH_OFF_STAGE_2;
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
+ BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
OUT_RING(flip_addr);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -435,9 +422,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
overlay->hw_wedged = SWITCH_OFF_STAGE_2;
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
+ BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
OUT_RING(flip_addr);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -1179,7 +1164,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
- drm_gem_object_unreference(new_bo);
+ drm_gem_object_unreference_unlocked(new_bo);
kfree(params);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 82678d3..48daee5 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -35,6 +35,7 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
+#include <linux/dmi.h>
static char *tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
@@ -2283,6 +2284,25 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
return 0x72;
}
+static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
+ return 1;
+}
+
+static struct dmi_system_id intel_sdvo_bad_tv[] = {
+ {
+ .callback = intel_sdvo_bad_tv_callback,
+ .ident = "IntelG45/ICH10R/DME1737",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
+ },
+ },
+
+ { } /* terminating entry */
+};
+
static bool
intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
{
@@ -2323,7 +2343,8 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
(1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT);
}
- } else if (flags & SDVO_OUTPUT_SVID0) {
+ } else if ((flags & SDVO_OUTPUT_SVID0) &&
+ !dmi_check_system(intel_sdvo_bad_tv)) {
sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 48c290b..32db806 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -16,7 +16,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o \
- nv40_grctx.o \
+ nv40_grctx.o nv50_grctx.o \
nv04_instmem.o nv50_instmem.o \
nv50_crtc.o nv50_dac.o nv50_sor.o \
nv50_cursor.o nv50_display.o nv50_fbcon.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 48227e7..0e0730a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -11,6 +11,8 @@
#include "nouveau_drm.h"
#include "nv50_display.h"
+#include <linux/vga_switcheroo.h>
+
#define NOUVEAU_DSM_SUPPORTED 0x00
#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
@@ -28,31 +30,30 @@
#define NOUVEAU_DSM_POWER_SPEED 0x01
#define NOUVEAU_DSM_POWER_STAMINA 0x02
-static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
-{
- static char muid[] = {
- 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
- 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
- };
+static struct nouveau_dsm_priv {
+ bool dsm_detected;
+ acpi_handle dhandle;
+ acpi_handle dsm_handle;
+} nouveau_dsm_priv;
+
+static const char nouveau_dsm_muid[] = {
+ 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
+ 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
+};
- struct pci_dev *pdev = dev->pdev;
- struct acpi_handle *handle;
+static int nouveau_dsm(acpi_handle handle, int func, int arg, int *result)
+{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_object_list input;
union acpi_object params[4];
union acpi_object *obj;
int err;
- handle = DEVICE_ACPI_HANDLE(&pdev->dev);
-
- if (!handle)
- return -ENODEV;
-
input.count = 4;
input.pointer = params;
params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(muid);
- params[0].buffer.pointer = (char *)muid;
+ params[0].buffer.length = sizeof(nouveau_dsm_muid);
+ params[0].buffer.pointer = (char *)nouveau_dsm_muid;
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = 0x00000102;
params[2].type = ACPI_TYPE_INTEGER;
@@ -62,7 +63,7 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
err = acpi_evaluate_object(handle, "_DSM", &input, &output);
if (err) {
- NV_INFO(dev, "failed to evaluate _DSM: %d\n", err);
+ printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
return err;
}
@@ -86,40 +87,119 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
return 0;
}
-int nouveau_hybrid_setup(struct drm_device *dev)
+static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
{
- int result;
-
- if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
- &result))
- return -ENODEV;
-
- NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
-
- if (result) { /* Ensure that the external GPU is enabled */
- nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
- nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
- NULL);
- } else { /* Stamina mode - disable the external GPU */
- nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
- NULL);
- nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
- NULL);
- }
+ return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL);
+}
+
+static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state)
+{
+ int arg;
+ if (state == VGA_SWITCHEROO_ON)
+ arg = NOUVEAU_DSM_POWER_SPEED;
+ else
+ arg = NOUVEAU_DSM_POWER_STAMINA;
+ nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL);
+ return 0;
+}
+
+static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
+{
+ if (id == VGA_SWITCHEROO_IGD)
+ return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA);
+ else
+ return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED);
+}
+static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
+ enum vga_switcheroo_state state)
+{
+ if (id == VGA_SWITCHEROO_IGD)
+ return 0;
+
+ return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state);
+}
+
+static int nouveau_dsm_init(void)
+{
return 0;
}
-bool nouveau_dsm_probe(struct drm_device *dev)
+static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
{
- int support = 0;
+ if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ return VGA_SWITCHEROO_IGD;
+ else
+ return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler nouveau_dsm_handler = {
+ .switchto = nouveau_dsm_switchto,
+ .power_state = nouveau_dsm_power_state,
+ .init = nouveau_dsm_init,
+ .get_client_id = nouveau_dsm_get_client_id,
+};
- if (nouveau_dsm(dev, NOUVEAU_DSM_SUPPORTED,
- NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &support))
+static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
+{
+ acpi_handle dhandle, nvidia_handle;
+ acpi_status status;
+ int ret;
+ uint32_t result;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+ status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
+ if (ACPI_FAILURE(status)) {
return false;
+ }
- if (!support)
+	ret = nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED,
+ NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
+ if (ret < 0)
return false;
+ nouveau_dsm_priv.dhandle = dhandle;
+ nouveau_dsm_priv.dsm_handle = nvidia_handle;
return true;
}
+
+static bool nouveau_dsm_detect(void)
+{
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+ struct pci_dev *pdev = NULL;
+ int has_dsm = 0;
+ int vga_count = 0;
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_dsm |= (nouveau_dsm_pci_probe(pdev) == true);
+ }
+
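+	/* Only claim DSM-based switching when exactly two VGA-class devices
+	 * are present and at least one of them exposes the _DSM mux method.
+	 */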
+ if (vga_count == 2 && has_dsm) {
+ acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer);
+ printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
+ acpi_method_name);
+ nouveau_dsm_priv.dsm_detected = true;
+ return true;
+ }
+ return false;
+}
+
+void nouveau_register_dsm_handler(void)
+{
+ bool r;
+
+ r = nouveau_dsm_detect();
+ if (!r)
+ return;
+
+ vga_switcheroo_register_handler(&nouveau_dsm_handler);
+}
+
+void nouveau_unregister_dsm_handler(void)
+{
+ vga_switcheroo_unregister_handler();
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0e9cd1d..71247da 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -311,11 +311,11 @@ valid_reg(struct nvbios *bios, uint32_t reg)
/* C51 has misaligned regs on purpose. Marvellous */
if (reg & 0x2 ||
- (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51))
+ (reg & 0x1 && dev_priv->vbios.chip_version != 0x51))
NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
/* warn on C51 regs that haven't been verified accessible in tracing */
- if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
+ if (reg & 0x1 && dev_priv->vbios.chip_version == 0x51 &&
reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
reg);
@@ -420,7 +420,7 @@ bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
LOG_OLD_VALUE(bios_rd32(bios, reg));
BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
- if (dev_priv->VBIOS.execute) {
+ if (dev_priv->vbios.execute) {
still_alive();
nv_wr32(bios->dev, reg, data);
}
@@ -647,7 +647,7 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
- if (dev_priv->VBIOS.execute) {
+ if (dev_priv->vbios.execute) {
still_alive();
nv_wr32(dev, reg + 4, reg1);
nv_wr32(dev, reg + 0, reg0);
@@ -689,7 +689,7 @@ setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
/*
* For the results of this function to be correct, CR44 must have been
@@ -700,7 +700,7 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
- if (dcb_entry > bios->bdcb.dcb.entries) {
+ if (dcb_entry > bios->dcb.entries) {
NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
"(%02X)\n", dcb_entry);
dcb_entry = 0x7f; /* unused / invalid marker */
@@ -713,25 +713,26 @@ static struct nouveau_i2c_chan *
init_i2c_device_find(struct drm_device *dev, int i2c_index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
if (i2c_index == 0xff) {
/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
- int default_indices = bdcb->i2c_default_indices;
+ int default_indices = dcb->i2c_default_indices;
- if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default)
+ if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
shift = 4;
i2c_index = (default_indices >> shift) & 0xf;
}
if (i2c_index == 0x80) /* g80+ */
- i2c_index = bdcb->i2c_default_indices & 0xf;
+ i2c_index = dcb->i2c_default_indices & 0xf;
return nouveau_i2c_find(dev, i2c_index);
}
-static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
+static uint32_t
+get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
{
/*
* For mlv < 0x80, it is an index into a table of TMDS base addresses.
@@ -744,6 +745,7 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
const int pramdac_offset[13] = {
0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
const uint32_t pramdac_table[4] = {
@@ -756,13 +758,12 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
dcb_entry = dcb_entry_idx_from_crtchead(dev);
if (dcb_entry == 0x7f)
return 0;
- dacoffset = pramdac_offset[
- dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or];
+ dacoffset = pramdac_offset[bios->dcb.entry[dcb_entry].or];
if (mlv == 0x81)
dacoffset ^= 8;
return 0x6808b0 + dacoffset;
} else {
- if (mlv > ARRAY_SIZE(pramdac_table)) {
+ if (mlv >= ARRAY_SIZE(pramdac_table)) {
NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
mlv);
return 0;
@@ -2574,19 +2575,19 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
- const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr];
+ const uint8_t *gpio_table = &bios->data[bios->dcb.gpio_table_ptr];
const uint8_t *gpio_entry;
int i;
if (!iexec->execute)
return 1;
- if (bios->bdcb.version != 0x40) {
+ if (bios->dcb.version != 0x40) {
NV_ERROR(bios->dev, "DCB table not version 4.0\n");
return 0;
}
- if (!bios->bdcb.gpio_table_ptr) {
+ if (!bios->dcb.gpio_table_ptr) {
NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
return 0;
}
@@ -3123,7 +3124,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
struct dcb_entry *dcbent, int head, bool dl)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
struct init_exec iexec = {true, false};
NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
@@ -3140,7 +3141,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
@@ -3194,7 +3195,7 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
* of a list of pxclks and script pointers.
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
uint16_t scriptptr = 0, clktable;
uint8_t clktableptr = 0;
@@ -3261,7 +3262,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head,
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
uint32_t sel_clk_binding, sel_clk;
int ret;
@@ -3395,7 +3396,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
#ifndef __powerpc__
NV_ERROR(dev, "Pointer to flat panel table invalid\n");
#endif
- bios->pub.digital_min_front_porch = 0x4b;
+ bios->digital_min_front_porch = 0x4b;
return 0;
}
@@ -3428,7 +3429,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
* fptable[4] is the minimum
* RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
*/
- bios->pub.digital_min_front_porch = fptable[4];
+ bios->digital_min_front_porch = fptable[4];
ofs = -7;
break;
default:
@@ -3467,7 +3468,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
/* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
if (lth.lvds_ver > 0x10)
- bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
+ bios->fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
/*
* If either the strap or xlated fpindex value are 0xf there is no
@@ -3491,7 +3492,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
if (!mode) /* just checking whether we can produce a mode */
@@ -3562,11 +3563,11 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
* until later, when this function should be called with non-zero pxclk
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
struct lvdstableheader lth;
uint16_t lvdsofs;
- int ret, chip_version = bios->pub.chip_version;
+ int ret, chip_version = bios->chip_version;
ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
if (ret)
@@ -3682,7 +3683,7 @@ bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
uint16_t record, int record_len, int record_nr)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint32_t entry;
uint16_t table;
int i, v;
@@ -3716,7 +3717,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
int *length)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint8_t *table;
if (!bios->display.dp_table_ptr) {
@@ -3725,7 +3726,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
table = &bios->data[bios->display.dp_table_ptr];
- if (table[0] != 0x21) {
+ if (table[0] != 0x20 && table[0] != 0x21) {
NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
table[0]);
return NULL;
@@ -3765,7 +3766,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint8_t *table = &bios->data[bios->display.script_table_ptr];
uint8_t *otable = NULL;
uint16_t script;
@@ -3918,8 +3919,8 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
- int cv = bios->pub.chip_version;
+ struct nvbios *bios = &dev_priv->vbios;
+ int cv = bios->chip_version;
uint16_t clktable = 0, scriptptr;
uint32_t sel_clk_binding, sel_clk;
@@ -3978,8 +3979,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
- int cv = bios->pub.chip_version, pllindex = 0;
+ struct nvbios *bios = &dev_priv->vbios;
+ int cv = bios->chip_version, pllindex = 0;
uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
uint32_t crystal_strap_mask, crystal_straps;
@@ -4332,7 +4333,7 @@ static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint
*/
bios->major_version = bios->data[offset + 3];
- bios->pub.chip_version = bios->data[offset + 2];
+ bios->chip_version = bios->data[offset + 2];
NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
bios->data[offset + 3], bios->data[offset + 2],
bios->data[offset + 1], bios->data[offset]);
@@ -4402,7 +4403,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
}
/* First entry is normal dac, 2nd tv-out perhaps? */
- bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
+ bios->dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
return 0;
}
@@ -4526,8 +4527,8 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
return -ENOSYS;
}
- bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
- bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
+ bios->dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
+ bios->tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
return 0;
}
@@ -4796,11 +4797,11 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
uint16_t legacy_scripts_offset, legacy_i2c_offset;
/* load needed defaults in case we can't parse this info */
- bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
- bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
- bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
- bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
- bios->pub.digital_min_front_porch = 0x4b;
+ bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
+ bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
+ bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
+ bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
+ bios->digital_min_front_porch = 0x4b;
bios->fmaxvco = 256000;
bios->fminvco = 128000;
bios->fp.duallink_transition_clk = 90000;
@@ -4907,10 +4908,10 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
- bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
- bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
- bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
- bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
+ bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
+ bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
+ bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
+ bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
if (bmplength > 74) {
bios->fmaxvco = ROM32(bmp[67]);
@@ -4984,7 +4985,8 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i
else
NV_WARN(dev,
"DCB I2C table has more entries than indexable "
- "(%d entries, max index 15)\n", i2ctable[2]);
+ "(%d entries, max %d)\n", i2ctable[2],
+ DCB_MAX_NUM_I2C_ENTRIES);
entry_len = i2ctable[3];
/* [4] is i2c_default_indices, read in parse_dcb_table() */
}
@@ -5000,8 +5002,8 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i
if (index == 0xf)
return 0;
- if (index > i2c_entries) {
- NV_ERROR(dev, "DCB I2C index too big (%d > %d)\n",
+ if (index >= i2c_entries) {
+ NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
index, i2ctable[2]);
return -ENOENT;
}
@@ -5036,7 +5038,7 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i
static struct dcb_gpio_entry *
new_gpio_entry(struct nvbios *bios)
{
- struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio;
+ struct dcb_gpio_table *gpio = &bios->dcb.gpio;
return &gpio->entry[gpio->entries++];
}
@@ -5045,14 +5047,14 @@ struct dcb_gpio_entry *
nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
int i;
- for (i = 0; i < bios->bdcb.gpio.entries; i++) {
- if (bios->bdcb.gpio.entry[i].tag != tag)
+ for (i = 0; i < bios->dcb.gpio.entries; i++) {
+ if (bios->dcb.gpio.entry[i].tag != tag)
continue;
- return &bios->bdcb.gpio.entry[i];
+ return &bios->dcb.gpio.entry[i];
}
return NULL;
@@ -5100,7 +5102,7 @@ static void
parse_dcb_gpio_table(struct nvbios *bios)
{
struct drm_device *dev = bios->dev;
- uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr;
+ uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr;
uint8_t *gpio_table = &bios->data[gpio_table_ptr];
int header_len = gpio_table[1],
entries = gpio_table[2],
@@ -5108,7 +5110,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
int i;
- if (bios->bdcb.version >= 0x40) {
+ if (bios->dcb.version >= 0x40) {
if (gpio_table_ptr && entry_len != 4) {
NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
return;
@@ -5116,7 +5118,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
parse_entry = parse_dcb40_gpio_entry;
- } else if (bios->bdcb.version >= 0x30) {
+ } else if (bios->dcb.version >= 0x30) {
if (gpio_table_ptr && entry_len != 2) {
NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
return;
@@ -5124,7 +5126,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
parse_entry = parse_dcb30_gpio_entry;
- } else if (bios->bdcb.version >= 0x22) {
+ } else if (bios->dcb.version >= 0x22) {
/*
* DCBs older than v3.0 don't really have a GPIO
* table, instead they keep some GPIO info at fixed
@@ -5158,30 +5160,67 @@ struct dcb_connector_table_entry *
nouveau_bios_connector_entry(struct drm_device *dev, int index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
struct dcb_connector_table_entry *cte;
- if (index >= bios->bdcb.connector.entries)
+ if (index >= bios->dcb.connector.entries)
return NULL;
- cte = &bios->bdcb.connector.entry[index];
+ cte = &bios->dcb.connector.entry[index];
if (cte->type == 0xff)
return NULL;
return cte;
}
+static enum dcb_connector_type
+divine_connector_type(struct nvbios *bios, int index)
+{
+ struct dcb_table *dcb = &bios->dcb;
+ unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
+ int i;
+
+ for (i = 0; i < dcb->entries; i++) {
+ if (dcb->entry[i].connector == index)
+ encoders |= (1 << dcb->entry[i].type);
+ }
+
+ if (encoders & (1 << OUTPUT_DP)) {
+ if (encoders & (1 << OUTPUT_TMDS))
+ type = DCB_CONNECTOR_DP;
+ else
+ type = DCB_CONNECTOR_eDP;
+ } else
+ if (encoders & (1 << OUTPUT_TMDS)) {
+ if (encoders & (1 << OUTPUT_ANALOG))
+ type = DCB_CONNECTOR_DVI_I;
+ else
+ type = DCB_CONNECTOR_DVI_D;
+ } else
+ if (encoders & (1 << OUTPUT_ANALOG)) {
+ type = DCB_CONNECTOR_VGA;
+ } else
+ if (encoders & (1 << OUTPUT_LVDS)) {
+ type = DCB_CONNECTOR_LVDS;
+ } else
+ if (encoders & (1 << OUTPUT_TV)) {
+ type = DCB_CONNECTOR_TV_0;
+ }
+
+ return type;
+}
+
static void
parse_dcb_connector_table(struct nvbios *bios)
{
struct drm_device *dev = bios->dev;
- struct dcb_connector_table *ct = &bios->bdcb.connector;
+ struct dcb_connector_table *ct = &bios->dcb.connector;
struct dcb_connector_table_entry *cte;
- uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr];
+ uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
uint8_t *entry;
int i;
- if (!bios->bdcb.connector_table_ptr) {
+ if (!bios->dcb.connector_table_ptr) {
NV_DEBUG_KMS(dev, "No DCB connector table present\n");
return;
}
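
divine_connector_type() above infers a connector's type from the set of encoder types wired to it in the DCB; a worked example under an assumed layout:

/* Assume connector index 1 has a TMDS and an analog encoder attached. */
unsigned encoders = (1 << OUTPUT_TMDS) | (1 << OUTPUT_ANALOG);

/* The OUTPUT_DP bit is clear, so the TMDS branch is taken, and the
 * analog bit selects DCB_CONNECTOR_DVI_I rather than DCB_CONNECTOR_DVI_D. */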
@@ -5203,6 +5242,7 @@ parse_dcb_connector_table(struct nvbios *bios)
cte->entry = ROM16(entry[0]);
else
cte->entry = ROM32(entry[0]);
+
cte->type = (cte->entry & 0x000000ff) >> 0;
cte->index = (cte->entry & 0x00000f00) >> 8;
switch (cte->entry & 0x00033000) {
@@ -5228,10 +5268,33 @@ parse_dcb_connector_table(struct nvbios *bios)
NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
i, cte->entry, cte->type, cte->index, cte->gpio_tag);
+
+ /* check for known types, fall back to guessing the type
+ * from attached encoders if we hit an unknown.
+ */
+ switch (cte->type) {
+ case DCB_CONNECTOR_VGA:
+ case DCB_CONNECTOR_TV_0:
+ case DCB_CONNECTOR_TV_1:
+ case DCB_CONNECTOR_TV_3:
+ case DCB_CONNECTOR_DVI_I:
+ case DCB_CONNECTOR_DVI_D:
+ case DCB_CONNECTOR_LVDS:
+ case DCB_CONNECTOR_DP:
+ case DCB_CONNECTOR_eDP:
+ case DCB_CONNECTOR_HDMI_0:
+ case DCB_CONNECTOR_HDMI_1:
+ break;
+ default:
+ cte->type = divine_connector_type(bios, cte->index);
+ NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
+ break;
+ }
+
}
}
-static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
+static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
{
struct dcb_entry *entry = &dcb->entry[dcb->entries];
@@ -5241,7 +5304,7 @@ static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
return entry;
}
-static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
+static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
{
struct dcb_entry *entry = new_dcb_entry(dcb);
@@ -5252,7 +5315,7 @@ static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
/* "or" mostly unused in early gen crt modesetting, 0 is fine */
}
-static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
+static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
{
struct dcb_entry *entry = new_dcb_entry(dcb);
@@ -5279,7 +5342,7 @@ static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
#endif
}
-static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
+static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
{
struct dcb_entry *entry = new_dcb_entry(dcb);
@@ -5290,13 +5353,13 @@ static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
}
static bool
-parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
+parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
uint32_t conn, uint32_t conf, struct dcb_entry *entry)
{
entry->type = conn & 0xf;
entry->i2c_index = (conn >> 4) & 0xf;
entry->heads = (conn >> 8) & 0xf;
- if (bdcb->version >= 0x40)
+ if (dcb->version >= 0x40)
entry->connector = (conn >> 12) & 0xf;
entry->bus = (conn >> 16) & 0xf;
entry->location = (conn >> 20) & 0x3;
@@ -5314,7 +5377,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
* Although the rest of a CRT conf dword is usually
* zeros, mac biosen have stuff there so we must mask
*/
- entry->crtconf.maxfreq = (bdcb->version < 0x30) ?
+ entry->crtconf.maxfreq = (dcb->version < 0x30) ?
(conf & 0xffff) * 10 :
(conf & 0xff) * 10000;
break;
@@ -5323,7 +5386,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
uint32_t mask;
if (conf & 0x1)
entry->lvdsconf.use_straps_for_mode = true;
- if (bdcb->version < 0x22) {
+ if (dcb->version < 0x22) {
mask = ~0xd;
/*
* The laptop in bug 14567 lies and claims to not use
@@ -5347,7 +5410,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
* Until we even try to use these on G8x, it's
* useless reporting unknown bits. They all are.
*/
- if (bdcb->version >= 0x40)
+ if (dcb->version >= 0x40)
break;
NV_ERROR(dev, "Unknown LVDS configuration bits, "
@@ -5357,7 +5420,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
}
case OUTPUT_TV:
{
- if (bdcb->version >= 0x30)
+ if (dcb->version >= 0x30)
entry->tvconf.has_component_output = conf & (0x8 << 4);
else
entry->tvconf.has_component_output = false;
@@ -5384,8 +5447,10 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
break;
case 0xe:
/* weird g80 mobile type that "nv" treats as a terminator */
- bdcb->dcb.entries--;
+ dcb->entries--;
return false;
+ default:
+ break;
}
/* unsure what DCB version introduces this, 3.0? */
@@ -5396,7 +5461,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
}
static bool
-parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
+parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
uint32_t conn, uint32_t conf, struct dcb_entry *entry)
{
switch (conn & 0x0000000f) {
@@ -5462,27 +5527,27 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
return true;
}
-static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
+static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
uint32_t conn, uint32_t conf)
{
- struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb);
+ struct dcb_entry *entry = new_dcb_entry(dcb);
bool ret;
- if (bdcb->version >= 0x20)
- ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry);
+ if (dcb->version >= 0x20)
+ ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
else
- ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry);
+ ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
if (!ret)
return ret;
- read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table,
- entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]);
+ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+ entry->i2c_index, &dcb->i2c[entry->i2c_index]);
return true;
}
static
-void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
+void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
{
/*
* DCB v2.0 lists each output combination separately.
@@ -5534,8 +5599,7 @@ static int
parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct bios_parsed_dcb *bdcb = &bios->bdcb;
- struct parsed_dcb *dcb;
+ struct dcb_table *dcb = &bios->dcb;
uint16_t dcbptr = 0, i2ctabptr = 0;
uint8_t *dcbtable;
uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
@@ -5543,9 +5607,6 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
int recordlength = 8, confofs = 4;
int i;
- dcb = bios->pub.dcb = &bdcb->dcb;
- dcb->entries = 0;
-
/* get the offset from 0x36 */
if (dev_priv->card_type > NV_04) {
dcbptr = ROM16(bios->data[0x36]);
@@ -5567,21 +5628,21 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
dcbtable = &bios->data[dcbptr];
/* get DCB version */
- bdcb->version = dcbtable[0];
+ dcb->version = dcbtable[0];
NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
- bdcb->version >> 4, bdcb->version & 0xf);
+ dcb->version >> 4, dcb->version & 0xf);
- if (bdcb->version >= 0x20) { /* NV17+ */
+ if (dcb->version >= 0x20) { /* NV17+ */
uint32_t sig;
- if (bdcb->version >= 0x30) { /* NV40+ */
+ if (dcb->version >= 0x30) { /* NV40+ */
headerlen = dcbtable[1];
entries = dcbtable[2];
recordlength = dcbtable[3];
i2ctabptr = ROM16(dcbtable[4]);
sig = ROM32(dcbtable[6]);
- bdcb->gpio_table_ptr = ROM16(dcbtable[10]);
- bdcb->connector_table_ptr = ROM16(dcbtable[20]);
+ dcb->gpio_table_ptr = ROM16(dcbtable[10]);
+ dcb->connector_table_ptr = ROM16(dcbtable[20]);
} else {
i2ctabptr = ROM16(dcbtable[2]);
sig = ROM32(dcbtable[4]);
@@ -5593,7 +5654,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
"signature (%08X)\n", sig);
return -EINVAL;
}
- } else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */
+ } else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
char sig[8] = { 0 };
strncpy(sig, (char *)&dcbtable[-7], 7);
@@ -5641,14 +5702,11 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
if (!i2ctabptr)
NV_WARN(dev, "No pointer to DCB I2C port table\n");
else {
- bdcb->i2c_table = &bios->data[i2ctabptr];
- if (bdcb->version >= 0x30)
- bdcb->i2c_default_indices = bdcb->i2c_table[4];
+ dcb->i2c_table = &bios->data[i2ctabptr];
+ if (dcb->version >= 0x30)
+ dcb->i2c_default_indices = dcb->i2c_table[4];
}
- parse_dcb_gpio_table(bios);
- parse_dcb_connector_table(bios);
-
if (entries > DCB_MAX_NUM_ENTRIES)
entries = DCB_MAX_NUM_ENTRIES;
@@ -5673,7 +5731,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
dcb->entries, connection, config);
- if (!parse_dcb_entry(dev, bdcb, connection, config))
+ if (!parse_dcb_entry(dev, dcb, connection, config))
break;
}
@@ -5681,18 +5739,22 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
* apart from v2.1+ not being known to require merging, this
* guarantees dcbent->index is the index of the entry in the rom image
*/
- if (bdcb->version < 0x21)
+ if (dcb->version < 0x21)
merge_like_dcb_entries(dev, dcb);
- return dcb->entries ? 0 : -ENXIO;
+ if (!dcb->entries)
+ return -ENXIO;
+
+ parse_dcb_gpio_table(bios);
+ parse_dcb_connector_table(bios);
+ return 0;
}
static void
fixup_legacy_connector(struct nvbios *bios)
{
- struct bios_parsed_dcb *bdcb = &bios->bdcb;
- struct parsed_dcb *dcb = &bdcb->dcb;
- int high = 0, i;
+ struct dcb_table *dcb = &bios->dcb;
+ int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };
/*
* DCB 3.0 also has the table in most cases, but there are some cards
@@ -5700,9 +5762,11 @@ fixup_legacy_connector(struct nvbios *bios)
* indices are all 0. We don't need the connector indices on pre-G80
* chips (yet?) so limit the use to DCB 4.0 and above.
*/
- if (bdcb->version >= 0x40)
+ if (dcb->version >= 0x40)
return;
+ dcb->connector.entries = 0;
+
/*
* No known connector info before v3.0, so make it up. The rule here
* is: anything on the same i2c bus is considered to be on the same
@@ -5710,37 +5774,38 @@ fixup_legacy_connector(struct nvbios *bios)
* its own unique connector index.
*/
for (i = 0; i < dcb->entries; i++) {
- if (dcb->entry[i].i2c_index == 0xf)
- continue;
-
/*
* Ignore the I2C index for on-chip TV-out, as there
* are cards with bogus values (nv31m in bug 23212),
* and it's otherwise useless.
*/
if (dcb->entry[i].type == OUTPUT_TV &&
- dcb->entry[i].location == DCB_LOC_ON_CHIP) {
+ dcb->entry[i].location == DCB_LOC_ON_CHIP)
dcb->entry[i].i2c_index = 0xf;
+ i2c = dcb->entry[i].i2c_index;
+
+ if (i2c_conn[i2c]) {
+ dcb->entry[i].connector = i2c_conn[i2c] - 1;
continue;
}
- dcb->entry[i].connector = dcb->entry[i].i2c_index;
- if (dcb->entry[i].connector > high)
- high = dcb->entry[i].connector;
+ dcb->entry[i].connector = dcb->connector.entries++;
+ if (i2c != 0xf)
+ i2c_conn[i2c] = dcb->connector.entries;
}
- for (i = 0; i < dcb->entries; i++) {
- if (dcb->entry[i].i2c_index != 0xf)
- continue;
-
- dcb->entry[i].connector = ++high;
+ /* Fake the connector table as well as just connector indices */
+ for (i = 0; i < dcb->connector.entries; i++) {
+ dcb->connector.entry[i].index = i;
+ dcb->connector.entry[i].type = divine_connector_type(bios, i);
+ dcb->connector.entry[i].gpio_tag = 0xff;
}
}
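
The rewritten fixup above fabricates one connector per distinct I2C bus, while entries on bus 0xf (including on-chip TV-out, whose index is forced there) each receive a unique index; a worked example for an assumed three-entry DCB:

/* entry 0: analog on i2c bus 1  -> connector 0 (first user of bus 1)
 * entry 1: on-chip TV-out       -> i2c forced to 0xf, connector 1
 * entry 2: TMDS on i2c bus 1    -> connector 0 (bus 1 already mapped)
 * dcb->connector.entries ends up as 2; each fabricated entry then gets
 * index i, a type from divine_connector_type() and gpio_tag 0xff. */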
static void
fixup_legacy_i2c(struct nvbios *bios)
{
- struct parsed_dcb *dcb = &bios->bdcb.dcb;
+ struct dcb_table *dcb = &bios->dcb;
int i;
for (i = 0; i < dcb->entries; i++) {
@@ -5826,7 +5891,7 @@ static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
const uint8_t edid_sig[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
uint16_t offset = 0;
@@ -5859,7 +5924,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
struct dcb_entry *dcbent)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
struct init_exec iexec = { true, false };
mutex_lock(&bios->lock);
@@ -5872,7 +5937,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
static bool NVInitVBIOS(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
memset(bios, 0, sizeof(struct nvbios));
mutex_init(&bios->lock);
@@ -5888,7 +5953,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
static int nouveau_parse_vbios_struct(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
int offset;
@@ -5915,7 +5980,7 @@ int
nouveau_run_vbios_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
int i, ret = 0;
NVLockVgaCrtcs(dev, false);
@@ -5946,9 +6011,9 @@ nouveau_run_vbios_init(struct drm_device *dev)
}
if (dev_priv->card_type >= NV_50) {
- for (i = 0; i < bios->bdcb.dcb.entries; i++) {
+ for (i = 0; i < bios->dcb.entries; i++) {
nouveau_bios_run_display_table(dev,
- &bios->bdcb.dcb.entry[i],
+ &bios->dcb.entry[i],
0, 0);
}
}
@@ -5962,11 +6027,11 @@ static void
nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
struct dcb_i2c_entry *entry;
int i;
- entry = &bios->bdcb.dcb.i2c[0];
+ entry = &bios->dcb.i2c[0];
for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
nouveau_i2c_fini(dev, entry);
}
@@ -5975,13 +6040,11 @@ int
nouveau_bios_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint32_t saved_nv_pextdev_boot_0;
bool was_locked;
int ret;
- dev_priv->vbios = &bios->pub;
-
if (!NVInitVBIOS(dev))
return -ENODEV;
@@ -6023,10 +6086,8 @@ nouveau_bios_init(struct drm_device *dev)
bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);
ret = nouveau_run_vbios_init(dev);
- if (ret) {
- dev_priv->vbios = NULL;
+ if (ret)
return ret;
- }
/* feature_byte on BMP is poor, but init always sets CR4B */
was_locked = NVLockVgaCrtcs(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index fd94bd6..9f688aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -34,9 +34,67 @@
#define DCB_LOC_ON_CHIP 0
+struct dcb_i2c_entry {
+ uint8_t port_type;
+ uint8_t read, write;
+ struct nouveau_i2c_chan *chan;
+};
+
+enum dcb_gpio_tag {
+ DCB_GPIO_TVDAC0 = 0xc,
+ DCB_GPIO_TVDAC1 = 0x2d,
+};
+
+struct dcb_gpio_entry {
+ enum dcb_gpio_tag tag;
+ int line;
+ bool invert;
+};
+
+struct dcb_gpio_table {
+ int entries;
+ struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
+};
+
+enum dcb_connector_type {
+ DCB_CONNECTOR_VGA = 0x00,
+ DCB_CONNECTOR_TV_0 = 0x10,
+ DCB_CONNECTOR_TV_1 = 0x11,
+ DCB_CONNECTOR_TV_3 = 0x13,
+ DCB_CONNECTOR_DVI_I = 0x30,
+ DCB_CONNECTOR_DVI_D = 0x31,
+ DCB_CONNECTOR_LVDS = 0x40,
+ DCB_CONNECTOR_DP = 0x46,
+ DCB_CONNECTOR_eDP = 0x47,
+ DCB_CONNECTOR_HDMI_0 = 0x60,
+ DCB_CONNECTOR_HDMI_1 = 0x61,
+ DCB_CONNECTOR_NONE = 0xff
+};
+
+struct dcb_connector_table_entry {
+ uint32_t entry;
+ enum dcb_connector_type type;
+ uint8_t index;
+ uint8_t gpio_tag;
+};
+
+struct dcb_connector_table {
+ int entries;
+ struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
+};
+
+enum dcb_type {
+ OUTPUT_ANALOG = 0,
+ OUTPUT_TV = 1,
+ OUTPUT_TMDS = 2,
+ OUTPUT_LVDS = 3,
+ OUTPUT_DP = 6,
+ OUTPUT_ANY = -1
+};
+
struct dcb_entry {
int index; /* may not be raw dcb index if merging has happened */
- uint8_t type;
+ enum dcb_type type;
uint8_t i2c_index;
uint8_t heads;
uint8_t connector;
@@ -71,69 +129,22 @@ struct dcb_entry {
bool i2c_upper_default;
};
-struct dcb_i2c_entry {
- uint8_t port_type;
- uint8_t read, write;
- struct nouveau_i2c_chan *chan;
-};
+struct dcb_table {
+ uint8_t version;
-struct parsed_dcb {
int entries;
struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
- struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
-};
-
-enum dcb_gpio_tag {
- DCB_GPIO_TVDAC0 = 0xc,
- DCB_GPIO_TVDAC1 = 0x2d,
-};
-
-struct dcb_gpio_entry {
- enum dcb_gpio_tag tag;
- int line;
- bool invert;
-};
-
-struct parsed_dcb_gpio {
- int entries;
- struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
-};
-
-struct dcb_connector_table_entry {
- uint32_t entry;
- uint8_t type;
- uint8_t index;
- uint8_t gpio_tag;
-};
-
-struct dcb_connector_table {
- int entries;
- struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
-};
-
-struct bios_parsed_dcb {
- uint8_t version;
-
- struct parsed_dcb dcb;
uint8_t *i2c_table;
uint8_t i2c_default_indices;
+ struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
uint16_t gpio_table_ptr;
- struct parsed_dcb_gpio gpio;
+ struct dcb_gpio_table gpio;
uint16_t connector_table_ptr;
struct dcb_connector_table connector;
};
-enum nouveau_encoder_type {
- OUTPUT_ANALOG = 0,
- OUTPUT_TV = 1,
- OUTPUT_TMDS = 2,
- OUTPUT_LVDS = 3,
- OUTPUT_DP = 6,
- OUTPUT_ANY = -1
-};
-
enum nouveau_or {
OUTPUT_A = (1 << 0),
OUTPUT_B = (1 << 1),
@@ -190,8 +201,8 @@ struct pll_lims {
int refclk;
};
-struct nouveau_bios_info {
- struct parsed_dcb *dcb;
+struct nvbios {
+ struct drm_device *dev;
uint8_t chip_version;
@@ -199,11 +210,6 @@ struct nouveau_bios_info {
uint32_t tvdactestval;
uint8_t digital_min_front_porch;
bool fp_no_ddc;
-};
-
-struct nvbios {
- struct drm_device *dev;
- struct nouveau_bios_info pub;
struct mutex lock;
@@ -234,7 +240,7 @@ struct nvbios {
uint16_t some_script_ptr; /* BIT I + 14 */
uint16_t init96_tbl_ptr; /* BIT I + 16 */
- struct bios_parsed_dcb bdcb;
+ struct dcb_table dcb;
struct {
int crtchead;
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
index ee2b845..88f9bc0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -274,7 +274,7 @@ getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
* returns calculated clock
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int cv = dev_priv->vbios->chip_version;
+ int cv = dev_priv->vbios.chip_version;
int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
@@ -373,7 +373,7 @@ getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
* returns calculated clock
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chip_version = dev_priv->vbios->chip_version;
+ int chip_version = dev_priv->vbios.chip_version;
int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 2281f99..6dfb425 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -35,22 +35,27 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *pb = chan->pushbuf_bo;
struct nouveau_gpuobj *pushbuf = NULL;
- uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
int ret;
+ if (dev_priv->card_type >= NV_50) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+ dev_priv->vm_end, NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_AGP, &pushbuf);
+ chan->pushbuf_base = pb->bo.offset;
+ } else
if (pb->bo.mem.mem_type == TTM_PL_TT) {
ret = nouveau_gpuobj_gart_dma_new(chan, 0,
dev_priv->gart_info.aper_size,
NV_DMA_ACCESS_RO, &pushbuf,
NULL);
- chan->pushbuf_base = start;
+ chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
} else
if (dev_priv->card_type != NV_04) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->fb_available_size,
NV_DMA_ACCESS_RO,
NV_DMA_TARGET_VIDMEM, &pushbuf);
- chan->pushbuf_base = start;
+ chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
} else {
/* NV04 cmdbuf hack, from original ddx.. not sure of its
* exact reason for existing :) PCI access to cmdbuf in
@@ -61,7 +66,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
dev_priv->fb_available_size,
NV_DMA_ACCESS_RO,
NV_DMA_TARGET_PCI, &pushbuf);
- chan->pushbuf_base = start;
+ chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
}
ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
@@ -275,9 +280,18 @@ nouveau_channel_free(struct nouveau_channel *chan)
*/
nouveau_fence_fini(chan);
- /* Ensure the channel is no longer active on the GPU */
+ /* This will prevent pfifo from switching channels. */
pfifo->reassign(dev, false);
+ /* We want to give pgraph a chance to idle and get rid of all potential
+ * errors. We need to do this before the lock, otherwise the irq handler
+ * is unable to process them.
+ */
+ if (pgraph->channel(dev) == chan)
+ nouveau_wait_for_idle(dev);
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
pgraph->fifo_access(dev, false);
if (pgraph->channel(dev) == chan)
pgraph->unload_context(dev);
@@ -293,6 +307,8 @@ nouveau_channel_free(struct nouveau_channel *chan)
pfifo->reassign(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
/* Release the channel's resources */
nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
if (chan->pushbuf_bo) {
@@ -369,6 +385,14 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
return ret;
init->channel = chan->id;
+ if (chan->dma.ib_max)
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+ NOUVEAU_GEM_DOMAIN_GART;
+ else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
+ else
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
+
init->subchan[0].handle = NvM2MF;
if (dev_priv->card_type < NV_50)
init->subchan[0].grclass = 0x0039;
@@ -408,7 +432,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
***********************************/
struct drm_ioctl_desc nouveau_ioctls[] = {
- DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
@@ -418,13 +441,9 @@ struct drm_ioctl_desc nouveau_ioctls[] = {
DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
};
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index d2f6335..24327f4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -218,7 +218,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->interlace_allowed = true;
}
- if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+ if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
drm_connector_property_set_value(connector,
dev->mode_config.dvi_i_subconnector_property,
nv_encoder->dcb->type == OUTPUT_TMDS ?
@@ -236,15 +236,17 @@ nouveau_connector_detect(struct drm_connector *connector)
struct nouveau_i2c_chan *i2c;
int type, flags;
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS)
nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
if (nv_encoder && nv_connector->native_mode) {
+ unsigned status = connector_status_connected;
+
#ifdef CONFIG_ACPI
if (!nouveau_ignorelid && !acpi_lid_open())
- return connector_status_disconnected;
+ status = connector_status_unknown;
#endif
nouveau_connector_set_encoder(connector, nv_encoder);
- return connector_status_connected;
+ return status;
}
/* Cleanup the previous EDID block. */
@@ -279,7 +281,7 @@ nouveau_connector_detect(struct drm_connector *connector)
* same i2c channel so the value returned from ddc_detect
* isn't necessarily correct.
*/
- if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+ if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
type = OUTPUT_TMDS;
else
@@ -321,11 +323,11 @@ detect_analog:
static void
nouveau_connector_force(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder;
int type;
- if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+ if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
if (connector->force == DRM_FORCE_ON_DIGITAL)
type = OUTPUT_TMDS;
else
@@ -335,7 +337,7 @@ nouveau_connector_force(struct drm_connector *connector)
nv_encoder = find_encoder_by_type(connector, type);
if (!nv_encoder) {
- NV_ERROR(dev, "can't find encoder to force %s on!\n",
+ NV_ERROR(connector->dev, "can't find encoder to force %s on!\n",
drm_get_connector_name(connector));
connector->status = connector_status_disconnected;
return;
@@ -369,7 +371,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
}
/* LVDS always needs gpu scaling */
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
+ if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS &&
value == DRM_MODE_SCALE_NONE)
return -EINVAL;
@@ -535,7 +537,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
/* If we're not LVDS, destroy the previous native mode, the attached
* monitor could have changed.
*/
- if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
+ if (nv_connector->dcb->type != DCB_CONNECTOR_LVDS &&
nv_connector->native_mode) {
drm_mode_destroy(dev, nv_connector->native_mode);
nv_connector->native_mode = NULL;
@@ -563,7 +565,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
ret = get_slave_funcs(nv_encoder)->
get_modes(to_drm_encoder(nv_encoder), connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ if (nv_encoder->dcb->type == OUTPUT_LVDS)
ret += nouveau_connector_scaler_modes_add(connector);
return ret;
@@ -613,6 +615,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
clock *= 3;
break;
+ default:
+ BUG_ON(1);
+ return MODE_BAD;
}
if (clock < min_clock)
@@ -680,7 +685,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
/* Firstly try getting EDID over DDC, if allowed and I2C channel
* is available.
*/
- if (!dev_priv->VBIOS.pub.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
+ if (!dev_priv->vbios.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
if (i2c) {
@@ -695,7 +700,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
*/
if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) &&
(nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
- dev_priv->VBIOS.pub.fp_no_ddc)) {
+ dev_priv->vbios.fp_no_ddc)) {
nv_connector->native_mode = drm_mode_duplicate(dev, &native);
goto out;
}
@@ -704,7 +709,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
* stored for the panel in them.
*/
if (!nv_connector->edid && !nv_connector->native_mode &&
- !dev_priv->VBIOS.pub.fp_no_ddc) {
+ !dev_priv->vbios.fp_no_ddc) {
struct edid *edid =
(struct edid *)nouveau_bios_embedded_edid(dev);
if (edid) {
@@ -739,46 +744,66 @@ out:
}
int
-nouveau_connector_create(struct drm_device *dev, int index, int type)
+nouveau_connector_create(struct drm_device *dev,
+ struct dcb_connector_table_entry *dcb)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_connector *nv_connector = NULL;
struct drm_connector *connector;
struct drm_encoder *encoder;
- int ret;
+ int ret, type;
NV_DEBUG_KMS(dev, "\n");
- nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
- if (!nv_connector)
- return -ENOMEM;
- nv_connector->dcb = nouveau_bios_connector_entry(dev, index);
- connector = &nv_connector->base;
-
- switch (type) {
- case DRM_MODE_CONNECTOR_VGA:
+ switch (dcb->type) {
+ case DCB_CONNECTOR_NONE:
+ return 0;
+ case DCB_CONNECTOR_VGA:
NV_INFO(dev, "Detected a VGA connector\n");
+ type = DRM_MODE_CONNECTOR_VGA;
break;
- case DRM_MODE_CONNECTOR_DVID:
- NV_INFO(dev, "Detected a DVI-D connector\n");
+ case DCB_CONNECTOR_TV_0:
+ case DCB_CONNECTOR_TV_1:
+ case DCB_CONNECTOR_TV_3:
+ NV_INFO(dev, "Detected a TV connector\n");
+ type = DRM_MODE_CONNECTOR_TV;
break;
- case DRM_MODE_CONNECTOR_DVII:
+ case DCB_CONNECTOR_DVI_I:
NV_INFO(dev, "Detected a DVI-I connector\n");
+ type = DRM_MODE_CONNECTOR_DVII;
break;
- case DRM_MODE_CONNECTOR_LVDS:
- NV_INFO(dev, "Detected a LVDS connector\n");
+ case DCB_CONNECTOR_DVI_D:
+ NV_INFO(dev, "Detected a DVI-D connector\n");
+ type = DRM_MODE_CONNECTOR_DVID;
break;
- case DRM_MODE_CONNECTOR_TV:
- NV_INFO(dev, "Detected a TV connector\n");
+ case DCB_CONNECTOR_HDMI_0:
+ case DCB_CONNECTOR_HDMI_1:
+ NV_INFO(dev, "Detected a HDMI connector\n");
+ type = DRM_MODE_CONNECTOR_HDMIA;
+ break;
+ case DCB_CONNECTOR_LVDS:
+ NV_INFO(dev, "Detected a LVDS connector\n");
+ type = DRM_MODE_CONNECTOR_LVDS;
break;
- case DRM_MODE_CONNECTOR_DisplayPort:
+ case DCB_CONNECTOR_DP:
NV_INFO(dev, "Detected a DisplayPort connector\n");
+ type = DRM_MODE_CONNECTOR_DisplayPort;
break;
- default:
- NV_ERROR(dev, "Unknown connector, this is not good.\n");
+ case DCB_CONNECTOR_eDP:
+ NV_INFO(dev, "Detected an eDP connector\n");
+ type = DRM_MODE_CONNECTOR_eDP;
break;
+ default:
+ NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
+ return -EINVAL;
}
+ nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
+ if (!nv_connector)
+ return -ENOMEM;
+ nv_connector->dcb = dcb;
+ connector = &nv_connector->base;
+
/* defaults, will get overridden in detect() */
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
@@ -786,55 +811,65 @@ nouveau_connector_create(struct drm_device *dev, int index, int type)
drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+ /* attach encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb->connector != dcb->index)
+ continue;
+
+ if (get_slave_funcs(nv_encoder))
+ get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ }
+
+ if (!connector->encoder_ids[0]) {
+ NV_WARN(dev, " no encoders, ignoring\n");
+ drm_connector_cleanup(connector);
+ kfree(connector);
+ return 0;
+ }
+
/* Init DVI-I specific properties */
- if (type == DRM_MODE_CONNECTOR_DVII) {
+ if (dcb->type == DCB_CONNECTOR_DVI_I) {
drm_mode_create_dvi_i_properties(dev);
drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
}
- if (type != DRM_MODE_CONNECTOR_LVDS)
+ if (dcb->type != DCB_CONNECTOR_LVDS)
nv_connector->use_dithering = false;
- if (type == DRM_MODE_CONNECTOR_DVID ||
- type == DRM_MODE_CONNECTOR_DVII ||
- type == DRM_MODE_CONNECTOR_LVDS ||
- type == DRM_MODE_CONNECTOR_DisplayPort) {
- nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
-
- drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property,
- nv_connector->scaling_mode);
- drm_connector_attach_property(connector, dev->mode_config.dithering_mode_property,
- nv_connector->use_dithering ? DRM_MODE_DITHERING_ON
- : DRM_MODE_DITHERING_OFF);
-
- } else {
- nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
-
- if (type == DRM_MODE_CONNECTOR_VGA &&
- dev_priv->card_type >= NV_50) {
+ switch (dcb->type) {
+ case DCB_CONNECTOR_VGA:
+ if (dev_priv->card_type >= NV_50) {
drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
}
- }
-
- /* attach encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
- if (nv_encoder->dcb->connector != index)
- continue;
-
- if (get_slave_funcs(nv_encoder))
- get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
+ /* fall-through */
+ case DCB_CONNECTOR_TV_0:
+ case DCB_CONNECTOR_TV_1:
+ case DCB_CONNECTOR_TV_3:
+ nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
+ break;
+ default:
+ nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property,
+ nv_connector->scaling_mode);
+ drm_connector_attach_property(connector,
+ dev->mode_config.dithering_mode_property,
+ nv_connector->use_dithering ?
+ DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
+ break;
}
drm_sysfs_connector_add(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ if (dcb->type == DCB_CONNECTOR_LVDS) {
ret = nouveau_connector_create_lvds(dev, connector);
if (ret) {
connector->funcs->destroy(connector);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 728b809..4ef38ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -49,6 +49,7 @@ static inline struct nouveau_connector *nouveau_connector(
return container_of(con, struct nouveau_connector, base);
}
-int nouveau_connector_create(struct drm_device *dev, int i2c_index, int type);
+int nouveau_connector_create(struct drm_device *,
+ struct dcb_connector_table_entry *);
#endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index d79db36..8ff9ef5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -47,12 +47,23 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2);
+ if (chan->dma.ib_max) {
+ seq_printf(m, " ib max: 0x%08x\n", chan->dma.ib_max);
+ seq_printf(m, " ib put: 0x%08x\n", chan->dma.ib_put);
+ seq_printf(m, " ib free: 0x%08x\n", chan->dma.ib_free);
+ }
seq_printf(m, "gpu fifo state:\n");
seq_printf(m, " get: 0x%08x\n",
nvchan_rd32(chan, chan->user_get));
seq_printf(m, " put: 0x%08x\n",
nvchan_rd32(chan, chan->user_put));
+ if (chan->dma.ib_max) {
+ seq_printf(m, " ib get: 0x%08x\n",
+ nvchan_rd32(chan, 0x88));
+ seq_printf(m, " ib put: 0x%08x\n",
+ nvchan_rd32(chan, 0x8c));
+ }
seq_printf(m, "last fence : %d\n", chan->fence.sequence);
seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
@@ -133,9 +144,22 @@ nouveau_debugfs_memory_info(struct seq_file *m, void *data)
return 0;
}
+static int
+nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
+ int i;
+
+ for (i = 0; i < dev_priv->vbios.length; i++)
+ seq_printf(m, "%c", dev_priv->vbios.data[i]);
+ return 0;
+}
+
static struct drm_info_list nouveau_debugfs_list[] = {
{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
{ "memory", nouveau_debugfs_memory_info, 0, NULL },
+ { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index dfc9439..cf1c5c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -39,11 +39,8 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
if (drm_fb->fbdev)
nouveau_fbcon_remove(dev, drm_fb);
- if (fb->nvbo) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(fb->nvbo->gem);
- mutex_unlock(&dev->struct_mutex);
- }
+ if (fb->nvbo)
+ drm_gem_object_unreference_unlocked(fb->nvbo->gem);
drm_framebuffer_cleanup(drm_fb);
kfree(fb);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 50d9e67..c8482a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -32,7 +32,22 @@
void
nouveau_dma_pre_init(struct nouveau_channel *chan)
{
- chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_bo *pushbuf = chan->pushbuf_bo;
+
+ if (dev_priv->card_type == NV_50) {
+ const int ib_size = pushbuf->bo.mem.size / 2;
+
+ chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
+ chan->dma.ib_max = (ib_size / 8) - 1;
+ chan->dma.ib_put = 0;
+ chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
+
+ chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
+ } else {
+ chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
+ }
+
chan->dma.put = 0;
chan->dma.cur = chan->dma.put;
chan->dma.free = chan->dma.max - chan->dma.cur;
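
On NV50 the pushbuf is now split in half between ordinary command words and the new indirect buffer (IB); worked numbers for the computation above, assuming a hypothetical 64 KiB pushbuf:

/* pushbuf->bo.mem.size = 65536
 * ib_size              = 65536 / 2            = 32768 bytes
 * chan->dma.ib_base    = (65536 - 32768) >> 2 = 8192  (word offset)
 * chan->dma.ib_max     = (32768 / 8) - 1      = 4095  IB entries
 * chan->dma.max        = (65536 - 32768) >> 2 = 8192  command words
 * i.e. the lower half carries command words and the upper half holds
 * 8-byte IB entries. */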
@@ -162,12 +177,101 @@ READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
return (val - chan->pushbuf_base) >> 2;
}
+void
+nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
+ int delta, int length)
+{
+ struct nouveau_bo *pb = chan->pushbuf_bo;
+ uint64_t offset = bo->bo.offset + delta;
+ int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
+
+ BUG_ON(chan->dma.ib_free < 1);
+ nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
+ nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
+
+ chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
+ nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
+ chan->dma.ib_free--;
+}
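
Each IB entry written by nv50_dma_push() is two words: the low 32 bits of the GPU offset, then the high offset bits OR'd with the byte length shifted left by 8; for an assumed offset and length:

/* offset = 0x123456780, length = 0x100 bytes:
 * word0 = lower_32_bits(0x123456780)              = 0x23456780
 * word1 = upper_32_bits(0x123456780) | 0x100 << 8
 *       = 0x00000001 | 0x00010000                 = 0x00010001 */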
+
+static int
+nv50_dma_push_wait(struct nouveau_channel *chan, int count)
+{
+ uint32_t cnt = 0, prev_get = 0;
+
+ while (chan->dma.ib_free < count) {
+ uint32_t get = nvchan_rd32(chan, 0x88);
+ if (get != prev_get) {
+ prev_get = get;
+ cnt = 0;
+ }
+
+ if ((++cnt & 0xff) == 0) {
+ DRM_UDELAY(1);
+ if (cnt > 100000)
+ return -EBUSY;
+ }
+
+ chan->dma.ib_free = get - chan->dma.ib_put;
+ if (chan->dma.ib_free <= 0)
+ chan->dma.ib_free += chan->dma.ib_max + 1;
+ }
+
+ return 0;
+}
+
+static int
+nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
+{
+ uint32_t cnt = 0, prev_get = 0;
+ int ret;
+
+ ret = nv50_dma_push_wait(chan, slots + 1);
+ if (unlikely(ret))
+ return ret;
+
+ while (chan->dma.free < count) {
+ int get = READ_GET(chan, &prev_get, &cnt);
+ if (unlikely(get < 0)) {
+ if (get == -EINVAL)
+ continue;
+
+ return get;
+ }
+
+ if (get <= chan->dma.cur) {
+ chan->dma.free = chan->dma.max - chan->dma.cur;
+ if (chan->dma.free >= count)
+ break;
+
+ FIRE_RING(chan);
+ do {
+ get = READ_GET(chan, &prev_get, &cnt);
+ if (unlikely(get < 0)) {
+ if (get == -EINVAL)
+ continue;
+ return get;
+ }
+ } while (get == 0);
+ chan->dma.cur = 0;
+ chan->dma.put = 0;
+ }
+
+ chan->dma.free = get - chan->dma.cur - 1;
+ }
+
+ return 0;
+}
+
int
-nouveau_dma_wait(struct nouveau_channel *chan, int size)
+nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
uint32_t prev_get = 0, cnt = 0;
int get;
+ if (chan->dma.ib_max)
+ return nv50_dma_wait(chan, slots, size);
+
while (chan->dma.free < size) {
get = READ_GET(chan, &prev_get, &cnt);
if (unlikely(get == -EBUSY))
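
The free-space rule in nv50_dma_push_wait() restated as a standalone helper (an editor's sketch, not code from the patch): GET and PUT are entry indices in [0, ib_max], and a non-positive raw difference means PUT has wrapped past GET, so one full ring length is added back. nv50_dma_wait() requests slots + 1 entries so a completely full ring can never alias a completely empty one.

	static int ib_free_entries(uint32_t get, uint32_t put, int ib_max)
	{
		int free = (int)get - (int)put;

		if (free <= 0)
			free += ib_max + 1;	/* the ring holds ib_max + 1 entries */
		return free;
	}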
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index dabfd65..8b05c15 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -31,6 +31,9 @@
#define NOUVEAU_DMA_DEBUG 0
#endif
+void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
+ int delta, int length);
+
/*
* There's a hw race condition where you can't jump to your PUT offset;
* to avoid this we jump to offset + SKIPS and fill the difference with
@@ -96,13 +99,11 @@ enum {
static __must_check inline int
RING_SPACE(struct nouveau_channel *chan, int size)
{
- if (chan->dma.free < size) {
- int ret;
+ int ret;
- ret = nouveau_dma_wait(chan, size);
- if (ret)
- return ret;
- }
+ ret = nouveau_dma_wait(chan, 1, size);
+ if (ret)
+ return ret;
chan->dma.free -= size;
return 0;
@@ -146,7 +147,13 @@ FIRE_RING(struct nouveau_channel *chan)
return;
chan->accel_done = true;
- WRITE_PUT(chan->dma.cur);
+ if (chan->dma.ib_max) {
+ nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2,
+ (chan->dma.cur - chan->dma.put) << 2);
+ } else {
+ WRITE_PUT(chan->dma.cur);
+ }
+
chan->dma.put = chan->dma.cur;
}
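
A hedged caller-side sketch of the reworked flow: RING_SPACE() now always funnels through nouveau_dma_wait() (reserving one IB slot on NV50), and FIRE_RING() either queues an IB entry or bumps PUT directly. The subchannel/method pair below is a placeholder, not one defined by this patch:

	ret = RING_SPACE(chan, 2);		/* header dword + 1 data dword */
	if (ret)
		return ret;
	BEGIN_RING(chan, subc, mthd, 1);	/* placeholder subc/mthd */
	OUT_RING(chan, value);
	FIRE_RING(chan);	/* nv50_dma_push() on NV50, WRITE_PUT() otherwise */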
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index da3b93b..30cc09e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -75,11 +75,11 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
int nouveau_ignorelid = 0;
module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
-MODULE_PARM_DESC(noagp, "Disable all acceleration");
+MODULE_PARM_DESC(noaccel, "Disable all acceleration");
int nouveau_noaccel = 0;
module_param_named(noaccel, nouveau_noaccel, int, 0400);
-MODULE_PARM_DESC(noagp, "Disable fbcon acceleration");
+MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
int nouveau_nofbaccel = 0;
module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
@@ -135,7 +135,7 @@ nouveau_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
-static int
+int
nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -233,7 +233,7 @@ out_abort:
return ret;
}
-static int
+int
nouveau_pci_resume(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -402,8 +402,10 @@ static int __init nouveau_init(void)
nouveau_modeset = 1;
}
- if (nouveau_modeset == 1)
+ if (nouveau_modeset == 1) {
driver.driver_features |= DRIVER_MODESET;
+ nouveau_register_dsm_handler();
+ }
return drm_init(&driver);
}
@@ -411,6 +413,7 @@ static int __init nouveau_init(void)
static void __exit nouveau_exit(void)
{
drm_exit(&driver);
+ nouveau_unregister_dsm_handler();
}
module_init(nouveau_init);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c15ef3..5f8d987 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -34,7 +34,7 @@
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 15
+#define DRIVER_PATCHLEVEL 16
#define NOUVEAU_FAMILY 0x0000FFFF
#define NOUVEAU_FLAGS 0xFFFF0000
@@ -83,6 +83,7 @@ struct nouveau_bo {
struct drm_file *reserved_by;
struct list_head entry;
int pbbo_index;
+ bool validate_mapped;
struct nouveau_channel *channel;
@@ -239,6 +240,11 @@ struct nouveau_channel {
int cur;
int put;
/* access via pushbuf_bo */
+
+ int ib_base;
+ int ib_max;
+ int ib_free;
+ int ib_put;
} dma;
uint32_t sw_subchannel[8];
@@ -533,6 +539,9 @@ struct drm_nouveau_private {
struct nouveau_engine engine;
struct nouveau_channel *channel;
+ /* For PFIFO and PGRAPH. */
+ spinlock_t context_switch_lock;
+
/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
struct nouveau_gpuobj *ramht;
uint32_t ramin_rsvd_vram;
@@ -596,8 +605,7 @@ struct drm_nouveau_private {
struct list_head gpuobj_list;
- struct nvbios VBIOS;
- struct nouveau_bios_info *vbios;
+ struct nvbios vbios;
struct nv04_mode_state mode_reg;
struct nv04_mode_state saved_reg;
@@ -614,7 +622,6 @@ struct drm_nouveau_private {
} susres;
struct backlight_device *backlight;
- bool acpi_dsm;
struct nouveau_channel *evo;
@@ -682,6 +689,9 @@ extern int nouveau_ignorelid;
extern int nouveau_nofbaccel;
extern int nouveau_noaccel;
+extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
+extern int nouveau_pci_resume(struct pci_dev *pdev);
+
/* nouveau_state.c */
extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
extern int nouveau_load(struct drm_device *, unsigned long flags);
@@ -696,12 +706,6 @@ extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
uint32_t reg, uint32_t mask, uint32_t val);
extern bool nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);
-extern int nouveau_ioctl_card_init(struct drm_device *, void *data,
- struct drm_file *);
-extern int nouveau_ioctl_suspend(struct drm_device *, void *data,
- struct drm_file *);
-extern int nouveau_ioctl_resume(struct drm_device *, void *data,
- struct drm_file *);
/* nouveau_mem.c */
extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
@@ -845,21 +849,15 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
/* nouveau_dma.c */
extern void nouveau_dma_pre_init(struct nouveau_channel *);
extern int nouveau_dma_init(struct nouveau_channel *);
-extern int nouveau_dma_wait(struct nouveau_channel *, int size);
+extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
/* nouveau_acpi.c */
-#ifdef CONFIG_ACPI
-extern int nouveau_hybrid_setup(struct drm_device *dev);
-extern bool nouveau_dsm_probe(struct drm_device *dev);
+#if defined(CONFIG_ACPI)
+void nouveau_register_dsm_handler(void);
+void nouveau_unregister_dsm_handler(void);
#else
-static inline int nouveau_hybrid_setup(struct drm_device *dev)
-{
- return 0;
-}
-static inline bool nouveau_dsm_probe(struct drm_device *dev)
-{
- return false;
-}
+static inline void nouveau_register_dsm_handler(void) {}
+static inline void nouveau_unregister_dsm_handler(void) {}
#endif
/* nouveau_backlight.c */
@@ -1027,6 +1025,7 @@ extern void nv50_graph_destroy_context(struct nouveau_channel *);
extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
extern void nv50_graph_context_switch(struct drm_device *);
+extern int nv50_grctx_init(struct nouveau_grctx *);
/* nouveau_grctx.c */
extern int nouveau_grctx_prog_load(struct drm_device *);
@@ -1152,16 +1151,6 @@ extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
struct drm_file *);
extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
struct drm_file *);
-extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *,
- struct drm_file *);
-extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *,
- struct drm_file *);
-extern int nouveau_gem_ioctl_pin(struct drm_device *, void *,
- struct drm_file *);
-extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *,
- struct drm_file *);
-extern int nouveau_gem_ioctl_tile(struct drm_device *, void *,
- struct drm_file *);
extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
struct drm_file *);
extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index ea879a2..68cedd9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -36,6 +36,7 @@
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/screen_info.h>
+#include <linux/vga_switcheroo.h>
#include "drmP.h"
#include "drm.h"
@@ -370,6 +371,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
nvbo->bo.offset, nvbo);
mutex_unlock(&dev->struct_mutex);
+ vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unref:
@@ -401,10 +403,8 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
unregister_framebuffer(info);
nouveau_bo_unmap(nouveau_fb->nvbo);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(nouveau_fb->nvbo->gem);
+ drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
- mutex_unlock(&dev->struct_mutex);
if (par)
drm_fb_helper_free(&par->helper);
framebuffer_release(info);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 70cc308..0d22f66 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -167,12 +167,10 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(nvbo->gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference_unlocked(nvbo->gem);
if (ret)
- drm_gem_object_unreference(nvbo->gem);
+ drm_gem_object_unreference_unlocked(nvbo->gem);
return ret;
}
@@ -243,6 +241,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
nouveau_fence_unref((void *)&prev_fence);
}
+ if (unlikely(nvbo->validate_mapped)) {
+ ttm_bo_kunmap(&nvbo->kmap);
+ nvbo->validate_mapped = false;
+ }
+
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
@@ -302,11 +305,14 @@ retry:
if (ret == -EAGAIN)
ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
drm_gem_object_unreference(gem);
- if (ret)
+ if (ret) {
+ NV_ERROR(dev, "fail reserve\n");
return ret;
+ }
goto retry;
}
+ b->user_priv = (uint64_t)(unsigned long)nvbo;
nvbo->reserved_by = file_priv;
nvbo->pbbo_index = i;
if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
@@ -336,8 +342,10 @@ retry:
}
ret = ttm_bo_wait_cpu(&nvbo->bo, false);
- if (ret)
+ if (ret) {
+ NV_ERROR(dev, "fail wait_cpu\n");
return ret;
+ }
goto retry;
}
}
@@ -351,6 +359,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
{
struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
(void __force __user *)(uintptr_t)user_pbbo_ptr;
+ struct drm_device *dev = chan->dev;
struct nouveau_bo *nvbo;
int ret, relocs = 0;
@@ -362,39 +371,46 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
spin_lock(&nvbo->bo.lock);
ret = ttm_bo_wait(&nvbo->bo, false, false, false);
spin_unlock(&nvbo->bo.lock);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail wait other chan\n");
return ret;
+ }
}
ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail set_domain\n");
return ret;
+ }
nvbo->channel = chan;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
false, false);
nvbo->channel = NULL;
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail ttm_validate\n");
return ret;
+ }
- if (nvbo->bo.offset == b->presumed_offset &&
+ if (nvbo->bo.offset == b->presumed.offset &&
((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
- b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+ b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
(nvbo->bo.mem.mem_type == TTM_PL_TT &&
- b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
+ b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
continue;
if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
+ b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
else
- b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
- b->presumed_offset = nvbo->bo.offset;
- b->presumed_ok = 0;
+ b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+ b->presumed.offset = nvbo->bo.offset;
+ b->presumed.valid = 0;
relocs++;
- if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
+ if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+ &b->presumed, sizeof(b->presumed)))
return -EFAULT;
}
@@ -408,6 +424,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
uint64_t user_buffers, int nr_buffers,
struct validate_op *op, int *apply_relocs)
{
+ struct drm_device *dev = chan->dev;
int ret, relocs = 0;
INIT_LIST_HEAD(&op->vram_list);
@@ -418,11 +435,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
return 0;
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "validate_init\n");
return ret;
+ }
ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
+ NV_ERROR(dev, "validate vram_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -430,6 +450,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
+ NV_ERROR(dev, "validate gart_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -437,6 +458,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
+ NV_ERROR(dev, "validate both_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -465,59 +487,82 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
}
static int
-nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
- struct drm_nouveau_gem_pushbuf_bo *bo,
- unsigned nr_relocs, uint64_t ptr_relocs,
- unsigned nr_dwords, unsigned first_dword,
- uint32_t *pushbuf, bool is_iomem)
+nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
+ struct drm_nouveau_gem_pushbuf *req,
+ struct drm_nouveau_gem_pushbuf_bo *bo)
{
struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
- struct drm_device *dev = chan->dev;
int ret = 0;
unsigned i;
- reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
+ reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
if (IS_ERR(reloc))
return PTR_ERR(reloc);
- for (i = 0; i < nr_relocs; i++) {
+ for (i = 0; i < req->nr_relocs; i++) {
struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
struct drm_nouveau_gem_pushbuf_bo *b;
+ struct nouveau_bo *nvbo;
uint32_t data;
- if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
- r->reloc_index >= first_dword + nr_dwords) {
- NV_ERROR(dev, "Bad relocation %d\n", i);
- NV_ERROR(dev, " bo: %d max %d\n", r->bo_index, nr_bo);
- NV_ERROR(dev, " id: %d max %d\n", r->reloc_index, nr_dwords);
+ if (unlikely(r->bo_index > req->nr_buffers)) {
+ NV_ERROR(dev, "reloc bo index invalid\n");
ret = -EINVAL;
break;
}
b = &bo[r->bo_index];
- if (b->presumed_ok)
+ if (b->presumed.valid)
continue;
+ if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+ NV_ERROR(dev, "reloc container bo index invalid\n");
+ ret = -EINVAL;
+ break;
+ }
+ nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
+
+ if (unlikely(r->reloc_bo_offset + 4 >
+ nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
+ NV_ERROR(dev, "reloc outside of bo\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!nvbo->kmap.virtual) {
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+ &nvbo->kmap);
+ if (ret) {
+ NV_ERROR(dev, "failed kmap for reloc\n");
+ break;
+ }
+ nvbo->validate_mapped = true;
+ }
+
if (r->flags & NOUVEAU_GEM_RELOC_LOW)
- data = b->presumed_offset + r->data;
+ data = b->presumed.offset + r->data;
else
if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
- data = (b->presumed_offset + r->data) >> 32;
+ data = (b->presumed.offset + r->data) >> 32;
else
data = r->data;
if (r->flags & NOUVEAU_GEM_RELOC_OR) {
- if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
+ if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
data |= r->tor;
else
data |= r->vor;
}
- if (is_iomem)
- iowrite32_native(data, (void __force __iomem *)
- &pushbuf[r->reloc_index]);
- else
- pushbuf[r->reloc_index] = data;
+ spin_lock(&nvbo->bo.lock);
+ ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+ spin_unlock(&nvbo->bo.lock);
+ if (ret) {
+ NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
+ break;
+ }
+
+ nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
}
kfree(reloc);
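
The reloc arithmetic above, restated as a pure helper (an editor's sketch, not a function from the patch): pick the low or high 32 bits of the buffer's presumed GPU address plus r->data, then OR in a domain-dependent constant when requested.

	static uint32_t reloc_value(const struct drm_nouveau_gem_pushbuf_bo *b,
				    const struct drm_nouveau_gem_pushbuf_reloc *r)
	{
		uint32_t data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = lower_32_bits(b->presumed.offset + r->data);
		else if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = upper_32_bits(b->presumed.offset + r->data);

		if (r->flags & NOUVEAU_GEM_RELOC_OR)
			data |= b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART ?
				r->tor : r->vor;
		return data;
	}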
@@ -528,127 +573,50 @@ int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_gem_pushbuf *req = data;
- struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
+ struct drm_nouveau_gem_pushbuf_push *push;
+ struct drm_nouveau_gem_pushbuf_bo *bo;
struct nouveau_channel *chan;
struct validate_op op;
- struct nouveau_fence* fence = 0;
- uint32_t *pushbuf = NULL;
- int ret = 0, do_reloc = 0, i;
+ struct nouveau_fence *fence = 0;
+ int i, j, ret = 0, do_reloc = 0;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
- if (req->nr_dwords >= chan->dma.max ||
- req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
- req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
- NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
- NV_ERROR(dev, " dwords : %d max %d\n", req->nr_dwords,
- chan->dma.max - 1);
- NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
- NOUVEAU_GEM_MAX_BUFFERS);
- NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
- NOUVEAU_GEM_MAX_RELOCS);
- return -EINVAL;
- }
-
- pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
- if (IS_ERR(pushbuf))
- return PTR_ERR(pushbuf);
-
- bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
- if (IS_ERR(bo)) {
- kfree(pushbuf);
- return PTR_ERR(bo);
- }
-
- mutex_lock(&dev->struct_mutex);
-
- /* Validate buffer list */
- ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
- req->nr_buffers, &op, &do_reloc);
- if (ret)
- goto out;
-
- /* Apply any relocations that are required */
- if (do_reloc) {
- ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
- bo, req->nr_relocs,
- req->relocs,
- req->nr_dwords, 0,
- pushbuf, false);
- if (ret)
- goto out;
- }
-
- /* Emit push buffer to the hw
- */
- ret = RING_SPACE(chan, req->nr_dwords);
- if (ret)
- goto out;
-
- OUT_RINGp(chan, pushbuf, req->nr_dwords);
+ req->vram_available = dev_priv->fb_aper_free;
+ req->gart_available = dev_priv->gart_info.aper_free;
+ if (unlikely(req->nr_push == 0))
+ goto out_next;
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret) {
- NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
- WIND_RING(chan);
- goto out;
+ if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
+ NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
+ req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+ return -EINVAL;
}
- if (nouveau_gem_pushbuf_sync(chan)) {
- ret = nouveau_fence_wait(fence, NULL, false, false);
- if (ret) {
- for (i = 0; i < req->nr_dwords; i++)
- NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
- NV_ERROR(dev, "^^ above push buffer is fail :(\n");
- }
+ if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
+ NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
+ req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+ return -EINVAL;
}
-out:
- validate_fini(&op, fence);
- nouveau_fence_unref((void**)&fence);
- mutex_unlock(&dev->struct_mutex);
- kfree(pushbuf);
- kfree(bo);
- return ret;
-}
-
-#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)
-
-int
-nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_nouveau_gem_pushbuf_call *req = data;
- struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
- struct nouveau_channel *chan;
- struct drm_gem_object *gem;
- struct nouveau_bo *pbbo;
- struct validate_op op;
- struct nouveau_fence* fence = 0;
- int i, ret = 0, do_reloc = 0;
-
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
-
- if (unlikely(req->handle == 0))
- goto out_next;
-
- if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
- req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
- NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
- NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
- NOUVEAU_GEM_MAX_BUFFERS);
- NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
- NOUVEAU_GEM_MAX_RELOCS);
+ if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
+ NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
+ req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
return -EINVAL;
}
+ push = u_memcpya(req->push, req->nr_push, sizeof(*push));
+ if (IS_ERR(push))
+ return PTR_ERR(push);
+
bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
- if (IS_ERR(bo))
+ if (IS_ERR(bo)) {
+ kfree(push);
return PTR_ERR(bo);
+ }
mutex_lock(&dev->struct_mutex);
@@ -660,122 +628,84 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
goto out;
}
- /* Validate DMA push buffer */
- gem = drm_gem_object_lookup(dev, file_priv, req->handle);
- if (!gem) {
- NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
- ret = -EINVAL;
- goto out;
- }
- pbbo = nouveau_gem_object(gem);
-
- if ((req->offset & 3) || req->nr_dwords < 2 ||
- (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
- (unsigned long)req->nr_dwords >
- ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) {
- NV_ERROR(dev, "pb call misaligned or out of bounds: "
- "%d + %d * 4 > %ld\n",
- req->offset, req->nr_dwords, pbbo->bo.mem.size);
- ret = -EINVAL;
- drm_gem_object_unreference(gem);
- goto out;
- }
-
- ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
- chan->fence.sequence);
- if (ret) {
- NV_ERROR(dev, "resv pb: %d\n", ret);
- drm_gem_object_unreference(gem);
- goto out;
- }
-
- nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
- ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
- if (ret) {
- NV_ERROR(dev, "validate pb: %d\n", ret);
- ttm_bo_unreserve(&pbbo->bo);
- drm_gem_object_unreference(gem);
- goto out;
- }
-
- list_add_tail(&pbbo->entry, &op.both_list);
-
- /* If presumed return address doesn't match, we need to map the
- * push buffer and fix it..
- */
- if (!PUSHBUF_CAL) {
- uint32_t retaddy;
-
- if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
- ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS);
- if (ret) {
- NV_ERROR(dev, "jmp_space: %d\n", ret);
- goto out;
- }
- }
-
- retaddy = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
- retaddy |= 0x20000000;
- if (retaddy != req->suffix0) {
- req->suffix0 = retaddy;
- do_reloc = 1;
- }
- }
-
/* Apply any relocations that are required */
if (do_reloc) {
- void *pbvirt;
- bool is_iomem;
- ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
- &pbbo->kmap);
+ ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
if (ret) {
- NV_ERROR(dev, "kmap pb: %d\n", ret);
+ NV_ERROR(dev, "reloc apply: %d\n", ret);
goto out;
}
+ }
- pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
- ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
- req->nr_relocs,
- req->relocs,
- req->nr_dwords,
- req->offset / 4,
- pbvirt, is_iomem);
-
- if (!PUSHBUF_CAL) {
- nouveau_bo_wr32(pbbo,
- req->offset / 4 + req->nr_dwords - 2,
- req->suffix0);
- }
-
- ttm_bo_kunmap(&pbbo->kmap);
+ if (chan->dma.ib_max) {
+ ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
if (ret) {
- NV_ERROR(dev, "reloc apply: %d\n", ret);
+ NV_INFO(dev, "nv50cal_space: %d\n", ret);
goto out;
}
- }
- if (PUSHBUF_CAL) {
- ret = RING_SPACE(chan, 2);
+ for (i = 0; i < req->nr_push; i++) {
+ struct nouveau_bo *nvbo = (void *)(unsigned long)
+ bo[push[i].bo_index].user_priv;
+
+ nv50_dma_push(chan, nvbo, push[i].offset,
+ push[i].length);
+ }
+ } else
+ if (dev_priv->card_type >= NV_20) {
+ ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
NV_ERROR(dev, "cal_space: %d\n", ret);
goto out;
}
- OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
- req->offset) | 2);
- OUT_RING(chan, 0);
+
+ for (i = 0; i < req->nr_push; i++) {
+ struct nouveau_bo *nvbo = (void *)(unsigned long)
+ bo[push[i].bo_index].user_priv;
+ struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+
+ OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+ push[i].offset) | 2);
+ OUT_RING(chan, 0);
+ }
} else {
- ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
+ ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
NV_ERROR(dev, "jmp_space: %d\n", ret);
goto out;
}
- OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
- req->offset) | 0x20000000);
- OUT_RING(chan, 0);
- /* Space the jumps apart with NOPs. */
- for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+ for (i = 0; i < req->nr_push; i++) {
+ struct nouveau_bo *nvbo = (void *)(unsigned long)
+ bo[push[i].bo_index].user_priv;
+ struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+ uint32_t cmd;
+
+ cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
+ cmd |= 0x20000000;
+ if (unlikely(cmd != req->suffix0)) {
+ if (!nvbo->kmap.virtual) {
+ ret = ttm_bo_kmap(&nvbo->bo, 0,
+ nvbo->bo.mem.num_pages,
+ &nvbo->kmap);
+ if (ret) {
+ WIND_RING(chan);
+ goto out;
+ }
+ nvbo->validate_mapped = true;
+ }
+
+ nouveau_bo_wr32(nvbo, (push[i].offset +
+ push[i].length - 8) / 4, cmd);
+ }
+
+ OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+ push[i].offset) | 0x20000000);
OUT_RING(chan, 0);
+ for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
+ OUT_RING(chan, 0);
+ }
}
ret = nouveau_fence_new(chan, &fence, true);
@@ -790,9 +720,14 @@ out:
nouveau_fence_unref((void**)&fence);
mutex_unlock(&dev->struct_mutex);
kfree(bo);
+ kfree(push);
out_next:
- if (PUSHBUF_CAL) {
+ if (chan->dma.ib_max) {
+ req->suffix0 = 0x00000000;
+ req->suffix1 = 0x00000000;
+ } else
+ if (dev_priv->card_type >= NV_20) {
req->suffix0 = 0x00020000;
req->suffix1 = 0x00000000;
} else {
@@ -804,19 +739,6 @@ out_next:
return ret;
}
-int
-nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_nouveau_gem_pushbuf_call *req = data;
-
- req->vram_available = dev_priv->fb_aper_free;
- req->gart_available = dev_priv->gart_info.aper_free;
-
- return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
-}
-
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
@@ -831,74 +753,6 @@ domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
}
int
-nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_gem_pin *req = data;
- struct drm_gem_object *gem;
- struct nouveau_bo *nvbo;
- int ret = 0;
-
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
- return -EINVAL;
- }
-
- if (!DRM_SUSER(DRM_CURPROC))
- return -EPERM;
-
- gem = drm_gem_object_lookup(dev, file_priv, req->handle);
- if (!gem)
- return -EINVAL;
- nvbo = nouveau_gem_object(gem);
-
- ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
- if (ret)
- goto out;
-
- req->offset = nvbo->bo.offset;
- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- req->domain = NOUVEAU_GEM_DOMAIN_GART;
- else
- req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
-
-out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-int
-nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_gem_pin *req = data;
- struct drm_gem_object *gem;
- int ret;
-
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- gem = drm_gem_object_lookup(dev, file_priv, req->handle);
- if (!gem)
- return -EINVAL;
-
- ret = nouveau_bo_unpin(nouveau_gem_object(gem));
-
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -935,9 +789,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
}
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return ret;
}
@@ -965,9 +817,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
ret = 0;
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return ret;
}
@@ -986,9 +836,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
return -EINVAL;
ret = nouveau_gem_info(gem, req);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return ret;
}
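
For reference, a hedged userspace-side sketch of the new submission format: each push entry names a buffer by its index in the bo list plus a byte range inside it. The field names follow the kernel-side uses above; the full struct definition lives in nouveau_drm.h, which is not part of this excerpt.

	struct drm_nouveau_gem_pushbuf_push push = {
		.bo_index = 0,			/* index into req.buffers[] */
		.offset   = 0,			/* byte offset inside that bo */
		.length   = nr_dwords * 4,	/* byte length of the commands */
	};

	req.push    = (uintptr_t)&push;
	req.nr_push = 1;
	/* req.buffers / req.nr_buffers / req.relocs are filled as before */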
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index dc46792..7855b35 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -160,7 +160,7 @@ static void
setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chip_version = dev_priv->vbios->chip_version;
+ int chip_version = dev_priv->vbios.chip_version;
uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
@@ -216,7 +216,7 @@ setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
struct nouveau_pll_vals *pv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chip_version = dev_priv->vbios->chip_version;
+ int chip_version = dev_priv->vbios.chip_version;
bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
@@ -374,7 +374,7 @@ nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
struct nouveau_pll_vals *pv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int cv = dev_priv->vbios->chip_version;
+ int cv = dev_priv->vbios.chip_version;
if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
cv >= 0x40) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 70e994d..88583e7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -254,16 +254,16 @@ struct nouveau_i2c_chan *
nouveau_i2c_find(struct drm_device *dev, int index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
- if (index > DCB_MAX_NUM_I2C_ENTRIES)
+ if (index >= DCB_MAX_NUM_I2C_ENTRIES)
return NULL;
- if (!bios->bdcb.dcb.i2c[index].chan) {
- if (nouveau_i2c_init(dev, &bios->bdcb.dcb.i2c[index], index))
+ if (!bios->dcb.i2c[index].chan) {
+ if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index))
return NULL;
}
- return bios->bdcb.dcb.i2c[index].chan;
+ return bios->dcb.i2c[index].chan;
}
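
The comparison change above is an off-by-one fix: the table holds DCB_MAX_NUM_I2C_ENTRIES elements, so the last valid index is DCB_MAX_NUM_I2C_ENTRIES - 1.

	/* for an array i2c[N], valid indices are 0 .. N-1:
	 *   index >  N   still lets index == N overrun the array
	 *   index >= N   is the correct guard
	 */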
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 447f9f6..95220dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -691,11 +691,14 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
struct drm_device *dev = (struct drm_device *)arg;
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t status, fbdev_flags = 0;
+ unsigned long flags;
status = nv_rd32(dev, NV03_PMC_INTR_0);
if (!status)
return IRQ_NONE;
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
if (dev_priv->fbdev_info) {
fbdev_flags = dev_priv->fbdev_info->flags;
dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
@@ -733,5 +736,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
if (dev_priv->fbdev_info)
dev_priv->fbdev_info->flags = fbdev_flags;
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
return IRQ_HANDLED;
}
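
The new context_switch_lock serializes PFIFO/PGRAPH register access between this handler and the channel-creation paths later in the patch. A sketch of the required pairing, assuming nothing beyond what the patch adds: process context must also use the _irqsave variant, otherwise this interrupt arriving while the lock is held on the same CPU would deadlock.

	/* process context (e.g. the nvXX_fifo_create_context() hunks below) */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	/* ... program RAMFC, enable the channel ... */
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);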
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index d99dc08..9537f3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -61,11 +61,8 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
chan->notifier_bo = ntfy;
out_err:
- if (ret) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(ntfy->gem);
- mutex_unlock(&dev->struct_mutex);
- }
+ if (ret)
+ drm_gem_object_unreference_unlocked(ntfy->gem);
return ret;
}
@@ -81,8 +78,8 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
nouveau_bo_unmap(chan->notifier_bo);
mutex_lock(&dev->struct_mutex);
nouveau_bo_unpin(chan->notifier_bo);
- drm_gem_object_unreference(chan->notifier_bo->gem);
mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
nouveau_mem_takedown(&chan->notifier_heap);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a4851af..eb8f084 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -29,6 +29,7 @@
#include "drm_sarea.h"
#include "drm_crtc_helper.h"
#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
#include "nouveau_drv.h"
#include "nouveau_drm.h"
@@ -371,6 +372,30 @@ out_err:
return ret;
}
+static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
+ enum vga_switcheroo_state state)
+{
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+ if (state == VGA_SWITCHEROO_ON) {
+ printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
+ nouveau_pci_resume(pdev);
+ } else {
+ printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
+ nouveau_pci_suspend(pdev, pmm);
+ }
+}
+
+static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+ can_switch = (dev->open_count == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+}
+
int
nouveau_card_init(struct drm_device *dev)
{
@@ -384,6 +409,8 @@ nouveau_card_init(struct drm_device *dev)
return 0;
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
+ vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
+ nouveau_switcheroo_can_switch);
/* Initialise internal driver API hooks */
ret = nouveau_init_engine_ptrs(dev);
@@ -391,6 +418,7 @@ nouveau_card_init(struct drm_device *dev)
goto out;
engine = &dev_priv->engine;
dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
+ spin_lock_init(&dev_priv->context_switch_lock);
/* Parse BIOS tables / Run init tables if card not POSTed */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -617,11 +645,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
dev->pci_vendor, dev->pci_device, dev->pdev->class);
- dev_priv->acpi_dsm = nouveau_dsm_probe(dev);
-
- if (dev_priv->acpi_dsm)
- nouveau_hybrid_setup(dev);
-
dev_priv->wq = create_workqueue("nouveau");
if (!dev_priv->wq)
return -EINVAL;
@@ -776,13 +799,6 @@ int nouveau_unload(struct drm_device *dev)
return 0;
}
-int
-nouveau_ioctl_card_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return nouveau_card_init(dev);
-}
-
int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index d2f143e..a1d1ebb 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -926,9 +926,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
nv_crtc->cursor.show(nv_crtc, true);
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index 1d73b15..1cb19e3 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -230,13 +230,13 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
if (dcb->type == OUTPUT_TV) {
testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
- if (dev_priv->vbios->tvdactestval)
- testval = dev_priv->vbios->tvdactestval;
+ if (dev_priv->vbios.tvdactestval)
+ testval = dev_priv->vbios.tvdactestval;
} else {
testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */
- if (dev_priv->vbios->dactestval)
- testval = dev_priv->vbios->dactestval;
+ if (dev_priv->vbios.dactestval)
+ testval = dev_priv->vbios.dactestval;
}
saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 483f875..41634d4 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -269,10 +269,10 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
if (!nv_gf4_disp_arch(dev) ||
(output_mode->hsync_start - output_mode->hdisplay) >=
- dev_priv->vbios->digital_min_front_porch)
+ dev_priv->vbios.digital_min_front_porch)
regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
else
- regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios->digital_min_front_porch - 1;
+ regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios.digital_min_front_porch - 1;
regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index ef77215..c7898b4 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -93,10 +93,9 @@ int
nv04_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct parsed_dcb *dcb = dev_priv->vbios->dcb;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
- uint16_t connector[16] = { 0 };
int i, ret;
NV_DEBUG_KMS(dev, "\n");
@@ -154,52 +153,10 @@ nv04_display_create(struct drm_device *dev)
if (ret)
continue;
-
- connector[dcbent->connector] |= (1 << dcbent->type);
}
- for (i = 0; i < dcb->entries; i++) {
- struct dcb_entry *dcbent = &dcb->entry[i];
- uint16_t encoders;
- int type;
-
- encoders = connector[dcbent->connector];
- if (!(encoders & (1 << dcbent->type)))
- continue;
- connector[dcbent->connector] = 0;
-
- switch (dcbent->type) {
- case OUTPUT_ANALOG:
- if (!MULTIPLE_ENCODERS(encoders))
- type = DRM_MODE_CONNECTOR_VGA;
- else
- type = DRM_MODE_CONNECTOR_DVII;
- break;
- case OUTPUT_TMDS:
- if (!MULTIPLE_ENCODERS(encoders))
- type = DRM_MODE_CONNECTOR_DVID;
- else
- type = DRM_MODE_CONNECTOR_DVII;
- break;
- case OUTPUT_LVDS:
- type = DRM_MODE_CONNECTOR_LVDS;
-#if 0
- /* don't create i2c adapter when lvds ddc not allowed */
- if (dcbent->lvdsconf.use_straps_for_mode ||
- dev_priv->vbios->fp_no_ddc)
- i2c_index = 0xf;
-#endif
- break;
- case OUTPUT_TV:
- type = DRM_MODE_CONNECTOR_TV;
- break;
- default:
- type = DRM_MODE_CONNECTOR_Unknown;
- continue;
- }
-
- nouveau_connector_create(dev, dcbent->connector, type);
- }
+ for (i = 0; i < dcb->connector.entries; i++)
+ nouveau_connector_create(dev, &dcb->connector.entry[i]);
/* Save previous state */
NVLockVgaCrtcs(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index fd01caa..3da90c2 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -118,7 +118,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
return;
}
- width = (image->width + 31) & ~31;
+ width = ALIGN(image->width, 32);
dsize = (width * image->height) >> 5;
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
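
ALIGN(x, a), for power-of-two a, expands to (x + a - 1) & ~(a - 1), i.e. exactly the open-coded rounding it replaces:

	/*  ALIGN(32, 32) -> 32
	 *  ALIGN(33, 32) -> 64
	 *  identical to (width + 31) & ~31 for every width
	 */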
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index f31347b..66fe559 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -117,6 +117,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
int ret;
ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
@@ -127,6 +128,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
if (ret)
return ret;
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
/* Setup initial state */
dev_priv->engine.instmem.prepare_access(dev, true);
RAMFC_WR(DMA_PUT, chan->pushbuf_base);
@@ -144,6 +147,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
/* enable the fifo dma operation */
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 9c63099..c4e3404 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -262,7 +262,7 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
nv_encoder->or = ffs(entry->or) - 1;
/* Run the slave-specific initialization */
- adap = &dev_priv->vbios->dcb->i2c[i2c_index].chan->adapter;
+ adap = &dev_priv->vbios.dcb.i2c[i2c_index].chan->adapter;
was_locked = NVLockVgaCrtcs(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 21ac6e4..74c8803 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -45,8 +45,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
- if (dev_priv->vbios->tvdactestval)
- testval = dev_priv->vbios->tvdactestval;
+ if (dev_priv->vbios.tvdactestval)
+ testval = dev_priv->vbios.tvdactestval;
dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
head = (dacclk & 0x100) >> 8;
@@ -367,7 +367,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
!enc->crtc &&
nv04_dfp_get_bound_head(dev, dcb) == head) {
nv04_dfp_bind_head(dev, dcb, head ^ 1,
- dev_priv->VBIOS.fp.dual_link);
+ dev_priv->vbios.fp.dual_link);
}
}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index b4f19cc..6b2ef4a 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -37,6 +37,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t fc = NV40_RAMFC(chan->id);
+ unsigned long flags;
int ret;
ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
@@ -45,6 +46,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
if (ret)
return ret;
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
dev_priv->engine.instmem.prepare_access(dev, true);
nv_wi32(dev, fc + 0, chan->pushbuf_base);
nv_wi32(dev, fc + 4, chan->pushbuf_base);
@@ -63,6 +66,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
/* enable the fifo dma operation */
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index d1a651e..cfabeb9 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -358,9 +358,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nv_crtc->cursor.show(nv_crtc, true);
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index f08f042..1fd9537 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -79,8 +79,8 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
}
/* Use bios provided value if possible. */
- if (dev_priv->vbios->dactestval) {
- load_pattern = dev_priv->vbios->dactestval;
+ if (dev_priv->vbios.dactestval) {
+ load_pattern = dev_priv->vbios.dactestval;
NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
load_pattern);
} else {
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 90f0bf5..61a89f2 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -370,9 +370,7 @@ nv50_display_init(struct drm_device *dev)
struct nouveau_connector *conn = nouveau_connector(connector);
struct dcb_gpio_entry *gpio;
- if (connector->connector_type != DRM_MODE_CONNECTOR_DVII &&
- connector->connector_type != DRM_MODE_CONNECTOR_DVID &&
- connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ if (conn->dcb->gpio_tag == 0xff)
continue;
gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
@@ -465,8 +463,7 @@ static int nv50_display_disable(struct drm_device *dev)
int nv50_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct parsed_dcb *dcb = dev_priv->vbios->dcb;
- uint32_t connector[16] = {};
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
int ret, i;
NV_DEBUG_KMS(dev, "\n");
@@ -522,44 +519,13 @@ int nv50_display_create(struct drm_device *dev)
NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
continue;
}
-
- connector[entry->connector] |= (1 << entry->type);
}
- /* It appears that DCB 3.0+ VBIOS has a connector table, however,
- * I'm not 100% certain how to decode it correctly yet so just
- * look at what encoders are present on each connector index and
- * attempt to derive the connector type from that.
- */
- for (i = 0 ; i < dcb->entries; i++) {
- struct dcb_entry *entry = &dcb->entry[i];
- uint16_t encoders;
- int type;
-
- encoders = connector[entry->connector];
- if (!(encoders & (1 << entry->type)))
+ for (i = 0 ; i < dcb->connector.entries; i++) {
+ if (i != 0 && dcb->connector.entry[i].index ==
+ dcb->connector.entry[i - 1].index)
continue;
- connector[entry->connector] = 0;
-
- if (encoders & (1 << OUTPUT_DP)) {
- type = DRM_MODE_CONNECTOR_DisplayPort;
- } else if (encoders & (1 << OUTPUT_TMDS)) {
- if (encoders & (1 << OUTPUT_ANALOG))
- type = DRM_MODE_CONNECTOR_DVII;
- else
- type = DRM_MODE_CONNECTOR_DVID;
- } else if (encoders & (1 << OUTPUT_ANALOG)) {
- type = DRM_MODE_CONNECTOR_VGA;
- } else if (encoders & (1 << OUTPUT_LVDS)) {
- type = DRM_MODE_CONNECTOR_LVDS;
- } else {
- type = DRM_MODE_CONNECTOR_Unknown;
- }
-
- if (type == DRM_MODE_CONNECTOR_Unknown)
- continue;
-
- nouveau_connector_create(dev, entry->connector, type);
+ nouveau_connector_create(dev, &dcb->connector.entry[i]);
}
ret = nv50_display_init(dev);
@@ -667,8 +633,8 @@ nv50_display_irq_head(struct drm_device *dev, int *phead,
return -1;
}
- for (i = 0; i < dev_priv->vbios->dcb->entries; i++) {
- struct dcb_entry *dcbent = &dev_priv->vbios->dcb->entry[i];
+ for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
+ struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i];
if (dcbent->type != type)
continue;
@@ -692,7 +658,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_connector *nv_connector = NULL;
struct drm_encoder *encoder;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint32_t mc, script = 0, or;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -710,7 +676,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
switch (dcbent->type) {
case OUTPUT_LVDS:
script = (mc >> 8) & 0xf;
- if (bios->pub.fp_no_ddc) {
+ if (bios->fp_no_ddc) {
if (bios->fp.dual_link)
script |= 0x0100;
if (bios->fp.if_is_24bit)
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 0f57cdf..993c712 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -109,7 +109,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
return;
}
- width = (image->width + 31) & ~31;
+ width = ALIGN(image->width, 32);
dwords = (width * image->height) >> 5;
BEGIN_RING(chan, NvSub2D, 0x0814, 2);
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 204a79f..e20c0e2 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -243,6 +243,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramfc = NULL;
+ unsigned long flags;
int ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -278,19 +279,21 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
return ret;
}
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
dev_priv->engine.instmem.prepare_access(dev, true);
- nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
- nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base);
nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
- nv_wo32(dev, ramfc, 0x3c/4, 0x00086078);
nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
- nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff);
+ nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078);
+ nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base +
+ chan->dma.ib_base * 4);
+ nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16);
if (!IS_G80) {
nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
@@ -306,10 +309,12 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
ret = nv50_fifo_channel_enable(dev, chan->id, false);
if (ret) {
NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
return ret;
}
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
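
RAMFC 0x50/0x54 now describe the IB ring instead of seeding DMA_PUT/GET with the push buffer base. A worked example, reusing the hypothetical 64 KiB push buffer from the nouveau_dma_pre_init() change earlier in this patch (drm_order() returns the log2 order of its argument):

	/*  IB ring address  = pushbuf_base + 8192 * 4  (base + 32 KiB)
	 *  entry count      = ib_max + 1 = 4096
	 *  drm_order(4096)  = 12, so RAMFC 0x54 = 12 << 16
	 */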
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 6d50480..857a096 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -28,30 +28,7 @@
#include "drm.h"
#include "nouveau_drv.h"
-MODULE_FIRMWARE("nouveau/nv50.ctxprog");
-MODULE_FIRMWARE("nouveau/nv50.ctxvals");
-MODULE_FIRMWARE("nouveau/nv84.ctxprog");
-MODULE_FIRMWARE("nouveau/nv84.ctxvals");
-MODULE_FIRMWARE("nouveau/nv86.ctxprog");
-MODULE_FIRMWARE("nouveau/nv86.ctxvals");
-MODULE_FIRMWARE("nouveau/nv92.ctxprog");
-MODULE_FIRMWARE("nouveau/nv92.ctxvals");
-MODULE_FIRMWARE("nouveau/nv94.ctxprog");
-MODULE_FIRMWARE("nouveau/nv94.ctxvals");
-MODULE_FIRMWARE("nouveau/nv96.ctxprog");
-MODULE_FIRMWARE("nouveau/nv96.ctxvals");
-MODULE_FIRMWARE("nouveau/nv98.ctxprog");
-MODULE_FIRMWARE("nouveau/nv98.ctxvals");
-MODULE_FIRMWARE("nouveau/nva0.ctxprog");
-MODULE_FIRMWARE("nouveau/nva0.ctxvals");
-MODULE_FIRMWARE("nouveau/nva5.ctxprog");
-MODULE_FIRMWARE("nouveau/nva5.ctxvals");
-MODULE_FIRMWARE("nouveau/nva8.ctxprog");
-MODULE_FIRMWARE("nouveau/nva8.ctxvals");
-MODULE_FIRMWARE("nouveau/nvaa.ctxprog");
-MODULE_FIRMWARE("nouveau/nvaa.ctxvals");
-MODULE_FIRMWARE("nouveau/nvac.ctxprog");
-MODULE_FIRMWARE("nouveau/nvac.ctxvals");
+#include "nouveau_grctx.h"
#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
@@ -111,9 +88,34 @@ nv50_graph_init_ctxctl(struct drm_device *dev)
NV_DEBUG(dev, "\n");
- nouveau_grctx_prog_load(dev);
- if (!dev_priv->engine.graph.ctxprog)
- dev_priv->engine.graph.accel_blocked = true;
+ if (nouveau_ctxfw) {
+ nouveau_grctx_prog_load(dev);
+ dev_priv->engine.graph.grctx_size = 0x70000;
+ }
+ if (!dev_priv->engine.graph.ctxprog) {
+ struct nouveau_grctx ctx = {};
+ uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL);
+ int i;
+ if (!cp) {
+ NV_ERROR(dev, "Couldn't alloc ctxprog! Disabling acceleration.\n");
+ dev_priv->engine.graph.accel_blocked = true;
+ return 0;
+ }
+ ctx.dev = dev;
+ ctx.mode = NOUVEAU_GRCTX_PROG;
+ ctx.data = cp;
+ ctx.ctxprog_max = 512;
+ if (!nv50_grctx_init(&ctx)) {
+ dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+ } else {
+ dev_priv->engine.graph.accel_blocked = true;
+ }
+ kfree(cp);
+ }
nv_wr32(dev, 0x400320, 4);
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
@@ -193,13 +195,13 @@ nv50_graph_create_context(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
struct nouveau_gpuobj *ctx;
- uint32_t grctx_size = 0x70000;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
int hdr, ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC |
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
if (ret)
return ret;
@@ -209,7 +211,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
dev_priv->engine.instmem.prepare_access(dev, true);
nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
- grctx_size - 1);
+ pgraph->grctx_size - 1);
nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
@@ -217,7 +219,15 @@ nv50_graph_create_context(struct nouveau_channel *chan)
dev_priv->engine.instmem.finish_access(dev);
dev_priv->engine.instmem.prepare_access(dev, true);
- nouveau_grctx_vals_load(dev, ctx);
+ if (!pgraph->ctxprog) {
+ struct nouveau_grctx ctx = {};
+ ctx.dev = chan->dev;
+ ctx.mode = NOUVEAU_GRCTX_VALS;
+ ctx.data = chan->ramin_grctx->gpuobj;
+ nv50_grctx_init(&ctx);
+ } else {
+ nouveau_grctx_vals_load(dev, ctx);
+ }
nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
if ((dev_priv->chipset & 0xf0) == 0xa0)
nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
new file mode 100644
index 0000000..d105fcd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -0,0 +1,2367 @@
+/*
+ * Copyright 2009 Marcin Kościelnicki
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#define CP_FLAG_CLEAR 0
+#define CP_FLAG_SET 1
+#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
+#define CP_FLAG_SWAP_DIRECTION_LOAD 0
+#define CP_FLAG_SWAP_DIRECTION_SAVE 1
+#define CP_FLAG_UNK01 ((0 * 32) + 1)
+#define CP_FLAG_UNK01_CLEAR 0
+#define CP_FLAG_UNK01_SET 1
+#define CP_FLAG_UNK03 ((0 * 32) + 3)
+#define CP_FLAG_UNK03_CLEAR 0
+#define CP_FLAG_UNK03_SET 1
+#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
+#define CP_FLAG_USER_SAVE_NOT_PENDING 0
+#define CP_FLAG_USER_SAVE_PENDING 1
+#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
+#define CP_FLAG_USER_LOAD_NOT_PENDING 0
+#define CP_FLAG_USER_LOAD_PENDING 1
+#define CP_FLAG_UNK0B ((0 * 32) + 0xb)
+#define CP_FLAG_UNK0B_CLEAR 0
+#define CP_FLAG_UNK0B_SET 1
+#define CP_FLAG_UNK1D ((0 * 32) + 0x1d)
+#define CP_FLAG_UNK1D_CLEAR 0
+#define CP_FLAG_UNK1D_SET 1
+#define CP_FLAG_UNK20 ((1 * 32) + 0)
+#define CP_FLAG_UNK20_CLEAR 0
+#define CP_FLAG_UNK20_SET 1
+#define CP_FLAG_STATUS ((2 * 32) + 0)
+#define CP_FLAG_STATUS_BUSY 0
+#define CP_FLAG_STATUS_IDLE 1
+#define CP_FLAG_AUTO_SAVE ((2 * 32) + 4)
+#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
+#define CP_FLAG_AUTO_SAVE_PENDING 1
+#define CP_FLAG_AUTO_LOAD ((2 * 32) + 5)
+#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
+#define CP_FLAG_AUTO_LOAD_PENDING 1
+#define CP_FLAG_XFER ((2 * 32) + 11)
+#define CP_FLAG_XFER_IDLE 0
+#define CP_FLAG_XFER_BUSY 1
+#define CP_FLAG_NEWCTX ((2 * 32) + 12)
+#define CP_FLAG_NEWCTX_BUSY 0
+#define CP_FLAG_NEWCTX_DONE 1
+#define CP_FLAG_ALWAYS ((2 * 32) + 13)
+#define CP_FLAG_ALWAYS_FALSE 0
+#define CP_FLAG_ALWAYS_TRUE 1
+
+#define CP_CTX 0x00100000
+#define CP_CTX_COUNT 0x000f0000
+#define CP_CTX_COUNT_SHIFT 16
+#define CP_CTX_REG 0x00003fff
+#define CP_LOAD_SR 0x00200000
+#define CP_LOAD_SR_VALUE 0x000fffff
+#define CP_BRA 0x00400000
+#define CP_BRA_IP 0x0001ff00
+#define CP_BRA_IP_SHIFT 8
+#define CP_BRA_IF_CLEAR 0x00000080
+#define CP_BRA_FLAG 0x0000007f
+#define CP_WAIT 0x00500000
+#define CP_WAIT_SET 0x00000080
+#define CP_WAIT_FLAG 0x0000007f
+#define CP_SET 0x00700000
+#define CP_SET_1 0x00000080
+#define CP_SET_FLAG 0x0000007f
+#define CP_NEWCTX 0x00600004
+#define CP_NEXT_TO_SWAP 0x00600005
+#define CP_SET_CONTEXT_POINTER 0x00600006
+#define CP_SET_XFER_POINTER 0x00600007
+#define CP_ENABLE 0x00600009
+#define CP_END 0x0060000c
+#define CP_NEXT_TO_CURRENT 0x0060000d
+#define CP_DISABLE1 0x0090ffff
+#define CP_DISABLE2 0x0091ffff
+#define CP_XFER_1 0x008000ff
+#define CP_XFER_2 0x008800ff
+#define CP_SEEK_1 0x00c000ff
+#define CP_SEEK_2 0x00c800ff
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_grctx.h"
+
+/*
+ * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
+ * the GPU itself that does context-switching, but it needs a special
+ * microcode to do it, and it's the driver's task to supply this microcode,
+ * known from here on as ctxprog, as well as the initial context values,
+ * known as ctxvals.
+ *
+ * Without ctxprog, you cannot switch contexts. Not even in software, since
+ * the majority of context [xfer strands] isn't accessible directly. You're
+ * stuck with a single channel, and you also suffer all the problems resulting
+ * from missing ctxvals, since you cannot load them.
+ *
+ * Without ctxvals, you're stuck with PGRAPH's default context. It's enough to
+ * run 2d operations, but trying to utilise 3d or CUDA will just lock you up,
+ * since you don't have... some sort of needed setup.
+ *
+ * Nouveau will just disable acceleration if not given ctxprog + ctxvals, since
+ * it's too much hassle to handle no-ctxprog as a special case.
+ */
+
+/*
+ * How ctxprogs work.
+ *
+ * The ctxprog is written in its own kind of microcode, with a very small
+ * and crappy set of available commands. You upload it to a small [512 insns]
+ * area of memory on PGRAPH, and it'll be run when PFIFO wants PGRAPH to
+ * switch channels, or when the driver explicitly requests it. Stuff visible
+ * to ctxprog consists of: PGRAPH MMIO registers, PGRAPH context strands,
+ * the per-channel context save area in VRAM [known as ctxvals or grctx],
+ * 4 flag registers, a scratch register, two grctx pointers, plus many
+ * random poorly-understood details.
+ *
+ * When ctxprog runs, it's supposed to check what operations are asked of it,
+ * save the old context if requested, optionally reset PGRAPH and switch to
+ * the new channel, and load the new context. The context consists of three
+ * major parts: a subset of MMIO registers and two "xfer areas".
+ */
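+
+/*
+ * A purely illustrative sketch of how the opcodes #defined above fit
+ * together; the real encoders are the cp_* helpers in nouveau_grctx.h
+ * (included below). A branch such as
+ * cp_bra(ctx, AUTO_SAVE, PENDING, cp_setup_save) presumably packs into
+ * a single insn roughly as:
+ *
+ * CP_BRA | (target_ip << CP_BRA_IP_SHIFT) | (flag & CP_BRA_FLAG)
+ *
+ * with CP_BRA_IF_CLEAR set when branching on the flag being clear, and
+ * cp_ctx(ctx, reg, count) as CP_CTX with the count in the CP_CTX_COUNT
+ * bits and the register index in the CP_CTX_REG bits.
+ */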
+
+/* TODO:
+ * - document unimplemented bits compared to nvidia
+ * - NVAx: make a TP subroutine, use it.
+ * - use 0x4008fc instead of 0x1540?
+ */
+
+enum cp_label {
+ cp_check_load = 1,
+ cp_setup_auto_load,
+ cp_setup_load,
+ cp_setup_save,
+ cp_swap_state,
+ cp_prepare_exit,
+ cp_exit,
+};
+
+static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
+
+/* Main function: construct the ctxprog skeleton, call the other functions. */
+
+int
+nv50_grctx_init(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ case 0xa0:
+ case 0xa5:
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ break;
+ default:
+ NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
+ "your NV%x card.\n", dev_priv->chipset);
+ NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
+ "the devs.\n");
+ return -ENOSYS;
+ }
+ /* decide whether we're loading/unloading the context */
+ cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
+ cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
+
+ cp_name(ctx, cp_check_load);
+ cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
+ cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
+ cp_bra (ctx, ALWAYS, TRUE, cp_exit);
+
+ /* setup for context load */
+ cp_name(ctx, cp_setup_auto_load);
+ cp_out (ctx, CP_DISABLE1);
+ cp_out (ctx, CP_DISABLE2);
+ cp_out (ctx, CP_ENABLE);
+ cp_out (ctx, CP_NEXT_TO_SWAP);
+ cp_set (ctx, UNK01, SET);
+ cp_name(ctx, cp_setup_load);
+ cp_out (ctx, CP_NEWCTX);
+ cp_wait(ctx, NEWCTX, BUSY);
+ cp_set (ctx, UNK1D, CLEAR);
+ cp_set (ctx, SWAP_DIRECTION, LOAD);
+ cp_bra (ctx, UNK0B, SET, cp_prepare_exit);
+ cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
+
+ /* setup for context save */
+ cp_name(ctx, cp_setup_save);
+ cp_set (ctx, UNK1D, SET);
+ cp_wait(ctx, STATUS, BUSY);
+ cp_set (ctx, UNK01, SET);
+ cp_set (ctx, SWAP_DIRECTION, SAVE);
+
+ /* general PGRAPH state */
+ cp_name(ctx, cp_swap_state);
+ cp_set (ctx, UNK03, SET);
+ cp_pos (ctx, 0x00004/4);
+ cp_ctx (ctx, 0x400828, 1); /* needed. otherwise, flickering happens. */
+ cp_pos (ctx, 0x00100/4);
+ nv50_graph_construct_mmio(ctx);
+ nv50_graph_construct_xfer1(ctx);
+ nv50_graph_construct_xfer2(ctx);
+
+ cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
+
+ cp_set (ctx, UNK20, SET);
+ cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. */
+ cp_lsr (ctx, ctx->ctxvals_base);
+ cp_out (ctx, CP_SET_XFER_POINTER);
+ cp_lsr (ctx, 4);
+ cp_out (ctx, CP_SEEK_1);
+ cp_out (ctx, CP_XFER_1);
+ cp_wait(ctx, XFER, BUSY);
+
+ /* pre-exit state updates */
+ cp_name(ctx, cp_prepare_exit);
+ cp_set (ctx, UNK01, CLEAR);
+ cp_set (ctx, UNK03, CLEAR);
+ cp_set (ctx, UNK1D, CLEAR);
+
+ cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
+ cp_out (ctx, CP_NEXT_TO_CURRENT);
+
+ cp_name(ctx, cp_exit);
+ cp_set (ctx, USER_SAVE, NOT_PENDING);
+ cp_set (ctx, USER_LOAD, NOT_PENDING);
+ cp_out (ctx, CP_END);
+ ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */
+
+ return 0;
+}
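+
+/*
+ * Usage note (an inference; not spelled out in this file): the same
+ * entry point serves both struct nouveau_grctx modes. Called with mode
+ * NOUVEAU_GRCTX_VALS, as nv50_graph_create_context() does above, only
+ * the default values from gr_def()/xf_emit() are presumably written
+ * into the channel's grctx; with NOUVEAU_GRCTX_PROG the cp_* calls
+ * would instead emit the ctxprog microcode itself.
+ */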
+
+/*
+ * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which
+ * registers to save/restore and the default values for them.
+ */
+
+static void
+nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i, j;
+ int offset, base;
+ uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+
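+ /*
+ * Layout of the unit-enable word at 0x1540, as inferred from the mask
+ * tests in this file: bits 0-9 flag present TPs, bits 16-23 present
+ * ROP groups, and bits 24+ present MPs.
+ */
+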
+ /* 0800 */
+ cp_ctx(ctx, 0x400808, 7);
+ gr_def(ctx, 0x400814, 0x00000030);
+ cp_ctx(ctx, 0x400834, 0x32);
+ if (dev_priv->chipset == 0x50) {
+ gr_def(ctx, 0x400834, 0xff400040);
+ gr_def(ctx, 0x400838, 0xfff00080);
+ gr_def(ctx, 0x40083c, 0xfff70090);
+ gr_def(ctx, 0x400840, 0xffe806a8);
+ }
+ gr_def(ctx, 0x400844, 0x00000002);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ gr_def(ctx, 0x400894, 0x00001000);
+ gr_def(ctx, 0x4008e8, 0x00000003);
+ gr_def(ctx, 0x4008ec, 0x00001000);
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x400908, 0xb);
+ else if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, 0x400908, 0xc);
+ else
+ cp_ctx(ctx, 0x400908, 0xe);
+
+ if (dev_priv->chipset >= 0xa0)
+ cp_ctx(ctx, 0x400b00, 0x1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ cp_ctx(ctx, 0x400b10, 0x1);
+ gr_def(ctx, 0x400b10, 0x0001629d);
+ cp_ctx(ctx, 0x400b20, 0x1);
+ gr_def(ctx, 0x400b20, 0x0001629d);
+ }
+
+ /* 0C00 */
+ cp_ctx(ctx, 0x400c08, 0x2);
+ gr_def(ctx, 0x400c08, 0x0000fe0c);
+
+ /* 1000 */
+ if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x401008, 0x4);
+ gr_def(ctx, 0x401014, 0x00001000);
+ } else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) {
+ cp_ctx(ctx, 0x401008, 0x5);
+ gr_def(ctx, 0x401018, 0x00001000);
+ } else {
+ cp_ctx(ctx, 0x401008, 0x5);
+ gr_def(ctx, 0x401018, 0x00004000);
+ }
+
+ /* 1400 */
+ cp_ctx(ctx, 0x401400, 0x8);
+ cp_ctx(ctx, 0x401424, 0x3);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x40142c, 0x0001fd87);
+ else
+ gr_def(ctx, 0x40142c, 0x00000187);
+ cp_ctx(ctx, 0x401540, 0x5);
+ gr_def(ctx, 0x401550, 0x00001018);
+
+ /* 1800 */
+ cp_ctx(ctx, 0x401814, 0x1);
+ gr_def(ctx, 0x401814, 0x000000ff);
+ if (dev_priv->chipset == 0x50) {
+ cp_ctx(ctx, 0x40181c, 0xe);
+ gr_def(ctx, 0x401850, 0x00000004);
+ } else if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x40181c, 0xf);
+ gr_def(ctx, 0x401854, 0x00000004);
+ } else {
+ cp_ctx(ctx, 0x40181c, 0x13);
+ gr_def(ctx, 0x401864, 0x00000004);
+ }
+
+ /* 1C00 */
+ cp_ctx(ctx, 0x401c00, 0x1);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ gr_def(ctx, 0x401c00, 0x0001005f);
+ break;
+ case 0x84:
+ case 0x86:
+ case 0x94:
+ gr_def(ctx, 0x401c00, 0x044d00df);
+ break;
+ case 0x92:
+ case 0x96:
+ case 0x98:
+ case 0xa0:
+ case 0xaa:
+ case 0xac:
+ gr_def(ctx, 0x401c00, 0x042500df);
+ break;
+ case 0xa5:
+ case 0xa8:
+ gr_def(ctx, 0x401c00, 0x142500df);
+ break;
+ }
+
+ /* 2400 */
+ cp_ctx(ctx, 0x402400, 0x1);
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x402408, 0x1);
+ else
+ cp_ctx(ctx, 0x402408, 0x2);
+ gr_def(ctx, 0x402408, 0x00000600);
+
+ /* 2800 */
+ cp_ctx(ctx, 0x402800, 0x1);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x402800, 0x00000006);
+
+ /* 2C00 */
+ cp_ctx(ctx, 0x402c08, 0x6);
+ if (dev_priv->chipset != 0x50)
+ gr_def(ctx, 0x402c14, 0x01000000);
+ gr_def(ctx, 0x402c18, 0x000000ff);
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x402ca0, 0x1);
+ else
+ cp_ctx(ctx, 0x402ca0, 0x2);
+ if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, 0x402ca0, 0x00000400);
+ else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
+ gr_def(ctx, 0x402ca0, 0x00000800);
+ else
+ gr_def(ctx, 0x402ca0, 0x00000400);
+ cp_ctx(ctx, 0x402cac, 0x4);
+
+ /* 3000 */
+ cp_ctx(ctx, 0x403004, 0x1);
+ gr_def(ctx, 0x403004, 0x00000001);
+
+ /* 3404 */
+ if (dev_priv->chipset >= 0xa0) {
+ cp_ctx(ctx, 0x403404, 0x1);
+ gr_def(ctx, 0x403404, 0x00000001);
+ }
+
+ /* 5000 */
+ cp_ctx(ctx, 0x405000, 0x1);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ gr_def(ctx, 0x405000, 0x00300080);
+ break;
+ case 0x84:
+ case 0xa0:
+ case 0xa5:
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ gr_def(ctx, 0x405000, 0x000e0080);
+ break;
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ gr_def(ctx, 0x405000, 0x00000080);
+ break;
+ }
+ cp_ctx(ctx, 0x405014, 0x1);
+ gr_def(ctx, 0x405014, 0x00000004);
+ cp_ctx(ctx, 0x40501c, 0x1);
+ cp_ctx(ctx, 0x405024, 0x1);
+ cp_ctx(ctx, 0x40502c, 0x1);
+
+ /* 5400 or maybe 4800 */
+ if (dev_priv->chipset == 0x50) {
+ offset = 0x405400;
+ cp_ctx(ctx, 0x405400, 0xea);
+ } else if (dev_priv->chipset < 0x94) {
+ offset = 0x405400;
+ cp_ctx(ctx, 0x405400, 0xcb);
+ } else if (dev_priv->chipset < 0xa0) {
+ offset = 0x405400;
+ cp_ctx(ctx, 0x405400, 0xcc);
+ } else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ offset = 0x404800;
+ cp_ctx(ctx, 0x404800, 0xda);
+ } else {
+ offset = 0x405400;
+ cp_ctx(ctx, 0x405400, 0xd4);
+ }
+ gr_def(ctx, offset + 0x0c, 0x00000002);
+ gr_def(ctx, offset + 0x10, 0x00000001);
+ if (dev_priv->chipset >= 0x94)
+ offset += 4;
+ gr_def(ctx, offset + 0x1c, 0x00000001);
+ gr_def(ctx, offset + 0x20, 0x00000100);
+ gr_def(ctx, offset + 0x38, 0x00000002);
+ gr_def(ctx, offset + 0x3c, 0x00000001);
+ gr_def(ctx, offset + 0x40, 0x00000001);
+ gr_def(ctx, offset + 0x50, 0x00000001);
+ gr_def(ctx, offset + 0x54, 0x003fffff);
+ gr_def(ctx, offset + 0x58, 0x00001fff);
+ gr_def(ctx, offset + 0x60, 0x00000001);
+ gr_def(ctx, offset + 0x64, 0x00000001);
+ gr_def(ctx, offset + 0x6c, 0x00000001);
+ gr_def(ctx, offset + 0x70, 0x00000001);
+ gr_def(ctx, offset + 0x74, 0x00000001);
+ gr_def(ctx, offset + 0x78, 0x00000004);
+ gr_def(ctx, offset + 0x7c, 0x00000001);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ offset += 4;
+ gr_def(ctx, offset + 0x80, 0x00000001);
+ gr_def(ctx, offset + 0x84, 0x00000001);
+ gr_def(ctx, offset + 0x88, 0x00000007);
+ gr_def(ctx, offset + 0x8c, 0x00000001);
+ gr_def(ctx, offset + 0x90, 0x00000007);
+ gr_def(ctx, offset + 0x94, 0x00000001);
+ gr_def(ctx, offset + 0x98, 0x00000001);
+ gr_def(ctx, offset + 0x9c, 0x00000001);
+ if (dev_priv->chipset == 0x50) {
+ gr_def(ctx, offset + 0xb0, 0x00000001);
+ gr_def(ctx, offset + 0xb4, 0x00000001);
+ gr_def(ctx, offset + 0xbc, 0x00000001);
+ gr_def(ctx, offset + 0xc0, 0x0000000a);
+ gr_def(ctx, offset + 0xd0, 0x00000040);
+ gr_def(ctx, offset + 0xd8, 0x00000002);
+ gr_def(ctx, offset + 0xdc, 0x00000100);
+ gr_def(ctx, offset + 0xe0, 0x00000001);
+ gr_def(ctx, offset + 0xe4, 0x00000100);
+ gr_def(ctx, offset + 0x100, 0x00000001);
+ gr_def(ctx, offset + 0x124, 0x00000004);
+ gr_def(ctx, offset + 0x13c, 0x00000001);
+ gr_def(ctx, offset + 0x140, 0x00000100);
+ gr_def(ctx, offset + 0x148, 0x00000001);
+ gr_def(ctx, offset + 0x154, 0x00000100);
+ gr_def(ctx, offset + 0x158, 0x00000001);
+ gr_def(ctx, offset + 0x15c, 0x00000100);
+ gr_def(ctx, offset + 0x164, 0x00000001);
+ gr_def(ctx, offset + 0x170, 0x00000100);
+ gr_def(ctx, offset + 0x174, 0x00000001);
+ gr_def(ctx, offset + 0x17c, 0x00000001);
+ gr_def(ctx, offset + 0x188, 0x00000002);
+ gr_def(ctx, offset + 0x190, 0x00000001);
+ gr_def(ctx, offset + 0x198, 0x00000001);
+ gr_def(ctx, offset + 0x1ac, 0x00000003);
+ offset += 0xd0;
+ } else {
+ gr_def(ctx, offset + 0xb0, 0x00000001);
+ gr_def(ctx, offset + 0xb4, 0x00000100);
+ gr_def(ctx, offset + 0xbc, 0x00000001);
+ gr_def(ctx, offset + 0xc8, 0x00000100);
+ gr_def(ctx, offset + 0xcc, 0x00000001);
+ gr_def(ctx, offset + 0xd0, 0x00000100);
+ gr_def(ctx, offset + 0xd8, 0x00000001);
+ gr_def(ctx, offset + 0xe4, 0x00000100);
+ }
+ gr_def(ctx, offset + 0xf8, 0x00000004);
+ gr_def(ctx, offset + 0xfc, 0x00000070);
+ gr_def(ctx, offset + 0x100, 0x00000080);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ offset += 4;
+ gr_def(ctx, offset + 0x114, 0x0000000c);
+ if (dev_priv->chipset == 0x50)
+ offset -= 4;
+ gr_def(ctx, offset + 0x11c, 0x00000008);
+ gr_def(ctx, offset + 0x120, 0x00000014);
+ if (dev_priv->chipset == 0x50) {
+ gr_def(ctx, offset + 0x124, 0x00000026);
+ offset -= 0x18;
+ } else {
+ gr_def(ctx, offset + 0x128, 0x00000029);
+ gr_def(ctx, offset + 0x12c, 0x00000027);
+ gr_def(ctx, offset + 0x130, 0x00000026);
+ gr_def(ctx, offset + 0x134, 0x00000008);
+ gr_def(ctx, offset + 0x138, 0x00000004);
+ gr_def(ctx, offset + 0x13c, 0x00000027);
+ }
+ gr_def(ctx, offset + 0x148, 0x00000001);
+ gr_def(ctx, offset + 0x14c, 0x00000002);
+ gr_def(ctx, offset + 0x150, 0x00000003);
+ gr_def(ctx, offset + 0x154, 0x00000004);
+ gr_def(ctx, offset + 0x158, 0x00000005);
+ gr_def(ctx, offset + 0x15c, 0x00000006);
+ gr_def(ctx, offset + 0x160, 0x00000007);
+ gr_def(ctx, offset + 0x164, 0x00000001);
+ gr_def(ctx, offset + 0x1a8, 0x000000cf);
+ if (dev_priv->chipset == 0x50)
+ offset -= 4;
+ gr_def(ctx, offset + 0x1d8, 0x00000080);
+ gr_def(ctx, offset + 0x1dc, 0x00000004);
+ gr_def(ctx, offset + 0x1e0, 0x00000004);
+ if (dev_priv->chipset == 0x50)
+ offset -= 4;
+ else
+ gr_def(ctx, offset + 0x1e4, 0x00000003);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ gr_def(ctx, offset + 0x1ec, 0x00000003);
+ offset += 8;
+ }
+ gr_def(ctx, offset + 0x1e8, 0x00000001);
+ if (dev_priv->chipset == 0x50)
+ offset -= 4;
+ gr_def(ctx, offset + 0x1f4, 0x00000012);
+ gr_def(ctx, offset + 0x1f8, 0x00000010);
+ gr_def(ctx, offset + 0x1fc, 0x0000000c);
+ gr_def(ctx, offset + 0x200, 0x00000001);
+ gr_def(ctx, offset + 0x210, 0x00000004);
+ gr_def(ctx, offset + 0x214, 0x00000002);
+ gr_def(ctx, offset + 0x218, 0x00000004);
+ if (dev_priv->chipset >= 0xa0)
+ offset += 4;
+ gr_def(ctx, offset + 0x224, 0x003fffff);
+ gr_def(ctx, offset + 0x228, 0x00001fff);
+ if (dev_priv->chipset == 0x50)
+ offset -= 0x20;
+ else if (dev_priv->chipset >= 0xa0) {
+ gr_def(ctx, offset + 0x250, 0x00000001);
+ gr_def(ctx, offset + 0x254, 0x00000001);
+ gr_def(ctx, offset + 0x258, 0x00000002);
+ offset += 0x10;
+ }
+ gr_def(ctx, offset + 0x250, 0x00000004);
+ gr_def(ctx, offset + 0x254, 0x00000014);
+ gr_def(ctx, offset + 0x258, 0x00000001);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ offset += 4;
+ gr_def(ctx, offset + 0x264, 0x00000002);
+ if (dev_priv->chipset >= 0xa0)
+ offset += 8;
+ gr_def(ctx, offset + 0x270, 0x00000001);
+ gr_def(ctx, offset + 0x278, 0x00000002);
+ gr_def(ctx, offset + 0x27c, 0x00001000);
+ if (dev_priv->chipset == 0x50)
+ offset -= 0xc;
+ else {
+ gr_def(ctx, offset + 0x280, 0x00000e00);
+ gr_def(ctx, offset + 0x284, 0x00001000);
+ gr_def(ctx, offset + 0x288, 0x00001e00);
+ }
+ gr_def(ctx, offset + 0x290, 0x00000001);
+ gr_def(ctx, offset + 0x294, 0x00000001);
+ gr_def(ctx, offset + 0x298, 0x00000001);
+ gr_def(ctx, offset + 0x29c, 0x00000001);
+ gr_def(ctx, offset + 0x2a0, 0x00000001);
+ gr_def(ctx, offset + 0x2b0, 0x00000200);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ gr_def(ctx, offset + 0x2b4, 0x00000200);
+ offset += 4;
+ }
+ if (dev_priv->chipset < 0xa0) {
+ gr_def(ctx, offset + 0x2b8, 0x00000001);
+ gr_def(ctx, offset + 0x2bc, 0x00000070);
+ gr_def(ctx, offset + 0x2c0, 0x00000080);
+ gr_def(ctx, offset + 0x2cc, 0x00000001);
+ gr_def(ctx, offset + 0x2d0, 0x00000070);
+ gr_def(ctx, offset + 0x2d4, 0x00000080);
+ } else {
+ gr_def(ctx, offset + 0x2b8, 0x00000001);
+ gr_def(ctx, offset + 0x2bc, 0x000000f0);
+ gr_def(ctx, offset + 0x2c0, 0x000000ff);
+ gr_def(ctx, offset + 0x2cc, 0x00000001);
+ gr_def(ctx, offset + 0x2d0, 0x000000f0);
+ gr_def(ctx, offset + 0x2d4, 0x000000ff);
+ gr_def(ctx, offset + 0x2dc, 0x00000009);
+ offset += 4;
+ }
+ gr_def(ctx, offset + 0x2e4, 0x00000001);
+ gr_def(ctx, offset + 0x2e8, 0x000000cf);
+ gr_def(ctx, offset + 0x2f0, 0x00000001);
+ gr_def(ctx, offset + 0x300, 0x000000cf);
+ gr_def(ctx, offset + 0x308, 0x00000002);
+ gr_def(ctx, offset + 0x310, 0x00000001);
+ gr_def(ctx, offset + 0x318, 0x00000001);
+ gr_def(ctx, offset + 0x320, 0x000000cf);
+ gr_def(ctx, offset + 0x324, 0x000000cf);
+ gr_def(ctx, offset + 0x328, 0x00000001);
+
+ /* 6000? */
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x4063e0, 0x1);
+
+ /* 6800 */
+ if (dev_priv->chipset < 0x90) {
+ cp_ctx(ctx, 0x406814, 0x2b);
+ gr_def(ctx, 0x406818, 0x00000f80);
+ gr_def(ctx, 0x406860, 0x007f0080);
+ gr_def(ctx, 0x40689c, 0x007f0080);
+ } else {
+ cp_ctx(ctx, 0x406814, 0x4);
+ if (dev_priv->chipset == 0x98)
+ gr_def(ctx, 0x406818, 0x00000f80);
+ else
+ gr_def(ctx, 0x406818, 0x00001f80);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ gr_def(ctx, 0x40681c, 0x00000030);
+ cp_ctx(ctx, 0x406830, 0x3);
+ }
+
+ /* 7000: per-ROP group state */
+ for (i = 0; i < 8; i++) {
+ if (units & (1<<(i+16))) {
+ cp_ctx(ctx, 0x407000 + (i<<8), 3);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
+ else if (dev_priv->chipset != 0xa5)
+ gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
+ else
+ gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
+ gr_def(ctx, 0x407004 + (i<<8), 0x89058001);
+
+ if (dev_priv->chipset == 0x50) {
+ cp_ctx(ctx, 0x407010 + (i<<8), 1);
+ } else if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x407010 + (i<<8), 2);
+ gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
+ gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
+ } else {
+ cp_ctx(ctx, 0x407010 + (i<<8), 3);
+ gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
+ if (dev_priv->chipset != 0xa5)
+ gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
+ else
+ gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
+ }
+
+ cp_ctx(ctx, 0x407080 + (i<<8), 4);
+ if (dev_priv->chipset != 0xa5)
+ gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
+ else
+ gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
+ else
+ gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
+ gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, 0x407094 + (i<<8), 1);
+ else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+ cp_ctx(ctx, 0x407094 + (i<<8), 3);
+ else {
+ cp_ctx(ctx, 0x407094 + (i<<8), 4);
+ gr_def(ctx, 0x4070a0 + (i<<8), 1);
+ }
+ }
+ }
+
+ cp_ctx(ctx, 0x407c00, 0x3);
+ if (dev_priv->chipset < 0x90)
+ gr_def(ctx, 0x407c00, 0x00010040);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, 0x407c00, 0x00390040);
+ else
+ gr_def(ctx, 0x407c00, 0x003d0040);
+ gr_def(ctx, 0x407c08, 0x00000022);
+ if (dev_priv->chipset >= 0xa0) {
+ cp_ctx(ctx, 0x407c10, 0x3);
+ cp_ctx(ctx, 0x407c20, 0x1);
+ cp_ctx(ctx, 0x407c2c, 0x1);
+ }
+
+ if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x407d00, 0x9);
+ } else {
+ cp_ctx(ctx, 0x407d00, 0x15);
+ }
+ if (dev_priv->chipset == 0x98)
+ gr_def(ctx, 0x407d08, 0x00380040);
+ else {
+ if (dev_priv->chipset < 0x90)
+ gr_def(ctx, 0x407d08, 0x00010040);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, 0x407d08, 0x00390040);
+ else
+ gr_def(ctx, 0x407d08, 0x003d0040);
+ gr_def(ctx, 0x407d0c, 0x00000022);
+ }
+
+ /* 8000+: per-TP state */
+ for (i = 0; i < 10; i++) {
+ if (units & (1<<i)) {
+ if (dev_priv->chipset < 0xa0)
+ base = 0x408000 + (i<<12);
+ else
+ base = 0x408000 + (i<<11);
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0xc00;
+ else
+ offset = base + 0x80;
+ cp_ctx(ctx, offset + 0x00, 1);
+ gr_def(ctx, offset + 0x00, 0x0000ff0a);
+ cp_ctx(ctx, offset + 0x08, 1);
+
+ /* per-MP state */
+ for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) {
+ if (!(units & (1 << (j + 24))))
+ continue;
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0x200 + (j<<7);
+ else
+ offset = base + 0x100 + (j<<7);
+ cp_ctx(ctx, offset, 0x20);
+ gr_def(ctx, offset + 0x00, 0x01800000);
+ gr_def(ctx, offset + 0x04, 0x00160000);
+ gr_def(ctx, offset + 0x08, 0x01800000);
+ gr_def(ctx, offset + 0x18, 0x0003ffff);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ gr_def(ctx, offset + 0x1c, 0x00080000);
+ break;
+ case 0x84:
+ gr_def(ctx, offset + 0x1c, 0x00880000);
+ break;
+ case 0x86:
+ gr_def(ctx, offset + 0x1c, 0x008c0000);
+ break;
+ case 0x92:
+ case 0x96:
+ case 0x98:
+ gr_def(ctx, offset + 0x1c, 0x118c0000);
+ break;
+ case 0x94:
+ gr_def(ctx, offset + 0x1c, 0x10880000);
+ break;
+ case 0xa0:
+ case 0xa5:
+ gr_def(ctx, offset + 0x1c, 0x310c0000);
+ break;
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ gr_def(ctx, offset + 0x1c, 0x300c0000);
+ break;
+ }
+ gr_def(ctx, offset + 0x40, 0x00010401);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x48, 0x00000040);
+ else
+ gr_def(ctx, offset + 0x48, 0x00000078);
+ gr_def(ctx, offset + 0x50, 0x000000bf);
+ gr_def(ctx, offset + 0x58, 0x00001210);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x5c, 0x00000080);
+ else
+ gr_def(ctx, offset + 0x5c, 0x08000080);
+ if (dev_priv->chipset >= 0xa0)
+ gr_def(ctx, offset + 0x68, 0x0000003e);
+ }
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, base + 0x300, 0x4);
+ else
+ cp_ctx(ctx, base + 0x300, 0x5);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, base + 0x304, 0x00007070);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, base + 0x304, 0x00027070);
+ else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+ gr_def(ctx, base + 0x304, 0x01127070);
+ else
+ gr_def(ctx, base + 0x304, 0x05127070);
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, base + 0x318, 1);
+ else
+ cp_ctx(ctx, base + 0x320, 1);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, base + 0x318, 0x0003ffff);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, base + 0x318, 0x03ffffff);
+ else
+ gr_def(ctx, base + 0x320, 0x07ffffff);
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, base + 0x324, 5);
+ else
+ cp_ctx(ctx, base + 0x328, 4);
+
+ if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, base + 0x340, 9);
+ offset = base + 0x340;
+ } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+ cp_ctx(ctx, base + 0x33c, 0xb);
+ offset = base + 0x344;
+ } else {
+ cp_ctx(ctx, base + 0x33c, 0xd);
+ offset = base + 0x344;
+ }
+ gr_def(ctx, offset + 0x0, 0x00120407);
+ gr_def(ctx, offset + 0x4, 0x05091507);
+ if (dev_priv->chipset == 0x84)
+ gr_def(ctx, offset + 0x8, 0x05100202);
+ else
+ gr_def(ctx, offset + 0x8, 0x05010202);
+ gr_def(ctx, offset + 0xc, 0x00030201);
+
+ cp_ctx(ctx, base + 0x400, 2);
+ gr_def(ctx, base + 0x404, 0x00000040);
+ cp_ctx(ctx, base + 0x40c, 2);
+ gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
+ gr_def(ctx, base + 0x410, 0x00141210);
+
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0x800;
+ else
+ offset = base + 0x500;
+ cp_ctx(ctx, offset, 6);
+ gr_def(ctx, offset + 0x0, 0x000001f0);
+ gr_def(ctx, offset + 0x4, 0x00000001);
+ gr_def(ctx, offset + 0x8, 0x00000003);
+ if (dev_priv->chipset == 0x50 || dev_priv->chipset >= 0xaa)
+ gr_def(ctx, offset + 0xc, 0x00008000);
+ gr_def(ctx, offset + 0x14, 0x00039e00);
+ cp_ctx(ctx, offset + 0x1c, 2);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x1c, 0x00000040);
+ else
+ gr_def(ctx, offset + 0x1c, 0x00000100);
+ gr_def(ctx, offset + 0x20, 0x00003800);
+
+ if (dev_priv->chipset >= 0xa0) {
+ cp_ctx(ctx, base + 0x54c, 2);
+ if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+ gr_def(ctx, base + 0x54c, 0x003fe006);
+ else
+ gr_def(ctx, base + 0x54c, 0x003fe007);
+ gr_def(ctx, base + 0x550, 0x003fe000);
+ }
+
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0xa00;
+ else
+ offset = base + 0x680;
+ cp_ctx(ctx, offset, 1);
+ gr_def(ctx, offset, 0x00404040);
+
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0xe00;
+ else
+ offset = base + 0x700;
+ cp_ctx(ctx, offset, 2);
+ if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, offset, 0x0077f005);
+ else if (dev_priv->chipset == 0xa5)
+ gr_def(ctx, offset, 0x6cf7f007);
+ else if (dev_priv->chipset == 0xa8)
+ gr_def(ctx, offset, 0x6cfff007);
+ else if (dev_priv->chipset == 0xac)
+ gr_def(ctx, offset, 0x0cfff007);
+ else
+ gr_def(ctx, offset, 0x0cf7f007);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x4, 0x00007fff);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, offset + 0x4, 0x003f7fff);
+ else
+ gr_def(ctx, offset + 0x4, 0x02bf7fff);
+ cp_ctx(ctx, offset + 0x2c, 1);
+ if (dev_priv->chipset == 0x50) {
+ cp_ctx(ctx, offset + 0x50, 9);
+ gr_def(ctx, offset + 0x54, 0x000003ff);
+ gr_def(ctx, offset + 0x58, 0x00000003);
+ gr_def(ctx, offset + 0x5c, 0x00000003);
+ gr_def(ctx, offset + 0x60, 0x000001ff);
+ gr_def(ctx, offset + 0x64, 0x0000001f);
+ gr_def(ctx, offset + 0x68, 0x0000000f);
+ gr_def(ctx, offset + 0x6c, 0x0000000f);
+ } else if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, offset + 0x50, 1);
+ cp_ctx(ctx, offset + 0x70, 1);
+ } else {
+ cp_ctx(ctx, offset + 0x50, 1);
+ cp_ctx(ctx, offset + 0x60, 5);
+ }
+ }
+ }
+}
+
+/*
+ * xfer areas. These are a pain.
+ *
+ * There are 2 xfer areas: the first one is big and contains all sorts of
+ * stuff, the second is small and contains some per-TP context.
+ *
+ * Each area is split into 8 "strands". The areas, when saved to grctx,
+ * are made of 8-word blocks. Each block contains a single word from
+ * each strand. The strands are independent of each other, their
+ * addresses are unrelated to each other, and data in them is closely
+ * packed together. The strand layout varies a bit between cards: here
+ * and there, a single word is thrown out in the middle and the whole
+ * strand is offset by a bit from the corresponding one on another chipset.
+ * For this reason, addresses of stuff in strands are almost useless.
+ * Knowing the sequence of stuff and the size of the gaps between them is
+ * much more useful, and that's how we build the strands in our generator.
+ *
+ * NVA0 takes this mess to a whole new level by cutting the old strands
+ * into a few dozen pieces [known as genes], rearranging them randomly,
+ * and putting them back together to make new strands. Hopefully these
+ * genes correspond more or less directly to the same PGRAPH subunits
+ * as in the 400040 register.
+ *
+ * The most common value in the default context is 0, and when the genes
+ * are separated by 0's, gene boundaries are quite speculative...
+ * some of them can be clearly deduced, others can be guessed, and yet
+ * others won't be resolved without figuring out the real meaning of a
+ * given ctxval. For the same reason, the ending point of each strand is
+ * unknown, except for strand 0, which is the longest strand and whose
+ * end corresponds to the end of the whole xfer.
+ *
+ * An unsolved mystery is the seek instruction: it takes an argument
+ * in bits 8-18, and that argument is clearly the place in strands to
+ * seek to... but the offsets don't seem to correspond to offsets as
+ * seen in grctx. Perhaps there's another, real, not-randomly-changing
+ * addressing scheme in strands, and the xfer insn just happens to skip over
+ * the unused bits? NV10-NV30 PIPE comes to mind...
+ *
+ * As far as I know, there's no way to access the xfer areas directly
+ * without the help of ctxprog.
+ */
+
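+/*
+ * Emit <num> consecutive words of value <val> into the current strand.
+ * Strand words are interleaved in the grctx in 8-word blocks (one word
+ * per strand), so consecutive words of one strand sit 8 grctx words
+ * apart, hence the <<3 stride. In VALS mode the values are actually
+ * written; zero values are skipped, presumably because the grctx object
+ * starts out zeroed. In any other mode only the position advances.
+ */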
+static inline void
+xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val)
+{
+ int i;
+ if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
+ for (i = 0; i < num; i++)
+ nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val);
+ ctx->ctxvals_pos += num << 3;
+}
+
+/* Gene declarations... */
+
+static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
+
+static void
+nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+ int offset;
+ int size = 0;
+ uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+
+ offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
+ ctx->ctxvals_base = offset;
+
+ if (dev_priv->chipset < 0xa0) {
+ /* Strand 0 */
+ ctx->ctxvals_pos = offset;
+ switch (dev_priv->chipset) {
+ case 0x50:
+ xf_emit(ctx, 0x99, 0);
+ break;
+ case 0x84:
+ case 0x86:
+ xf_emit(ctx, 0x384, 0);
+ break;
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ xf_emit(ctx, 0x380, 0);
+ break;
+ }
+ nv50_graph_construct_gene_m2mf (ctx);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x84:
+ case 0x86:
+ case 0x98:
+ xf_emit(ctx, 0x4c4, 0);
+ break;
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 0x984, 0);
+ break;
+ }
+ nv50_graph_construct_gene_unk5(ctx);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0xa, 0);
+ else
+ xf_emit(ctx, 0xb, 0);
+ nv50_graph_construct_gene_unk4(ctx);
+ nv50_graph_construct_gene_unk3(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 1 */
+ ctx->ctxvals_pos = offset + 0x1;
+ nv50_graph_construct_gene_unk6(ctx);
+ nv50_graph_construct_gene_unk7(ctx);
+ nv50_graph_construct_gene_unk8(ctx);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x92:
+ xf_emit(ctx, 0xfb, 0);
+ break;
+ case 0x84:
+ xf_emit(ctx, 0xd3, 0);
+ break;
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 0xab, 0);
+ break;
+ case 0x86:
+ case 0x98:
+ xf_emit(ctx, 0x6b, 0);
+ break;
+ }
+ xf_emit(ctx, 2, 0x4e3bfdf);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 0xb, 0);
+ xf_emit(ctx, 2, 0x4e3bfdf);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 2 */
+ ctx->ctxvals_pos = offset + 0x2;
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x92:
+ xf_emit(ctx, 0xa80, 0);
+ break;
+ case 0x84:
+ xf_emit(ctx, 0xa7e, 0);
+ break;
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 0xa7c, 0);
+ break;
+ case 0x86:
+ case 0x98:
+ xf_emit(ctx, 0xa7a, 0);
+ break;
+ }
+ xf_emit(ctx, 1, 0x3fffff);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x1fff);
+ xf_emit(ctx, 0xe, 0);
+ nv50_graph_construct_gene_unk9(ctx);
+ nv50_graph_construct_gene_unk2(ctx);
+ nv50_graph_construct_gene_unk1(ctx);
+ nv50_graph_construct_gene_unk10(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 3: per-ROP group state */
+ ctx->ctxvals_pos = offset + 3;
+ for (i = 0; i < 6; i++)
+ if (units & (1 << (i + 16)))
+ nv50_graph_construct_gene_ropc(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strands 4-7: per-TP state */
+ for (i = 0; i < 4; i++) {
+ ctx->ctxvals_pos = offset + 4 + i;
+ if (units & (1 << (2 * i)))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << (2 * i + 1)))
+ nv50_graph_construct_xfer_tp(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+ } else {
+ /* Strand 0 */
+ ctx->ctxvals_pos = offset;
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x385, 0);
+ else
+ xf_emit(ctx, 0x384, 0);
+ nv50_graph_construct_gene_m2mf(ctx);
+ xf_emit(ctx, 0x950, 0);
+ nv50_graph_construct_gene_unk10(ctx);
+ xf_emit(ctx, 1, 0x0fac6881);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 3, 0);
+ }
+ nv50_graph_construct_gene_unk8(ctx);
+ if (dev_priv->chipset == 0xa0)
+ xf_emit(ctx, 0x189, 0);
+ else if (dev_priv->chipset < 0xa8)
+ xf_emit(ctx, 0x99, 0);
+ else if (dev_priv->chipset == 0xaa)
+ xf_emit(ctx, 0x65, 0);
+ else
+ xf_emit(ctx, 0x6d, 0);
+ nv50_graph_construct_gene_unk9(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 1 */
+ ctx->ctxvals_pos = offset + 1;
+ nv50_graph_construct_gene_unk1(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 2 */
+ ctx->ctxvals_pos = offset + 2;
+ if (dev_priv->chipset == 0xa0) {
+ nv50_graph_construct_gene_unk2(ctx);
+ }
+ xf_emit(ctx, 0x36, 0);
+ nv50_graph_construct_gene_unk5(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 3 */
+ ctx->ctxvals_pos = offset + 3;
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ nv50_graph_construct_gene_unk6(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 4 */
+ ctx->ctxvals_pos = offset + 4;
+ if (dev_priv->chipset == 0xa0)
+ xf_emit(ctx, 0xa80, 0);
+ else
+ xf_emit(ctx, 0xa7a, 0);
+ xf_emit(ctx, 1, 0x3fffff);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x1fff);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 5 */
+ ctx->ctxvals_pos = offset + 5;
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 0xb, 0);
+ xf_emit(ctx, 2, 0x4e3bfdf);
+ xf_emit(ctx, 3, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 0x4e3bfdf);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 0);
+ for (i = 0; i < 8; i++)
+ if (units & (1<<(i+16)))
+ nv50_graph_construct_gene_ropc(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 6 */
+ ctx->ctxvals_pos = offset + 6;
+ nv50_graph_construct_gene_unk3(ctx);
+ xf_emit(ctx, 0xb, 0);
+ nv50_graph_construct_gene_unk4(ctx);
+ nv50_graph_construct_gene_unk7(ctx);
+ if (units & (1 << 0))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 1))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 2))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 3))
+ nv50_graph_construct_xfer_tp(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 7 */
+ ctx->ctxvals_pos = offset + 7;
+ if (dev_priv->chipset == 0xa0) {
+ if (units & (1 << 4))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 5))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 6))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 7))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 8))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 9))
+ nv50_graph_construct_xfer_tp(ctx);
+ } else {
+ nv50_graph_construct_gene_unk2(ctx);
+ }
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+
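+ /*
+ * All strands are built; size is the longest strand in 8-word blocks.
+ * Point the ctxprog at the xfer area and emit the transfer itself.
+ */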
+ ctx->ctxvals_pos = offset + size * 8;
+ ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
+ cp_lsr (ctx, offset);
+ cp_out (ctx, CP_SET_XFER_POINTER);
+ cp_lsr (ctx, size);
+ cp_out (ctx, CP_SEEK_1);
+ cp_out (ctx, CP_XFER_1);
+ cp_wait(ctx, XFER, BUSY);
+}
+
+/*
+ * non-trivial demagiced parts of ctx init go here
+ */
+
+static void
+nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
+{
+ /* m2mf state */
+ xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */
+ xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */
+ xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */
+ xf_emit (ctx, 1, 0); /* OFFSET_IN */
+ xf_emit (ctx, 1, 0); /* OFFSET_OUT */
+ xf_emit (ctx, 1, 0); /* PITCH_IN */
+ xf_emit (ctx, 1, 0); /* PITCH_OUT */
+ xf_emit (ctx, 1, 0); /* LINE_LENGTH */
+ xf_emit (ctx, 1, 0); /* LINE_COUNT */
+ xf_emit (ctx, 1, 0x21); /* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */
+ xf_emit (ctx, 1, 1); /* LINEAR_IN */
+ xf_emit (ctx, 1, 0x2); /* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */
+ xf_emit (ctx, 1, 0x100); /* TILING_PITCH_IN */
+ xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_IN */
+ xf_emit (ctx, 1, 1); /* TILING_DEPTH_IN */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_IN_Z */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_IN */
+ xf_emit (ctx, 1, 1); /* LINEAR_OUT */
+ xf_emit (ctx, 1, 0x2); /* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */
+ xf_emit (ctx, 1, 0x100); /* TILING_PITCH_OUT */
+ xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_OUT */
+ xf_emit (ctx, 1, 1); /* TILING_DEPTH_OUT */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT_Z */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */
+ xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */
+ xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */
+}
+
+static void
+nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* end of area 2 on pre-NVA0, area 1 on NVAx */
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff);
+ else
+ xf_emit(ctx, 1, 0x7ff);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x86:
+ case 0x98:
+ case 0xaa:
+ case 0xac:
+ xf_emit(ctx, 0x542, 0);
+ break;
+ case 0x84:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 0x942, 0);
+ break;
+ case 0xa0:
+ xf_emit(ctx, 0x2042, 0);
+ break;
+ case 0xa5:
+ case 0xa8:
+ xf_emit(ctx, 0x842, 0);
+ break;
+ }
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x27);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x26);
+ xf_emit(ctx, 3, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx)
+{
+ /* end of area 2 on pre-NVA0, area 1 on NVAx */
+ xf_emit(ctx, 0x10, 0x04000000);
+ xf_emit(ctx, 0x24, 0);
+ xf_emit(ctx, 2, 0x04e3bfdf);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x1fe21);
+}
+
+static void
+nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x804);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0x8100c12);
+ }
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x10);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 3, 0);
+ else
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x804);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x1a);
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0x7f);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 6, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff);
+ else
+ xf_emit(ctx, 1, 0x7ff);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 0x38, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 0x38, 0);
+ xf_emit(ctx, 2, 0x88);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 0x16, 0);
+ xf_emit(ctx, 1, 0x26);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x3f800000);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 4, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 1, 0x10);
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 0x28, 0);
+ else
+ xf_emit(ctx, 0x25, 0);
+ xf_emit(ctx, 1, 0x52);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x26);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x00ffff00);
+ xf_emit(ctx, 1, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* end of area 0 on pre-NVA0, beginning of area 6 on NVAx */
+ xf_emit(ctx, 1, 0x3f);
+ xf_emit(ctx, 0xa, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 0x04000000);
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 4);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0x10, 0);
+ else
+ xf_emit(ctx, 0x11, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x1001);
+ xf_emit(ctx, 4, 0xffff);
+ xf_emit(ctx, 0x20, 0);
+ xf_emit(ctx, 0x10, 0x3f800000);
+ xf_emit(ctx, 1, 0x10);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0);
+ else
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 2, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx)
+{
+ /* middle of area 0 on pre-NVA0, middle of area 6 on NVAx */
+ xf_emit(ctx, 2, 0x04000000);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of area 0 on pre-NVA0 [after m2mf], end of area 2 on NVAx */
+ xf_emit(ctx, 2, 4);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x1c4d, 0);
+ else
+ xf_emit(ctx, 0x1c4b, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0x8100c12);
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 1, 0x27);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x3c1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x16, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 1, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* beginning of area 1 on pre-NVA0 [after m2mf], area 3 on NVAx */
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0xf);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 8, 0);
+ else
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x20);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x11, 0);
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 0xf, 0);
+ else
+ xf_emit(ctx, 0xe, 0);
+ xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 0xd, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 8);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff);
+ else
+ xf_emit(ctx, 1, 0x7ff);
+ if (dev_priv->chipset == 0xa8)
+ xf_emit(ctx, 1, 0x1e00);
+ xf_emit(ctx, 0xc, 0);
+ xf_emit(ctx, 1, 0xf);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0x125, 0);
+ else if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x126, 0);
+ else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
+ xf_emit(ctx, 0x124, 0);
+ else
+ xf_emit(ctx, 0x1f7, 0);
+ xf_emit(ctx, 1, 0xf);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 3, 0);
+ else
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0xa1, 0);
+ else
+ xf_emit(ctx, 0x5a, 0);
+ xf_emit(ctx, 1, 0xf);
+ if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x834, 0);
+ else if (dev_priv->chipset == 0xa0)
+ xf_emit(ctx, 0x1873, 0);
+ else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x8ba, 0);
+ else
+ xf_emit(ctx, 0x833, 0);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 0xf, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 6 on NVAx */
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 2, 1);
+ else
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0x100);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 8);
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 3, 1);
+ xf_emit(ctx, 1, 0xcf);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 3, 1);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x15);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x4444480);
+ xf_emit(ctx, 0x37, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx)
+{
+ /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 0 on NVAx */
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x100);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x10001);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x10001);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x10001);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 2);
+}
+
+static void
+nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of area 2 on pre-NVA0 [after m2mf], end of area 0 on NVAx */
+ xf_emit(ctx, 1, 0x3f800000);
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x12, 0);
+ xf_emit(ctx, 1, 0x00ffff00);
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 0xf, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 3);
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 0x04000000);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 5);
+ xf_emit(ctx, 1, 0x52);
+ if (dev_priv->chipset == 0x50) {
+ xf_emit(ctx, 0x13, 0);
+ } else {
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x11, 0);
+ else
+ xf_emit(ctx, 0x10, 0);
+ }
+ xf_emit(ctx, 0x10, 0x3f800000);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 0x26, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 1, 5);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 4, 0xffff);
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 3);
+ if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x1f, 0);
+ else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0xc, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x00ffff00);
+ xf_emit(ctx, 1, 0x1a);
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 3);
+ }
+ if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x26, 0);
+ else
+ xf_emit(ctx, 0x3c, 0);
+ xf_emit(ctx, 1, 0x102);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 4, 4);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff);
+ else
+ xf_emit(ctx, 1, 0x7ff);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x102);
+ xf_emit(ctx, 9, 0);
+ xf_emit(ctx, 4, 4);
+ xf_emit(ctx, 0x2c, 0);
+}
+
+static void
+nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int magic2;
+ if (dev_priv->chipset == 0x50) {
+ magic2 = 0x00003e60;
+ } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+ magic2 = 0x001ffe67;
+ } else {
+ magic2 = 0x00087e67;
+ }
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, magic2);
+ xf_emit(ctx, 4, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 7, 0);
+ if (dev_priv->chipset >= 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 0x15);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 4, 0);
+ if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0x400);
+ xf_emit(ctx, 1, 0x300);
+ xf_emit(ctx, 1, 0x1001);
+ if (dev_priv->chipset != 0xa0) {
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 0);
+ else
+ xf_emit(ctx, 1, 0x15);
+ }
+ xf_emit(ctx, 3, 0);
+ }
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x13, 0);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 0x10, 0);
+ xf_emit(ctx, 0x10, 0x3f800000);
+ xf_emit(ctx, 0x19, 0);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x3f);
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x1001);
+ xf_emit(ctx, 0xb, 0);
+ } else {
+ xf_emit(ctx, 0xc, 0);
+ }
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x11);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 4, 0);
+ else
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 3, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, magic2);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 0x18, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x16, 0);
+ } else {
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 0x1b, 0);
+ else
+ xf_emit(ctx, 0x15, 0);
+ }
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 1);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 4, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 0x10, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 0x10, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 3, 0);
+ }
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x5b, 0);
+}
+
+static void
+nv50_graph_construct_xfer_tp_x1(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int magic3;
+ if (dev_priv->chipset == 0x50)
+ magic3 = 0x1000;
+ else if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8)
+ magic3 = 0x1e00;
+ else
+ magic3 = 0;
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 4);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x24, 0);
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 0x14, 0);
+ else
+ xf_emit(ctx, 0x15, 0);
+ xf_emit(ctx, 2, 4);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0x03020100);
+ else
+ xf_emit(ctx, 1, 0x00608080);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0x80);
+ if (magic3)
+ xf_emit(ctx, 1, magic3);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 0x24, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0x03020100);
+ xf_emit(ctx, 1, 3);
+ if (magic3)
+ xf_emit(ctx, 1, magic3);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 4);
+ if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
+ xf_emit(ctx, 0x1024, 0);
+ else if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0xa24, 0);
+ else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
+ xf_emit(ctx, 0x214, 0);
+ else
+ xf_emit(ctx, 0x414, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 2, 0);
+}
+
+static void
+nv50_graph_construct_xfer_tp_x2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int magic1, magic2;
+ if (dev_priv->chipset == 0x50) {
+ magic1 = 0x3ff;
+ magic2 = 0x00003e60;
+ } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+ magic1 = 0x7ff;
+ magic2 = 0x001ffe67;
+ } else {
+ magic1 = 0x7ff;
+ magic2 = 0x00087e67;
+ }
+ xf_emit(ctx, 3, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0xc, 0);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 0xb, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 4, 0xffff);
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 1, 0);
+ } else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0xa, 0);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 0x18, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 1, 0);
+ }
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 3, 0xcf);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0xa, 0);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, magic2);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x11);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 2, 1);
+ else
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, magic1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x28, 0);
+ xf_emit(ctx, 8, 8);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 8, 0x400);
+ xf_emit(ctx, 8, 0x300);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x20);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 0x100);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x40);
+ xf_emit(ctx, 1, 0x100);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 4, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, magic2);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 9, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x400);
+ xf_emit(ctx, 1, 0x300);
+ xf_emit(ctx, 1, 0x1001);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 4, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 1, 0xf);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 0x15, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 3, 0);
+ } else
+ xf_emit(ctx, 0x17, 0);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 1, magic2);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 3, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 2, 1);
+ else
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 2, 0);
+ else if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0);
+}
+
+static void
+nv50_graph_construct_xfer_tp_x3(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 2, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x2a712488);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x4085c000);
+ xf_emit(ctx, 1, 0x40);
+ xf_emit(ctx, 1, 0x100);
+ xf_emit(ctx, 1, 0x10100);
+ xf_emit(ctx, 1, 0x02800000);
+}
+
+static void
+nv50_graph_construct_xfer_tp_x4(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 2, 0x04e3bfdf);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x00ffff00);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 2, 1);
+ else
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x00ffff00);
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x30201000);
+ xf_emit(ctx, 1, 0x70605040);
+ xf_emit(ctx, 1, 0xb8a89888);
+ xf_emit(ctx, 1, 0xf8e8d8c8);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x1a);
+}
+
+static void
+nv50_graph_construct_xfer_tp_x5(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0xfac6881);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0xb, 0);
+ else
+ xf_emit(ctx, 0xa, 0);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0xfac6881);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 6, 0);
+ } else {
+ xf_emit(ctx, 0xb, 0);
+ }
+}
+
+static void
+nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
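+ /* the xfer_tp sub-blocks are emitted in different orders pre-NVA0
+  * and on NVA0+, presumably mirroring the hardware context layout */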
+ if (dev_priv->chipset < 0xa0) {
+ nv50_graph_construct_xfer_tp_x1(ctx);
+ nv50_graph_construct_xfer_tp_x2(ctx);
+ nv50_graph_construct_xfer_tp_x3(ctx);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0xf, 0);
+ else
+ xf_emit(ctx, 0x12, 0);
+ nv50_graph_construct_xfer_tp_x4(ctx);
+ } else {
+ nv50_graph_construct_xfer_tp_x3(ctx);
+ if (dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0xc, 0);
+ else
+ xf_emit(ctx, 0xa, 0);
+ nv50_graph_construct_xfer_tp_x2(ctx);
+ nv50_graph_construct_xfer_tp_x5(ctx);
+ nv50_graph_construct_xfer_tp_x4(ctx);
+ nv50_graph_construct_xfer_tp_x1(ctx);
+ }
+}
+
+static void
+nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i, mpcnt;
+ if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
+ mpcnt = 1;
+ else if (dev_priv->chipset < 0xa0 || dev_priv->chipset >= 0xa8)
+ mpcnt = 2;
+ else
+ mpcnt = 3;
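+ /* emit the per-MP state block once for each MP present in the TP */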
+ for (i = 0; i < mpcnt; i++) {
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 0x80007004);
+ xf_emit(ctx, 1, 0x04000400);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0xc0);
+ xf_emit(ctx, 1, 0x1000);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) {
+ xf_emit(ctx, 1, 0xe00);
+ xf_emit(ctx, 1, 0x1e00);
+ }
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 2, 0x1000);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 2);
+ if (dev_priv->chipset >= 0xaa)
+ xf_emit(ctx, 0xb, 0);
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 0xc, 0);
+ else
+ xf_emit(ctx, 0xa, 0);
+ }
+ xf_emit(ctx, 1, 0x08100c12);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0x1fe21);
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 4, 0xffff);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0x10001);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x1fe21);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x08100c12);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 0xfac6881);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 9, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 3, 1);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 0x10, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 0x18, 1);
+ xf_emit(ctx, 3, 0);
+ }
+ xf_emit(ctx, 1, 4);
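+ /* large zero-filled region; its size varies slightly per chipset */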
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0x3a0, 0);
+ else if (dev_priv->chipset < 0x94)
+ xf_emit(ctx, 0x3a2, 0);
+ else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
+ xf_emit(ctx, 0x39f, 0);
+ else
+ xf_emit(ctx, 0x3a3, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x2d, 0);
+}
+
+static void
+nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+ uint32_t offset;
+ uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+ int size = 0;
+
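+ /* round the current ctxvals position up to a 0x40-entry boundary */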
+ offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
+
+ if (dev_priv->chipset < 0xa0) {
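+ /* pre-NVA0: eight strands, one TP per strand */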
+ for (i = 0; i < 8; i++) {
+ ctx->ctxvals_pos = offset + i;
+ if (i == 0)
+ xf_emit(ctx, 1, 0x08100c12);
+ if (units & (1 << i))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+ } else {
+ /* Strand 0: TPs 0, 1 */
+ ctx->ctxvals_pos = offset;
+ xf_emit(ctx, 1, 0x08100c12);
+ if (units & (1 << 0))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 1))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 1: TPs 2, 3 */
+ ctx->ctxvals_pos = offset + 1;
+ if (units & (1 << 2))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 3))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 2: TPs 4, 5, 6 */
+ ctx->ctxvals_pos = offset + 2;
+ if (units & (1 << 4))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 5))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 6))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 3: TPs 7, 8, 9 */
+ ctx->ctxvals_pos = offset + 3;
+ if (units & (1 << 7))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 8))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 9))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+ ctx->ctxvals_pos = offset + size * 8;
+ ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
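+ /* emit the ctxprog commands that point the context processor at the
+  * xfer area reserved above and wait until the transfer completes */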
+ cp_lsr (ctx, offset);
+ cp_out (ctx, CP_SET_XFER_POINTER);
+ cp_lsr (ctx, size);
+ cp_out (ctx, CP_SEEK_2);
+ cp_out (ctx, CP_XFER_2);
+ cp_wait(ctx, XFER, BUSY);
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index f0dc4e3..de1f5b0 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -390,7 +390,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
if (gpuobj->im_backing)
return -EINVAL;
- *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
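+ /* ALIGN(x, a) is effectively ((x + a - 1) & ~(a - 1)) for power-of-two a,
+  * i.e. round *sz up to the next NV50_INSTMEM_PAGE_SIZE multiple */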
+ *sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE);
if (*sz == 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 1cc7b93..ed38262 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -30,6 +30,9 @@ $(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
$(call if_changed,mkregtable)
+$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
$(obj)/r200.o: $(obj)/r200_reg_safe.h
@@ -42,6 +45,8 @@ $(obj)/r420.o: $(obj)/r420_reg_safe.h
$(obj)/rs600.o: $(obj)/rs600_reg_safe.h
+$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
+
radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
radeon_irq.o r300_cmdbuf.o r600_cp.o
# add KMS driver
@@ -54,8 +59,10 @@ radeon-y += radeon_device.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
- r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o
+ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
+ evergreen.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
+radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 7f152f6..d75788f 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -881,8 +881,6 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
- attr &= 0x38;
- attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
shift = atom_get_src(ctx, attr, ptr);
@@ -897,8 +895,6 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
- attr &= 0x38;
- attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
shift = atom_get_src(ctx, attr, ptr);
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 91ad0d1..6732b5d 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2006-2007 Advanced Micro Devices, Inc.
+ * Copyright 2006-2007 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,10 +20,12 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-/****************************************************************************/
+
+/****************************************************************************/
/*Portion I: Definitions shared between VBIOS and Driver */
/****************************************************************************/
+
#ifndef _ATOMBIOS_H
#define _ATOMBIOS_H
@@ -40,39 +42,46 @@
#endif
#ifdef _H2INC
-#ifndef ULONG
-typedef unsigned long ULONG;
-#endif
+ #ifndef ULONG
+ typedef unsigned long ULONG;
+ #endif
-#ifndef UCHAR
-typedef unsigned char UCHAR;
-#endif
+ #ifndef UCHAR
+ typedef unsigned char UCHAR;
+ #endif
-#ifndef USHORT
-typedef unsigned short USHORT;
-#endif
+ #ifndef USHORT
+ typedef unsigned short USHORT;
+ #endif
#endif
-
-#define ATOM_DAC_A 0
+
+#define ATOM_DAC_A 0
#define ATOM_DAC_B 1
#define ATOM_EXT_DAC 2
#define ATOM_CRTC1 0
#define ATOM_CRTC2 1
+#define ATOM_CRTC3 2
+#define ATOM_CRTC4 3
+#define ATOM_CRTC5 4
+#define ATOM_CRTC6 5
+#define ATOM_CRTC_INVALID 0xFF
#define ATOM_DIGA 0
#define ATOM_DIGB 1
#define ATOM_PPLL1 0
#define ATOM_PPLL2 1
+#define ATOM_DCPLL 2
+#define ATOM_PPLL_INVALID 0xFF
#define ATOM_SCALER1 0
#define ATOM_SCALER2 1
-#define ATOM_SCALER_DISABLE 0
-#define ATOM_SCALER_CENTER 1
-#define ATOM_SCALER_EXPANSION 2
-#define ATOM_SCALER_MULTI_EX 3
+#define ATOM_SCALER_DISABLE 0
+#define ATOM_SCALER_CENTER 1
+#define ATOM_SCALER_EXPANSION 2
+#define ATOM_SCALER_MULTI_EX 3
#define ATOM_DISABLE 0
#define ATOM_ENABLE 1
@@ -82,6 +91,7 @@ typedef unsigned short USHORT;
#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
#define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
+#define ATOM_GET_STATUS (ATOM_DISABLE+8)
#define ATOM_BLANKING 1
#define ATOM_BLANKING_OFF 0
@@ -114,7 +124,7 @@ typedef unsigned short USHORT;
#define ATOM_DAC2_CV ATOM_DAC1_CV
#define ATOM_DAC2_NTSC ATOM_DAC1_NTSC
#define ATOM_DAC2_PAL ATOM_DAC1_PAL
-
+
#define ATOM_PM_ON 0
#define ATOM_PM_STANDBY 1
#define ATOM_PM_SUSPEND 2
@@ -134,6 +144,7 @@ typedef unsigned short USHORT;
#define ATOM_PANEL_MISC_TEMPORAL 0x00000040
#define ATOM_PANEL_MISC_API_ENABLED 0x00000080
+
#define MEMTYPE_DDR1 "DDR1"
#define MEMTYPE_DDR2 "DDR2"
#define MEMTYPE_DDR3 "DDR3"
@@ -145,19 +156,19 @@ typedef unsigned short USHORT;
/* Maximum size of that FireGL flag string */
-#define ATOM_FIREGL_FLAG_STRING "FGL" /* Flag used to enable FireGL Support */
-#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 /* sizeof( ATOM_FIREGL_FLAG_STRING ) */
+#define ATOM_FIREGL_FLAG_STRING "FGL" //Flag used to enable FireGL Support
+#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 //sizeof( ATOM_FIREGL_FLAG_STRING )
-#define ATOM_FAKE_DESKTOP_STRING "DSK" /* Flag used to enable mobile ASIC on Desktop */
-#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
+#define ATOM_FAKE_DESKTOP_STRING "DSK" //Flag used to enable mobile ASIC on Desktop
+#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
-#define ATOM_M54T_FLAG_STRING "M54T" /* Flag used to enable M54T Support */
-#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 /* sizeof( ATOM_M54T_FLAG_STRING ) */
+#define ATOM_M54T_FLAG_STRING "M54T" //Flag used to enable M54T Support
+#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 //sizeof( ATOM_M54T_FLAG_STRING )
#define HW_ASSISTED_I2C_STATUS_FAILURE 2
#define HW_ASSISTED_I2C_STATUS_SUCCESS 1
-#pragma pack(1) /* BIOS data must use byte aligment */
+#pragma pack(1) /* BIOS data must use byte alignment */
/* Define offset to location of ROM header. */
@@ -165,367 +176,410 @@ typedef unsigned short USHORT;
#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L
#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94
-#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */
+#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */
#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f
#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e
/* Common header for all ROM Data tables.
- Every table pointed _ATOM_MASTER_DATA_TABLE has this common header.
+ Every table pointed by _ATOM_MASTER_DATA_TABLE has this common header.
And the pointer actually points to this header. */
-typedef struct _ATOM_COMMON_TABLE_HEADER {
- USHORT usStructureSize;
- UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */
- UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */
- /*Image can't be updated, while Driver needs to carry the new table! */
-} ATOM_COMMON_TABLE_HEADER;
-
-typedef struct _ATOM_ROM_HEADER {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios,
- atombios should init it as "ATOM", don't change the position */
- USHORT usBiosRuntimeSegmentAddress;
- USHORT usProtectedModeInfoOffset;
- USHORT usConfigFilenameOffset;
- USHORT usCRC_BlockOffset;
- USHORT usBIOS_BootupMessageOffset;
- USHORT usInt10Offset;
- USHORT usPciBusDevInitCode;
- USHORT usIoBaseAddress;
- USHORT usSubsystemVendorID;
- USHORT usSubsystemID;
- USHORT usPCI_InfoOffset;
- USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */
- USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */
- UCHAR ucExtendedFunctionCode;
- UCHAR ucReserved;
-} ATOM_ROM_HEADER;
+typedef struct _ATOM_COMMON_TABLE_HEADER
+{
+ USHORT usStructureSize;
+ UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */
+ UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */
+ /*Image can't be updated, while Driver needs to carry the new table! */
+}ATOM_COMMON_TABLE_HEADER;
+
+typedef struct _ATOM_ROM_HEADER
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios,
+ atombios should init it as "ATOM", don't change the position */
+ USHORT usBiosRuntimeSegmentAddress;
+ USHORT usProtectedModeInfoOffset;
+ USHORT usConfigFilenameOffset;
+ USHORT usCRC_BlockOffset;
+ USHORT usBIOS_BootupMessageOffset;
+ USHORT usInt10Offset;
+ USHORT usPciBusDevInitCode;
+ USHORT usIoBaseAddress;
+ USHORT usSubsystemVendorID;
+ USHORT usSubsystemID;
+ USHORT usPCI_InfoOffset;
+ USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */
+ USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */
+ UCHAR ucExtendedFunctionCode;
+ UCHAR ucReserved;
+}ATOM_ROM_HEADER;
/*==============================Command Table Portion==================================== */
#ifdef UEFI_BUILD
-#define UTEMP USHORT
-#define USHORT void*
+ #define UTEMP USHORT
+ #define USHORT void*
#endif
-typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES {
- USHORT ASIC_Init; /* Function Table, used by various SW components,latest version 1.1 */
- USHORT GetDisplaySurfaceSize; /* Atomic Table, Used by Bios when enabling HW ICON */
- USHORT ASIC_RegistersInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
- USHORT VRAM_BlockVenderDetection; /* Atomic Table, used only by Bios */
- USHORT DIGxEncoderControl; /* Only used by Bios */
- USHORT MemoryControllerInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
- USHORT EnableCRTCMemReq; /* Function Table,directly used by various SW components,latest version 2.1 */
- USHORT MemoryParamAdjust; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed */
- USHORT DVOEncoderControl; /* Function Table,directly used by various SW components,latest version 1.2 */
- USHORT GPIOPinControl; /* Atomic Table, only used by Bios */
- USHORT SetEngineClock; /*Function Table,directly used by various SW components,latest version 1.1 */
- USHORT SetMemoryClock; /* Function Table,directly used by various SW components,latest version 1.1 */
- USHORT SetPixelClock; /*Function Table,directly used by various SW components,latest version 1.2 */
- USHORT DynamicClockGating; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
- USHORT ResetMemoryDLL; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT ResetMemoryDevice; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT MemoryPLLInit;
- USHORT AdjustDisplayPll; /* only used by Bios */
- USHORT AdjustMemoryController; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT EnableASIC_StaticPwrMgt; /* Atomic Table, only used by Bios */
- USHORT ASIC_StaticPwrMgtStatusChange; /* Obsolete, only used by Bios */
- USHORT DAC_LoadDetection; /* Atomic Table, directly used by various SW components,latest version 1.2 */
- USHORT LVTMAEncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.3 */
- USHORT LCD1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT DAC1EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT DAC2EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT DVOOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT CV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT GetConditionalGoldenSetting; /* only used by Bios */
- USHORT TVEncoderControl; /* Function Table,directly used by various SW components,latest version 1.1 */
- USHORT TMDSAEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */
- USHORT LVDSEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */
- USHORT TV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT EnableScaler; /* Atomic Table, used only by Bios */
- USHORT BlankCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT EnableCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT GetPixelClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT EnableVGA_Render; /* Function Table,directly used by various SW components,latest version 1.1 */
- USHORT EnableVGA_Access; /* Obsolete , only used by Bios */
- USHORT SetCRTC_Timing; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT SetCRTC_OverScan; /* Atomic Table, used by various SW components,latest version 1.1 */
- USHORT SetCRTC_Replication; /* Atomic Table, used only by Bios */
- USHORT SelectCRTC_Source; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT EnableGraphSurfaces; /* Atomic Table, used only by Bios */
- USHORT UpdateCRTC_DoubleBufferRegisters;
- USHORT LUT_AutoFill; /* Atomic Table, only used by Bios */
- USHORT EnableHW_IconCursor; /* Atomic Table, only used by Bios */
- USHORT GetMemoryClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT GetEngineClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT SetCRTC_UsingDTDTiming; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT ExternalEncoderControl; /* Atomic Table, directly used by various SW components,latest version 2.1 */
- USHORT LVTMAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT VRAM_BlockDetectionByStrap; /* Atomic Table, used only by Bios */
- USHORT MemoryCleanUp; /* Atomic Table, only used by Bios */
- USHORT ProcessI2cChannelTransaction; /* Function Table,only used by Bios */
- USHORT WriteOneByteToHWAssistedI2C; /* Function Table,indirectly used by various SW components */
- USHORT ReadHWAssistedI2CStatus; /* Atomic Table, indirectly used by various SW components */
- USHORT SpeedFanControl; /* Function Table,indirectly used by various SW components,called from ASIC_Init */
- USHORT PowerConnectorDetection; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT MC_Synchronization; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT ComputeMemoryEnginePLL; /* Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock */
- USHORT MemoryRefreshConversion; /* Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock */
- USHORT VRAM_GetCurrentInfoBlock; /* Atomic Table, used only by Bios */
- USHORT DynamicMemorySettings; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT MemoryTraining; /* Atomic Table, used only by Bios */
- USHORT EnableSpreadSpectrumOnPPLL; /* Atomic Table, directly used by various SW components,latest version 1.2 */
- USHORT TMDSAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT SetVoltage; /* Function Table,directly and/or indirectly used by various SW components,latest version 1.1 */
- USHORT DAC1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT DAC2OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT SetupHWAssistedI2CStatus; /* Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C" */
- USHORT ClockSource; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
- USHORT MemoryDeviceInit; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT EnableYUV; /* Atomic Table, indirectly used by various SW components,called from EnableVGARender */
- USHORT DIG1EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
- USHORT DIG2EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
- USHORT DIG1TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
- USHORT DIG2TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
- USHORT ProcessAuxChannelTransaction; /* Function Table,only used by Bios */
- USHORT DPEncoderService; /* Function Table,only used by Bios */
-} ATOM_MASTER_LIST_OF_COMMAND_TABLES;
-
-/* For backward compatible */
+typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+ USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1
+ USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON
+ USHORT ASIC_RegistersInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT VRAM_BlockVenderDetection; //Atomic Table, used only by Bios
+ USHORT DIGxEncoderControl; //Only used by Bios
+ USHORT MemoryControllerInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT EnableCRTCMemReq; //Function Table,directly used by various SW components,latest version 2.1
+ USHORT MemoryParamAdjust; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed
+ USHORT DVOEncoderControl; //Function Table,directly used by various SW components,latest version 1.2
+ USHORT GPIOPinControl; //Atomic Table, only used by Bios
+ USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2
+ USHORT DynamicClockGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT MemoryPLLInit;
+ USHORT AdjustDisplayPll; //only used by Bios
+ USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
+ USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete, only used by Bios
+ USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
+ USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
+ USHORT LCD1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT CV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
+ USHORT GetConditionalGoldenSetting; //only used by Bios
+ USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT TMDSAEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
+ USHORT LVDSEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
+ USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
+ USHORT EnableScaler; //Atomic Table, used only by Bios
+ USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT GetPixelClock; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT EnableVGA_Render; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT GetSCLKOverMCLKRatio; //Atomic Table, only used by Bios
+ USHORT SetCRTC_Timing; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT SetCRTC_OverScan; //Atomic Table, used by various SW components,latest version 1.1
+ USHORT SetCRTC_Replication; //Atomic Table, used only by Bios
+ USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios
+ USHORT UpdateCRTC_DoubleBufferRegisters;
+ USHORT LUT_AutoFill; //Atomic Table, only used by Bios
+ USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios
+ USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT GetEngineClock; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT SetCRTC_UsingDTDTiming; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT ExternalEncoderControl; //Atomic Table, directly used by various SW components,latest version 2.1
+ USHORT LVTMAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT VRAM_BlockDetectionByStrap; //Atomic Table, used only by Bios
+ USHORT MemoryCleanUp; //Atomic Table, only used by Bios
+ USHORT ProcessI2cChannelTransaction; //Function Table,only used by Bios
+ USHORT WriteOneByteToHWAssistedI2C; //Function Table,indirectly used by various SW components
+ USHORT ReadHWAssistedI2CStatus; //Atomic Table, indirectly used by various SW components
+ USHORT SpeedFanControl; //Function Table,indirectly used by various SW components,called from ASIC_Init
+ USHORT PowerConnectorDetection; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT MC_Synchronization; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT ComputeMemoryEnginePLL; //Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock
+ USHORT MemoryRefreshConversion; //Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock
+ USHORT VRAM_GetCurrentInfoBlock; //Atomic Table, used only by Bios
+ USHORT DynamicMemorySettings; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT MemoryTraining; //Atomic Table, used only by Bios
+ USHORT EnableSpreadSpectrumOnPPLL; //Atomic Table, directly used by various SW components,latest version 1.2
+ USHORT TMDSAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
+ USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC2OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT SetupHWAssistedI2CStatus; //Function Table,only used by Bios, obsolete soon. Switch to use "ReadEDIDFromHWAssistedI2C"
+ USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT EnableYUV; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
+ USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios
+ USHORT DPEncoderService; //Function Table,only used by Bios
+}ATOM_MASTER_LIST_OF_COMMAND_TABLES;
+
+// For backward compatibility
#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction
#define UNIPHYTransmitterControl DIG1TransmitterControl
#define LVTMATransmitterControl DIG2TransmitterControl
#define SetCRTC_DPM_State GetConditionalGoldenSetting
#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
+#define HPDInterruptService ReadHWAssistedI2CStatus
+#define EnableVGA_Access GetSCLKOverMCLKRatio
-typedef struct _ATOM_MASTER_COMMAND_TABLE {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
-} ATOM_MASTER_COMMAND_TABLE;
-
-/****************************************************************************/
-/* Structures used in every command table */
-/****************************************************************************/
-typedef struct _ATOM_TABLE_ATTRIBUTE {
+typedef struct _ATOM_MASTER_COMMAND_TABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
+}ATOM_MASTER_COMMAND_TABLE;
+
+/****************************************************************************/
+// Structures used in every command table
+/****************************************************************************/
+typedef struct _ATOM_TABLE_ATTRIBUTE
+{
#if ATOM_BIG_ENDIAN
- USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */
- USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */
- USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */
+ USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag
+ USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword),
+ USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword),
#else
- USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */
- USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */
- USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */
+ USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword),
+ USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword),
+ USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag
#endif
-} ATOM_TABLE_ATTRIBUTE;
-
-typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS {
- ATOM_TABLE_ATTRIBUTE sbfAccess;
- USHORT susAccess;
-} ATOM_TABLE_ATTRIBUTE_ACCESS;
+}ATOM_TABLE_ATTRIBUTE;
-/****************************************************************************/
-/* Common header for all command tables. */
-/* Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. */
-/* And the pointer actually points to this header. */
-/****************************************************************************/
-typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER {
- ATOM_COMMON_TABLE_HEADER CommonHeader;
- ATOM_TABLE_ATTRIBUTE TableAttribute;
-} ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
+typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS
+{
+ ATOM_TABLE_ATTRIBUTE sbfAccess;
+ USHORT susAccess;
+}ATOM_TABLE_ATTRIBUTE_ACCESS;
+
+/****************************************************************************/
+// Common header for all command tables.
+// Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header.
+// And the pointer actually points to this header.
+/****************************************************************************/
+typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
+{
+ ATOM_COMMON_TABLE_HEADER CommonHeader;
+ ATOM_TABLE_ATTRIBUTE TableAttribute;
+}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
-/****************************************************************************/
-/* Structures used by ComputeMemoryEnginePLLTable */
-/****************************************************************************/
+/****************************************************************************/
+// Structures used by ComputeMemoryEnginePLLTable
+/****************************************************************************/
#define COMPUTE_MEMORY_PLL_PARAM 1
#define COMPUTE_ENGINE_PLL_PARAM 2
-typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS {
- ULONG ulClock; /* When returen, it's the re-calculated clock based on given Fb_div Post_Div and ref_div */
- UCHAR ucAction; /* 0:reserved //1:Memory //2:Engine */
- UCHAR ucReserved; /* may expand to return larger Fbdiv later */
- UCHAR ucFbDiv; /* return value */
- UCHAR ucPostDiv; /* return value */
-} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
-
-typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 {
- ULONG ulClock; /* When return, [23:0] return real clock */
- UCHAR ucAction; /* 0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. it return ref_div to be written to register */
- USHORT usFbDiv; /* return Feedback value to be written to register */
- UCHAR ucPostDiv; /* return post div to be written to register */
-} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+{
+ ULONG ulClock; //When returned, it's the re-calculated clock based on given Fb_div Post_Div and ref_div
+ UCHAR ucAction; //0:reserved //1:Memory //2:Engine
+ UCHAR ucReserved; //may expand to return larger Fbdiv later
+ UCHAR ucFbDiv; //return value
+ UCHAR ucPostDiv; //return value
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2
+{
+ ULONG ulClock; //On return, [23:0] holds the real clock
+ UCHAR ucAction; //0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. It returns ref_div to be written to register
+ USHORT usFbDiv; //return Feedback value to be written to register
+ UCHAR ucPostDiv; //return post div to be written to register
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
-#define SET_CLOCK_FREQ_MASK 0x00FFFFFF /* Clock change tables only take bit [23:0] as the requested clock value */
-#define USE_NON_BUS_CLOCK_MASK 0x01000000 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */
-#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */
-#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */
-#define FIRST_TIME_CHANGE_CLOCK 0x08000000 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */
-#define SKIP_SW_PROGRAM_PLL 0x10000000 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */
+
+#define SET_CLOCK_FREQ_MASK 0x00FFFFFF //Clock change tables only take bit [23:0] as the requested clock value
+#define USE_NON_BUS_CLOCK_MASK 0x01000000 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 //Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define FIRST_TIME_CHANGE_CLOCK 0x08000000 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define SKIP_SW_PROGRAM_PLL 0x10000000 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
#define USE_SS_ENABLED_PIXEL_CLOCK USE_NON_BUS_CLOCK_MASK
-#define b3USE_NON_BUS_CLOCK_MASK 0x01 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */
-#define b3USE_MEMORY_SELF_REFRESH 0x02 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */
-#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */
-#define b3FIRST_TIME_CHANGE_CLOCK 0x08 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */
-#define b3SKIP_SW_PROGRAM_PLL 0x10 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */
+#define b3USE_NON_BUS_CLOCK_MASK 0x01 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define b3USE_MEMORY_SELF_REFRESH 0x02 //Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define b3FIRST_TIME_CHANGE_CLOCK 0x08 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define b3SKIP_SW_PROGRAM_PLL 0x10 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
-typedef struct _ATOM_COMPUTE_CLOCK_FREQ {
+typedef struct _ATOM_COMPUTE_CLOCK_FREQ
+{
#if ATOM_BIG_ENDIAN
- ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */
- ULONG ulClockFreq:24; /* in unit of 10kHz */
+ ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
+ ULONG ulClockFreq:24; // in unit of 10kHz
#else
- ULONG ulClockFreq:24; /* in unit of 10kHz */
- ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */
+ ULONG ulClockFreq:24; // in unit of 10kHz
+ ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
#endif
-} ATOM_COMPUTE_CLOCK_FREQ;
-
-typedef struct _ATOM_S_MPLL_FB_DIVIDER {
- USHORT usFbDivFrac;
- USHORT usFbDiv;
-} ATOM_S_MPLL_FB_DIVIDER;
+}ATOM_COMPUTE_CLOCK_FREQ;
-typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 {
- union {
- ATOM_COMPUTE_CLOCK_FREQ ulClock; /* Input Parameter */
- ATOM_S_MPLL_FB_DIVIDER ulFbDiv; /* Output Parameter */
- };
- UCHAR ucRefDiv; /* Output Parameter */
- UCHAR ucPostDiv; /* Output Parameter */
- UCHAR ucCntlFlag; /* Output Parameter */
- UCHAR ucReserved;
-} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
+typedef struct _ATOM_S_MPLL_FB_DIVIDER
+{
+ USHORT usFbDivFrac;
+ USHORT usFbDiv;
+}ATOM_S_MPLL_FB_DIVIDER;
-/* ucCntlFlag */
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
+{
+ union
+ {
+ ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
+ };
+ UCHAR ucRefDiv; //Output Parameter
+ UCHAR ucPostDiv; //Output Parameter
+ UCHAR ucCntlFlag; //Output Parameter
+ UCHAR ucReserved;
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
+
+// ucCntlFlag
#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN 1
#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE 2
#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE 4
+#define ATOM_PLL_CNTL_FLAG_SPLL_ISPARE_9 8
-typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER {
- ATOM_COMPUTE_CLOCK_FREQ ulClock;
- ULONG ulReserved[2];
-} DYNAMICE_MEMORY_SETTINGS_PARAMETER;
-
-typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER {
- ATOM_COMPUTE_CLOCK_FREQ ulClock;
- ULONG ulMemoryClock;
- ULONG ulReserved;
-} DYNAMICE_ENGINE_SETTINGS_PARAMETER;
-
-/****************************************************************************/
-/* Structures used by SetEngineClockTable */
-/****************************************************************************/
-typedef struct _SET_ENGINE_CLOCK_PARAMETERS {
- ULONG ulTargetEngineClock; /* In 10Khz unit */
-} SET_ENGINE_CLOCK_PARAMETERS;
-typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION {
- ULONG ulTargetEngineClock; /* In 10Khz unit */
- COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
-} SET_ENGINE_CLOCK_PS_ALLOCATION;
+// V4 is only used for APUs whose PLL is outside the GPU
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
+{
+#if ATOM_BIG_ENDIAN
+ ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly
+ ULONG ulClock:24; //Input= target clock, output = actual clock
+#else
+ ULONG ulClock:24; //Input= target clock, output = actual clock
+ ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly
+#endif
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
-/****************************************************************************/
-/* Structures used by SetMemoryClockTable */
-/****************************************************************************/
-typedef struct _SET_MEMORY_CLOCK_PARAMETERS {
- ULONG ulTargetMemoryClock; /* In 10Khz unit */
-} SET_MEMORY_CLOCK_PARAMETERS;
+typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
+{
+ ATOM_COMPUTE_CLOCK_FREQ ulClock;
+ ULONG ulReserved[2];
+}DYNAMICE_MEMORY_SETTINGS_PARAMETER;
-typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION {
- ULONG ulTargetMemoryClock; /* In 10Khz unit */
- COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
-} SET_MEMORY_CLOCK_PS_ALLOCATION;
+typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
+{
+ ATOM_COMPUTE_CLOCK_FREQ ulClock;
+ ULONG ulMemoryClock;
+ ULONG ulReserved;
+}DYNAMICE_ENGINE_SETTINGS_PARAMETER;
+
+/****************************************************************************/
+// Structures used by SetEngineClockTable
+/****************************************************************************/
+typedef struct _SET_ENGINE_CLOCK_PARAMETERS
+{
+ ULONG ulTargetEngineClock; //In 10Khz unit
+}SET_ENGINE_CLOCK_PARAMETERS;
-/****************************************************************************/
-/* Structures used by ASIC_Init.ctb */
-/****************************************************************************/
-typedef struct _ASIC_INIT_PARAMETERS {
- ULONG ulDefaultEngineClock; /* In 10Khz unit */
- ULONG ulDefaultMemoryClock; /* In 10Khz unit */
-} ASIC_INIT_PARAMETERS;
+typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION
+{
+ ULONG ulTargetEngineClock; //In 10Khz unit
+ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_ENGINE_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by SetMemoryClockTable
+/****************************************************************************/
+typedef struct _SET_MEMORY_CLOCK_PARAMETERS
+{
+ ULONG ulTargetMemoryClock; //In 10Khz unit
+}SET_MEMORY_CLOCK_PARAMETERS;
-typedef struct _ASIC_INIT_PS_ALLOCATION {
- ASIC_INIT_PARAMETERS sASICInitClocks;
- SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; /* Caller doesn't need to init this structure */
-} ASIC_INIT_PS_ALLOCATION;
+typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION
+{
+ ULONG ulTargetMemoryClock; //In 10Khz unit
+ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_MEMORY_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by ASIC_Init.ctb
+/****************************************************************************/
+typedef struct _ASIC_INIT_PARAMETERS
+{
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+}ASIC_INIT_PARAMETERS;
-/****************************************************************************/
-/* Structure used by DynamicClockGatingTable.ctb */
-/****************************************************************************/
-typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS {
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucPadding[3];
-} DYNAMIC_CLOCK_GATING_PARAMETERS;
+typedef struct _ASIC_INIT_PS_ALLOCATION
+{
+ ASIC_INIT_PARAMETERS sASICInitClocks;
+ SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; //Caller doesn't need to init this structure
+}ASIC_INIT_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structure used by DynamicClockGatingTable.ctb
+/****************************************************************************/
+typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[3];
+}DYNAMIC_CLOCK_GATING_PARAMETERS;
#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS
-/****************************************************************************/
-/* Structure used by EnableASIC_StaticPwrMgtTable.ctb */
-/****************************************************************************/
-typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS {
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucPadding[3];
-} ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
+/****************************************************************************/
+// Structure used by EnableASIC_StaticPwrMgtTable.ctb
+/****************************************************************************/
+typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[3];
+}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
-/****************************************************************************/
-/* Structures used by DAC_LoadDetectionTable.ctb */
-/****************************************************************************/
-typedef struct _DAC_LOAD_DETECTION_PARAMETERS {
- USHORT usDeviceID; /* {ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT} */
- UCHAR ucDacType; /* {ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC} */
- UCHAR ucMisc; /* Valid only when table revision =1.3 and above */
-} DAC_LOAD_DETECTION_PARAMETERS;
+/****************************************************************************/
+// Structures used by DAC_LoadDetectionTable.ctb
+/****************************************************************************/
+typedef struct _DAC_LOAD_DETECTION_PARAMETERS
+{
+ USHORT usDeviceID; //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT}
+ UCHAR ucDacType; //{ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC}
+ UCHAR ucMisc; //Valid only when table revision =1.3 and above
+}DAC_LOAD_DETECTION_PARAMETERS;
-/* DAC_LOAD_DETECTION_PARAMETERS.ucMisc */
+// DAC_LOAD_DETECTION_PARAMETERS.ucMisc
#define DAC_LOAD_MISC_YPrPb 0x01
-typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION {
- DAC_LOAD_DETECTION_PARAMETERS sDacload;
- ULONG Reserved[2]; /* Don't set this one, allocation for EXT DAC */
-} DAC_LOAD_DETECTION_PS_ALLOCATION;
-
-/****************************************************************************/
-/* Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb */
-/****************************************************************************/
-typedef struct _DAC_ENCODER_CONTROL_PARAMETERS {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucDacStandard; /* See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0) */
- UCHAR ucAction; /* 0: turn off encoder */
- /* 1: setup and turn on encoder */
- /* 7: ATOM_ENCODER_INIT Initialize DAC */
-} DAC_ENCODER_CONTROL_PARAMETERS;
+typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION
+{
+ DAC_LOAD_DETECTION_PARAMETERS sDacload;
+ ULONG Reserved[2];// Don't set this one, allocation for EXT DAC
+}DAC_LOAD_DETECTION_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb
+/****************************************************************************/
+typedef struct _DAC_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for bios convenience
+ UCHAR ucDacStandard; // See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0)
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+ // 7: ATOM_ENCODER_INIT Initialize DAC
+}DAC_ENCODER_CONTROL_PARAMETERS;
#define DAC_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PARAMETERS
-/****************************************************************************/
-/* Structures used by DIG1EncoderControlTable */
-/* DIG2EncoderControlTable */
-/* ExternalEncoderControlTable */
-/****************************************************************************/
-typedef struct _DIG_ENCODER_CONTROL_PARAMETERS {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucConfig;
- /* [2] Link Select: */
- /* =0: PHY linkA if bfLane<3 */
- /* =1: PHY linkB if bfLanes<3 */
- /* =0: PHY linkA+B if bfLanes=3 */
- /* [3] Transmitter Sel */
- /* =0: UNIPHY or PCIEPHY */
- /* =1: LVTMA */
- UCHAR ucAction; /* =0: turn off encoder */
- /* =1: turn on encoder */
- UCHAR ucEncoderMode;
- /* =0: DP encoder */
- /* =1: LVDS encoder */
- /* =2: DVI encoder */
- /* =3: HDMI encoder */
- /* =4: SDVO encoder */
- UCHAR ucLaneNum; /* how many lanes to enable */
- UCHAR ucReserved[2];
-} DIG_ENCODER_CONTROL_PARAMETERS;
+/****************************************************************************/
+// Structures used by DIG1EncoderControlTable
+// DIG2EncoderControlTable
+// ExternalEncoderControlTable
+/****************************************************************************/
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for bios convenience
+ UCHAR ucConfig;
+ // [2] Link Select:
+ // =0: PHY linkA if bfLane<3
+ // =1: PHY linkB if bfLanes<3
+ // =0: PHY linkA+B if bfLanes=3
+ // [3] Transmitter Sel
+ // =0: UNIPHY or PCIEPHY
+ // =1: LVTMA
+ UCHAR ucAction; // =0: turn off encoder
+ // =1: turn on encoder
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucReserved[2];
+}DIG_ENCODER_CONTROL_PARAMETERS;
#define DIG_ENCODER_CONTROL_PS_ALLOCATION DIG_ENCODER_CONTROL_PARAMETERS
#define EXTERNAL_ENCODER_CONTROL_PARAMETER DIG_ENCODER_CONTROL_PARAMETERS
-/* ucConfig */
+//ucConfig
#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01
#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00
#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01
@@ -539,52 +593,57 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS {
#define ATOM_ENCODER_CONFIG_LVTMA 0x08
#define ATOM_ENCODER_CONFIG_TRANSMITTER1 0x00
#define ATOM_ENCODER_CONFIG_TRANSMITTER2 0x08
-#define ATOM_ENCODER_CONFIG_DIGB 0x80 /* VBIOS Internal use, outside SW should set this bit=0 */
-/* ucAction */
-/* ATOM_ENABLE: Enable Encoder */
-/* ATOM_DISABLE: Disable Encoder */
+#define ATOM_ENCODER_CONFIG_DIGB 0x80 // VBIOS Internal use, outside SW should set this bit=0
+// ucAction
+// ATOM_ENABLE: Enable Encoder
+// ATOM_DISABLE: Disable Encoder
-/* ucEncoderMode */
+//ucEncoderMode
#define ATOM_ENCODER_MODE_DP 0
#define ATOM_ENCODER_MODE_LVDS 1
#define ATOM_ENCODER_MODE_DVI 2
#define ATOM_ENCODER_MODE_HDMI 3
#define ATOM_ENCODER_MODE_SDVO 4
+#define ATOM_ENCODER_MODE_DP_AUDIO 5
#define ATOM_ENCODER_MODE_TV 13
#define ATOM_ENCODER_MODE_CV 14
#define ATOM_ENCODER_MODE_CRT 15
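The config and mode values above are consumed through DIG_ENCODER_CONTROL_PARAMETERS. Below is a minimal sketch of how a caller might fill it to light up a single-link DVI encoder; it assumes this header is included, and atom_execute_table() stands in for the driver-side table dispatcher, which lives in the driver rather than in this header.

#include <string.h>  /* memset; struct and defines come from this header */

static void example_enable_dig1_dvi(void)
{
        DIG_ENCODER_CONTROL_PARAMETERS args;

        memset(&args, 0, sizeof(args));
        args.usPixelClock = 16200;                        /* 162 MHz in 10 kHz units */
        args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1; /* transmitter 1, link A (bit 2 = 0) */
        args.ucAction = 1;                                /* 1: setup and turn on encoder */
        args.ucEncoderMode = ATOM_ENCODER_MODE_DVI;
        args.ucLaneNum = 4;                               /* single link: 4 lanes */
        /* atom_execute_table(ctx, DIG1EncoderControl, (ULONG *)&args); */
}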
-typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 {
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
+{
#if ATOM_BIG_ENDIAN
- UCHAR ucReserved1:2;
- UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */
- UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */
- UCHAR ucReserved:1;
- UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */
+ UCHAR ucReserved1:2;
+ UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF
+ UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F
+ UCHAR ucReserved:1;
+ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
#else
- UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */
- UCHAR ucReserved:1;
- UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */
- UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */
- UCHAR ucReserved1:2;
+ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
+ UCHAR ucReserved:1;
+ UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F
+ UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF
+ UCHAR ucReserved1:2;
#endif
-} ATOM_DIG_ENCODER_CONFIG_V2;
+}ATOM_DIG_ENCODER_CONFIG_V2;
-typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
- UCHAR ucAction;
- UCHAR ucEncoderMode;
- /* =0: DP encoder */
- /* =1: LVDS encoder */
- /* =2: DVI encoder */
- /* =3: HDMI encoder */
- /* =4: SDVO encoder */
- UCHAR ucLaneNum; /* how many lanes to enable */
- UCHAR ucReserved[2];
-} DIG_ENCODER_CONTROL_PARAMETERS_V2;
-/* ucConfig */
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
+{
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
+ UCHAR ucAction;
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucStatus; // = DP_LINK_TRAINING_COMPLETE or DP_LINK_TRAINING_INCOMPLETE, only used by VBIOS with command ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS
+ UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V2;
+
+//ucConfig
#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK 0x01
#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ 0x00
#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ 0x01
@@ -596,58 +655,122 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 {
#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2 0x08
#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3 0x10
-/****************************************************************************/
-/* Structures used by UNIPHYTransmitterControlTable */
-/* LVTMATransmitterControlTable */
-/* DVOOutputControlTable */
-/****************************************************************************/
-typedef struct _ATOM_DP_VS_MODE {
- UCHAR ucLaneSel;
- UCHAR ucLaneSet;
-} ATOM_DP_VS_MODE;
-
-typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
- union {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- USHORT usInitInfo; /* when init uniphy,lower 8bit is used for connector type defined in objectid.h */
- ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */
+// ucAction:
+// ATOM_DISABLE
+// ATOM_ENABLE
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b
+#define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c
+#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d
+#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e
+#define ATOM_ENCODER_CMD_SETUP 0x0f
+
+// ucStatus
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00
+
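Taken together, the command and status codes above encode the DP link-training handshake: the driver walks the encoder through START, PATTERN1, PATTERN2 and COMPLETE, then asks the VBIOS for the result. A sketch under the same assumptions (header included, atom_execute_table() as the driver dispatcher):

#include <string.h>

static int example_dp_link_training(void)
{
        DIG_ENCODER_CONTROL_PARAMETERS_V2 args;
        UCHAR sequence[] = {
                ATOM_ENCODER_CMD_DP_LINK_TRAINING_START,
                ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1,
                ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2,
                ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE,
        };
        unsigned int i;

        memset(&args, 0, sizeof(args));
        args.usPixelClock = 27000;                /* 270 MHz in 10 kHz units */
        args.ucEncoderMode = ATOM_ENCODER_MODE_DP;
        args.ucLaneNum = 4;

        for (i = 0; i < sizeof(sequence); i++) {
                args.ucAction = sequence[i];
                /* atom_execute_table(...); */
        }

        /* the VBIOS answers this command in ucStatus */
        args.ucAction = ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS;
        /* atom_execute_table(...); */
        return args.ucStatus == ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE;
}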
+// For the following function, the ENABLE sub-function is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is likewise used by the driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucReserved1:1;
+ UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
+ UCHAR ucReserved:3;
+ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
+#else
+ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
+ UCHAR ucReserved:3;
+ UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
+ UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V3;
+
+#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70
+
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
+{
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ ATOM_DIG_ENCODER_CONFIG_V3 acConfig;
+ UCHAR ucAction;
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ // =5: DP audio
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+ UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V3;
+
+
+// define ucBitPerColor:
+#define PANEL_BPC_UNDEFINE 0x00
+#define PANEL_6BIT_PER_COLOR 0x01
+#define PANEL_8BIT_PER_COLOR 0x02
+#define PANEL_10BIT_PER_COLOR 0x03
+#define PANEL_12BIT_PER_COLOR 0x04
+#define PANEL_16BIT_PER_COLOR 0x05
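A sketch of a V3 setup call tying the pieces above together: ATOM_ENCODER_CMD_SETUP with a panel bit depth, which per the ucBitPerColor comment is only honoured in DP mode. Header assumed included; the dispatcher call is illustrative.

#include <string.h>

static void example_setup_dig_v3(void)
{
        DIG_ENCODER_CONTROL_PARAMETERS_V3 args;

        memset(&args, 0, sizeof(args));
        args.usPixelClock = 16200;                 /* 162 MHz in 10 kHz units */
        args.acConfig.ucDigSel = 0;                /* DIGA */
        args.acConfig.ucDPLinkRate = 0;            /* 1.62 GHz */
        args.ucAction = ATOM_ENCODER_CMD_SETUP;
        args.ucEncoderMode = ATOM_ENCODER_MODE_DP;
        args.ucLaneNum = 4;
        args.ucBitPerColor = PANEL_8BIT_PER_COLOR; /* only valid for DP with CMD_SETUP */
        /* atom_execute_table(...); */
}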
+
+/****************************************************************************/
+// Structures used by UNIPHYTransmitterControlTable
+// LVTMATransmitterControlTable
+// DVOOutputControlTable
+/****************************************************************************/
+typedef struct _ATOM_DP_VS_MODE
+{
+ UCHAR ucLaneSel;
+ UCHAR ucLaneSet;
+}ATOM_DP_VS_MODE;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS
+{
+ union
+ {
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ USHORT usInitInfo; // when initializing uniphy, the lower 8 bits carry the connector type defined in objectid.h
+ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
};
- UCHAR ucConfig;
- /* [0]=0: 4 lane Link, */
- /* =1: 8 lane Link ( Dual Links TMDS ) */
- /* [1]=0: InCoherent mode */
- /* =1: Coherent Mode */
- /* [2] Link Select: */
- /* =0: PHY linkA if bfLane<3 */
- /* =1: PHY linkB if bfLanes<3 */
- /* =0: PHY linkA+B if bfLanes=3 */
- /* [5:4]PCIE lane Sel */
- /* =0: lane 0~3 or 0~7 */
- /* =1: lane 4~7 */
- /* =2: lane 8~11 or 8~15 */
- /* =3: lane 12~15 */
- UCHAR ucAction; /* =0: turn off encoder */
- /* =1: turn on encoder */
- UCHAR ucReserved[4];
-} DIG_TRANSMITTER_CONTROL_PARAMETERS;
-
-#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS
-
-/* ucInitInfo */
-#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff
-
-/* ucConfig */
+ UCHAR ucConfig;
+ // [0]=0: 4 lane Link,
+ // =1: 8 lane Link ( Dual Links TMDS )
+ // [1]=0: InCoherent mode
+ // =1: Coherent Mode
+ // [2] Link Select:
+ // =0: PHY linkA if bfLane<3
+ // =1: PHY linkB if bfLanes<3
+ // =0: PHY linkA+B if bfLanes=3
+ // [5:4]PCIE lane Sel
+ // =0: lane 0~3 or 0~7
+ // =1: lane 4~7
+ // =2: lane 8~11 or 8~15
+ // =3: lane 12~15
+ UCHAR ucAction; // =0: turn off encoder
+ // =1: turn on encoder
+ UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS;
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+//ucInitInfo
+#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff
+
+//ucConfig
#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK 0x01
#define ATOM_TRANSMITTER_CONFIG_COHERENT 0x02
#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK 0x04
#define ATOM_TRANSMITTER_CONFIG_LINKA 0x00
#define ATOM_TRANSMITTER_CONFIG_LINKB 0x04
-#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00
#define ATOM_TRANSMITTER_CONFIG_LINKB_A 0x04
-#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
-#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
-#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
+#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK 0x30
#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL 0x00
@@ -661,7 +784,7 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
#define ATOM_TRANSMITTER_CONFIG_LANE_8_15 0x80
#define ATOM_TRANSMITTER_CONFIG_LANE_12_15 0xc0
-/* ucAction */
+//ucAction
#define ATOM_TRANSMITTER_ACTION_DISABLE 0
#define ATOM_TRANSMITTER_ACTION_ENABLE 1
#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF 2
@@ -674,93 +797,168 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT 9
#define ATOM_TRANSMITTER_ACTION_SETUP 10
#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH 11
+#define ATOM_TRANSMITTER_ACTION_POWER_ON 12
+#define ATOM_TRANSMITTER_ACTION_POWER_OFF 13
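A sketch of driving DIG_TRANSMITTER_CONTROL_PARAMETERS with the ucConfig and ucAction values above: a coherent single-link transmitter on PHY link A routed to DIG1 (per the defines, the DIG encoder select is only honoured with ACTION_ENABLE). The dispatcher call is assumed, not part of this header.

#include <string.h>

static void example_transmitter_enable(void)
{
        DIG_TRANSMITTER_CONTROL_PARAMETERS args;

        memset(&args, 0, sizeof(args));
        args.usPixelClock = 16200;                      /* union member: 162 MHz in 10 kHz units */
        args.ucConfig = ATOM_TRANSMITTER_CONFIG_COHERENT |
                        ATOM_TRANSMITTER_CONFIG_LINKA |
                        ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
        args.ucAction = ATOM_TRANSMITTER_ACTION_ENABLE;
        /* atom_execute_table(...); */
}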
-/* Following are used for DigTransmitterControlTable ver1.2 */
-typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2 {
+// Following are used for DigTransmitterControlTable ver1.2
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2
+{
#if ATOM_BIG_ENDIAN
- UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */
- /* =1 Dig Transmitter 2 ( Uniphy CD ) */
- /* =2 Dig Transmitter 3 ( Uniphy EF ) */
- UCHAR ucReserved:1;
- UCHAR fDPConnector:1; /* bit4=0: DP connector =1: None DP connector */
- UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */
- UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */
- /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */
-
- UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */
- UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+ UCHAR ucReserved:1;
+ UCHAR fDPConnector:1; //bit4=0: DP connector, =1: non-DP connector
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
#else
- UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */
- UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */
- UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */
- /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */
- UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */
- UCHAR fDPConnector:1; /* bit4=0: DP connector =1: None DP connector */
- UCHAR ucReserved:1;
- UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */
- /* =1 Dig Transmitter 2 ( Uniphy CD ) */
- /* =2 Dig Transmitter 3 ( Uniphy EF ) */
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+ UCHAR fDPConnector:1; //bit4=0: DP connector, =1: non-DP connector
+ UCHAR ucReserved:1;
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
#endif
-} ATOM_DIG_TRANSMITTER_CONFIG_V2;
+}ATOM_DIG_TRANSMITTER_CONFIG_V2;
-/* ucConfig */
-/* Bit0 */
+//ucConfig
+//Bit0
#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR 0x01
-/* Bit1 */
+//Bit1
#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT 0x02
-/* Bit2 */
+//Bit2
#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK 0x04
-#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00
#define ATOM_TRANSMITTER_CONFIG_V2_LINKB 0x04
-/* Bit3 */
+// Bit3
#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK 0x08
-#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */
-#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
-/* Bit4 */
+// Bit4
#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR 0x10
-/* Bit7:6 */
+// Bit7:6
#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK 0xC0
-#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 /* AB */
-#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 /* CD */
-#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 /* EF */
-
-typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 {
- union {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- USHORT usInitInfo; /* when init uniphy,lower 8bit is used for connector type defined in objectid.h */
- ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 //AB
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 //CD
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 //EF
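The V2 config byte can be written two equivalent ways: through the endian-safe bitfields of ATOM_DIG_TRANSMITTER_CONFIG_V2, or as a raw byte built from the mask defines above. A sketch showing both views, assuming this header is included:

static void example_transmitter_v2_config(DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 *args)
{
        /* bitfield view */
        args->acConfig.fCoherentMode = 1;    /* coherent DVI/HDMI */
        args->acConfig.ucLinkSel = 0;        /* link A/C/E */
        args->acConfig.ucTransmitterSel = 1; /* Uniphy CD */

        /* equivalent raw-byte view:
         * *(UCHAR *)&args->acConfig = ATOM_TRANSMITTER_CONFIG_V2_COHERENT |
         *                             ATOM_TRANSMITTER_CONFIG_V2_LINKA |
         *                             ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2;
         */
}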
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2
+{
+ union
+ {
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ USHORT usInitInfo; // when initializing uniphy, the lower 8 bits carry the connector type defined in objectid.h
+ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
};
- ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
- UCHAR ucAction; /* define as ATOM_TRANSMITER_ACTION_XXX */
- UCHAR ucReserved[4];
-} DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
+ ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
+ UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX
+ UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
-/****************************************************************************/
-/* Structures used by DAC1OuputControlTable */
-/* DAC2OuputControlTable */
-/* LVTMAOutputControlTable (Before DEC30) */
-/* TMDSAOutputControlTable (Before DEC30) */
-/****************************************************************************/
-typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS {
- UCHAR ucAction; /* Possible input:ATOM_ENABLE||ATOMDISABLE */
- /* When the display is LCD, in addition to above: */
- /* ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START|| */
- /* ATOM_LCD_SELFTEST_STOP */
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+#else
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V3;
- UCHAR aucPadding[3]; /* padding to DWORD aligned */
-} DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
+{
+ union
+ {
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ USHORT usInitInfo; // when initializing uniphy, the lower 8 bits carry the connector type defined in objectid.h
+ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
+ };
+ ATOM_DIG_TRANSMITTER_CONFIG_V3 acConfig;
+ UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX
+ UCHAR ucLaneNum;
+ UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V3;
+
+//ucConfig
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V3_DUAL_LINK_CONNECTOR 0x01
+
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V3_COHERENT 0x02
+
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V3_LINK_SEL_MASK 0x04
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKA 0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKB 0x04
+
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V3_ENCODER_SEL_MASK 0x08
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG1_ENCODER 0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG2_ENCODER 0x08
+
+// Bit5:4
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SEL_MASK 0x30
+#define ATOM_TRASMITTER_CONFIG_V3_P1PLL 0x00
+#define ATOM_TRASMITTER_CONFIG_V3_P2PLL 0x10
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SRC_EXT 0x20
+
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER_SEL_MASK 0xC0
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER1 0x00 //AB
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF
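What V3 adds over V2 is the ucRefClkSource field (PPLL1/PPLL2/external, matching the REFCLK_SEL defines above) and an explicit ucLaneNum. A minimal sketch under the same assumptions:

#include <string.h>

static void example_transmitter_v3(void)
{
        DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 args;

        memset(&args, 0, sizeof(args));
        args.usPixelClock = 16200;           /* 162 MHz in 10 kHz units */
        args.acConfig.ucRefClkSource = 0;    /* PPLL1, cf. ATOM_TRASMITTER_CONFIG_V3_P1PLL */
        args.acConfig.ucTransmitterSel = 0;  /* Uniphy AB */
        args.ucAction = ATOM_TRANSMITTER_ACTION_ENABLE;
        args.ucLaneNum = 4;
        /* atom_execute_table(...); */
}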
+
+/****************************************************************************/
+// Structures used by DAC1OutputControlTable
+// DAC2OutputControlTable
+// LVTMAOutputControlTable (Before DCE30)
+// TMDSAOutputControlTable (Before DCE30)
+/****************************************************************************/
+typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+{
+ UCHAR ucAction; // Possible input: ATOM_ENABLE || ATOM_DISABLE
+ // When the display is LCD, in addition to above:
+ // ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START||
+ // ATOM_LCD_SELFTEST_STOP
+
+ UCHAR aucPadding[3]; // padding to DWORD aligned
+}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
-#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+
+#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
-#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
#define CV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
@@ -782,397 +980,550 @@ typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS {
#define DVO_OUTPUT_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
#define DVO_OUTPUT_CONTROL_PARAMETERS_V3 DIG_TRANSMITTER_CONTROL_PARAMETERS
-/****************************************************************************/
-/* Structures used by BlankCRTCTable */
-/****************************************************************************/
-typedef struct _BLANK_CRTC_PARAMETERS {
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucBlanking; /* ATOM_BLANKING or ATOM_BLANKINGOFF */
- USHORT usBlackColorRCr;
- USHORT usBlackColorGY;
- USHORT usBlackColorBCb;
-} BLANK_CRTC_PARAMETERS;
+/****************************************************************************/
+// Structures used by BlankCRTCTable
+/****************************************************************************/
+typedef struct _BLANK_CRTC_PARAMETERS
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucBlanking; // ATOM_BLANKING or ATOM_BLANKINGOFF
+ USHORT usBlackColorRCr;
+ USHORT usBlackColorGY;
+ USHORT usBlackColorBCb;
+}BLANK_CRTC_PARAMETERS;
#define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS
-/****************************************************************************/
-/* Structures used by EnableCRTCTable */
-/* EnableCRTCMemReqTable */
-/* UpdateCRTC_DoubleBufferRegistersTable */
-/****************************************************************************/
-typedef struct _ENABLE_CRTC_PARAMETERS {
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucPadding[2];
-} ENABLE_CRTC_PARAMETERS;
+/****************************************************************************/
+// Structures used by EnableCRTCTable
+// EnableCRTCMemReqTable
+// UpdateCRTC_DoubleBufferRegistersTable
+/****************************************************************************/
+typedef struct _ENABLE_CRTC_PARAMETERS
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[2];
+}ENABLE_CRTC_PARAMETERS;
#define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS
-/****************************************************************************/
-/* Structures used by SetCRTC_OverScanTable */
-/****************************************************************************/
-typedef struct _SET_CRTC_OVERSCAN_PARAMETERS {
- USHORT usOverscanRight; /* right */
- USHORT usOverscanLeft; /* left */
- USHORT usOverscanBottom; /* bottom */
- USHORT usOverscanTop; /* top */
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucPadding[3];
-} SET_CRTC_OVERSCAN_PARAMETERS;
+/****************************************************************************/
+// Structures used by SetCRTC_OverScanTable
+/****************************************************************************/
+typedef struct _SET_CRTC_OVERSCAN_PARAMETERS
+{
+ USHORT usOverscanRight; // right
+ USHORT usOverscanLeft; // left
+ USHORT usOverscanBottom; // bottom
+ USHORT usOverscanTop; // top
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucPadding[3];
+}SET_CRTC_OVERSCAN_PARAMETERS;
#define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS
-/****************************************************************************/
-/* Structures used by SetCRTC_ReplicationTable */
-/****************************************************************************/
-typedef struct _SET_CRTC_REPLICATION_PARAMETERS {
- UCHAR ucH_Replication; /* horizontal replication */
- UCHAR ucV_Replication; /* vertical replication */
- UCHAR usCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucPadding;
-} SET_CRTC_REPLICATION_PARAMETERS;
+/****************************************************************************/
+// Structures used by SetCRTC_ReplicationTable
+/****************************************************************************/
+typedef struct _SET_CRTC_REPLICATION_PARAMETERS
+{
+ UCHAR ucH_Replication; // horizontal replication
+ UCHAR ucV_Replication; // vertical replication
+ UCHAR usCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucPadding;
+}SET_CRTC_REPLICATION_PARAMETERS;
#define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS
-/****************************************************************************/
-/* Structures used by SelectCRTC_SourceTable */
-/****************************************************************************/
-typedef struct _SELECT_CRTC_SOURCE_PARAMETERS {
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucDevice; /* ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|.... */
- UCHAR ucPadding[2];
-} SELECT_CRTC_SOURCE_PARAMETERS;
+/****************************************************************************/
+// Structures used by SelectCRTC_SourceTable
+/****************************************************************************/
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucDevice; // ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|....
+ UCHAR ucPadding[2];
+}SELECT_CRTC_SOURCE_PARAMETERS;
#define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS
-typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2 {
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucEncoderID; /* DAC1/DAC2/TVOUT/DIG1/DIG2/DVO */
- UCHAR ucEncodeMode; /* Encoding mode, only valid when using DIG1/DIG2/DVO */
- UCHAR ucPadding;
-} SELECT_CRTC_SOURCE_PARAMETERS_V2;
-
-/* ucEncoderID */
-/* #define ASIC_INT_DAC1_ENCODER_ID 0x00 */
-/* #define ASIC_INT_TV_ENCODER_ID 0x02 */
-/* #define ASIC_INT_DIG1_ENCODER_ID 0x03 */
-/* #define ASIC_INT_DAC2_ENCODER_ID 0x04 */
-/* #define ASIC_EXT_TV_ENCODER_ID 0x06 */
-/* #define ASIC_INT_DVO_ENCODER_ID 0x07 */
-/* #define ASIC_INT_DIG2_ENCODER_ID 0x09 */
-/* #define ASIC_EXT_DIG_ENCODER_ID 0x05 */
-
-/* ucEncodeMode */
-/* #define ATOM_ENCODER_MODE_DP 0 */
-/* #define ATOM_ENCODER_MODE_LVDS 1 */
-/* #define ATOM_ENCODER_MODE_DVI 2 */
-/* #define ATOM_ENCODER_MODE_HDMI 3 */
-/* #define ATOM_ENCODER_MODE_SDVO 4 */
-/* #define ATOM_ENCODER_MODE_TV 13 */
-/* #define ATOM_ENCODER_MODE_CV 14 */
-/* #define ATOM_ENCODER_MODE_CRT 15 */
-
-/****************************************************************************/
-/* Structures used by SetPixelClockTable */
-/* GetPixelClockTable */
-/****************************************************************************/
-/* Major revision=1., Minor revision=1 */
-typedef struct _PIXEL_CLOCK_PARAMETERS {
- USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
- /* 0 means disable PPLL */
- USHORT usRefDiv; /* Reference divider */
- USHORT usFbDiv; /* feedback divider */
- UCHAR ucPostDiv; /* post divider */
- UCHAR ucFracFbDiv; /* fractional feedback divider */
- UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */
- UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */
- UCHAR ucCRTC; /* Which CRTC uses this Ppll */
- UCHAR ucPadding;
-} PIXEL_CLOCK_PARAMETERS;
-
-/* Major revision=1., Minor revision=2, add ucMiscIfno */
-/* ucMiscInfo: */
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucEncoderID; // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO
+ UCHAR ucEncodeMode; // Encoding mode, only valid when using DIG1/DIG2/DVO
+ UCHAR ucPadding;
+}SELECT_CRTC_SOURCE_PARAMETERS_V2;
+
+//ucEncoderID
+//#define ASIC_INT_DAC1_ENCODER_ID 0x00
+//#define ASIC_INT_TV_ENCODER_ID 0x02
+//#define ASIC_INT_DIG1_ENCODER_ID 0x03
+//#define ASIC_INT_DAC2_ENCODER_ID 0x04
+//#define ASIC_EXT_TV_ENCODER_ID 0x06
+//#define ASIC_INT_DVO_ENCODER_ID 0x07
+//#define ASIC_INT_DIG2_ENCODER_ID 0x09
+//#define ASIC_EXT_DIG_ENCODER_ID 0x05
+
+//ucEncodeMode
+//#define ATOM_ENCODER_MODE_DP 0
+//#define ATOM_ENCODER_MODE_LVDS 1
+//#define ATOM_ENCODER_MODE_DVI 2
+//#define ATOM_ENCODER_MODE_HDMI 3
+//#define ATOM_ENCODER_MODE_SDVO 4
+//#define ATOM_ENCODER_MODE_TV 13
+//#define ATOM_ENCODER_MODE_CV 14
+//#define ATOM_ENCODER_MODE_CRT 15
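A sketch of routing CRTC1 to the first internal DIG encoder with SELECT_CRTC_SOURCE_PARAMETERS_V2, using the (commented-out) ID values listed above; ATOM_CRTC1 is defined elsewhere in this header and assumed to be 0 here.

#include <string.h>

static void example_select_crtc_source_v2(void)
{
        SELECT_CRTC_SOURCE_PARAMETERS_V2 args;

        memset(&args, 0, sizeof(args));
        args.ucCRTC = 0;                            /* ATOM_CRTC1 */
        args.ucEncoderID = 0x03;                    /* ASIC_INT_DIG1_ENCODER_ID, per the list above */
        args.ucEncodeMode = ATOM_ENCODER_MODE_DVI;  /* only valid for DIG1/DIG2/DVO */
        /* atom_execute_table(...); */
}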
+
+/****************************************************************************/
+// Structures used by SetPixelClockTable
+// GetPixelClockTable
+/****************************************************************************/
+//Major revision=1., Minor revision=1
+typedef struct _PIXEL_CLOCK_PARAMETERS
+{
+ USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+ // 0 means disable PPLL
+ USHORT usRefDiv; // Reference divider
+ USHORT usFbDiv; // feedback divider
+ UCHAR ucPostDiv; // post divider
+ UCHAR ucFracFbDiv; // fractional feedback divider
+ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPLL2
+ UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER
+ UCHAR ucCRTC; // Which CRTC uses this Ppll
+ UCHAR ucPadding;
+}PIXEL_CLOCK_PARAMETERS;
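To make the divider comment concrete: with a hypothetical 27 MHz reference clock (2700 in 10 kHz units), the programmed values below satisfy usPixelClock = (RefClk*FB_Div)/(Ref_Div*Post_Div). A sketch, assuming this header is included:

#include <string.h>

static void example_pixel_clock_v1(void)
{
        PIXEL_CLOCK_PARAMETERS args;

        memset(&args, 0, sizeof(args));
        args.usRefDiv = 2;
        args.usFbDiv = 60;
        args.ucPostDiv = 3;
        args.usPixelClock = 27000;  /* (2700 * 60) / (2 * 3) = 27000, i.e. 270 MHz */
        args.ucPpll = 0;            /* ATOM_PPLL1, defined elsewhere in this header */
        args.ucCRTC = 0;            /* ATOM_CRTC1 */
        /* atom_execute_table(...); */
}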
+
+//Major revision=1., Minor revision=2, add ucMiscInfo
+//ucMiscInfo:
#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
#define MISC_DEVICE_INDEX_MASK 0xF0
#define MISC_DEVICE_INDEX_SHIFT 4
-typedef struct _PIXEL_CLOCK_PARAMETERS_V2 {
- USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
- /* 0 means disable PPLL */
- USHORT usRefDiv; /* Reference divider */
- USHORT usFbDiv; /* feedback divider */
- UCHAR ucPostDiv; /* post divider */
- UCHAR ucFracFbDiv; /* fractional feedback divider */
- UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */
- UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */
- UCHAR ucCRTC; /* Which CRTC uses this Ppll */
- UCHAR ucMiscInfo; /* Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog */
-} PIXEL_CLOCK_PARAMETERS_V2;
-
-/* Major revision=1., Minor revision=3, structure/definition change */
-/* ucEncoderMode: */
-/* ATOM_ENCODER_MODE_DP */
-/* ATOM_ENOCDER_MODE_LVDS */
-/* ATOM_ENOCDER_MODE_DVI */
-/* ATOM_ENOCDER_MODE_HDMI */
-/* ATOM_ENOCDER_MODE_SDVO */
-/* ATOM_ENCODER_MODE_TV 13 */
-/* ATOM_ENCODER_MODE_CV 14 */
-/* ATOM_ENCODER_MODE_CRT 15 */
-
-/* ucDVOConfig */
-/* #define DVO_ENCODER_CONFIG_RATE_SEL 0x01 */
-/* #define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 */
-/* #define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 */
-/* #define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c */
-/* #define DVO_ENCODER_CONFIG_LOW12BIT 0x00 */
-/* #define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 */
-/* #define DVO_ENCODER_CONFIG_24BIT 0x08 */
-
-/* ucMiscInfo: also changed, see below */
+typedef struct _PIXEL_CLOCK_PARAMETERS_V2
+{
+ USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+ // 0 means disable PPLL
+ USHORT usRefDiv; // Reference divider
+ USHORT usFbDiv; // feedback divider
+ UCHAR ucPostDiv; // post divider
+ UCHAR ucFracFbDiv; // fractional feedback divider
+ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPLL2
+ UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER
+ UCHAR ucCRTC; // Which CRTC uses this Ppll
+ UCHAR ucMiscInfo; // Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog
+}PIXEL_CLOCK_PARAMETERS_V2;
+
+//Major revision=1., Minor revision=3, structure/definition change
+//ucEncoderMode:
+//ATOM_ENCODER_MODE_DP
+//ATOM_ENCODER_MODE_LVDS
+//ATOM_ENCODER_MODE_DVI
+//ATOM_ENCODER_MODE_HDMI
+//ATOM_ENCODER_MODE_SDVO
+//ATOM_ENCODER_MODE_TV 13
+//ATOM_ENCODER_MODE_CV 14
+//ATOM_ENCODER_MODE_CRT 15
+
+//ucDVOConfig
+//#define DVO_ENCODER_CONFIG_RATE_SEL 0x01
+//#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
+//#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
+//#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c
+//#define DVO_ENCODER_CONFIG_LOW12BIT 0x00
+//#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
+//#define DVO_ENCODER_CONFIG_24BIT 0x08
+
+//ucMiscInfo: also changed, see below
#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL 0x01
#define PIXEL_CLOCK_MISC_VGA_MODE 0x02
#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK 0x04
#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1 0x00
#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2 0x04
#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK 0x08
+#define PIXEL_CLOCK_MISC_REF_DIV_SRC 0x10
+// V1.4 for RoadRunner
+#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10
+#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20
-typedef struct _PIXEL_CLOCK_PARAMETERS_V3 {
- USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
- /* 0 means disable PPLL. For VGA PPLL,make sure this value is not 0. */
- USHORT usRefDiv; /* Reference divider */
- USHORT usFbDiv; /* feedback divider */
- UCHAR ucPostDiv; /* post divider */
- UCHAR ucFracFbDiv; /* fractional feedback divider */
- UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */
- UCHAR ucTransmitterId; /* graphic encoder id defined in objectId.h */
- union {
- UCHAR ucEncoderMode; /* encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/ */
- UCHAR ucDVOConfig; /* when use DVO, need to know SDR/DDR, 12bit or 24bit */
+typedef struct _PIXEL_CLOCK_PARAMETERS_V3
+{
+ USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+ // 0 means disable PPLL. For VGA PPLL,make sure this value is not 0.
+ USHORT usRefDiv; // Reference divider
+ USHORT usFbDiv; // feedback divider
+ UCHAR ucPostDiv; // post divider
+ UCHAR ucFracFbDiv; // fractional feedback divider
+ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPLL2
+ UCHAR ucTransmitterId; // graphic encoder id defined in objectId.h
+ union
+ {
+ UCHAR ucEncoderMode; // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/
+ UCHAR ucDVOConfig; // when use DVO, need to know SDR/DDR, 12bit or 24bit
};
- UCHAR ucMiscInfo; /* bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel */
- /* bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source */
-} PIXEL_CLOCK_PARAMETERS_V3;
+ UCHAR ucMiscInfo; // bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel
+ // bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source
+ // bit[4]=0:use XTALIN as the source of reference divider,=1 use the pre-defined clock as the source of reference divider
+}PIXEL_CLOCK_PARAMETERS_V3;
#define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2
#define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST
-/****************************************************************************/
-/* Structures used by AdjustDisplayPllTable */
-/****************************************************************************/
-typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS {
+typedef struct _PIXEL_CLOCK_PARAMETERS_V5
+{
+ UCHAR ucCRTC; // ATOM_CRTC1~6, indicates which CRTC controller
+ // drives the pixel clock; not used for the DCPLL case.
+ union{
+ UCHAR ucReserved;
+ UCHAR ucFracFbDiv; // [gphan] temporary, to prevent a build problem; remove it after the driver code is changed.
+ };
+ USHORT usPixelClock; // target pixel clock to drive the CRTC timing
+ // 0 means disable PPLL/DCPLL.
+ USHORT usFbDiv; // feedback divider integer part.
+ UCHAR ucPostDiv; // post divider.
+ UCHAR ucRefDiv; // Reference divider
+ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+ UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
+ // indicate which graphic encoder will be used.
+ UCHAR ucEncoderMode; // Encoder mode:
+ UCHAR ucMiscInfo; // bit[0]= Force program PPLL
+ // bit[1]= when VGA timing is used.
+ // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
+ // bit[4]= RefClock source for PPLL.
+ // =0: XTALIN (default mode)
+ // =1: other external clock source, which is pre-defined
+ // by VBIOS depending on the feature required.
+ // bit[7:5]: reserved.
+ ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V5;
+
+#define PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL 0x01
+#define PIXEL_CLOCK_V5_MISC_VGA_MODE 0x02
+#define PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK 0x0c
+#define PIXEL_CLOCK_V5_MISC_HDMI_24BPP 0x00
+#define PIXEL_CLOCK_V5_MISC_HDMI_30BPP 0x04
+#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08
+#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10
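A V5 sketch with a fractional feedback divider: ulFbDivDecFrac carries the decimal fraction in millionths, so usFbDiv = 55 plus 500000 gives an effective divider of 55.5. With a hypothetical 27 MHz reference and the dividers below, 2700 * 55.5 / (2 * 5) = 14985, i.e. 149.85 MHz. Header assumed included:

#include <string.h>

static void example_pixel_clock_v5(void)
{
        PIXEL_CLOCK_PARAMETERS_V5 args;

        memset(&args, 0, sizeof(args));
        args.ucCRTC = 0;                 /* ATOM_CRTC1 */
        args.usPixelClock = 14985;       /* 149.85 MHz in 10 kHz units */
        args.usFbDiv = 55;               /* integer part of the feedback divider */
        args.ulFbDivDecFrac = 500000;    /* fractional part: 0.500000 */
        args.ucRefDiv = 2;
        args.ucPostDiv = 5;
        args.ucPpll = 0;                 /* ATOM_PPLL1, defined elsewhere in this header */
        args.ucEncoderMode = ATOM_ENCODER_MODE_HDMI;
        args.ucMiscInfo = PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
        /* atom_execute_table(...); */
}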
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
+{
+ PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2
+{
+ UCHAR ucStatus;
+ UCHAR ucRefDivSrc; // =1: reference clock source from XTALIN, =0: source from PCIE ref clock
+ UCHAR ucReserved[2];
+}GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3
+{
+ PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3;
+
+/****************************************************************************/
+// Structures used by AdjustDisplayPllTable
+/****************************************************************************/
+typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
+{
USHORT usPixelClock;
UCHAR ucTransmitterID;
UCHAR ucEncodeMode;
- union {
- UCHAR ucDVOConfig; /* if DVO, need passing link rate and output 12bitlow or 24bit */
- UCHAR ucConfig; /* if none DVO, not defined yet */
+ union
+ {
+ UCHAR ucDVOConfig; //if DVO, the link rate and the 12-bit-low or 24-bit output selection must be passed in
+ UCHAR ucConfig; //if not DVO, not defined yet
};
UCHAR ucReserved[3];
-} ADJUST_DISPLAY_PLL_PARAMETERS;
+}ADJUST_DISPLAY_PLL_PARAMETERS;
#define ADJUST_DISPLAY_CONFIG_SS_ENABLE 0x10
-
#define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS
-/****************************************************************************/
-/* Structures used by EnableYUVTable */
-/****************************************************************************/
-typedef struct _ENABLE_YUV_PARAMETERS {
- UCHAR ucEnable; /* ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB) */
- UCHAR ucCRTC; /* Which CRTC needs this YUV or RGB format */
- UCHAR ucPadding[2];
-} ENABLE_YUV_PARAMETERS;
+typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
+{
+ USHORT usPixelClock; // target pixel clock
+ UCHAR ucTransmitterID; // transmitter id defined in objectid.h
+ UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI
+ UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX
+ UCHAR ucReserved[3];
+}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
+
+// usDispPllConfig v1.2 for RoadRunner
+#define DISPPLL_CONFIG_DVO_RATE_SEL 0x0001 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_DDR_SPEED 0x0000 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_SDR_SPEED 0x0001 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_OUTPUT_SEL 0x000c // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_LOW12BIT 0x0000 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_UPPER12BIT 0x0004 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_24BIT 0x0008 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_SS_ENABLE 0x0010 // Only used when ucEncoderMode = DP or LVDS
+#define DISPPLL_CONFIG_COHERENT_MODE 0x0020 // Only used when ucEncoderMode = TMDS or HDMI
+#define DISPPLL_CONFIG_DUAL_LINK 0x0040 // Only used when ucEncoderMode = TMDS or LVDS
+
+
+typedef struct _ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3
+{
+ ULONG ulDispPllFreq; // returns the display PPLL freq used to generate the pixel clock, and the related idclk, symclk, etc.
+ UCHAR ucRefDiv; // if non-zero, it is used to calculate the other PPLL parameters fb_divider and post_div ( if they are not given )
+ UCHAR ucPostDiv; // if non-zero, it is used to calculate the other PPLL parameter fb_divider
+ UCHAR ucReserved[2];
+}ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3;
+
+typedef struct _ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3
+{
+ union
+ {
+ ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 sInput;
+ ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 sOutput;
+ };
+} ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3;
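The PS allocation is an in-place input/output union: the caller fills sInput, runs the table, then reads sOutput back from the same storage. A sketch, with atom_execute_table() assumed as the driver dispatcher:

#include <string.h>

static ULONG example_adjust_display_pll_v3(void)
{
        ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 args;

        memset(&args, 0, sizeof(args));
        args.sInput.usPixelClock = 16200;                /* 162 MHz in 10 kHz units */
        args.sInput.ucTransmitterID = 0;                 /* encoder id from objectid.h */
        args.sInput.ucEncodeMode = ATOM_ENCODER_MODE_DP;
        args.sInput.ucDispPllConfig = DISPPLL_CONFIG_SS_ENABLE; /* DP/LVDS only */

        /* atom_execute_table(...); the VBIOS overwrites the union as sOutput */
        return args.sOutput.ulDispPllFreq;
}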
+
+/****************************************************************************/
+// Structures used by EnableYUVTable
+/****************************************************************************/
+typedef struct _ENABLE_YUV_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB)
+ UCHAR ucCRTC; // Which CRTC needs this YUV or RGB format
+ UCHAR ucPadding[2];
+}ENABLE_YUV_PARAMETERS;
#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS
-/****************************************************************************/
-/* Structures used by GetMemoryClockTable */
-/****************************************************************************/
-typedef struct _GET_MEMORY_CLOCK_PARAMETERS {
- ULONG ulReturnMemoryClock; /* current memory speed in 10KHz unit */
+/****************************************************************************/
+// Structures used by GetMemoryClockTable
+/****************************************************************************/
+typedef struct _GET_MEMORY_CLOCK_PARAMETERS
+{
+ ULONG ulReturnMemoryClock; // current memory speed in 10KHz unit
} GET_MEMORY_CLOCK_PARAMETERS;
#define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS
-/****************************************************************************/
-/* Structures used by GetEngineClockTable */
-/****************************************************************************/
-typedef struct _GET_ENGINE_CLOCK_PARAMETERS {
- ULONG ulReturnEngineClock; /* current engine speed in 10KHz unit */
+/****************************************************************************/
+// Structures used by GetEngineClockTable
+/****************************************************************************/
+typedef struct _GET_ENGINE_CLOCK_PARAMETERS
+{
+ ULONG ulReturnEngineClock; // current engine speed in 10KHz unit
} GET_ENGINE_CLOCK_PARAMETERS;
#define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS
-/****************************************************************************/
-/* Following Structures and constant may be obsolete */
-/****************************************************************************/
-/* Maxium 8 bytes,the data read in will be placed in the parameter space. */
-/* Read operaion successeful when the paramter space is non-zero, otherwise read operation failed */
-typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS {
- USHORT usPrescale; /* Ratio between Engine clock and I2C clock */
- USHORT usVRAMAddress; /* Adress in Frame Buffer where to pace raw EDID */
- USHORT usStatus; /* When use output: lower byte EDID checksum, high byte hardware status */
- /* WHen use input: lower byte as 'byte to read':currently limited to 128byte or 1byte */
- UCHAR ucSlaveAddr; /* Read from which slave */
- UCHAR ucLineNumber; /* Read from which HW assisted line */
-} READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
+/****************************************************************************/
+// The following structures and constants may be obsolete
+/****************************************************************************/
+//Maximum 8 bytes; the data read in will be placed in the parameter space.
+//The read operation is successful when the parameter space is non-zero; otherwise the read operation failed
+typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
+{
+ USHORT usPrescale; //Ratio between Engine clock and I2C clock
+ USHORT usVRAMAddress; //Address in Frame Buffer where to place the raw EDID
+ USHORT usStatus; //When used as output: lower byte EDID checksum, high byte hardware status
+ //When used as input: lower byte is 'bytes to read', currently limited to 128 bytes or 1 byte
+ UCHAR ucSlaveAddr; //Read from which slave
+ UCHAR ucLineNumber; //Read from which HW assisted line
+}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
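A sketch of an EDID read through the structure above: on input the low byte of usStatus carries the byte count, and 0xA0 is the standard DDC slave address (the prescale value here is purely illustrative).

#include <string.h>

static void example_read_edid(void)
{
        READ_EDID_FROM_HW_I2C_DATA_PARAMETERS args;

        memset(&args, 0, sizeof(args));
        args.usPrescale = 50;     /* hypothetical engine-clock/I2C-clock ratio */
        args.usVRAMAddress = 0;   /* frame-buffer offset receiving the raw EDID */
        args.usStatus = 128;      /* input: bytes to read (128 or 1) */
        args.ucSlaveAddr = 0xA0;  /* DDC EDID slave address */
        args.ucLineNumber = 0;    /* HW-assisted I2C line */
        /* atom_execute_table(...); afterwards the low byte of usStatus holds
         * the EDID checksum and the high byte the hardware status */
}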
+
#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE 0
#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES 1
#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK 2
#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK 3
#define ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK 4
-typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS {
- USHORT usPrescale; /* Ratio between Engine clock and I2C clock */
- USHORT usByteOffset; /* Write to which byte */
- /* Upper portion of usByteOffset is Format of data */
- /* 1bytePS+offsetPS */
- /* 2bytesPS+offsetPS */
- /* blockID+offsetPS */
- /* blockID+offsetID */
- /* blockID+counterID+offsetID */
- UCHAR ucData; /* PS data1 */
- UCHAR ucStatus; /* Status byte 1=success, 2=failure, Also is used as PS data2 */
- UCHAR ucSlaveAddr; /* Write to which slave */
- UCHAR ucLineNumber; /* Write from which HW assisted line */
-} WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
+typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+{
+ USHORT usPrescale; //Ratio between Engine clock and I2C clock
+ USHORT usByteOffset; //Write to which byte
+ //Upper portion of usByteOffset is Format of data
+ //1bytePS+offsetPS
+ //2bytesPS+offsetPS
+ //blockID+offsetPS
+ //blockID+offsetID
+ //blockID+counterID+offsetID
+ UCHAR ucData; //PS data1
+ UCHAR ucStatus; //Status byte: 1=success, 2=failure; also used as PS data2
+ UCHAR ucSlaveAddr; //Write to which slave
+ UCHAR ucLineNumber; //Write from which HW assisted line
+}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
-typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS {
- USHORT usPrescale; /* Ratio between Engine clock and I2C clock */
- UCHAR ucSlaveAddr; /* Write to which slave */
- UCHAR ucLineNumber; /* Write from which HW assisted line */
-} SET_UP_HW_I2C_DATA_PARAMETERS;
+typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
+{
+ USHORT usPrescale; //Ratio between Engine clock and I2C clock
+ UCHAR ucSlaveAddr; //Write to which slave
+ UCHAR ucLineNumber; //Write from which HW assisted line
+}SET_UP_HW_I2C_DATA_PARAMETERS;
+
/**************************************************************************/
#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
-/****************************************************************************/
-/* Structures used by PowerConnectorDetectionTable */
-/****************************************************************************/
-typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS {
- UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */
- UCHAR ucPwrBehaviorId;
- USHORT usPwrBudget; /* how much power currently boot to in unit of watt */
-} POWER_CONNECTOR_DETECTION_PARAMETERS;
-
-typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION {
- UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */
- UCHAR ucReserved;
- USHORT usPwrBudget; /* how much power currently boot to in unit of watt */
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
-} POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
+/****************************************************************************/
+// Structures used by PowerConnectorDetectionTable
+/****************************************************************************/
+typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS
+{
+ UCHAR ucPowerConnectorStatus; //Used for return value 0: detected, 1:not detected
+ UCHAR ucPwrBehaviorId;
+ USHORT usPwrBudget; //how much power the board currently boots to, in watts
+}POWER_CONNECTOR_DETECTION_PARAMETERS;
+
+typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION
+{
+ UCHAR ucPowerConnectorStatus; //Used for return value 0: detected, 1:not detected
+ UCHAR ucReserved;
+ USHORT usPwrBudget; //how much power the board currently boots to, in watts
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
/****************************LVDS SS Command Table Definitions**********************/
-/****************************************************************************/
-/* Structures used by EnableSpreadSpectrumOnPPLLTable */
-/****************************************************************************/
-typedef struct _ENABLE_LVDS_SS_PARAMETERS {
- USHORT usSpreadSpectrumPercentage;
- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
- UCHAR ucSpreadSpectrumStepSize_Delay; /* bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucPadding[3];
-} ENABLE_LVDS_SS_PARAMETERS;
-
-/* ucTableFormatRevision=1,ucTableContentRevision=2 */
-typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2 {
- USHORT usSpreadSpectrumPercentage;
- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
- UCHAR ucSpreadSpectrumStep; /* */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucSpreadSpectrumDelay;
- UCHAR ucSpreadSpectrumRange;
- UCHAR ucPadding;
-} ENABLE_LVDS_SS_PARAMETERS_V2;
-
-/* This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS. */
-typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL {
- USHORT usSpreadSpectrumPercentage;
- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
- UCHAR ucSpreadSpectrumStep; /* */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucSpreadSpectrumDelay;
- UCHAR ucSpreadSpectrumRange;
- UCHAR ucPpll; /* ATOM_PPLL1/ATOM_PPLL2 */
-} ENABLE_SPREAD_SPECTRUM_ON_PPLL;
+/****************************************************************************/
+// Structures used by EnableSpreadSpectrumOnPPLLTable
+/****************************************************************************/
+typedef struct _ENABLE_LVDS_SS_PARAMETERS
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; //Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
+ UCHAR ucSpreadSpectrumStepSize_Delay; //bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY
+ UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[3];
+}ENABLE_LVDS_SS_PARAMETERS;
+
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; //Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
+ UCHAR ucSpreadSpectrumStep; //
+ UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucSpreadSpectrumDelay;
+ UCHAR ucSpreadSpectrumRange;
+ UCHAR ucPadding;
+}ENABLE_LVDS_SS_PARAMETERS_V2;
+
+//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS.
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; // Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
+ UCHAR ucSpreadSpectrumStep; //
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucSpreadSpectrumDelay;
+ UCHAR ucSpreadSpectrumRange;
+ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL;
+
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread.
+ // Bit[1]: 1-Ext. 0-Int.
+ // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+ // Bits[7:4] reserved
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
+ USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2;
+
+#define ATOM_PPLL_SS_TYPE_V2_DOWN_SPREAD 0x00
+#define ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD 0x01
+#define ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD 0x02
+#define ATOM_PPLL_SS_TYPE_V2_PPLL_SEL_MASK 0x0c
+#define ATOM_PPLL_SS_TYPE_V2_P1PLL 0x00
+#define ATOM_PPLL_SS_TYPE_V2_P2PLL 0x04
+#define ATOM_PPLL_SS_TYPE_V2_DCPLL 0x08
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK 0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT 0
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8
#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL
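A sketch of packing usSpreadSpectrumAmount for the V2 structure from its two sub-fields with the masks above, then requesting center spread on P1PLL; the amount and step values are illustrative, and the header is assumed included.

#include <string.h>

static USHORT pack_ss_amount(USHORT fbdiv, USHORT nfrac)
{
        return ((fbdiv << ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) &
                ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) |
               ((nfrac << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
                ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK);
}

static void example_enable_ss_v2(void)
{
        ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 args;

        memset(&args, 0, sizeof(args));
        args.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD |
                                    ATOM_PPLL_SS_TYPE_V2_P1PLL;
        args.usSpreadSpectrumAmount = pack_ss_amount(0x24, 0x5);
        args.usSpreadSpectrumStep = 0x10;  /* SS_STEP_SIZE_DSFRAC, illustrative */
        args.ucEnable = 1;                 /* ATOM_ENABLE, defined elsewhere in this header */
        /* atom_execute_table(...); */
}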
/**************************************************************************/
-typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION {
- PIXEL_CLOCK_PARAMETERS sPCLKInput;
- ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved; /* Caller doesn't need to init this portion */
-} SET_PIXEL_CLOCK_PS_ALLOCATION;
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION
+{
+ PIXEL_CLOCK_PARAMETERS sPCLKInput;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;//Caller doesn't need to init this portion
+}SET_PIXEL_CLOCK_PS_ALLOCATION;
#define ENABLE_VGA_RENDER_PS_ALLOCATION SET_PIXEL_CLOCK_PS_ALLOCATION
-/****************************************************************************/
-/* Structures used by ### */
-/****************************************************************************/
-typedef struct _MEMORY_TRAINING_PARAMETERS {
- ULONG ulTargetMemoryClock; /* In 10Khz unit */
-} MEMORY_TRAINING_PARAMETERS;
+/****************************************************************************/
+// Structures used by ###
+/****************************************************************************/
+typedef struct _MEMORY_TRAINING_PARAMETERS
+{
+ ULONG ulTargetMemoryClock; //In 10Khz unit
+}MEMORY_TRAINING_PARAMETERS;
#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS
+
/****************************LVDS and other encoder command table definitions **********************/
-/****************************************************************************/
-/* Structures used by LVDSEncoderControlTable (Before DCE30) */
-/* LVTMAEncoderControlTable (Before DCE30) */
-/* TMDSAEncoderControlTable (Before DCE30) */
-/****************************************************************************/
-typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucMisc; /* bit0=0: Enable single link */
- /* =1: Enable dual link */
- /* Bit1=0: 666RGB */
- /* =1: 888RGB */
- UCHAR ucAction; /* 0: turn off encoder */
- /* 1: setup and turn on encoder */
-} LVDS_ENCODER_CONTROL_PARAMETERS;
-#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS
+/****************************************************************************/
+// Structures used by LVDSEncoderControlTable (Before DCE30)
+// LVTMAEncoderControlTable (Before DCE30)
+// TMDSAEncoderControlTable (Before DCE30)
+/****************************************************************************/
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ UCHAR ucMisc; // bit0=0: Enable single link
+ // =1: Enable dual link
+ // Bit1=0: 666RGB
+ // =1: 888RGB
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+}LVDS_ENCODER_CONTROL_PARAMETERS;
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS
+
#define TMDS1_ENCODER_CONTROL_PARAMETERS LVDS_ENCODER_CONTROL_PARAMETERS
#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS
#define TMDS2_ENCODER_CONTROL_PARAMETERS TMDS1_ENCODER_CONTROL_PARAMETERS
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
-/* ucTableFormatRevision=1,ucTableContentRevision=2 */
-typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucMisc; /* see PANEL_ENCODER_MISC_xx definitions below */
- UCHAR ucAction; /* 0: turn off encoder */
- /* 1: setup and turn on encoder */
- UCHAR ucTruncate; /* bit0=0: Disable truncate */
- /* =1: Enable truncate */
- /* bit4=0: 666RGB */
- /* =1: 888RGB */
- UCHAR ucSpatial; /* bit0=0: Disable spatial dithering */
- /* =1: Enable spatial dithering */
- /* bit4=0: 666RGB */
- /* =1: 888RGB */
- UCHAR ucTemporal; /* bit0=0: Disable temporal dithering */
- /* =1: Enable temporal dithering */
- /* bit4=0: 666RGB */
- /* =1: 888RGB */
- /* bit5=0: Gray level 2 */
- /* =1: Gray level 4 */
- UCHAR ucFRC; /* bit4=0: 25FRC_SEL pattern E */
- /* =1: 25FRC_SEL pattern F */
- /* bit6:5=0: 50FRC_SEL pattern A */
- /* =1: 50FRC_SEL pattern B */
- /* =2: 50FRC_SEL pattern C */
- /* =3: 50FRC_SEL pattern D */
- /* bit7=0: 75FRC_SEL pattern E */
- /* =1: 75FRC_SEL pattern F */
-} LVDS_ENCODER_CONTROL_PARAMETERS_V2;
-#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2
+{
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ UCHAR ucMisc; // see PANEL_ENCODER_MISC_xx definitions below
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+ UCHAR ucTruncate; // bit0=0: Disable truncate
+ // =1: Enable truncate
+ // bit4=0: 666RGB
+ // =1: 888RGB
+ UCHAR ucSpatial; // bit0=0: Disable spatial dithering
+ // =1: Enable spatial dithering
+ // bit4=0: 666RGB
+ // =1: 888RGB
+ UCHAR ucTemporal; // bit0=0: Disable temporal dithering
+ // =1: Enable temporal dithering
+ // bit4=0: 666RGB
+ // =1: 888RGB
+ // bit5=0: Gray level 2
+ // =1: Gray level 4
+ UCHAR ucFRC; // bit4=0: 25FRC_SEL pattern E
+ // =1: 25FRC_SEL pattern F
+ // bit6:5=0: 50FRC_SEL pattern A
+ // =1: 50FRC_SEL pattern B
+ // =2: 50FRC_SEL pattern C
+ // =3: 50FRC_SEL pattern D
+ // bit7=0: 75FRC_SEL pattern E
+ // =1: 75FRC_SEL pattern F
+}LVDS_ENCODER_CONTROL_PARAMETERS_V2;
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+
#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
-
+
#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2
@@ -1185,38 +1536,42 @@ typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3
-/****************************************************************************/
-/* Structures used by ### */
-/****************************************************************************/
-typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS {
- UCHAR ucEnable; /* Enable or Disable External TMDS encoder */
- UCHAR ucMisc; /* Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB} */
- UCHAR ucPadding[2];
-} ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
-
-typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION {
- ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
-} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
+/****************************************************************************/
+// Structures used by ###
+/****************************************************************************/
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS
+{
+ UCHAR ucEnable; // Enable or Disable External TMDS encoder
+ UCHAR ucMisc; // Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB}
+ UCHAR ucPadding[2];
+}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION
+{
+ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
-typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2 {
- ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
-} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2
+{
+ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
-typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION {
- DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
-} EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION
+{
+ DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
-/****************************************************************************/
-/* Structures used by DVOEncoderControlTable */
-/****************************************************************************/
-/* ucTableFormatRevision=1,ucTableContentRevision=3 */
+/****************************************************************************/
+// Structures used by DVOEncoderControlTable
+/****************************************************************************/
+//ucTableFormatRevision=1,ucTableContentRevision=3
-/* ucDVOConfig: */
+//ucDVOConfig:
#define DVO_ENCODER_CONFIG_RATE_SEL 0x01
#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
@@ -1225,21 +1580,22 @@ typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION {
#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
#define DVO_ENCODER_CONFIG_24BIT 0x08
-typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
- USHORT usPixelClock;
- UCHAR ucDVOConfig;
- UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */
- UCHAR ucReseved[4];
-} DVO_ENCODER_CONTROL_PARAMETERS_V3;
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
+{
+ USHORT usPixelClock;
+ UCHAR ucDVOConfig;
+ UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+ UCHAR ucReseved[4];
+}DVO_ENCODER_CONTROL_PARAMETERS_V3;
#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for */
-/* bit1=0: non-coherent mode */
-/* =1: coherent mode */
+//ucTableFormatRevision=1
+//ucTableContentRevision=3: structure is not changed, but usMisc adds bit 1 as another input:
+// bit1=0: non-coherent mode
+// =1: coherent mode
-/* ========================================================================================== */
-/* Only change is here next time when changing encoder parameter definitions again! */
+//==========================================================================================
+//Next time the encoder parameter definitions change, this is the only place to change them!
#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST LVDS_ENCODER_CONTROL_PARAMETERS_LAST
@@ -1252,7 +1608,7 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
#define DVO_ENCODER_CONTROL_PARAMETERS_LAST DVO_ENCODER_CONTROL_PARAMETERS
#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST DVO_ENCODER_CONTROL_PS_ALLOCATION
-/* ========================================================================================== */
+//==========================================================================================
#define PANEL_ENCODER_MISC_DUAL 0x01
#define PANEL_ENCODER_MISC_COHERENT 0x02
#define PANEL_ENCODER_MISC_TMDS_LINKB 0x04
@@ -1281,159 +1637,159 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
#define PANEL_ENCODER_75FRC_E 0x00
#define PANEL_ENCODER_75FRC_F 0x80
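// Illustrative sketch only: the PANEL_ENCODER_MISC_xx and FRC selects above
// map onto the v2 parameter block like this. The values are hypothetical; the
// bit meanings are those documented in LVDS_ENCODER_CONTROL_PARAMETERS_V2.
static void example_fill_lvds_v2(LVDS_ENCODER_CONTROL_PARAMETERS_V2 *args)
{
  args->usPixelClock = 10800;                   // 108.00MHz, in 10KHz units
  args->ucMisc       = PANEL_ENCODER_MISC_DUAL; // dual link, non-coherent
  args->ucAction     = 1;                       // setup and turn on encoder
  args->ucTruncate   = 0x00;                    // bit0=0: truncate disabled
  args->ucSpatial    = 0x11;                    // bit0=1: spatial dithering on, bit4=1: 888RGB
  args->ucTemporal   = 0x00;                    // bit0=0: temporal dithering off
  args->ucFRC        = PANEL_ENCODER_75FRC_E;   // 75FRC pattern E (value 0)
}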
-/****************************************************************************/
-/* Structures used by SetVoltageTable */
-/****************************************************************************/
+/****************************************************************************/
+// Structures used by SetVoltageTable
+/****************************************************************************/
#define SET_VOLTAGE_TYPE_ASIC_VDDC 1
#define SET_VOLTAGE_TYPE_ASIC_MVDDC 2
#define SET_VOLTAGE_TYPE_ASIC_MVDDQ 3
#define SET_VOLTAGE_TYPE_ASIC_VDDCI 4
#define SET_VOLTAGE_INIT_MODE 5
-#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 /* Gets the Max. voltage for the soldered Asic */
+#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 //Gets the max. voltage for the soldered ASIC
#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE 0x1
#define SET_ASIC_VOLTAGE_MODE_SOURCE_A 0x2
#define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4
#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0
-#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2
-typedef struct _SET_VOLTAGE_PARAMETERS {
- UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */
- UCHAR ucVoltageMode; /* To set all, to set source A or source B or ... */
- UCHAR ucVoltageIndex; /* An index to tell which voltage level */
- UCHAR ucReserved;
-} SET_VOLTAGE_PARAMETERS;
-
-typedef struct _SET_VOLTAGE_PARAMETERS_V2 {
- UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */
- UCHAR ucVoltageMode; /* Not used, maybe use for state machine for differen power mode */
- USHORT usVoltageLevel; /* real voltage level */
-} SET_VOLTAGE_PARAMETERS_V2;
-
-typedef struct _SET_VOLTAGE_PS_ALLOCATION {
- SET_VOLTAGE_PARAMETERS sASICSetVoltage;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
-} SET_VOLTAGE_PS_ALLOCATION;
-
-/****************************************************************************/
-/* Structures used by TVEncoderControlTable */
-/****************************************************************************/
-typedef struct _TV_ENCODER_CONTROL_PARAMETERS {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucTvStandard; /* See definition "ATOM_TV_NTSC ..." */
- UCHAR ucAction; /* 0: turn off encoder */
- /* 1: setup and turn on encoder */
-} TV_ENCODER_CONTROL_PARAMETERS;
-
-typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION {
- TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Don't set this one */
-} TV_ENCODER_CONTROL_PS_ALLOCATION;
-
-/* ==============================Data Table Portion==================================== */
-
-#ifdef UEFI_BUILD
-#define UTEMP USHORT
-#define USHORT void*
-#endif
-
-/****************************************************************************/
-/* Structure used in Data.mtb */
-/****************************************************************************/
-typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES {
- USHORT UtilityPipeLine; /* Offest for the utility to get parser info,Don't change this position! */
- USHORT MultimediaCapabilityInfo; /* Only used by MM Lib,latest version 1.1, not configuable from Bios, need to include the table to build Bios */
- USHORT MultimediaConfigInfo; /* Only used by MM Lib,latest version 2.1, not configuable from Bios, need to include the table to build Bios */
- USHORT StandardVESA_Timing; /* Only used by Bios */
- USHORT FirmwareInfo; /* Shared by various SW components,latest version 1.4 */
- USHORT DAC_Info; /* Will be obsolete from R600 */
- USHORT LVDS_Info; /* Shared by various SW components,latest version 1.1 */
- USHORT TMDS_Info; /* Will be obsolete from R600 */
- USHORT AnalogTV_Info; /* Shared by various SW components,latest version 1.1 */
- USHORT SupportedDevicesInfo; /* Will be obsolete from R600 */
- USHORT GPIO_I2C_Info; /* Shared by various SW components,latest version 1.2 will be used from R600 */
- USHORT VRAM_UsageByFirmware; /* Shared by various SW components,latest version 1.3 will be used from R600 */
- USHORT GPIO_Pin_LUT; /* Shared by various SW components,latest version 1.1 */
- USHORT VESA_ToInternalModeLUT; /* Only used by Bios */
- USHORT ComponentVideoInfo; /* Shared by various SW components,latest version 2.1 will be used from R600 */
- USHORT PowerPlayInfo; /* Shared by various SW components,latest version 2.1,new design from R600 */
- USHORT CompassionateData; /* Will be obsolete from R600 */
- USHORT SaveRestoreInfo; /* Only used by Bios */
- USHORT PPLL_SS_Info; /* Shared by various SW components,latest version 1.2, used to call SS_Info, change to new name because of int ASIC SS info */
- USHORT OemInfo; /* Defined and used by external SW, should be obsolete soon */
- USHORT XTMDS_Info; /* Will be obsolete from R600 */
- USHORT MclkSS_Info; /* Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used */
- USHORT Object_Header; /* Shared by various SW components,latest version 1.1 */
- USHORT IndirectIOAccess; /* Only used by Bios,this table position can't change at all!! */
- USHORT MC_InitParameter; /* Only used by command table */
- USHORT ASIC_VDDC_Info; /* Will be obsolete from R600 */
- USHORT ASIC_InternalSS_Info; /* New tabel name from R600, used to be called "ASIC_MVDDC_Info" */
- USHORT TV_VideoMode; /* Only used by command table */
- USHORT VRAM_Info; /* Only used by command table, latest version 1.3 */
- USHORT MemoryTrainingInfo; /* Used for VBIOS and Diag utility for memory training purpose since R600. the new table rev start from 2.1 */
- USHORT IntegratedSystemInfo; /* Shared by various SW components */
- USHORT ASIC_ProfilingInfo; /* New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600 */
- USHORT VoltageObjectInfo; /* Shared by various SW components, latest version 1.1 */
- USHORT PowerSourceInfo; /* Shared by various SW components, latest versoin 1.1 */
-} ATOM_MASTER_LIST_OF_DATA_TABLES;
-
-#ifdef UEFI_BUILD
-#define USHORT UTEMP
-#endif
+typedef struct _SET_VOLTAGE_PARAMETERS
+{
+ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+ UCHAR ucVoltageMode; // To set all, to set source A or source B or ...
+ UCHAR ucVoltageIndex; // An index to tell which voltage level
+ UCHAR ucReserved;
+}SET_VOLTAGE_PARAMETERS;
-typedef struct _ATOM_MASTER_DATA_TABLE {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
-} ATOM_MASTER_DATA_TABLE;
+typedef struct _SET_VOLTAGE_PARAMETERS_V2
+{
+ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+ UCHAR ucVoltageMode; // Not used; may be used for a state machine for different power modes
+ USHORT usVoltageLevel; // real voltage level
+}SET_VOLTAGE_PARAMETERS_V2;
-/****************************************************************************/
-/* Structure used in MultimediaCapabilityInfoTable */
-/****************************************************************************/
-typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulSignature; /* HW info table signature string "$ATI" */
- UCHAR ucI2C_Type; /* I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc) */
- UCHAR ucTV_OutInfo; /* Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7) */
- UCHAR ucVideoPortInfo; /* Provides the video port capabilities */
- UCHAR ucHostPortInfo; /* Provides host port configuration information */
-} ATOM_MULTIMEDIA_CAPABILITY_INFO;
+typedef struct _SET_VOLTAGE_PS_ALLOCATION
+{
+ SET_VOLTAGE_PARAMETERS sASICSetVoltage;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}SET_VOLTAGE_PS_ALLOCATION;
+
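// Illustrative sketch only: unlike v1's index-based interface, the v2
// parameters take a real voltage level directly. The mV interpretation of
// usVoltageLevel is an assumption here (by analogy with usBootUpVDDCVoltage's
// "In MV unit" comment further down); this excerpt does not state the unit.
static void example_set_vddc_v2(SET_VOLTAGE_PARAMETERS_V2 *args)
{
  args->ucVoltageType  = SET_VOLTAGE_TYPE_ASIC_VDDC; // program core (VDDC) voltage
  args->ucVoltageMode  = 0;                          // not used, per the comment above
  args->usVoltageLevel = 1100;                       // target level, assumed mV
}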
+/****************************************************************************/
+// Structures used by TVEncoderControlTable
+/****************************************************************************/
+typedef struct _TV_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ UCHAR ucTvStandard; // See definition "ATOM_TV_NTSC ..."
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+}TV_ENCODER_CONTROL_PARAMETERS;
-/****************************************************************************/
-/* Structure used in MultimediaConfigInfoTable */
-/****************************************************************************/
-typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulSignature; /* MM info table signature sting "$MMT" */
- UCHAR ucTunerInfo; /* Type of tuner installed on the adapter (4:0) and video input for tuner (7:5) */
- UCHAR ucAudioChipInfo; /* List the audio chip type (3:0) product type (4) and OEM revision (7:5) */
- UCHAR ucProductID; /* Defines as OEM ID or ATI board ID dependent on product type setting */
- UCHAR ucMiscInfo1; /* Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7) */
- UCHAR ucMiscInfo2; /* I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6) */
- UCHAR ucMiscInfo3; /* Video Decoder Type (3:0) Video In Standard/Crystal (7:4) */
- UCHAR ucMiscInfo4; /* Video Decoder Host Config (2:0) reserved (7:3) */
- UCHAR ucVideoInput0Info; /* Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
- UCHAR ucVideoInput1Info; /* Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
- UCHAR ucVideoInput2Info; /* Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
- UCHAR ucVideoInput3Info; /* Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
- UCHAR ucVideoInput4Info; /* Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
-} ATOM_MULTIMEDIA_CONFIG_INFO;
+typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION
+{
+ TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; // Don't set this one
+}TV_ENCODER_CONTROL_PS_ALLOCATION;
-/****************************************************************************/
-/* Structures used in FirmwareInfoTable */
-/****************************************************************************/
+//==============================Data Table Portion====================================
-/* usBIOSCapability Definition: */
-/* Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; */
-/* Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; */
-/* Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; */
-/* Others: Reserved */
+/****************************************************************************/
+// Structure used in Data.mtb
+/****************************************************************************/
+typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
+{
+ USHORT UtilityPipeLine; // Offset for the utility to get parser info, don't change this position!
+ USHORT MultimediaCapabilityInfo; // Only used by MM Lib,latest version 1.1, not configurable from Bios, need to include the table to build Bios
+ USHORT MultimediaConfigInfo; // Only used by MM Lib,latest version 2.1, not configurable from Bios, need to include the table to build Bios
+ USHORT StandardVESA_Timing; // Only used by Bios
+ USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4
+ USHORT DAC_Info; // Will be obsolete from R600
+ USHORT LVDS_Info; // Shared by various SW components,latest version 1.1
+ USHORT TMDS_Info; // Will be obsolete from R600
+ USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
+ USHORT SupportedDevicesInfo; // Will be obsolete from R600
+ USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600
+ USHORT VRAM_UsageByFirmware; // Shared by various SW components,latest version 1.3 will be used from R600
+ USHORT GPIO_Pin_LUT; // Shared by various SW components,latest version 1.1
+ USHORT VESA_ToInternalModeLUT; // Only used by Bios
+ USHORT ComponentVideoInfo; // Shared by various SW components,latest version 2.1 will be used from R600
+ USHORT PowerPlayInfo; // Shared by various SW components,latest version 2.1,new design from R600
+ USHORT CompassionateData; // Will be obsolete from R600
+ USHORT SaveRestoreInfo; // Only used by Bios
+ USHORT PPLL_SS_Info; // Shared by various SW components,latest version 1.2, used to be called SS_Info, changed to the new name because of internal ASIC SS info
+ USHORT OemInfo; // Defined and used by external SW, should be obsolete soon
+ USHORT XTMDS_Info; // Will be obsolete from R600
+ USHORT MclkSS_Info; // Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used
+ USHORT Object_Header; // Shared by various SW components,latest version 1.1
+ USHORT IndirectIOAccess; // Only used by Bios,this table position can't change at all!!
+ USHORT MC_InitParameter; // Only used by command table
+ USHORT ASIC_VDDC_Info; // Will be obsolete from R600
+ USHORT ASIC_InternalSS_Info; // New table name from R600, used to be called "ASIC_MVDDC_Info"
+ USHORT TV_VideoMode; // Only used by command table
+ USHORT VRAM_Info; // Only used by command table, latest version 1.3
+ USHORT MemoryTrainingInfo; // Used for VBIOS and Diag utility for memory training purposes since R600. The new table rev starts from 2.1
+ USHORT IntegratedSystemInfo; // Shared by various SW components
+ USHORT ASIC_ProfilingInfo; // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600
+ USHORT VoltageObjectInfo; // Shared by various SW components, latest version 1.1
+ USHORT PowerSourceInfo; // Shared by various SW components, latest version 1.1
+}ATOM_MASTER_LIST_OF_DATA_TABLES;
+
+typedef struct _ATOM_MASTER_DATA_TABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
+}ATOM_MASTER_DATA_TABLE;
+
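// A minimal sketch of how the master list is typically consumed: each USHORT
// entry is treated as a little-endian byte offset from the start of the VBIOS
// image, with 0 meaning the table is absent. The offset base is an assumption
// here; this header itself does not define it.
static void *example_get_data_table(UCHAR *bios_image, USHORT table_offset)
{
  if (table_offset == 0) // a zero offset means the table is absent from this image
    return NULL;
  return bios_image + table_offset;
}
// e.g. ATOM_FIRMWARE_INFO *fw = example_get_data_table(bios, list->FirmwareInfo);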
+/****************************************************************************/
+// Structure used in MultimediaCapabilityInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulSignature; // HW info table signature string "$ATI"
+ UCHAR ucI2C_Type; // I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc)
+ UCHAR ucTV_OutInfo; // Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7)
+ UCHAR ucVideoPortInfo; // Provides the video port capabilities
+ UCHAR ucHostPortInfo; // Provides host port configuration information
+}ATOM_MULTIMEDIA_CAPABILITY_INFO;
+
+/****************************************************************************/
+// Structure used in MultimediaConfigInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulSignature; // MM info table signature string "$MMT"
+ UCHAR ucTunerInfo; // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5)
+ UCHAR ucAudioChipInfo; // List the audio chip type (3:0) product type (4) and OEM revision (7:5)
+ UCHAR ucProductID; // Defined as OEM ID or ATI board ID, depending on the product type setting
+ UCHAR ucMiscInfo1; // Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7)
+ UCHAR ucMiscInfo2; // I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6)
+ UCHAR ucMiscInfo3; // Video Decoder Type (3:0) Video In Standard/Crystal (7:4)
+ UCHAR ucMiscInfo4; // Video Decoder Host Config (2:0) reserved (7:3)
+ UCHAR ucVideoInput0Info;// Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput1Info;// Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput2Info;// Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput3Info;// Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+}ATOM_MULTIMEDIA_CONFIG_INFO;
+
+/****************************************************************************/
+// Structures used in FirmwareInfoTable
+/****************************************************************************/
+
+// usBIOSCapability Definition:
+// Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted;
+// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported;
+// Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported;
+// Others: Reserved
#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED 0x0001
#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT 0x0002
#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT 0x0004
-#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008
-#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008 // (valid from v1.1 ~v1.4):=1: memclk SS enable, =0 memclk SS disable.
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010 // (valid from v1.1 ~v1.4):=1: engclk SS enable, =0 engclk SS disable.
#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU 0x0020
#define ATOM_BIOS_INFO_WMI_SUPPORT 0x0040
#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM 0x0080
@@ -1441,242 +1797,292 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO {
#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK 0x1E00
#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE 0x4000
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_EXT_SS_SUPPORT 0x0008 // (valid from v2.1 ): =1: memclk ss enable with external ss chip
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_EXT_SS_SUPPORT 0x0010 // (valid from v2.1 ): =1: engclk ss enable with external ss chip
#ifndef _H2INC
-/* Please don't add or expand this bitfield structure below, this one will retire soon.! */
-typedef struct _ATOM_FIRMWARE_CAPABILITY {
+//Please don't add or expand this bitfield structure below, this one will retire soon!
+typedef struct _ATOM_FIRMWARE_CAPABILITY
+{
#if ATOM_BIG_ENDIAN
- USHORT Reserved:3;
- USHORT HyperMemory_Size:4;
- USHORT HyperMemory_Support:1;
- USHORT PPMode_Assigned:1;
- USHORT WMI_SUPPORT:1;
- USHORT GPUControlsBL:1;
- USHORT EngineClockSS_Support:1;
- USHORT MemoryClockSS_Support:1;
- USHORT ExtendedDesktopSupport:1;
- USHORT DualCRTC_Support:1;
- USHORT FirmwarePosted:1;
+ USHORT Reserved:3;
+ USHORT HyperMemory_Size:4;
+ USHORT HyperMemory_Support:1;
+ USHORT PPMode_Assigned:1;
+ USHORT WMI_SUPPORT:1;
+ USHORT GPUControlsBL:1;
+ USHORT EngineClockSS_Support:1;
+ USHORT MemoryClockSS_Support:1;
+ USHORT ExtendedDesktopSupport:1;
+ USHORT DualCRTC_Support:1;
+ USHORT FirmwarePosted:1;
#else
- USHORT FirmwarePosted:1;
- USHORT DualCRTC_Support:1;
- USHORT ExtendedDesktopSupport:1;
- USHORT MemoryClockSS_Support:1;
- USHORT EngineClockSS_Support:1;
- USHORT GPUControlsBL:1;
- USHORT WMI_SUPPORT:1;
- USHORT PPMode_Assigned:1;
- USHORT HyperMemory_Support:1;
- USHORT HyperMemory_Size:4;
- USHORT Reserved:3;
+ USHORT FirmwarePosted:1;
+ USHORT DualCRTC_Support:1;
+ USHORT ExtendedDesktopSupport:1;
+ USHORT MemoryClockSS_Support:1;
+ USHORT EngineClockSS_Support:1;
+ USHORT GPUControlsBL:1;
+ USHORT WMI_SUPPORT:1;
+ USHORT PPMode_Assigned:1;
+ USHORT HyperMemory_Support:1;
+ USHORT HyperMemory_Size:4;
+ USHORT Reserved:3;
#endif
-} ATOM_FIRMWARE_CAPABILITY;
+}ATOM_FIRMWARE_CAPABILITY;
-typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS {
- ATOM_FIRMWARE_CAPABILITY sbfAccess;
- USHORT susAccess;
-} ATOM_FIRMWARE_CAPABILITY_ACCESS;
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+ ATOM_FIRMWARE_CAPABILITY sbfAccess;
+ USHORT susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
#else
-typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS {
- USHORT susAccess;
-} ATOM_FIRMWARE_CAPABILITY_ACCESS;
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+ USHORT susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
#endif
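// A minimal usage sketch: the union lets the capability word be read either
// through the bitfields or as a raw USHORT tested against the
// ATOM_BIOS_INFO_xx masks above. The mask form is independent of compiler
// bitfield layout; it assumes the caller has already converted the
// little-endian ROM value to host byte order.
static int example_memclk_ss_enabled(ATOM_FIRMWARE_CAPABILITY_ACCESS cap)
{
  return (cap.susAccess & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT) != 0;
}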
-typedef struct _ATOM_FIRMWARE_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulFirmwareRevision;
- ULONG ulDefaultEngineClock; /* In 10Khz unit */
- ULONG ulDefaultMemoryClock; /* In 10Khz unit */
- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
- ULONG ulASICMaxEngineClock; /* In 10Khz unit */
- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
- UCHAR ucASICMaxTemperature;
- UCHAR ucPadding[3]; /* Don't use them */
- ULONG aulReservedForBIOS[3]; /* Don't use them */
- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
- USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit, the definitions above can't change!!! */
- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
- USHORT usReferenceClock; /* In 10Khz unit */
- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
- UCHAR ucDesign_ID; /* Indicate what is the board design */
- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
-} ATOM_FIRMWARE_INFO;
-
-typedef struct _ATOM_FIRMWARE_INFO_V1_2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulFirmwareRevision;
- ULONG ulDefaultEngineClock; /* In 10Khz unit */
- ULONG ulDefaultMemoryClock; /* In 10Khz unit */
- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
- ULONG ulASICMaxEngineClock; /* In 10Khz unit */
- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
- UCHAR ucASICMaxTemperature;
- UCHAR ucMinAllowedBL_Level;
- UCHAR ucPadding[2]; /* Don't use them */
- ULONG aulReservedForBIOS[2]; /* Don't use them */
- ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
- USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
- USHORT usReferenceClock; /* In 10Khz unit */
- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
- UCHAR ucDesign_ID; /* Indicate what is the board design */
- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
-} ATOM_FIRMWARE_INFO_V1_2;
-
-typedef struct _ATOM_FIRMWARE_INFO_V1_3 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulFirmwareRevision;
- ULONG ulDefaultEngineClock; /* In 10Khz unit */
- ULONG ulDefaultMemoryClock; /* In 10Khz unit */
- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
- ULONG ulASICMaxEngineClock; /* In 10Khz unit */
- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
- UCHAR ucASICMaxTemperature;
- UCHAR ucMinAllowedBL_Level;
- UCHAR ucPadding[2]; /* Don't use them */
- ULONG aulReservedForBIOS; /* Don't use them */
- ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */
- ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
- USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
- USHORT usReferenceClock; /* In 10Khz unit */
- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
- UCHAR ucDesign_ID; /* Indicate what is the board design */
- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
-} ATOM_FIRMWARE_INFO_V1_3;
-
-typedef struct _ATOM_FIRMWARE_INFO_V1_4 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulFirmwareRevision;
- ULONG ulDefaultEngineClock; /* In 10Khz unit */
- ULONG ulDefaultMemoryClock; /* In 10Khz unit */
- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
- ULONG ulASICMaxEngineClock; /* In 10Khz unit */
- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
- UCHAR ucASICMaxTemperature;
- UCHAR ucMinAllowedBL_Level;
- USHORT usBootUpVDDCVoltage; /* In MV unit */
- USHORT usLcdMinPixelClockPLL_Output; /* In MHz unit */
- USHORT usLcdMaxPixelClockPLL_Output; /* In MHz unit */
- ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */
- ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
- USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
- USHORT usReferenceClock; /* In 10Khz unit */
- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
- UCHAR ucDesign_ID; /* Indicate what is the board design */
- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
-} ATOM_FIRMWARE_INFO_V1_4;
-
-#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V1_4
-
-/****************************************************************************/
-/* Structures used in IntegratedSystemInfoTable */
-/****************************************************************************/
+typedef struct _ATOM_FIRMWARE_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucPadding[3]; //Don't use them
+ ULONG aulReservedForBIOS[3]; //Don't use them
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit, the definitions above can't change!!!
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ UCHAR ucPadding[2]; //Don't use them
+ ULONG aulReservedForBIOS[2]; //Don't use them
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_2;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ UCHAR ucPadding[2]; //Don't use them
+ ULONG aulReservedForBIOS; //Don't use them
+ ULONG ul3DAccelerationEngineClock;//In 10Khz unit
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_3;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_4
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ USHORT usBootUpVDDCVoltage; //In MV unit
+ USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
+ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
+ ULONG ul3DAccelerationEngineClock;//In 10Khz unit
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_4;
+
+//the structure below is to be used from Cypress
+typedef struct _ATOM_FIRMWARE_INFO_V2_1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulReserved1;
+ ULONG ulReserved2;
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock
+ ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit
+ UCHAR ucReserved1; //Was ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ USHORT usBootUpVDDCVoltage; //In MV unit
+ USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
+ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
+ ULONG ulReserved4; //Was ulAsicMaximumVoltage
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usCoreReferenceClock; //In 10Khz unit
+ USHORT usMemoryReferenceClock; //In 10Khz unit
+ USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit; if 0, the Uniphy input clock in DP mode comes from the internal PPLL, otherwise from the external spread-spectrum clock
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+ UCHAR ucReserved4[3];
+}ATOM_FIRMWARE_INFO_V2_1;
+
+
+#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_1
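// A sketch of revision dispatch, assuming the usual ATOM_COMMON_TABLE_HEADER
// layout with ucTableFormatRevision/ucTableContentRevision fields. Callers
// check these before casting the FirmwareInfo table to a particular
// ATOM_FIRMWARE_INFO_Vx_y; the revision test below is illustrative only.
static ULONG example_default_engine_clock(ATOM_COMMON_TABLE_HEADER *hdr)
{
  if (hdr->ucTableFormatRevision == 2) // v2.1, used from Cypress
    return ((ATOM_FIRMWARE_INFO_V2_1 *)hdr)->ulDefaultEngineClock;
  return ((ATOM_FIRMWARE_INFO *)hdr)->ulDefaultEngineClock; // v1.x layouts agree here
}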
+
+/****************************************************************************/
+// Structures used in IntegratedSystemInfoTable
+/****************************************************************************/
#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN 0x2
#define IGP_CAP_FLAG_AC_CARD 0x4
#define IGP_CAP_FLAG_SDVO_CARD 0x8
#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE 0x10
-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulBootUpEngineClock; /* in 10kHz unit */
- ULONG ulBootUpMemoryClock; /* in 10kHz unit */
- ULONG ulMaxSystemMemoryClock; /* in 10kHz unit */
- ULONG ulMinSystemMemoryClock; /* in 10kHz unit */
- UCHAR ucNumberOfCyclesInPeriodHi;
- UCHAR ucLCDTimingSel; /* =0:not valid.!=0 sel this timing descriptor from LCD EDID. */
- USHORT usReserved1;
- USHORT usInterNBVoltageLow; /* An intermidiate PMW value to set the voltage */
- USHORT usInterNBVoltageHigh; /* Another intermidiate PMW value to set the voltage */
- ULONG ulReserved[2];
-
- USHORT usFSBClock; /* In MHz unit */
- USHORT usCapabilityFlag; /* Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable */
- /* Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card */
- /* Bit[4]==1: P/2 mode, ==0: P/1 mode */
- USHORT usPCIENBCfgReg7; /* bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal */
- USHORT usK8MemoryClock; /* in MHz unit */
- USHORT usK8SyncStartDelay; /* in 0.01 us unit */
- USHORT usK8DataReturnTime; /* in 0.01 us unit */
- UCHAR ucMaxNBVoltage;
- UCHAR ucMinNBVoltage;
- UCHAR ucMemoryType; /* [7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved */
- UCHAR ucNumberOfCyclesInPeriod; /* CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod */
- UCHAR ucStartingPWM_HighTime; /* CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime */
- UCHAR ucHTLinkWidth; /* 16 bit vs. 8 bit */
- UCHAR ucMaxNBVoltageHigh;
- UCHAR ucMinNBVoltageHigh;
-} ATOM_INTEGRATED_SYSTEM_INFO;
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock; //in 10kHz unit
+ ULONG ulBootUpMemoryClock; //in 10kHz unit
+ ULONG ulMaxSystemMemoryClock; //in 10kHz unit
+ ULONG ulMinSystemMemoryClock; //in 10kHz unit
+ UCHAR ucNumberOfCyclesInPeriodHi;
+ UCHAR ucLCDTimingSel; //=0: not valid; !=0: select this timing descriptor from LCD EDID.
+ USHORT usReserved1;
+ USHORT usInterNBVoltageLow; //An intermediate PWM value to set the voltage
+ USHORT usInterNBVoltageHigh; //Another intermediate PWM value to set the voltage
+ ULONG ulReserved[2];
+
+ USHORT usFSBClock; //In MHz unit
+ USHORT usCapabilityFlag; //Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable
+ //Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card
+ //Bit[4]==1: P/2 mode, ==0: P/1 mode
+ USHORT usPCIENBCfgReg7; //bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal
+ USHORT usK8MemoryClock; //in MHz unit
+ USHORT usK8SyncStartDelay; //in 0.01 us unit
+ USHORT usK8DataReturnTime; //in 0.01 us unit
+ UCHAR ucMaxNBVoltage;
+ UCHAR ucMinNBVoltage;
+ UCHAR ucMemoryType; //[7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved
+ UCHAR ucNumberOfCyclesInPeriod; //CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod
+ UCHAR ucStartingPWM_HighTime; //CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime
+ UCHAR ucHTLinkWidth; //16 bit vs. 8 bit
+ UCHAR ucMaxNBVoltageHigh;
+ UCHAR ucMinNBVoltageHigh;
+}ATOM_INTEGRATED_SYSTEM_INFO;
/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO
-ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock
+ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock
For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock
ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
For AMD IGP,for now this can be 0
-ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
+ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
For AMD IGP,for now this can be 0
-usFSBClock: For Intel IGP,it's FSB Freq
+usFSBClock: For Intel IGP,it's FSB Freq
For AMD IGP,it's HT Link Speed
usK8MemoryClock: For AMD IGP only. For RevF CPU, set it to 200
@@ -1687,98 +2093,113 @@ VC:Voltage Control
ucMaxNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
ucMinNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
-ucNumberOfCyclesInPeriod: Indicate how many cycles when PWM duty is 100%. low 8 bits of the value.
-ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0
+ucNumberOfCyclesInPeriod: Indicates how many cycles when PWM duty is 100%. Low 8 bits of the value.
+ucNumberOfCyclesInPeriodHi: Indicates how many cycles when PWM duty is 100%. High 8 bits of the value. If the PWM has an inverter, set bit [7]==1, otherwise set it to 0
ucMaxNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
ucMinNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
usInterNBVoltageLow: Voltage regulator dependent PWM value. The value makes the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
usInterNBVoltageHigh: Voltage regulator dependent PWM value. The value makes the voltage >=InterNBVoltageLow but <=Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
*/
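// A small sketch of the split described above: the 16-bit PWM limit values
// are carried in two UCHARs each, low byte in ucMaxNBVoltage/ucMinNBVoltage
// and high byte in the corresponding *High field.
static USHORT example_max_nb_voltage(ATOM_INTEGRATED_SYSTEM_INFO *info)
{
  return (USHORT)(((USHORT)info->ucMaxNBVoltageHigh << 8) | info->ucMaxNBVoltage);
}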
+
/*
The following IGP table is introduced from RS780, which is supposed to be put by SBIOS in FB before IGP VBIOS starts VPOST;
-Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need.
+Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need.
The generous reservation should allow us to never change table revisions. Whenever needed, a GPU SW component can use the reserved portion for new data entries.
SW components can access the IGP system info structure in the same way as before
*/
-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulBootUpEngineClock; /* in 10kHz unit */
- ULONG ulReserved1[2]; /* must be 0x0 for the reserved */
- ULONG ulBootUpUMAClock; /* in 10kHz unit */
- ULONG ulBootUpSidePortClock; /* in 10kHz unit */
- ULONG ulMinSidePortClock; /* in 10kHz unit */
- ULONG ulReserved2[6]; /* must be 0x0 for the reserved */
- ULONG ulSystemConfig; /* see explanation below */
- ULONG ulBootUpReqDisplayVector;
- ULONG ulOtherDisplayMisc;
- ULONG ulDDISlot1Config;
- ULONG ulDDISlot2Config;
- UCHAR ucMemoryType; /* [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved */
- UCHAR ucUMAChannelNumber;
- UCHAR ucDockingPinBit;
- UCHAR ucDockingPinPolarity;
- ULONG ulDockingPinCFGInfo;
- ULONG ulCPUCapInfo;
- USHORT usNumberOfCyclesInPeriod;
- USHORT usMaxNBVoltage;
- USHORT usMinNBVoltage;
- USHORT usBootUpNBVoltage;
- ULONG ulHTLinkFreq; /* in 10Khz */
- USHORT usMinHTLinkWidth;
- USHORT usMaxHTLinkWidth;
- USHORT usUMASyncStartDelay;
- USHORT usUMADataReturnTime;
- USHORT usLinkStatusZeroTime;
- USHORT usReserved;
- ULONG ulHighVoltageHTLinkFreq; /* in 10Khz */
- ULONG ulLowVoltageHTLinkFreq; /* in 10Khz */
- USHORT usMaxUpStreamHTLinkWidth;
- USHORT usMaxDownStreamHTLinkWidth;
- USHORT usMinUpStreamHTLinkWidth;
- USHORT usMinDownStreamHTLinkWidth;
- ULONG ulReserved3[97]; /* must be 0x0 */
-} ATOM_INTEGRATED_SYSTEM_INFO_V2;
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock; //in 10kHz unit
+ ULONG ulReserved1[2]; //must be 0x0 for the reserved
+ ULONG ulBootUpUMAClock; //in 10kHz unit
+ ULONG ulBootUpSidePortClock; //in 10kHz unit
+ ULONG ulMinSidePortClock; //in 10kHz unit
+ ULONG ulReserved2[6]; //must be 0x0 for the reserved
+ ULONG ulSystemConfig; //see explanation below
+ ULONG ulBootUpReqDisplayVector;
+ ULONG ulOtherDisplayMisc;
+ ULONG ulDDISlot1Config;
+ ULONG ulDDISlot2Config;
+ UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+ UCHAR ucUMAChannelNumber;
+ UCHAR ucDockingPinBit;
+ UCHAR ucDockingPinPolarity;
+ ULONG ulDockingPinCFGInfo;
+ ULONG ulCPUCapInfo;
+ USHORT usNumberOfCyclesInPeriod;
+ USHORT usMaxNBVoltage;
+ USHORT usMinNBVoltage;
+ USHORT usBootUpNBVoltage;
+ ULONG ulHTLinkFreq; //in 10Khz
+ USHORT usMinHTLinkWidth;
+ USHORT usMaxHTLinkWidth;
+ USHORT usUMASyncStartDelay;
+ USHORT usUMADataReturnTime;
+ USHORT usLinkStatusZeroTime;
+ USHORT usDACEfuse; //for storing bandgap value (for RS880 only)
+ ULONG ulHighVoltageHTLinkFreq; // in 10Khz
+ ULONG ulLowVoltageHTLinkFreq; // in 10Khz
+ USHORT usMaxUpStreamHTLinkWidth;
+ USHORT usMaxDownStreamHTLinkWidth;
+ USHORT usMinUpStreamHTLinkWidth;
+ USHORT usMinDownStreamHTLinkWidth;
+ USHORT usFirmwareVersion; //0 means FW is not supported. Otherwise it's the FW version loaded by SBIOS, and the driver should enable FW.
+ USHORT usFullT0Time; // Input to calculate minimum HT link change time required by NB P-State. Unit is 0.01us.
+ ULONG ulReserved3[96]; //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V2;
/*
ulBootUpEngineClock: Boot-up Engine Clock in 10Khz;
ulBootUpUMAClock: Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present
ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock
-ulSystemConfig:
-Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode;
+ulSystemConfig:
+Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode;
Bit[1]=1: system boots up at AMD overdriven state or user customized mode. In this case, the driver will just stick to this boot-up mode. No other PowerPlay state
=0: system boots up at driver control state. Power state depends on PowerPlay table.
Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used.
Bit[3]=1: Only one power state(Performance) will be supported.
=0: Multiple power states supported from PowerPlay table.
-Bit[4]=1: CLMC is supported and enabled on current system.
- =0: CLMC is not supported or enabled on current system. SBIOS need to support HT link/freq change through ATIF interface.
-Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
+Bit[4]=1: CLMC is supported and enabled on current system.
+ =0: CLMC is not supported or enabled on current system. SBIOS needs to support HT link/freq change through ATIF interface.
+Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
=0: CDLW is disabled. If CLMC is enabled case, Min HT width will be set equal to Max HT width. If CLMC disabled case, Max HT width will be applied.
Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored.
=0: Voltage settings is determined by powerplay table.
Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is a workaround for a CPU (Griffin) performance issue.
=0: Enable CLMC as regular mode, CDLD and CILR will be enabled.
+Bit[8]=1: CDLF is supported and enabled on current system.
+ =0: CDLF is not supported or enabled on current system.
+Bit[9]=1: DLL Shut Down feature is enabled on current system.
+ =0: DLL Shut Down feature is not enabled or supported on current system.
ulBootUpReqDisplayVector: This dword is a bit vector that indicates which display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.
ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion;
- [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSuppportedStd definition;
+ [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSupportedStd definition;
ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
[3:0] - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
- [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
- [15:8] - Lane configuration attribute;
+ [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 4=1 lane 3:0; bit 5=1 lane 7:4; bit 6=1 lane 11:8; bit 7=1 lane 15:12)
+ When a DDI connector is not "paired" (meaning the two connections are mutually exclusive on chassis or docking, so only one of them can be connected at one time)
+ in both chassis and docking, SBIOS has to duplicate the same PCIE lane info from chassis to docking or vice versa. For example:
+ if one DDI connector is only populated in docking with PCIE lanes 8-11, but there is no paired connection on chassis, SBIOS has to copy bit 6 to bit 2.
+
+ [15:8] - Lane configuration attribute;
[23:16]- Connector type, possible value:
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
CONNECTOR_OBJECT_ID_HDMI_TYPE_A
CONNECTOR_OBJECT_ID_DISPLAYPORT
+ CONNECTOR_OBJECT_ID_eDP
[31:24]- Reserved
ulDDISlot2Config: Same as Slot1.
@@ -1787,29 +2208,31 @@ For IGP, Hypermemory is the only memory type showed in CCC.
ucUMAChannelNumber: how many channels for the UMA;
-ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
+ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
ucDockingPinBit: which bit in this register to read the pin status;
ucDockingPinPolarity:Polarity of the pin when docked;
ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
usNumberOfCyclesInPeriod:Indicates how many cycles when PWM duty is 100%.
-usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode.
+
+usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode.
usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode.
GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value when ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value when ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
GPU SW does not control mode: usMaxNBVoltage & usMinNBVoltage=0, and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE is ignored
+
usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value.
ulHTLinkFreq: Bootup HT link Frequency in 10Khz.
-usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
- If CDLW enabled, both upstream and downstream width should be the same during bootup.
-usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
+usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
If CDLW enabled, both upstream and downstream width should be the same during bootup.
+usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
+ If CDLW enabled, both upstream and downstream width should be the same during bootup.
-usUMASyncStartDelay: Memory access latency, required for watermark calculation
+usUMASyncStartDelay: Memory access latency, required for watermark calculation
usUMADataReturnTime: Memory access latency, required for watermark calculation
-usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in 0.01 the unit of us
+usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in units of 0.01 us
for Griffin or Greyhound. SBIOS needs to convert to actual time by:
if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
@@ -1817,7 +2240,7 @@ for Griffin or Greyhound. SBIOS needs to convert to actual time by:
if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)
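
A minimal conversion sketch for the T0Ttime encodings documented above, assuming "t0t" is the raw 6-bit value; the result is in usLinkStatusZeroTime's 0.01 us units (only the encodings shown above are covered):

    static USHORT t0t_to_link_status_zero_time(UCHAR t0t)
    {
        UCHAR mult = (t0t >> 4) & 0x3;                   /* T0Ttime [5:4] */
        UCHAR base = t0t & 0xF;                          /* T0Ttime [3:0] */
        switch (mult) {
        case 0:  return base * 10;                       /* 0.1us steps: 0.0 to 1.5us */
        case 1:  return base * 50;                       /* 0.5us steps: 0.0 to 7.5us */
        case 3:  return (base <= 0xa) ? base * 2000 : 0; /* 20us steps: 0 to 200us    */
        default: return 0;                               /* encoding not shown here   */
        }
    }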
ulHighVoltageHTLinkFreq: HT link frequency for power state with high voltage. If boot up runs in HT1, this must be 0.
- This must be less than or equal to ulHTLinkFreq(bootup frequency).
+ This must be less than or equal to ulHTLinkFreq(bootup frequency).
ulLowVoltageHTLinkFreq: HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0.
This must be less than or equal to ulHighVoltageHTLinkFreq.
@@ -1827,14 +2250,17 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep
usMinDownStreamHTLinkWidth: same as above.
*/
+
#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
-#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004
+#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004
#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY 0x00000008
#define SYSTEM_CONFIG_CLMC_ENABLED 0x00000010
#define SYSTEM_CONFIG_CDLW_ENABLED 0x00000020
#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED 0x00000040
#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED 0x00000080
+#define SYSTEM_CONFIG_CDLF_ENABLED 0x00000100
+#define SYSTEM_CONFIG_DLL_SHUTDOWN_ENABLED 0x00000200
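
A minimal sketch of testing the flag bits documented above, assuming "info" points at an integrated system info table containing the ulSystemConfig field (variable names hypothetical):

    ULONG ulSystemConfig = info->ulSystemConfig;
    if (ulSystemConfig & SYSTEM_CONFIG_CLMC_ENABLED) {
        /* Bit[4]: CLMC is supported and enabled */
    }
    if (!(ulSystemConfig & SYSTEM_CONFIG_CDLW_ENABLED)) {
        /* Bit[5]=0: Min HT width must be set equal to Max HT width */
    }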
#define IGP_DDI_SLOT_LANE_CONFIG_MASK 0x000000FF
@@ -1851,6 +2277,41 @@ usMinDownStreamHTLinkWidth: same as above.
#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK 0x00FF0000
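
A sketch of extracting the ulDDISlot1Config fields per the [3:0]/[7:4]/[15:8]/[23:16] layout described above ("slot_cfg" and "info" are hypothetical):

    ULONG slot_cfg      = info->ulDDISlot1Config;
    UCHAR chassis_lanes = slot_cfg & 0x0F;               /* [3:0]  chassis lane bits */
    UCHAR dock_lanes    = (slot_cfg >> 4) & 0x0F;        /* [7:4]  docking lane bits */
    UCHAR lane_attr     = (slot_cfg >> 8) & 0xFF;        /* [15:8] lane attribute    */
    UCHAR conn_type     = (slot_cfg & IGP_DDI_SLOT_CONNECTOR_TYPE_MASK) >> 16; /* [23:16] connector object id */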
+// IntegratedSystemInfoTable new Rev is V5 after V2, because the real rev of V2 is v1.4. This rev is used for RR
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock; //in 10kHz unit
+ ULONG ulDentistVCOFreq; //Dentist VCO clock in 10kHz unit, the source of GPU SCLK, LCLK, UCLK and VCLK.
+ ULONG ulLClockFreq; //GPU Lclk freq in 10kHz unit, has a relationship with NCLK in NorthBridge
+ ULONG ulBootUpUMAClock; //in 10kHz unit
+ ULONG ulReserved1[8]; //must be 0x0 for the reserved
+ ULONG ulBootUpReqDisplayVector;
+ ULONG ulOtherDisplayMisc;
+ ULONG ulReserved2[4]; //must be 0x0 for the reserved
+ ULONG ulSystemConfig; //TBD
+ ULONG ulCPUCapInfo; //TBD
+ USHORT usMaxNBVoltage; //high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+ USHORT usMinNBVoltage; //low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+ USHORT usBootUpNBVoltage; //boot up NB voltage
+ UCHAR ucHtcTmpLmt; //bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD
+ UCHAR ucTjOffset; //bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed, TBD
+ ULONG ulReserved3[4]; //must be 0x0 for the reserved
+ ULONG ulDDISlot1Config; //see above ulDDISlot1Config definition
+ ULONG ulDDISlot2Config;
+ ULONG ulDDISlot3Config;
+ ULONG ulDDISlot4Config;
+ ULONG ulReserved4[4]; //must be 0x0 for the reserved
+ UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+ UCHAR ucUMAChannelNumber;
+ USHORT usReserved;
+ ULONG ulReserved5[4]; //must be 0x0 for the reserved
+ ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];//arrays with values for CSR M3 arbiter for default
+ ULONG ulCSR_M3_ARB_CNTL_UVD[10]; //arrays with values for CSR M3 arbiter for UVD playback
+ ULONG ulCSR_M3_ARB_CNTL_FS3D[10];//arrays with values for CSR M3 arbiter for Full Screen 3D applications
+ ULONG ulReserved6[61]; //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V5;
+
#define ATOM_CRT_INT_ENCODER1_INDEX 0x00000000
#define ATOM_LCD_INT_ENCODER1_INDEX 0x00000001
#define ATOM_TV_INT_ENCODER1_INDEX 0x00000002
@@ -1866,8 +2327,8 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DFP_INT_ENCODER3_INDEX 0x0000000C
#define ATOM_DFP_INT_ENCODER4_INDEX 0x0000000D
-/* define ASIC internal encoder id ( bit vector ) */
-#define ASIC_INT_DAC1_ENCODER_ID 0x00
+// define ASIC internal encoder id ( bit vector ), used for CRTC_SourceSelTable
+#define ASIC_INT_DAC1_ENCODER_ID 0x00
#define ASIC_INT_TV_ENCODER_ID 0x02
#define ASIC_INT_DIG1_ENCODER_ID 0x03
#define ASIC_INT_DAC2_ENCODER_ID 0x04
@@ -1875,10 +2336,24 @@ usMinDownStreamHTLinkWidth: same as above.
#define ASIC_INT_DVO_ENCODER_ID 0x07
#define ASIC_INT_DIG2_ENCODER_ID 0x09
#define ASIC_EXT_DIG_ENCODER_ID 0x05
+#define ASIC_EXT_DIG2_ENCODER_ID 0x08
+#define ASIC_INT_DIG3_ENCODER_ID 0x0a
+#define ASIC_INT_DIG4_ENCODER_ID 0x0b
+#define ASIC_INT_DIG5_ENCODER_ID 0x0c
+#define ASIC_INT_DIG6_ENCODER_ID 0x0d
-/* define Encoder attribute */
+//define Encoder attribute
#define ATOM_ANALOG_ENCODER 0
-#define ATOM_DIGITAL_ENCODER 1
+#define ATOM_DIGITAL_ENCODER 1
+#define ATOM_DP_ENCODER 2
+
+#define ATOM_ENCODER_ENUM_MASK 0x70
+#define ATOM_ENCODER_ENUM_ID1 0x00
+#define ATOM_ENCODER_ENUM_ID2 0x10
+#define ATOM_ENCODER_ENUM_ID3 0x20
+#define ATOM_ENCODER_ENUM_ID4 0x30
+#define ATOM_ENCODER_ENUM_ID5 0x40
+#define ATOM_ENCODER_ENUM_ID6 0x50
#define ATOM_DEVICE_CRT1_INDEX 0x00000000
#define ATOM_DEVICE_LCD1_INDEX 0x00000001
@@ -1886,45 +2361,40 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DEVICE_DFP1_INDEX 0x00000003
#define ATOM_DEVICE_CRT2_INDEX 0x00000004
#define ATOM_DEVICE_LCD2_INDEX 0x00000005
-#define ATOM_DEVICE_TV2_INDEX 0x00000006
+#define ATOM_DEVICE_DFP6_INDEX 0x00000006
#define ATOM_DEVICE_DFP2_INDEX 0x00000007
#define ATOM_DEVICE_CV_INDEX 0x00000008
-#define ATOM_DEVICE_DFP3_INDEX 0x00000009
-#define ATOM_DEVICE_DFP4_INDEX 0x0000000A
-#define ATOM_DEVICE_DFP5_INDEX 0x0000000B
+#define ATOM_DEVICE_DFP3_INDEX 0x00000009
+#define ATOM_DEVICE_DFP4_INDEX 0x0000000A
+#define ATOM_DEVICE_DFP5_INDEX 0x0000000B
+
#define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C
#define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D
#define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E
#define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F
#define ATOM_MAX_SUPPORTED_DEVICE_INFO (ATOM_DEVICE_DFP3_INDEX+1)
#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO
-#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1)
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1 )
#define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1)
-#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX)
-#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX)
-#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX)
-#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX)
-#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX)
-#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX)
-#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX)
-#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX)
-#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX)
-#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX)
-#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX )
-#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX)
-
-#define ATOM_DEVICE_CRT_SUPPORT \
- (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
-#define ATOM_DEVICE_DFP_SUPPORT \
- (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | \
- ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | \
- ATOM_DEVICE_DFP5_SUPPORT)
-#define ATOM_DEVICE_TV_SUPPORT \
- (ATOM_DEVICE_TV1_SUPPORT | ATOM_DEVICE_TV2_SUPPORT)
-#define ATOM_DEVICE_LCD_SUPPORT \
- (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
+#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX )
+#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX )
+#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX )
+#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX )
+#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX )
+#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX )
+#define ATOM_DEVICE_DFP6_SUPPORT (0x1L << ATOM_DEVICE_DFP6_INDEX )
+#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX )
+#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX )
+#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX )
+#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX )
+#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX )
+
+#define ATOM_DEVICE_CRT_SUPPORT (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
+#define ATOM_DEVICE_DFP_SUPPORT (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT | ATOM_DEVICE_DFP6_SUPPORT)
+#define ATOM_DEVICE_TV_SUPPORT (ATOM_DEVICE_TV1_SUPPORT)
+#define ATOM_DEVICE_LCD_SUPPORT (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
#define ATOM_DEVICE_CONNECTOR_TYPE_MASK 0x000000F0
#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004
@@ -1942,6 +2412,7 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DEVICE_CONNECTOR_CASE_1 0x0000000E
#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F
+
#define ATOM_DEVICE_DAC_INFO_MASK 0x0000000F
#define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000
#define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000
@@ -1958,139 +2429,150 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DEVICE_I2C_ID_SHIFT 0x00000004
#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001
#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE 0x00000002
-#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 /* For IGP RS600 */
-#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 /* For IGP RS690 */
+#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 //For IGP RS600
+#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 //For IGP RS690
#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK 0x00000080
#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT 0x00000007
#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000
#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001
-/* usDeviceSupport: */
-/* Bits0 = 0 - no CRT1 support= 1- CRT1 is supported */
-/* Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported */
-/* Bit 2 = 0 - no TV1 support= 1- TV1 is supported */
-/* Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported */
-/* Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported */
-/* Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported */
-/* Bit 6 = 0 - no TV2 support= 1- TV2 is supported */
-/* Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported */
-/* Bit 8 = 0 - no CV support= 1- CV is supported */
-/* Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported */
-/* Byte1 (Supported Device Info) */
-/* Bit 0 = = 0 - no CV support= 1- CV is supported */
-/* */
-/* */
-
-/* ucI2C_ConfigID */
-/* [7:0] - I2C LINE Associate ID */
-/* = 0 - no I2C */
-/* [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection) */
-/* = 0, [6:0]=SW assisted I2C ID */
-/* [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use */
-/* = 2, HW engine for Multimedia use */
-/* = 3-7 Reserved for future I2C engines */
-/* [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C */
-
-typedef struct _ATOM_I2C_ID_CONFIG {
-#if ATOM_BIG_ENDIAN
- UCHAR bfHW_Capable:1;
- UCHAR bfHW_EngineID:3;
- UCHAR bfI2C_LineMux:4;
-#else
- UCHAR bfI2C_LineMux:4;
- UCHAR bfHW_EngineID:3;
- UCHAR bfHW_Capable:1;
-#endif
-} ATOM_I2C_ID_CONFIG;
-
-typedef union _ATOM_I2C_ID_CONFIG_ACCESS {
- ATOM_I2C_ID_CONFIG sbfAccess;
- UCHAR ucAccess;
-} ATOM_I2C_ID_CONFIG_ACCESS;
+// usDeviceSupport:
+// Bit 0 = 0 - no CRT1 support= 1- CRT1 is supported
+// Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported
+// Bit 2 = 0 - no TV1 support= 1- TV1 is supported
+// Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported
+// Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported
+// Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported
+// Bit 6 = 0 - no DFP6 support= 1- DFP6 is supported
+// Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported
+// Bit 8 = 0 - no CV support= 1- CV is supported
+// Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported
+// Bit 10 = 0 - no DFP4 support= 1- DFP4 is supported
+// Bit 11 = 0 - no DFP5 support= 1- DFP5 is supported
+//
+//
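
A sketch of testing usDeviceSupport against the ATOM_DEVICE_xxx_SUPPORT masks defined earlier ("dev_support" is a hypothetical copy of the field):

    USHORT dev_support = object_header->usDeviceSupport;
    if (dev_support & ATOM_DEVICE_LCD1_SUPPORT) {
        /* Bit 1: LCD1 is supported */
    }
    if (dev_support & ATOM_DEVICE_DFP6_SUPPORT) {
        /* Bit 6: DFP6 is supported (this bit previously meant TV2) */
    }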
/****************************************************************************/
-/* Structure used in GPIO_I2C_InfoTable */
+/* Structure used in MclkSS_InfoTable */
/****************************************************************************/
-typedef struct _ATOM_GPIO_I2C_ASSIGMENT {
- USHORT usClkMaskRegisterIndex;
- USHORT usClkEnRegisterIndex;
- USHORT usClkY_RegisterIndex;
- USHORT usClkA_RegisterIndex;
- USHORT usDataMaskRegisterIndex;
- USHORT usDataEnRegisterIndex;
- USHORT usDataY_RegisterIndex;
- USHORT usDataA_RegisterIndex;
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
- UCHAR ucClkMaskShift;
- UCHAR ucClkEnShift;
- UCHAR ucClkY_Shift;
- UCHAR ucClkA_Shift;
- UCHAR ucDataMaskShift;
- UCHAR ucDataEnShift;
- UCHAR ucDataY_Shift;
- UCHAR ucDataA_Shift;
- UCHAR ucReserved1;
- UCHAR ucReserved2;
-} ATOM_GPIO_I2C_ASSIGMENT;
-
-typedef struct _ATOM_GPIO_I2C_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
-} ATOM_GPIO_I2C_INFO;
+// ucI2C_ConfigID
+// [7:0] - I2C LINE Associate ID
+// = 0 - no I2C
+// [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection)
+// = 0, [6:0]=SW assisted I2C ID
+// [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use
+// = 2, HW engine for Multimedia use
+// = 3-7 Reserved for future I2C engines
+// [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C
+
+typedef struct _ATOM_I2C_ID_CONFIG
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR bfHW_Capable:1;
+ UCHAR bfHW_EngineID:3;
+ UCHAR bfI2C_LineMux:4;
+#else
+ UCHAR bfI2C_LineMux:4;
+ UCHAR bfHW_EngineID:3;
+ UCHAR bfHW_Capable:1;
+#endif
+}ATOM_I2C_ID_CONFIG;
-/****************************************************************************/
-/* Common Structure used in other structures */
-/****************************************************************************/
+typedef union _ATOM_I2C_ID_CONFIG_ACCESS
+{
+ ATOM_I2C_ID_CONFIG sbfAccess;
+ UCHAR ucAccess;
+}ATOM_I2C_ID_CONFIG_ACCESS;
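
A sketch of decoding ucI2C_ConfigID through the access union above ("raw_id" is a hypothetical config byte read from a table):

    ATOM_I2C_ID_CONFIG_ACCESS id;
    id.ucAccess = raw_id;
    if (id.sbfAccess.bfHW_Capable) {
        UCHAR engine = id.sbfAccess.bfHW_EngineID; /* [6:4] HW engine id         */
        UCHAR mux    = id.sbfAccess.bfI2C_LineMux; /* [3:0] mux for HW assisted  */
    } else {
        UCHAR gpio   = id.sbfAccess.bfI2C_LineMux; /* [3:0] GPIO ID, SW assisted */
    }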
+
+
+/****************************************************************************/
+// Structure used in GPIO_I2C_InfoTable
+/****************************************************************************/
+typedef struct _ATOM_GPIO_I2C_ASSIGMENT
+{
+ USHORT usClkMaskRegisterIndex;
+ USHORT usClkEnRegisterIndex;
+ USHORT usClkY_RegisterIndex;
+ USHORT usClkA_RegisterIndex;
+ USHORT usDataMaskRegisterIndex;
+ USHORT usDataEnRegisterIndex;
+ USHORT usDataY_RegisterIndex;
+ USHORT usDataA_RegisterIndex;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+ UCHAR ucClkMaskShift;
+ UCHAR ucClkEnShift;
+ UCHAR ucClkY_Shift;
+ UCHAR ucClkA_Shift;
+ UCHAR ucDataMaskShift;
+ UCHAR ucDataEnShift;
+ UCHAR ucDataY_Shift;
+ UCHAR ucDataA_Shift;
+ UCHAR ucReserved1;
+ UCHAR ucReserved2;
+}ATOM_GPIO_I2C_ASSIGMENT;
+
+typedef struct _ATOM_GPIO_I2C_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_GPIO_I2C_INFO;
+
+/****************************************************************************/
+// Common Structure used in other structures
+/****************************************************************************/
#ifndef _H2INC
-
-/* Please don't add or expand this bitfield structure below, this one will retire soon.! */
-typedef struct _ATOM_MODE_MISC_INFO {
+
+//Please don't add or expand this bitfield structure below, this one will retire soon!
+typedef struct _ATOM_MODE_MISC_INFO
+{
#if ATOM_BIG_ENDIAN
- USHORT Reserved:6;
- USHORT RGB888:1;
- USHORT DoubleClock:1;
- USHORT Interlace:1;
- USHORT CompositeSync:1;
- USHORT V_ReplicationBy2:1;
- USHORT H_ReplicationBy2:1;
- USHORT VerticalCutOff:1;
- USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */
- USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */
- USHORT HorizontalCutOff:1;
+ USHORT Reserved:6;
+ USHORT RGB888:1;
+ USHORT DoubleClock:1;
+ USHORT Interlace:1;
+ USHORT CompositeSync:1;
+ USHORT V_ReplicationBy2:1;
+ USHORT H_ReplicationBy2:1;
+ USHORT VerticalCutOff:1;
+ USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT HorizontalCutOff:1;
#else
- USHORT HorizontalCutOff:1;
- USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */
- USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */
- USHORT VerticalCutOff:1;
- USHORT H_ReplicationBy2:1;
- USHORT V_ReplicationBy2:1;
- USHORT CompositeSync:1;
- USHORT Interlace:1;
- USHORT DoubleClock:1;
- USHORT RGB888:1;
- USHORT Reserved:6;
+ USHORT HorizontalCutOff:1;
+ USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT VerticalCutOff:1;
+ USHORT H_ReplicationBy2:1;
+ USHORT V_ReplicationBy2:1;
+ USHORT CompositeSync:1;
+ USHORT Interlace:1;
+ USHORT DoubleClock:1;
+ USHORT RGB888:1;
+ USHORT Reserved:6;
#endif
-} ATOM_MODE_MISC_INFO;
-
-typedef union _ATOM_MODE_MISC_INFO_ACCESS {
- ATOM_MODE_MISC_INFO sbfAccess;
- USHORT usAccess;
-} ATOM_MODE_MISC_INFO_ACCESS;
-
+}ATOM_MODE_MISC_INFO;
+
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{
+ ATOM_MODE_MISC_INFO sbfAccess;
+ USHORT usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+
#else
-
-typedef union _ATOM_MODE_MISC_INFO_ACCESS {
- USHORT usAccess;
-} ATOM_MODE_MISC_INFO_ACCESS;
-
+
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{
+ USHORT usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+
#endif
-/* usModeMiscInfo- */
+// usModeMiscInfo-
#define ATOM_H_CUTOFF 0x01
-#define ATOM_HSYNC_POLARITY 0x02 /* 0=Active High, 1=Active Low */
-#define ATOM_VSYNC_POLARITY 0x04 /* 0=Active High, 1=Active Low */
+#define ATOM_HSYNC_POLARITY 0x02 //0=Active High, 1=Active Low
+#define ATOM_VSYNC_POLARITY 0x04 //0=Active High, 1=Active Low
#define ATOM_V_CUTOFF 0x08
#define ATOM_H_REPLICATIONBY2 0x10
#define ATOM_V_REPLICATIONBY2 0x20
@@ -2099,10 +2581,10 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS {
#define ATOM_DOUBLE_CLOCK_MODE 0x100
#define ATOM_RGB888_MODE 0x200
-/* usRefreshRate- */
+//usRefreshRate-
#define ATOM_REFRESH_43 43
#define ATOM_REFRESH_47 47
-#define ATOM_REFRESH_56 56
+#define ATOM_REFRESH_56 56
#define ATOM_REFRESH_60 60
#define ATOM_REFRESH_65 65
#define ATOM_REFRESH_70 70
@@ -2110,192 +2592,233 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS {
#define ATOM_REFRESH_75 75
#define ATOM_REFRESH_85 85
-/* ATOM_MODE_TIMING data are exactly the same as VESA timing data. */
-/* Translation from EDID to ATOM_MODE_TIMING, use the following formula. */
-/* */
-/* VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK */
-/* = EDID_HA + EDID_HBL */
-/* VESA_HDISP = VESA_ACTIVE = EDID_HA */
-/* VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH */
-/* = EDID_HA + EDID_HSO */
-/* VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW */
-/* VESA_BORDER = EDID_BORDER */
-
-/****************************************************************************/
-/* Structure used in SetCRTC_UsingDTDTimingTable */
-/****************************************************************************/
-typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS {
- USHORT usH_Size;
- USHORT usH_Blanking_Time;
- USHORT usV_Size;
- USHORT usV_Blanking_Time;
- USHORT usH_SyncOffset;
- USHORT usH_SyncWidth;
- USHORT usV_SyncOffset;
- USHORT usV_SyncWidth;
- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
- UCHAR ucH_Border; /* From DFP EDID */
- UCHAR ucV_Border;
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucPadding[3];
-} SET_CRTC_USING_DTD_TIMING_PARAMETERS;
-
-/****************************************************************************/
-/* Structure used in SetCRTC_TimingTable */
-/****************************************************************************/
-typedef struct _SET_CRTC_TIMING_PARAMETERS {
- USHORT usH_Total; /* horizontal total */
- USHORT usH_Disp; /* horizontal display */
- USHORT usH_SyncStart; /* horozontal Sync start */
- USHORT usH_SyncWidth; /* horizontal Sync width */
- USHORT usV_Total; /* vertical total */
- USHORT usV_Disp; /* vertical display */
- USHORT usV_SyncStart; /* vertical Sync start */
- USHORT usV_SyncWidth; /* vertical Sync width */
- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucOverscanRight; /* right */
- UCHAR ucOverscanLeft; /* left */
- UCHAR ucOverscanBottom; /* bottom */
- UCHAR ucOverscanTop; /* top */
- UCHAR ucReserved;
-} SET_CRTC_TIMING_PARAMETERS;
+// ATOM_MODE_TIMING data are exactly the same as VESA timing data.
+// For translation from EDID to ATOM_MODE_TIMING, use the following formulas.
+//
+// VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK
+// = EDID_HA + EDID_HBL
+// VESA_HDISP = VESA_ACTIVE = EDID_HA
+// VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH
+// = EDID_HA + EDID_HSO
+// VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW
+// VESA_BORDER = EDID_BORDER
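
A worked rendering of the horizontal formulas above, using hypothetical lower-case EDID field names (the vertical fields follow the same pattern):

    USHORT vesa_h_total      = edid_ha + edid_hbl;   /* active + blanking    */
    USHORT vesa_h_disp       = edid_ha;              /* active               */
    USHORT vesa_h_sync_start = edid_ha + edid_hso;   /* active + sync offset */
    USHORT vesa_h_sync_width = edid_hspw;            /* sync pulse width     */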
+
+/****************************************************************************/
+// Structure used in SetCRTC_UsingDTDTimingTable
+/****************************************************************************/
+typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS
+{
+ USHORT usH_Size;
+ USHORT usH_Blanking_Time;
+ USHORT usV_Size;
+ USHORT usV_Blanking_Time;
+ USHORT usH_SyncOffset;
+ USHORT usH_SyncWidth;
+ USHORT usV_SyncOffset;
+ USHORT usV_SyncWidth;
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ UCHAR ucH_Border; // From DFP EDID
+ UCHAR ucV_Border;
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucPadding[3];
+}SET_CRTC_USING_DTD_TIMING_PARAMETERS;
+
+/****************************************************************************/
+// Structure used in SetCRTC_TimingTable
+/****************************************************************************/
+typedef struct _SET_CRTC_TIMING_PARAMETERS
+{
+ USHORT usH_Total; // horizontal total
+ USHORT usH_Disp; // horizontal display
+ USHORT usH_SyncStart; // horizontal Sync start
+ USHORT usH_SyncWidth; // horizontal Sync width
+ USHORT usV_Total; // vertical total
+ USHORT usV_Disp; // vertical display
+ USHORT usV_SyncStart; // vertical Sync start
+ USHORT usV_SyncWidth; // vertical Sync width
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucOverscanRight; // right
+ UCHAR ucOverscanLeft; // left
+ UCHAR ucOverscanBottom; // bottom
+ UCHAR ucOverscanTop; // top
+ UCHAR ucReserved;
+}SET_CRTC_TIMING_PARAMETERS;
#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
-/****************************************************************************/
-/* Structure used in StandardVESA_TimingTable */
-/* AnalogTV_InfoTable */
-/* ComponentVideoInfoTable */
-/****************************************************************************/
-typedef struct _ATOM_MODE_TIMING {
- USHORT usCRTC_H_Total;
- USHORT usCRTC_H_Disp;
- USHORT usCRTC_H_SyncStart;
- USHORT usCRTC_H_SyncWidth;
- USHORT usCRTC_V_Total;
- USHORT usCRTC_V_Disp;
- USHORT usCRTC_V_SyncStart;
- USHORT usCRTC_V_SyncWidth;
- USHORT usPixelClock; /* in 10Khz unit */
- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
- USHORT usCRTC_OverscanRight;
- USHORT usCRTC_OverscanLeft;
- USHORT usCRTC_OverscanBottom;
- USHORT usCRTC_OverscanTop;
- USHORT usReserve;
- UCHAR ucInternalModeNumber;
- UCHAR ucRefreshRate;
-} ATOM_MODE_TIMING;
-
-typedef struct _ATOM_DTD_FORMAT {
- USHORT usPixClk;
- USHORT usHActive;
- USHORT usHBlanking_Time;
- USHORT usVActive;
- USHORT usVBlanking_Time;
- USHORT usHSyncOffset;
- USHORT usHSyncWidth;
- USHORT usVSyncOffset;
- USHORT usVSyncWidth;
- USHORT usImageHSize;
- USHORT usImageVSize;
- UCHAR ucHBorder;
- UCHAR ucVBorder;
- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
- UCHAR ucInternalModeNumber;
- UCHAR ucRefreshRate;
-} ATOM_DTD_FORMAT;
-
-/****************************************************************************/
-/* Structure used in LVDS_InfoTable */
-/* * Need a document to describe this table */
-/****************************************************************************/
+/****************************************************************************/
+// Structure used in StandardVESA_TimingTable
+// AnalogTV_InfoTable
+// ComponentVideoInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MODE_TIMING
+{
+ USHORT usCRTC_H_Total;
+ USHORT usCRTC_H_Disp;
+ USHORT usCRTC_H_SyncStart;
+ USHORT usCRTC_H_SyncWidth;
+ USHORT usCRTC_V_Total;
+ USHORT usCRTC_V_Disp;
+ USHORT usCRTC_V_SyncStart;
+ USHORT usCRTC_V_SyncWidth;
+ USHORT usPixelClock; //in 10Khz unit
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ USHORT usCRTC_OverscanRight;
+ USHORT usCRTC_OverscanLeft;
+ USHORT usCRTC_OverscanBottom;
+ USHORT usCRTC_OverscanTop;
+ USHORT usReserve;
+ UCHAR ucInternalModeNumber;
+ UCHAR ucRefreshRate;
+}ATOM_MODE_TIMING;
+
+typedef struct _ATOM_DTD_FORMAT
+{
+ USHORT usPixClk;
+ USHORT usHActive;
+ USHORT usHBlanking_Time;
+ USHORT usVActive;
+ USHORT usVBlanking_Time;
+ USHORT usHSyncOffset;
+ USHORT usHSyncWidth;
+ USHORT usVSyncOffset;
+ USHORT usVSyncWidth;
+ USHORT usImageHSize;
+ USHORT usImageVSize;
+ UCHAR ucHBorder;
+ UCHAR ucVBorder;
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ UCHAR ucInternalModeNumber;
+ UCHAR ucRefreshRate;
+}ATOM_DTD_FORMAT;
+
+/****************************************************************************/
+// Structure used in LVDS_InfoTable
+// * Need a document to describe this table
+/****************************************************************************/
#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
#define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
-/* Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. */
-/* Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL */
-#define LCDPANEL_CAP_READ_EDID 0x1
-
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=1 */
-typedef struct _ATOM_LVDS_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_DTD_FORMAT sLCDTiming;
- USHORT usModePatchTableOffset;
- USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */
- USHORT usOffDelayInMs;
- UCHAR ucPowerSequenceDigOntoDEin10Ms;
- UCHAR ucPowerSequenceDEtoBLOnin10Ms;
- UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */
- /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */
- /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */
- /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */
- UCHAR ucPanelDefaultRefreshRate;
- UCHAR ucPanelIdentification;
- UCHAR ucSS_Id;
-} ATOM_LVDS_INFO;
-
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=2 */
-typedef struct _ATOM_LVDS_INFO_V12 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_DTD_FORMAT sLCDTiming;
- USHORT usExtInfoTableOffset;
- USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */
- USHORT usOffDelayInMs;
- UCHAR ucPowerSequenceDigOntoDEin10Ms;
- UCHAR ucPowerSequenceDEtoBLOnin10Ms;
- UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */
- /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */
- /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */
- /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */
- UCHAR ucPanelDefaultRefreshRate;
- UCHAR ucPanelIdentification;
- UCHAR ucSS_Id;
- USHORT usLCDVenderID;
- USHORT usLCDProductID;
- UCHAR ucLCDPanel_SpecialHandlingCap;
- UCHAR ucPanelInfoSize; /* start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable */
- UCHAR ucReserved[2];
-} ATOM_LVDS_INFO_V12;
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct _ATOM_LVDS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT sLCDTiming;
+ USHORT usModePatchTableOffset;
+ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
+ USHORT usOffDelayInMs;
+ UCHAR ucPowerSequenceDigOntoDEin10Ms;
+ UCHAR ucPowerSequenceDEtoBLOnin10Ms;
+ UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+ // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+ // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+ // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+ UCHAR ucPanelDefaultRefreshRate;
+ UCHAR ucPanelIdentification;
+ UCHAR ucSS_Id;
+}ATOM_LVDS_INFO;
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_LVDS_INFO_V12
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT sLCDTiming;
+ USHORT usExtInfoTableOffset;
+ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
+ USHORT usOffDelayInMs;
+ UCHAR ucPowerSequenceDigOntoDEin10Ms;
+ UCHAR ucPowerSequenceDEtoBLOnin10Ms;
+ UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+ // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+ // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+ // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+ UCHAR ucPanelDefaultRefreshRate;
+ UCHAR ucPanelIdentification;
+ UCHAR ucSS_Id;
+ USHORT usLCDVenderID;
+ USHORT usLCDProductID;
+ UCHAR ucLCDPanel_SpecialHandlingCap;
+ UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
+ UCHAR ucReserved[2];
+}ATOM_LVDS_INFO_V12;
+
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
+#define LCDPANEL_CAP_READ_EDID 0x1
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12.
+#define LCDPANEL_CAP_DRR_SUPPORTED 0x2
+
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define LCDPANEL_CAP_eDP 0x4
+
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6 5 4
+ // 0 0 0 - Color bit depth is undefined
+ // 0 0 1 - 6 Bits per Primary Color
+ // 0 1 0 - 8 Bits per Primary Color
+ // 0 1 1 - 10 Bits per Primary Color
+ // 1 0 0 - 12 Bits per Primary Color
+ // 1 0 1 - 14 Bits per Primary Color
+ // 1 1 0 - 16 Bits per Primary Color
+ // 1 1 1 - Reserved
+
+#define PANEL_COLOR_BIT_DEPTH_MASK 0x70
+
+// Bit7:{=0:Random Dithering disabled;1 Random Dithering enabled}
+#define PANEL_RANDOM_DITHER 0x80
+#define PANEL_RANDOM_DITHER_MASK 0x80
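
A sketch of mapping the EDID V1.4 color-depth code above to bits per primary color ("byte14" is the hypothetical raw EDID byte at offset 14h):

    UCHAR depth_code = (byte14 & PANEL_COLOR_BIT_DEPTH_MASK) >> 4;        /* bits 6:4 */
    /* codes 1..6 map to 6/8/10/12/14/16 bpc; 0 is undefined, 7 reserved */
    UCHAR bpc = (depth_code >= 1 && depth_code <= 6) ? 4 + depth_code * 2 : 0;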
+
#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12
-typedef struct _ATOM_PATCH_RECORD_MODE {
- UCHAR ucRecordType;
- USHORT usHDisp;
- USHORT usVDisp;
-} ATOM_PATCH_RECORD_MODE;
+typedef struct _ATOM_PATCH_RECORD_MODE
+{
+ UCHAR ucRecordType;
+ USHORT usHDisp;
+ USHORT usVDisp;
+}ATOM_PATCH_RECORD_MODE;
-typedef struct _ATOM_LCD_RTS_RECORD {
- UCHAR ucRecordType;
- UCHAR ucRTSValue;
-} ATOM_LCD_RTS_RECORD;
+typedef struct _ATOM_LCD_RTS_RECORD
+{
+ UCHAR ucRecordType;
+ UCHAR ucRTSValue;
+}ATOM_LCD_RTS_RECORD;
-/* !! If the record below exits, it shoud always be the first record for easy use in command table!!! */
-typedef struct _ATOM_LCD_MODE_CONTROL_CAP {
- UCHAR ucRecordType;
- USHORT usLCDCap;
-} ATOM_LCD_MODE_CONTROL_CAP;
+//!! If the record below exists, it should always be the first record for easy use in command table!!!
+// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead.
+typedef struct _ATOM_LCD_MODE_CONTROL_CAP
+{
+ UCHAR ucRecordType;
+ USHORT usLCDCap;
+}ATOM_LCD_MODE_CONTROL_CAP;
#define LCD_MODE_CAP_BL_OFF 1
#define LCD_MODE_CAP_CRTC_OFF 2
#define LCD_MODE_CAP_PANEL_OFF 4
-typedef struct _ATOM_FAKE_EDID_PATCH_RECORD {
- UCHAR ucRecordType;
- UCHAR ucFakeEDIDLength;
- UCHAR ucFakeEDIDString[1]; /* This actually has ucFakeEdidLength elements. */
+typedef struct _ATOM_FAKE_EDID_PATCH_RECORD
+{
+ UCHAR ucRecordType;
+ UCHAR ucFakeEDIDLength;
+ UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEdidLength elements.
} ATOM_FAKE_EDID_PATCH_RECORD;
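
Because ucFakeEDIDString is declared with one element but actually holds ucFakeEDIDLength bytes, a record walker has to size this record dynamically; a sketch, assuming the usual byte packing of these tables (hypothetical helper):

    static USHORT fake_edid_record_size(const ATOM_FAKE_EDID_PATCH_RECORD *r)
    {
        /* fixed part minus the one placeholder byte, plus the real string */
        return sizeof(ATOM_FAKE_EDID_PATCH_RECORD) - 1 + r->ucFakeEDIDLength;
    }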
-typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD {
- UCHAR ucRecordType;
- USHORT usHSize;
- USHORT usVSize;
-} ATOM_PANEL_RESOLUTION_PATCH_RECORD;
+typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD
+{
+ UCHAR ucRecordType;
+ USHORT usHSize;
+ USHORT usVSize;
+}ATOM_PANEL_RESOLUTION_PATCH_RECORD;
#define LCD_MODE_PATCH_RECORD_MODE_TYPE 1
#define LCD_RTS_RECORD_TYPE 2
@@ -2306,21 +2829,25 @@ typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD {
/****************************Spread Spectrum Info Table Definitions **********************/
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=2 */
-typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
- USHORT usSpreadSpectrumPercentage;
- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
- UCHAR ucSS_Step;
- UCHAR ucSS_Delay;
- UCHAR ucSS_Id;
- UCHAR ucRecommendedRef_Div;
- UCHAR ucSS_Range; /* it was reserved for V11 */
-} ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Bit2=1: PCIE REFCLK SS =0 internal PPLL SS Others:TBD
+ UCHAR ucSS_Step;
+ UCHAR ucSS_Delay;
+ UCHAR ucSS_Id;
+ UCHAR ucRecommendedRef_Div;
+ UCHAR ucSS_Range; //it was reserved for V11
+}ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
#define ATOM_MAX_SS_ENTRY 16
-#define ATOM_DP_SS_ID1 0x0f1 /* SS modulation freq=30k */
-#define ATOM_DP_SS_ID2 0x0f2 /* SS modulation freq=33k */
+#define ATOM_DP_SS_ID1 0x0f1 // SS ID for internal DP stream at 2.7Ghz. if ATOM_DP_SS_ID2 does not exist in SS_InfoTable, it is used for internal DP stream at 1.62Ghz as well.
+#define ATOM_DP_SS_ID2 0x0f2 // SS ID for internal DP stream at 1.62Ghz, if it exists in SS_InfoTable.
+#define ATOM_LVLINK_2700MHz_SS_ID 0x0f3 // SS ID for LV link translator chip at 2.7Ghz
+#define ATOM_LVLINK_1620MHz_SS_ID 0x0f4 // SS ID for LV link translator chip at 1.62Ghz
+
#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
@@ -2329,29 +2856,30 @@ typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
#define ATOM_INTERNAL_SS_MASK 0x00000000
#define ATOM_EXTERNAL_SS_MASK 0x00000002
#define EXEC_SS_STEP_SIZE_SHIFT 2
-#define EXEC_SS_DELAY_SHIFT 4
+#define EXEC_SS_DELAY_SHIFT 4
#define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4
-typedef struct _ATOM_SPREAD_SPECTRUM_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY];
-} ATOM_SPREAD_SPECTRUM_INFO;
-
-/****************************************************************************/
-/* Structure used in AnalogTV_InfoTable (Top level) */
-/****************************************************************************/
-/* ucTVBootUpDefaultStd definiton: */
-
-/* ATOM_TV_NTSC 1 */
-/* ATOM_TV_NTSCJ 2 */
-/* ATOM_TV_PAL 3 */
-/* ATOM_TV_PALM 4 */
-/* ATOM_TV_PALCN 5 */
-/* ATOM_TV_PALN 6 */
-/* ATOM_TV_PAL60 7 */
-/* ATOM_TV_SECAM 8 */
-
-/* ucTVSuppportedStd definition: */
+typedef struct _ATOM_SPREAD_SPECTRUM_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY];
+}ATOM_SPREAD_SPECTRUM_INFO;
+
+/****************************************************************************/
+// Structure used in AnalogTV_InfoTable (Top level)
+/****************************************************************************/
+//ucTVBootUpDefaultStd definition:
+
+//ATOM_TV_NTSC 1
+//ATOM_TV_NTSCJ 2
+//ATOM_TV_PAL 3
+//ATOM_TV_PALM 4
+//ATOM_TV_PALCN 5
+//ATOM_TV_PALN 6
+//ATOM_TV_PAL60 7
+//ATOM_TV_SECAM 8
+
+//ucTVSupportedStd definition:
#define NTSC_SUPPORT 0x1
#define NTSCJ_SUPPORT 0x2
@@ -2364,46 +2892,58 @@ typedef struct _ATOM_SPREAD_SPECTRUM_INFO {
#define MAX_SUPPORTED_TV_TIMING 2
-typedef struct _ATOM_ANALOG_TV_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucTV_SupportedStandard;
- UCHAR ucTV_BootUpDefaultStandard;
- UCHAR ucExt_TV_ASIC_ID;
- UCHAR ucExt_TV_ASIC_SlaveAddr;
- /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; */
- ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING];
-} ATOM_ANALOG_TV_INFO;
+typedef struct _ATOM_ANALOG_TV_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucTV_SupportedStandard;
+ UCHAR ucTV_BootUpDefaultStandard;
+ UCHAR ucExt_TV_ASIC_ID;
+ UCHAR ucExt_TV_ASIC_SlaveAddr;
+ /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];*/
+ ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING];
+}ATOM_ANALOG_TV_INFO;
#define MAX_SUPPORTED_TV_TIMING_V1_2 3
-typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucTV_SupportedStandard;
- UCHAR ucTV_BootUpDefaultStandard;
- UCHAR ucExt_TV_ASIC_ID;
- UCHAR ucExt_TV_ASIC_SlaveAddr;
- ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];
-} ATOM_ANALOG_TV_INFO_V1_2;
+typedef struct _ATOM_ANALOG_TV_INFO_V1_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucTV_SupportedStandard;
+ UCHAR ucTV_BootUpDefaultStandard;
+ UCHAR ucExt_TV_ASIC_ID;
+ UCHAR ucExt_TV_ASIC_SlaveAddr;
+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];
+}ATOM_ANALOG_TV_INFO_V1_2;
+
+typedef struct _ATOM_DPCD_INFO
+{
+ UCHAR ucRevisionNumber; //10h : Revision 1.0; 11h : Revision 1.1
+ UCHAR ucMaxLinkRate; //06h : 1.62Gbps per lane; 0Ah = 2.7Gbps per lane
+ UCHAR ucMaxLane; //Bits 4:0 = MAX_LANE_COUNT (1/2/4). Bit 7 = ENHANCED_FRAME_CAP
+ UCHAR ucMaxDownSpread; //Bit0 = 0: No Down spread; Bit0 = 1: 0.5% (Subject to change according to DP spec)
+}ATOM_DPCD_INFO;
+
+#define ATOM_DPCD_MAX_LANE_MASK 0x1F
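
A sketch of reading the ATOM_DPCD_INFO fields per the comments above ("dpcd" is a hypothetical populated instance):

    UCHAR lanes    = dpcd.ucMaxLane & ATOM_DPCD_MAX_LANE_MASK;   /* MAX_LANE_COUNT: 1/2/4     */
    UCHAR enhanced = (dpcd.ucMaxLane & 0x80) != 0;               /* bit 7: ENHANCED_FRAME_CAP */
    /* dpcd.ucMaxLinkRate: 06h = 1.62Gbps per lane, 0Ah = 2.7Gbps per lane */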
/**************************************************************************/
-/* VRAM usage and their definitions */
+// VRAM usage and their definitions
-/* One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. */
-/* Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. */
-/* All the addresses below are the offsets from the frame buffer start.They all MUST be Dword aligned! */
-/* To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR */
-/* To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX */
+// One chunk of VRAM used by Bios is for HWICON surfaces, EDID data,
+// current Mode timing and Dail Timing and/or STD timing data for EACH device. They can be broken down as below.
+// All the addresses below are the offsets from the frame buffer start. They all MUST be Dword aligned!
+// To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR
+// To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX
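
A sketch of the address computation described above for one of the ATOM_x_ADDR offsets defined below ("mm_fb_start" is a hypothetical name for the 4K-aligned frame buffer start):

    /* driver-visible physical address of, e.g., the LCD1 EDID block */
    ULONG lcd1_edid_phys = mm_fb_start + ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_LCD1_EDID_ADDR;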
#ifndef VESA_MEMORY_IN_64K_BLOCK
-#define VESA_MEMORY_IN_64K_BLOCK 0x100 /* 256*64K=16Mb (Max. VESA memory is 16Mb!) */
+#define VESA_MEMORY_IN_64K_BLOCK 0x100 //256*64K=16Mb (Max. VESA memory is 16Mb!)
#endif
-#define ATOM_EDID_RAW_DATASIZE 256 /* In Bytes */
-#define ATOM_HWICON_SURFACE_SIZE 4096 /* In Bytes */
+#define ATOM_EDID_RAW_DATASIZE 256 //In Bytes
+#define ATOM_HWICON_SURFACE_SIZE 4096 //In Bytes
#define ATOM_HWICON_INFOTABLE_SIZE 32
#define MAX_DTD_MODE_IN_VRAM 6
-#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) /* 28= (SIZEOF ATOM_DTD_FORMAT) */
-#define ATOM_STD_MODE_SUPPORT_TBL_SIZE (32*8) /* 32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) */
+#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT)
+#define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
#define DFP_ENCODER_TYPE_OFFSET 0x80
#define DP_ENCODER_LANE_NUM_OFFSET 0x84
#define DP_ENCODER_LINK_RATE_OFFSET 0x88
@@ -2417,7 +2957,7 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
-#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
@@ -2431,13 +2971,13 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
#define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
-#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_TV2_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_TV2_DTD_MODE_TBL_ADDR (ATOM_TV2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
-#define ATOM_TV2_STD_MODE_TBL_ADDR (ATOM_TV2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP6_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP6_DTD_MODE_TBL_ADDR (ATOM_DFP6_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP6_STD_MODE_TBL_ADDR (ATOM_DFP6_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_DFP2_EDID_ADDR (ATOM_TV2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP2_EDID_ADDR (ATOM_DFP6_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
@@ -2457,533 +2997,850 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 256)
-#define ATOM_STACK_STORAGE_END (ATOM_STACK_STORAGE_START + 512)
+#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256)
+#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START+512
-/* The size below is in Kb! */
+//The size below is in Kb!
#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
-
+
#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L
#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0
-/***********************************************************************************/
-/* Structure used in VRAM_UsageByFirmwareTable */
-/* Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm */
-/* at running time. */
-/* note2: From RV770, the memory is more than 32bit addressable, so we will change */
-/* ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains */
-/* exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware */
-/* (in offset to start of memory address) is KB aligned instead of byte aligend. */
-/***********************************************************************************/
+/***********************************************************************************/
+// Structure used in VRAM_UsageByFirmwareTable
+// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm
+// at running time.
+// note2: From RV770, the memory is more than 32bit addressable, so we will change
+// ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains
+// exactly the same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
+// (in offset to start of memory address) is KB aligned instead of byte aligned.
+/***********************************************************************************/
+// Note3:
+/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA or non VGA adapters.
+For CAIL, the size of the FB access area is known; the only thing missing is the offset of the FB access area, so we can have:
+
+If (ulStartAddrUsedByFirmware!=0)
+FBAccessAreaOffset= ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB;
+Reserved area has been claimed by VBIOS including this FB access area; CAIL doesn't need to reserve any extra area for this purpose
+else //Non VGA case
+ if (FB_Size<=2Gb)
+ FBAccessAreaOffset= FB_Size - usFBUsedbyDrvInKB;
+ else
+ FBAccessAreaOffset= Aper_Size - usFBUsedbyDrvInKB
+
+CAIL needs to claim a reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in the non VGA case.*/
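
A C rendering of the pseudocode above, assuming all quantities are expressed in KB (function and parameter names are hypothetical):

    static ULONG fb_access_area_offset_kb(ULONG start_addr_used_by_fw,
                                          USHORT fb_used_by_drv,
                                          ULONG fb_size, ULONG aper_size)
    {
        if (start_addr_used_by_fw != 0)     /* VGA case: VBIOS reserved it */
            return start_addr_used_by_fw - fb_used_by_drv;
        if (fb_size <= 2 * 1024 * 1024)     /* non VGA case, FB <= 2Gb     */
            return fb_size - fb_used_by_drv;
        return aper_size - fb_used_by_drv;  /* non VGA case, FB > 2Gb      */
    }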
+
#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1
-typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO {
- ULONG ulStartAddrUsedByFirmware;
- USHORT usFirmwareUseInKb;
- USHORT usReserved;
-} ATOM_FIRMWARE_VRAM_RESERVE_INFO;
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
+{
+ ULONG ulStartAddrUsedByFirmware;
+ USHORT usFirmwareUseInKb;
+ USHORT usReserved;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO;
-typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_FIRMWARE_VRAM_RESERVE_INFO
- asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
-} ATOM_VRAM_USAGE_BY_FIRMWARE;
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_FIRMWARE_VRAM_RESERVE_INFO asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE;
-/****************************************************************************/
-/* Structure used in GPIO_Pin_LUTTable */
-/****************************************************************************/
-typedef struct _ATOM_GPIO_PIN_ASSIGNMENT {
- USHORT usGpioPin_AIndex;
- UCHAR ucGpioPinBitShift;
- UCHAR ucGPIO_ID;
-} ATOM_GPIO_PIN_ASSIGNMENT;
+// change version to 1.5, to allow the driver to allocate the vram area for command table access.
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5
+{
+ ULONG ulStartAddrUsedByFirmware;
+ USHORT usFirmwareUseInKb;
+ USHORT usFBUsedByDrvInKb;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5;
-typedef struct _ATOM_GPIO_PIN_LUT {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];
-} ATOM_GPIO_PIN_LUT;
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5;
+
+/****************************************************************************/
+// Structure used in GPIO_Pin_LUTTable
+/****************************************************************************/
+typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
+{
+ USHORT usGpioPin_AIndex;
+ UCHAR ucGpioPinBitShift;
+ UCHAR ucGPIO_ID;
+}ATOM_GPIO_PIN_ASSIGNMENT;
-/****************************************************************************/
-/* Structure used in ComponentVideoInfoTable */
-/****************************************************************************/
+typedef struct _ATOM_GPIO_PIN_LUT
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];
+}ATOM_GPIO_PIN_LUT;
+
+/****************************************************************************/
+// Structure used in ComponentVideoInfoTable
+/****************************************************************************/
#define GPIO_PIN_ACTIVE_HIGH 0x1
#define MAX_SUPPORTED_CV_STANDARDS 5
-/* definitions for ATOM_D_INFO.ucSettings */
-#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F /* [4:0] */
-#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 /* [6:5] = must be zeroed out */
-#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 /* [7] */
+// definitions for ATOM_D_INFO.ucSettings
+#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F // [4:0]
+#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 // [6:5] = must be zeroed out
+#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 // [7]
-typedef struct _ATOM_GPIO_INFO {
- USHORT usAOffset;
- UCHAR ucSettings;
- UCHAR ucReserved;
-} ATOM_GPIO_INFO;
+typedef struct _ATOM_GPIO_INFO
+{
+ USHORT usAOffset;
+ UCHAR ucSettings;
+ UCHAR ucReserved;
+}ATOM_GPIO_INFO;
-/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) */
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector)
#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2
-/* definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i */
-#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 /* [7]; */
-#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F /* [6:0] */
-
-/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode */
-/* Line 3 out put 5V. */
-#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 /* represent gpio 3 state for 16:9 */
-#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 /* represent gpio 4 state for 16:9 */
-#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0
-
-/* Line 3 out put 2.2V */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 /* represent gpio 3 state for 4:3 Letter box */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 /* represent gpio 4 state for 4:3 Letter box */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2
-
-/* Line 3 out put 0V */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 /* represent gpio 3 state for 4:3 */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 /* represent gpio 4 state for 4:3 */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4
-
-#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F /* bit [5:0] */
-
-#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 /* bit 7 */
-
-/* GPIO bit index in gpio setting per mode value, also represend the block no. in gpio blocks. */
-#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 /* bit 3 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */
-#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 /* bit 4 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */
-
-typedef struct _ATOM_COMPONENT_VIDEO_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMask_PinRegisterIndex;
- USHORT usEN_PinRegisterIndex;
- USHORT usY_PinRegisterIndex;
- USHORT usA_PinRegisterIndex;
- UCHAR ucBitShift;
- UCHAR ucPinActiveState; /* ucPinActiveState: Bit0=1 active high, =0 active low */
- ATOM_DTD_FORMAT sReserved; /* must be zeroed out */
- UCHAR ucMiscInfo;
- UCHAR uc480i;
- UCHAR uc480p;
- UCHAR uc720p;
- UCHAR uc1080i;
- UCHAR ucLetterBoxMode;
- UCHAR ucReserved[3];
- UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zere, NTSC type connector */
- ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
- ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
-} ATOM_COMPONENT_VIDEO_INFO;
-
-/* ucTableFormatRevision=2 */
-/* ucTableContentRevision=1 */
-typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucMiscInfo;
- UCHAR uc480i;
- UCHAR uc480p;
- UCHAR uc720p;
- UCHAR uc1080i;
- UCHAR ucReserved;
- UCHAR ucLetterBoxMode;
- UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zere, NTSC type connector */
- ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
- ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
-} ATOM_COMPONENT_VIDEO_INFO_V21;
+// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i
+#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 //[7];
+#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F //[6:0]
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode
+//Line 3 out put 5V.
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 //represent gpio 3 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 //represent gpio 4 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0
+
+//Line 3 out put 2.2V
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 //represent gpio 3 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 //represent gpio 4 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2
+
+//Line 3 out put 0V
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 //represent gpio 3 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 //represent gpio 4 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4
+
+#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F // bit [5:0]
+
+#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 //bit 7
+
+//GPIO bit index in gpio setting per mode value, also represents the block no. in gpio blocks.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
+
+
+typedef struct _ATOM_COMPONENT_VIDEO_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMask_PinRegisterIndex;
+ USHORT usEN_PinRegisterIndex;
+ USHORT usY_PinRegisterIndex;
+ USHORT usA_PinRegisterIndex;
+ UCHAR ucBitShift;
+ UCHAR ucPinActiveState; //ucPinActiveState: Bit0=1 active high, =0 active low
+ ATOM_DTD_FORMAT sReserved; // must be zeroed out
+ UCHAR ucMiscInfo;
+ UCHAR uc480i;
+ UCHAR uc480p;
+ UCHAR uc720p;
+ UCHAR uc1080i;
+ UCHAR ucLetterBoxMode;
+ UCHAR ucReserved[3];
+ UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+ ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucMiscInfo;
+ UCHAR uc480i;
+ UCHAR uc480p;
+ UCHAR uc720p;
+ UCHAR uc1080i;
+ UCHAR ucReserved;
+ UCHAR ucLetterBoxMode;
+ UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+ ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO_V21;
#define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21
-/****************************************************************************/
-/* Structure used in object_InfoTable */
-/****************************************************************************/
-typedef struct _ATOM_OBJECT_HEADER {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usDeviceSupport;
- USHORT usConnectorObjectTableOffset;
- USHORT usRouterObjectTableOffset;
- USHORT usEncoderObjectTableOffset;
- USHORT usProtectionObjectTableOffset; /* only available when Protection block is independent. */
- USHORT usDisplayPathTableOffset;
-} ATOM_OBJECT_HEADER;
-
-typedef struct _ATOM_DISPLAY_OBJECT_PATH {
- USHORT usDeviceTag; /* supported device */
- USHORT usSize; /* the size of ATOM_DISPLAY_OBJECT_PATH */
- USHORT usConnObjectId; /* Connector Object ID */
- USHORT usGPUObjectId; /* GPU ID */
- USHORT usGraphicObjIds[1]; /* 1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. */
-} ATOM_DISPLAY_OBJECT_PATH;
-
-typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE {
- UCHAR ucNumOfDispPath;
- UCHAR ucVersion;
- UCHAR ucPadding[2];
- ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
-} ATOM_DISPLAY_OBJECT_PATH_TABLE;
-
-typedef struct _ATOM_OBJECT /* each object has this structure */
-{
- USHORT usObjectID;
- USHORT usSrcDstTableOffset;
- USHORT usRecordOffset; /* this pointing to a bunch of records defined below */
- USHORT usReserved;
-} ATOM_OBJECT;
-
-typedef struct _ATOM_OBJECT_TABLE /* Above 4 object table offset pointing to a bunch of objects all have this structure */
-{
- UCHAR ucNumberOfObjects;
- UCHAR ucPadding[3];
- ATOM_OBJECT asObjects[1];
-} ATOM_OBJECT_TABLE;
-
-typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT /* usSrcDstTableOffset pointing to this structure */
-{
- UCHAR ucNumberOfSrc;
- USHORT usSrcObjectID[1];
- UCHAR ucNumberOfDst;
- USHORT usDstObjectID[1];
-} ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
-
-/* Related definitions, all records are differnt but they have a commond header */
-typedef struct _ATOM_COMMON_RECORD_HEADER {
- UCHAR ucRecordType; /* An emun to indicate the record type */
- UCHAR ucRecordSize; /* The size of the whole record in byte */
-} ATOM_COMMON_RECORD_HEADER;
-
-#define ATOM_I2C_RECORD_TYPE 1
+/****************************************************************************/
+// Structure used in object_InfoTable
+/****************************************************************************/
+typedef struct _ATOM_OBJECT_HEADER
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ USHORT usConnectorObjectTableOffset;
+ USHORT usRouterObjectTableOffset;
+ USHORT usEncoderObjectTableOffset;
+ USHORT usProtectionObjectTableOffset; //only available when Protection block is independent.
+ USHORT usDisplayPathTableOffset;
+}ATOM_OBJECT_HEADER;
+
+typedef struct _ATOM_OBJECT_HEADER_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ USHORT usConnectorObjectTableOffset;
+ USHORT usRouterObjectTableOffset;
+ USHORT usEncoderObjectTableOffset;
+ USHORT usProtectionObjectTableOffset; //only available when Protection block is independent.
+ USHORT usDisplayPathTableOffset;
+ USHORT usMiscObjectTableOffset;
+}ATOM_OBJECT_HEADER_V3;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH
+{
+ USHORT usDeviceTag; //supported device
+ USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH
+ USHORT usConnObjectId; //Connector Object ID
+ USHORT usGPUObjectId; //GPU ID
+ USHORT usGraphicObjIds[1]; //1st Encoder Obj sourced from GPU to last Graphic Obj destined to connector.
+}ATOM_DISPLAY_OBJECT_PATH;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
+{
+ UCHAR ucNumOfDispPath;
+ UCHAR ucVersion;
+ UCHAR ucPadding[2];
+ ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
+}ATOM_DISPLAY_OBJECT_PATH_TABLE;
+
+
+typedef struct _ATOM_OBJECT //each object has this structure
+{
+ USHORT usObjectID;
+ USHORT usSrcDstTableOffset;
+ USHORT usRecordOffset; //this points to a bunch of records defined below
+ USHORT usReserved;
+}ATOM_OBJECT;
+
+typedef struct _ATOM_OBJECT_TABLE //Each of the 4 object table offsets above points to a set of objects, all of which have this structure
+{
+ UCHAR ucNumberOfObjects;
+ UCHAR ucPadding[3];
+ ATOM_OBJECT asObjects[1];
+}ATOM_OBJECT_TABLE;
+
+typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset points to this structure
+{
+ UCHAR ucNumberOfSrc;
+ USHORT usSrcObjectID[1];
+ UCHAR ucNumberOfDst;
+ USHORT usDstObjectID[1];
+}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
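// A minimal sketch (not part of this header) of walking the variable-length
// src/dst table: the [1] arrays are placeholders, so the table is read byte by
// byte; values are assumed little-endian and possibly unaligned in the image.
static unsigned short src_dst_read_us(const unsigned char *p)
{
    return (unsigned short)(p[0] | (p[1] << 8));
}

static void walk_src_dst_table(const unsigned char *p)
{
    unsigned char num_src = p[0];
    unsigned char num_dst = p[1 + 2 * num_src];    // dst count follows the src IDs
    unsigned int i;

    for (i = 0; i < num_src; i++)
        src_dst_read_us(p + 1 + 2 * i);                  // source object ID
    for (i = 0; i < num_dst; i++)
        src_dst_read_us(p + 2 + 2 * num_src + 2 * i);    // destination object ID
}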
+
+
+//Two definitions below are for OPM on MXM module designs
+
+#define EXT_HPDPIN_LUTINDEX_0 0
+#define EXT_HPDPIN_LUTINDEX_1 1
+#define EXT_HPDPIN_LUTINDEX_2 2
+#define EXT_HPDPIN_LUTINDEX_3 3
+#define EXT_HPDPIN_LUTINDEX_4 4
+#define EXT_HPDPIN_LUTINDEX_5 5
+#define EXT_HPDPIN_LUTINDEX_6 6
+#define EXT_HPDPIN_LUTINDEX_7 7
+#define MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES (EXT_HPDPIN_LUTINDEX_7+1)
+
+#define EXT_AUXDDC_LUTINDEX_0 0
+#define EXT_AUXDDC_LUTINDEX_1 1
+#define EXT_AUXDDC_LUTINDEX_2 2
+#define EXT_AUXDDC_LUTINDEX_3 3
+#define EXT_AUXDDC_LUTINDEX_4 4
+#define EXT_AUXDDC_LUTINDEX_5 5
+#define EXT_AUXDDC_LUTINDEX_6 6
+#define EXT_AUXDDC_LUTINDEX_7 7
+#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1)
+
+typedef struct _EXT_DISPLAY_PATH
+{
+ USHORT usDeviceTag; //A bit vector to show what devices are supported
+ USHORT usDeviceACPIEnum; //16-bit device ACPI id.
+ USHORT usDeviceConnector; //A physical connector for displays to plug in, using object connector definitions
+ UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT
+ UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT
+ USHORT usExtEncoderObjId; //external encoder object id
+ USHORT usReserved[3];
+}EXT_DISPLAY_PATH;
+
+#define NUMBER_OF_UCHAR_FOR_GUID 16
+#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
+
+typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16-byte-long string
+ EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // a fixed total of 7 entries.
+ UCHAR ucChecksum; // a simple checksum: the byte sum of the whole structure must equal 0x0.
+ UCHAR Reserved [7]; // for potential expansion
+}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
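// Sketch (illustrative) of the checksum rule stated above: the byte sum of
// the whole table, including ucChecksum itself, must be 0 modulo 256.
static int ext_disp_conn_info_valid(const ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *info)
{
    const unsigned char *p = (const unsigned char *)info;
    unsigned char sum = 0;
    unsigned int i;

    for (i = 0; i < sizeof(*info); i++)
        sum += p[i];
    return sum == 0;
}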
+
+//Related definitions; all records are different but they have a common header
+typedef struct _ATOM_COMMON_RECORD_HEADER
+{
+ UCHAR ucRecordType; //An enum to indicate the record type
+ UCHAR ucRecordSize; //The size of the whole record in bytes
+}ATOM_COMMON_RECORD_HEADER;
+
+
+#define ATOM_I2C_RECORD_TYPE 1
#define ATOM_HPD_INT_RECORD_TYPE 2
#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE 3
#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE 4
-#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
-#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
+#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE 7
-#define ATOM_JTAG_RECORD_TYPE 8 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
+#define ATOM_JTAG_RECORD_TYPE 8 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE 9
#define ATOM_ENCODER_DVO_CF_RECORD_TYPE 10
#define ATOM_CONNECTOR_CF_RECORD_TYPE 11
#define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE 12
#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE 13
-#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14
-#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15
-
-/* Must be updated when new record type is added,equal to that record definition! */
-#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_CF_RECORD_TYPE
-
-typedef struct _ATOM_I2C_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- ATOM_I2C_ID_CONFIG sucI2cId;
- UCHAR ucI2CAddr; /* The slave address, it's 0 when the record is attached to connector for DDC */
-} ATOM_I2C_RECORD;
-
-typedef struct _ATOM_HPD_INT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
- UCHAR ucPlugged_PinState;
-} ATOM_HPD_INT_RECORD;
-
-typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucProtectionFlag;
- UCHAR ucReserved;
-} ATOM_OUTPUT_PROTECTION_RECORD;
-
-typedef struct _ATOM_CONNECTOR_DEVICE_TAG {
- ULONG ulACPIDeviceEnum; /* Reserved for now */
- USHORT usDeviceID; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT" */
- USHORT usPadding;
-} ATOM_CONNECTOR_DEVICE_TAG;
-
-typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucNumberOfDevice;
- UCHAR ucReserved;
- ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation */
-} ATOM_CONNECTOR_DEVICE_TAG_RECORD;
-
-typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucConfigGPIOID;
- UCHAR ucConfigGPIOState; /* Set to 1 when it's active high to enable external flow in */
- UCHAR ucFlowinGPIPID;
- UCHAR ucExtInGPIPID;
-} ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
-
-typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucCTL1GPIO_ID;
- UCHAR ucCTL1GPIOState; /* Set to 1 when it's active high */
- UCHAR ucCTL2GPIO_ID;
- UCHAR ucCTL2GPIOState; /* Set to 1 when it's active high */
- UCHAR ucCTL3GPIO_ID;
- UCHAR ucCTL3GPIOState; /* Set to 1 when it's active high */
- UCHAR ucCTLFPGA_IN_ID;
- UCHAR ucPadding[3];
-} ATOM_ENCODER_FPGA_CONTROL_RECORD;
-
-typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
- UCHAR ucTVActiveState; /* Indicating when the pin==0 or 1 when TV is connected */
-} ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
-
-typedef struct _ATOM_JTAG_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucTMSGPIO_ID;
- UCHAR ucTMSGPIOState; /* Set to 1 when it's active high */
- UCHAR ucTCKGPIO_ID;
- UCHAR ucTCKGPIOState; /* Set to 1 when it's active high */
- UCHAR ucTDOGPIO_ID;
- UCHAR ucTDOGPIOState; /* Set to 1 when it's active high */
- UCHAR ucTDIGPIO_ID;
- UCHAR ucTDIGPIOState; /* Set to 1 when it's active high */
- UCHAR ucPadding[2];
-} ATOM_JTAG_RECORD;
-
-/* The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually */
-typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR {
- UCHAR ucGPIOID; /* GPIO_ID, find the corresponding ID in GPIO_LUT table */
- UCHAR ucGPIO_PinState; /* Pin state showing how to set-up the pin */
-} ATOM_GPIO_PIN_CONTROL_PAIR;
-
-typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucFlags; /* Future expnadibility */
- UCHAR ucNumberOfPins; /* Number of GPIO pins used to control the object */
- ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; /* the real gpio pin pair determined by number of pins ucNumberOfPins */
-} ATOM_OBJECT_GPIO_CNTL_RECORD;
-
-/* Definitions for GPIO pin state */
+#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14
+#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15
+#define ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE 16 //This is for the case when connectors are not known to the object table
+#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to the object table
+#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
+#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
+
+
+//Must be updated when a new record type is added; keep it equal to that record's definition!
+#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE
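// Sketch (illustrative) of walking an object's record list: records are packed
// back to back, each starting with ATOM_COMMON_RECORD_HEADER; a zero type, a
// zero size or a type above ATOM_MAX_OBJECT_RECORD_NUMBER is assumed to end it.
static void walk_object_records(const unsigned char *records)
{
    const ATOM_COMMON_RECORD_HEADER *hdr;

    for (;;) {
        hdr = (const ATOM_COMMON_RECORD_HEADER *)records;
        if (hdr->ucRecordType == 0 || hdr->ucRecordSize == 0 ||
            hdr->ucRecordType > ATOM_MAX_OBJECT_RECORD_NUMBER)
            break;
        if (hdr->ucRecordType == ATOM_I2C_RECORD_TYPE) {
            // cast to ATOM_I2C_RECORD and consume it here
        }
        records += hdr->ucRecordSize;    // ucRecordSize covers the whole record
    }
}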
+
+typedef struct _ATOM_I2C_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_I2C_ID_CONFIG sucI2cId;
+ UCHAR ucI2CAddr; //The slave address, it's 0 when the record is attached to connector for DDC
+}ATOM_I2C_RECORD;
+
+typedef struct _ATOM_HPD_INT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucHPDIntGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info
+ UCHAR ucPlugged_PinState;
+}ATOM_HPD_INT_RECORD;
+
+
+typedef struct _ATOM_OUTPUT_PROTECTION_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucProtectionFlag;
+ UCHAR ucReserved;
+}ATOM_OUTPUT_PROTECTION_RECORD;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG
+{
+ ULONG ulACPIDeviceEnum; //Reserved for now
+ USHORT usDeviceID; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT"
+ USHORT usPadding;
+}ATOM_CONNECTOR_DEVICE_TAG;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucNumberOfDevice;
+ UCHAR ucReserved;
+ ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
+}ATOM_CONNECTOR_DEVICE_TAG_RECORD;
+
+
+typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucConfigGPIOID;
+ UCHAR ucConfigGPIOState; //Set to 1 when it's active high to enable external flow in
+ UCHAR ucFlowinGPIPID;
+ UCHAR ucExtInGPIPID;
+}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
+
+typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucCTL1GPIO_ID;
+ UCHAR ucCTL1GPIOState; //Set to 1 when it's active high
+ UCHAR ucCTL2GPIO_ID;
+ UCHAR ucCTL2GPIOState; //Set to 1 when it's active high
+ UCHAR ucCTL3GPIO_ID;
+ UCHAR ucCTL3GPIOState; //Set to 1 when it's active high
+ UCHAR ucCTLFPGA_IN_ID;
+ UCHAR ucPadding[3];
+}ATOM_ENCODER_FPGA_CONTROL_RECORD;
+
+typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info
+ UCHAR ucTVActiveState; //Indicates whether the pin is 0 or 1 when a TV is connected
+}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
+
+typedef struct _ATOM_JTAG_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucTMSGPIO_ID;
+ UCHAR ucTMSGPIOState; //Set to 1 when it's active high
+ UCHAR ucTCKGPIO_ID;
+ UCHAR ucTCKGPIOState; //Set to 1 when it's active high
+ UCHAR ucTDOGPIO_ID;
+ UCHAR ucTDOGPIOState; //Set to 1 when it's active high
+ UCHAR ucTDIGPIO_ID;
+ UCHAR ucTDIGPIOState; //Set to 1 when it's active high
+ UCHAR ucPadding[2];
+}ATOM_JTAG_RECORD;
+
+
+//The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually
+typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR
+{
+ UCHAR ucGPIOID; // GPIO_ID, find the corresponding ID in GPIO_LUT table
+ UCHAR ucGPIO_PinState; // Pin state showing how to set-up the pin
+}ATOM_GPIO_PIN_CONTROL_PAIR;
+
+typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucFlags; // Future expandability
+ UCHAR ucNumberOfPins; // Number of GPIO pins used to control the object
+ ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; // the real gpio pin pairs; the count is given by ucNumberOfPins
+}ATOM_OBJECT_GPIO_CNTL_RECORD;
+
+//Definitions for GPIO pin state
#define GPIO_PIN_TYPE_INPUT 0x00
#define GPIO_PIN_TYPE_OUTPUT 0x10
#define GPIO_PIN_TYPE_HW_CONTROL 0x20
-/* For GPIO_PIN_TYPE_OUTPUT the following is defined */
+//For GPIO_PIN_TYPE_OUTPUT the following is defined
#define GPIO_PIN_OUTPUT_STATE_MASK 0x01
#define GPIO_PIN_OUTPUT_STATE_SHIFT 0
#define GPIO_PIN_STATE_ACTIVE_LOW 0x0
#define GPIO_PIN_STATE_ACTIVE_HIGH 0x1
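// Sketch (illustrative) of decoding ucGPIO_PinState, assuming the pin type
// lives in the high nibble as the 0x00/0x10/0x20 values above suggest.
static int gpio_pin_drives_active_high(unsigned char pin_state)
{
    if ((pin_state & 0xF0) != GPIO_PIN_TYPE_OUTPUT)
        return 0;    // input or HW-controlled pin: no output state
    return ((pin_state & GPIO_PIN_OUTPUT_STATE_MASK) >>
            GPIO_PIN_OUTPUT_STATE_SHIFT) == GPIO_PIN_STATE_ACTIVE_HIGH;
}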
-typedef struct _ATOM_ENCODER_DVO_CF_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- ULONG ulStrengthControl; /* DVOA strength control for CF */
- UCHAR ucPadding[2];
-} ATOM_ENCODER_DVO_CF_RECORD;
+// Indexes to GPIO array in GLSync record
+#define ATOM_GPIO_INDEX_GLSYNC_REFCLK 0
+#define ATOM_GPIO_INDEX_GLSYNC_HSYNC 1
+#define ATOM_GPIO_INDEX_GLSYNC_VSYNC 2
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_REQ 3
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT 4
+#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
+#define ATOM_GPIO_INDEX_GLSYNC_V_RESET 6
+#define ATOM_GPIO_INDEX_GLSYNC_MAX 7
+
+typedef struct _ATOM_ENCODER_DVO_CF_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ULONG ulStrengthControl; // DVOA strength control for CF
+ UCHAR ucPadding[2];
+}ATOM_ENCODER_DVO_CF_RECORD;
-/* value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle */
+// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2
-typedef struct _ATOM_CONNECTOR_CF_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- USHORT usMaxPixClk;
- UCHAR ucFlowCntlGpioId;
- UCHAR ucSwapCntlGpioId;
- UCHAR ucConnectedDvoBundle;
- UCHAR ucPadding;
-} ATOM_CONNECTOR_CF_RECORD;
-
-typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- ATOM_DTD_FORMAT asTiming;
-} ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
-
-typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader; /* ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE */
- UCHAR ucSubConnectorType; /* CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A */
- UCHAR ucReserved;
-} ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
-
-typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucMuxType; /* decide the number of ucMuxState, =0, no pin state, =1: single state with complement, >1: multiple state */
- UCHAR ucMuxControlPin;
- UCHAR ucMuxState[2]; /* for alligment purpose */
-} ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
-
-typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucMuxType;
- UCHAR ucMuxControlPin;
- UCHAR ucMuxState[2]; /* for alligment purpose */
-} ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
-
-/* define ucMuxType */
+typedef struct _ATOM_CONNECTOR_CF_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usMaxPixClk;
+ UCHAR ucFlowCntlGpioId;
+ UCHAR ucSwapCntlGpioId;
+ UCHAR ucConnectedDvoBundle;
+ UCHAR ucPadding;
+}ATOM_CONNECTOR_CF_RECORD;
+
+typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_DTD_FORMAT asTiming;
+}ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
+
+typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader; //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE
+ UCHAR ucSubConnectorType; //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A
+ UCHAR ucReserved;
+}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
+
+
+typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucMuxType; //decides the number of ucMuxState entries: =0: no pin state, =1: single state with complement, >1: multiple states
+ UCHAR ucMuxControlPin;
+ UCHAR ucMuxState[2]; //for alignment purposes
+}ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
+
+typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucMuxType;
+ UCHAR ucMuxControlPin;
+ UCHAR ucMuxState[2]; //for alignment purposes
+}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
+
+// define ucMuxType
#define ATOM_ROUTER_MUX_PIN_STATE_MASK 0x0f
#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT 0x01
-/****************************************************************************/
-/* ASIC voltage data table */
-/****************************************************************************/
-typedef struct _ATOM_VOLTAGE_INFO_HEADER {
- USHORT usVDDCBaseLevel; /* In number of 50mv unit */
- USHORT usReserved; /* For possible extension table offset */
- UCHAR ucNumOfVoltageEntries;
- UCHAR ucBytesPerVoltageEntry;
- UCHAR ucVoltageStep; /* Indicating in how many mv increament is one step, 0.5mv unit */
- UCHAR ucDefaultVoltageEntry;
- UCHAR ucVoltageControlI2cLine;
- UCHAR ucVoltageControlAddress;
- UCHAR ucVoltageControlOffset;
-} ATOM_VOLTAGE_INFO_HEADER;
-
-typedef struct _ATOM_VOLTAGE_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_VOLTAGE_INFO_HEADER viHeader;
- UCHAR ucVoltageEntries[64]; /* 64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry */
-} ATOM_VOLTAGE_INFO;
-
-typedef struct _ATOM_VOLTAGE_FORMULA {
- USHORT usVoltageBaseLevel; /* In number of 1mv unit */
- USHORT usVoltageStep; /* Indicating in how many mv increament is one step, 1mv unit */
- UCHAR ucNumOfVoltageEntries; /* Number of Voltage Entry, which indicate max Voltage */
- UCHAR ucFlag; /* bit0=0 :step is 1mv =1 0.5mv */
- UCHAR ucBaseVID; /* if there is no lookup table, VID= BaseVID + ( Vol - BaseLevle ) /VoltageStep */
- UCHAR ucReserved;
- UCHAR ucVIDAdjustEntries[32]; /* 32 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries */
-} ATOM_VOLTAGE_FORMULA;
-
-typedef struct _ATOM_VOLTAGE_CONTROL {
- UCHAR ucVoltageControlId; /* Indicate it is controlled by I2C or GPIO or HW state machine */
- UCHAR ucVoltageControlI2cLine;
- UCHAR ucVoltageControlAddress;
- UCHAR ucVoltageControlOffset;
- USHORT usGpioPin_AIndex; /* GPIO_PAD register index */
- UCHAR ucGpioPinBitShift[9]; /* at most 8 pin support 255 VIDs, termintate with 0xff */
- UCHAR ucReserved;
-} ATOM_VOLTAGE_CONTROL;
-
-/* Define ucVoltageControlId */
+typedef struct _ATOM_CONNECTOR_HPDPIN_LUT_RECORD //record for ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucHPDPINMap[MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES]; //A fixed-size array which maps external pins to the internal GPIO_PIN_INFO table
+}ATOM_CONNECTOR_HPDPIN_LUT_RECORD;
+
+typedef struct _ATOM_CONNECTOR_AUXDDC_LUT_RECORD //record for ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_I2C_ID_CONFIG ucAUXDDCMap[MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES]; //A fixed-size array which maps external pins to an internal DDC ID
+}ATOM_CONNECTOR_AUXDDC_LUT_RECORD;
+
+typedef struct _ATOM_OBJECT_LINK_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usObjectID; //could be a connector, encoder or other object defined in object.h
+}ATOM_OBJECT_LINK_RECORD;
+
+typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usReserved;
+}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
+
+/****************************************************************************/
+// ASIC voltage data table
+/****************************************************************************/
+typedef struct _ATOM_VOLTAGE_INFO_HEADER
+{
+ USHORT usVDDCBaseLevel; //In units of 50mV
+ USHORT usReserved; //For possible extension table offset
+ UCHAR ucNumOfVoltageEntries;
+ UCHAR ucBytesPerVoltageEntry;
+ UCHAR ucVoltageStep; //Step size: how many mV one step increments, in units of 0.5mV
+ UCHAR ucDefaultVoltageEntry;
+ UCHAR ucVoltageControlI2cLine;
+ UCHAR ucVoltageControlAddress;
+ UCHAR ucVoltageControlOffset;
+}ATOM_VOLTAGE_INFO_HEADER;
+
+typedef struct _ATOM_VOLTAGE_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_INFO_HEADER viHeader;
+ UCHAR ucVoltageEntries[64]; //64 is only for allocation; the actual byte count is ucNumOfVoltageEntries*ucBytesPerVoltageEntry
+}ATOM_VOLTAGE_INFO;
+
+
+typedef struct _ATOM_VOLTAGE_FORMULA
+{
+ USHORT usVoltageBaseLevel; // In units of 1mV
+ USHORT usVoltageStep; // Step size, in units of 1mV
+ UCHAR ucNumOfVoltageEntries; // Number of voltage entries, which indicates the max voltage
+ UCHAR ucFlag; // bit0=0: step is 1mV, =1: 0.5mV
+ UCHAR ucBaseVID; // if there is no lookup table, VID = BaseVID + ( Vol - BaseLevel ) / VoltageStep
+ UCHAR ucReserved;
+ UCHAR ucVIDAdjustEntries[32]; // 32 is only for allocation; the actual number of entries is ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA;
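// Worked sketch (illustrative) of the formula quoted above: with no lookup
// table, VID = BaseVID + (Vol - BaseLevel) / VoltageStep; ucFlag bit0 is read
// here as "usVoltageStep is expressed in 0.5mV units", which is an assumption.
static unsigned char voltage_to_vid(const ATOM_VOLTAGE_FORMULA *f,
                                    unsigned short vol_mv)
{
    unsigned int delta = vol_mv - f->usVoltageBaseLevel;    // both in 1mV units

    if (f->ucFlag & 1)
        return f->ucBaseVID + (2 * delta) / f->usVoltageStep;   // 0.5mV steps
    return f->ucBaseVID + delta / f->usVoltageStep;             // 1mV steps
}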
+
+typedef struct _VOLTAGE_LUT_ENTRY
+{
+ USHORT usVoltageCode; // The Voltage ID, either GPIO or I2C code
+ USHORT usVoltageValue; // The corresponding Voltage Value, in mV
+}VOLTAGE_LUT_ENTRY;
+
+typedef struct _ATOM_VOLTAGE_FORMULA_V2
+{
+ UCHAR ucNumOfVoltageEntries; // Number of voltage entries, which indicates the max voltage
+ UCHAR ucReserved[3];
+ VOLTAGE_LUT_ENTRY asVIDAdjustEntries[32]; // 32 is only for allocation; the actual number of entries is in ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA_V2;
+
+typedef struct _ATOM_VOLTAGE_CONTROL
+{
+ UCHAR ucVoltageControlId; //Indicates whether it is controlled by I2C, GPIO or the HW state machine
+ UCHAR ucVoltageControlI2cLine;
+ UCHAR ucVoltageControlAddress;
+ UCHAR ucVoltageControlOffset;
+ USHORT usGpioPin_AIndex; //GPIO_PAD register index
+ UCHAR ucGpioPinBitShift[9]; //at most 8 pins support 255 VIDs; terminate with 0xff
+ UCHAR ucReserved;
+}ATOM_VOLTAGE_CONTROL;
+
+// Define ucVoltageControlId
#define VOLTAGE_CONTROLLED_BY_HW 0x00
#define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F
#define VOLTAGE_CONTROLLED_BY_GPIO 0x80
-#define VOLTAGE_CONTROL_ID_LM64 0x01 /* I2C control, used for R5xx Core Voltage */
-#define VOLTAGE_CONTROL_ID_DAC 0x02 /* I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI */
-#define VOLTAGE_CONTROL_ID_VT116xM 0x03 /* I2C control, used for R6xx Core Voltage */
-#define VOLTAGE_CONTROL_ID_DS4402 0x04
-
-typedef struct _ATOM_VOLTAGE_OBJECT {
- UCHAR ucVoltageType; /* Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI */
- UCHAR ucSize; /* Size of Object */
- ATOM_VOLTAGE_CONTROL asControl; /* describ how to control */
- ATOM_VOLTAGE_FORMULA asFormula; /* Indicate How to convert real Voltage to VID */
-} ATOM_VOLTAGE_OBJECT;
-
-typedef struct _ATOM_VOLTAGE_OBJECT_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_VOLTAGE_OBJECT asVoltageObj[3]; /* Info for Voltage control */
-} ATOM_VOLTAGE_OBJECT_INFO;
-
-typedef struct _ATOM_LEAKID_VOLTAGE {
- UCHAR ucLeakageId;
- UCHAR ucReserved;
- USHORT usVoltage;
-} ATOM_LEAKID_VOLTAGE;
-
-typedef struct _ATOM_ASIC_PROFILE_VOLTAGE {
- UCHAR ucProfileId;
- UCHAR ucReserved;
- USHORT usSize;
- USHORT usEfuseSpareStartAddr;
- USHORT usFuseIndex[8]; /* from LSB to MSB, Max 8bit,end of 0xffff if less than 8 efuse id, */
- ATOM_LEAKID_VOLTAGE asLeakVol[2]; /* Leakid and relatd voltage */
-} ATOM_ASIC_PROFILE_VOLTAGE;
-
-/* ucProfileId */
-#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1
+#define VOLTAGE_CONTROL_ID_LM64 0x01 //I2C control, used for R5xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
+#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DS4402 0x04
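// Sketch (illustrative) of classifying ucVoltageControlId with the masks
// above: bit 7 selects GPIO control; otherwise the low 7 bits name an I2C
// controller, with 0 (VOLTAGE_CONTROLLED_BY_HW) meaning the HW state machine.
static int voltage_is_gpio_controlled(unsigned char control_id)
{
    return (control_id & VOLTAGE_CONTROLLED_BY_GPIO) != 0;
}

static unsigned char voltage_i2c_controller(unsigned char control_id)
{
    return control_id & VOLTAGE_CONTROLLED_BY_I2C_MASK;    // e.g. VOLTAGE_CONTROL_ID_LM64
}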
+
+typedef struct _ATOM_VOLTAGE_OBJECT
+{
+ UCHAR ucVoltageType; //Indicates the voltage source: VDDC, MVDDC, MVDDQ or MVDDCI
+ UCHAR ucSize; //Size of the object
+ ATOM_VOLTAGE_CONTROL asControl; //describes how to control
+ ATOM_VOLTAGE_FORMULA asFormula; //indicates how to convert real voltage to VID
+}ATOM_VOLTAGE_OBJECT;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_V2
+{
+ UCHAR ucVoltageType; //Indicates the voltage source: VDDC, MVDDC, MVDDQ or MVDDCI
+ UCHAR ucSize; //Size of the object
+ ATOM_VOLTAGE_CONTROL asControl; //describes how to control
+ ATOM_VOLTAGE_FORMULA_V2 asFormula; //indicates how to convert real voltage to VID
+}ATOM_VOLTAGE_OBJECT_V2;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_OBJECT asVoltageObj[3]; //Info for Voltage control
+}ATOM_VOLTAGE_OBJECT_INFO;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_OBJECT_V2 asVoltageObj[3]; //Info for Voltage control
+}ATOM_VOLTAGE_OBJECT_INFO_V2;
+
+typedef struct _ATOM_LEAKID_VOLTAGE
+{
+ UCHAR ucLeakageId;
+ UCHAR ucReserved;
+ USHORT usVoltage;
+}ATOM_LEAKID_VOLTAGE;
+
+typedef struct _ATOM_ASIC_PROFILE_VOLTAGE
+{
+ UCHAR ucProfileId;
+ UCHAR ucReserved;
+ USHORT usSize;
+ USHORT usEfuseSpareStartAddr;
+ USHORT usFuseIndex[8]; //from LSB to MSB, max 8 bits; ends with 0xffff if there are fewer than 8 efuse ids
+ ATOM_LEAKID_VOLTAGE asLeakVol[2]; //Leakage id and related voltage
+}ATOM_ASIC_PROFILE_VOLTAGE;
+
+//ucProfileId
+#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1
#define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE 1
#define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE 2
-typedef struct _ATOM_ASIC_PROFILING_INFO {
- ATOM_COMMON_TABLE_HEADER asHeader;
- ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
-} ATOM_ASIC_PROFILING_INFO;
-
-typedef struct _ATOM_POWER_SOURCE_OBJECT {
- UCHAR ucPwrSrcId; /* Power source */
- UCHAR ucPwrSensorType; /* GPIO, I2C or none */
- UCHAR ucPwrSensId; /* if GPIO detect, it is GPIO id, if I2C detect, it is I2C id */
- UCHAR ucPwrSensSlaveAddr; /* Slave address if I2C detect */
- UCHAR ucPwrSensRegIndex; /* I2C register Index if I2C detect */
- UCHAR ucPwrSensRegBitMask; /* detect which bit is used if I2C detect */
- UCHAR ucPwrSensActiveState; /* high active or low active */
- UCHAR ucReserve[3]; /* reserve */
- USHORT usSensPwr; /* in unit of watt */
-} ATOM_POWER_SOURCE_OBJECT;
-
-typedef struct _ATOM_POWER_SOURCE_INFO {
- ATOM_COMMON_TABLE_HEADER asHeader;
- UCHAR asPwrbehave[16];
- ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
-} ATOM_POWER_SOURCE_INFO;
-
-/* Define ucPwrSrcId */
+typedef struct _ATOM_ASIC_PROFILING_INFO
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
+}ATOM_ASIC_PROFILING_INFO;
+
+typedef struct _ATOM_POWER_SOURCE_OBJECT
+{
+ UCHAR ucPwrSrcId; // Power source
+ UCHAR ucPwrSensorType; // GPIO, I2C or none
+ UCHAR ucPwrSensId; // if GPIO detect, it is the GPIO id; if I2C detect, it is the I2C id
+ UCHAR ucPwrSensSlaveAddr; // Slave address if I2C detect
+ UCHAR ucPwrSensRegIndex; // I2C register index if I2C detect
+ UCHAR ucPwrSensRegBitMask; // which bit to check if I2C detect
+ UCHAR ucPwrSensActiveState; // active high or active low
+ UCHAR ucReserve[3]; // reserved
+ USHORT usSensPwr; // in units of watts
+}ATOM_POWER_SOURCE_OBJECT;
+
+typedef struct _ATOM_POWER_SOURCE_INFO
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ UCHAR asPwrbehave[16];
+ ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
+}ATOM_POWER_SOURCE_INFO;
+
+
+//Define ucPwrSrcId
#define POWERSOURCE_PCIE_ID1 0x00
#define POWERSOURCE_6PIN_CONNECTOR_ID1 0x01
#define POWERSOURCE_8PIN_CONNECTOR_ID1 0x02
#define POWERSOURCE_6PIN_CONNECTOR_ID2 0x04
#define POWERSOURCE_8PIN_CONNECTOR_ID2 0x08
-/* define ucPwrSensorId */
+//define ucPwrSensorId
#define POWER_SENSOR_ALWAYS 0x00
#define POWER_SENSOR_GPIO 0x01
#define POWER_SENSOR_I2C 0x02
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock;
+ ULONG ulDentistVCOFreq;
+ ULONG ulBootUpUMAClock;
+ ULONG ulReserved1[8];
+ ULONG ulBootUpReqDisplayVector;
+ ULONG ulOtherDisplayMisc;
+ ULONG ulGPUCapInfo;
+ ULONG ulReserved2[3];
+ ULONG ulSystemConfig;
+ ULONG ulCPUCapInfo;
+ USHORT usMaxNBVoltage;
+ USHORT usMinNBVoltage;
+ USHORT usBootUpNBVoltage;
+ USHORT usExtDispConnInfoOffset;
+ UCHAR ucHtcTmpLmt;
+ UCHAR ucTjOffset;
+ UCHAR ucMemoryType;
+ UCHAR ucUMAChannelNumber;
+ ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];
+ ULONG ulCSR_M3_ARB_CNTL_UVD[10];
+ ULONG ulCSR_M3_ARB_CNTL_FS3D[10];
+ ULONG ulReserved3[42];
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V6;
+
+/**********************************************************************************************************************
+// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+//ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit.
+//ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
+//ulBootUpUMAClock: System memory boot up clock frequency in 10kHz units.
+//ulReserved1[8] Reserved for now, must be 0x0.
+//ulBootUpReqDisplayVector VBIOS boot up display IDs
+// ATOM_DEVICE_CRT1_SUPPORT 0x0001
+// ATOM_DEVICE_CRT2_SUPPORT 0x0010
+// ATOM_DEVICE_DFP1_SUPPORT 0x0008
+// ATOM_DEVICE_DFP6_SUPPORT 0x0040
+// ATOM_DEVICE_DFP2_SUPPORT 0x0080
+// ATOM_DEVICE_DFP3_SUPPORT 0x0200
+// ATOM_DEVICE_DFP4_SUPPORT 0x0400
+// ATOM_DEVICE_DFP5_SUPPORT 0x0800
+// ATOM_DEVICE_LCD1_SUPPORT 0x0002
+//ulOtherDisplayMisc Other display related flags, not defined yet.
+//ulGPUCapInfo TBD
+//ulReserved2[3] Must be 0x0 (reserved).
+//ulSystemConfig TBD
+//ulCPUCapInfo TBD
+//usMaxNBVoltage High NB voltage in units of mV, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
+//usMinNBVoltage Low NB voltage in units of mV, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
+//usBootUpNBVoltage Boot up NB voltage in units of mV.
+//ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register.
+//ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register; may not be needed.
+//ucMemoryType [3:0]=1: DDR1, =2: DDR2, =3: DDR3; [7:4] is reserved.
+//ucUMAChannelNumber System memory channel numbers.
+//usExtDispConnInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to the beginning of this table.
+//ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for the CSR M3 arbiter for the default case.
+//ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback.
+//ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+**********************************************************************************************************************/
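// Sketch (illustrative): the clock fields above are in 10kHz units, so a
// driver converts them like this.
static unsigned int v6_engine_clock_khz(const ATOM_INTEGRATED_SYSTEM_INFO_V6 *info)
{
    return info->ulBootUpEngineClock * 10;    // 10kHz units -> kHz
}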
+
/**************************************************************************/
-/* This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design */
-/* Memory SS Info Table */
-/* Define Memory Clock SS chip ID */
+// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
+//Memory SS Info Table
+//Define Memory Clock SS chip ID
#define ICS91719 1
#define ICS91720 2
-/* Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol */
-typedef struct _ATOM_I2C_DATA_RECORD {
- UCHAR ucNunberOfBytes; /* Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop" */
- UCHAR ucI2CData[1]; /* I2C data in bytes, should be less than 16 bytes usually */
-} ATOM_I2C_DATA_RECORD;
-
-/* Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information */
-typedef struct _ATOM_I2C_DEVICE_SETUP_INFO {
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* I2C line and HW/SW assisted cap. */
- UCHAR ucSSChipID; /* SS chip being used */
- UCHAR ucSSChipSlaveAddr; /* Slave Address to set up this SS chip */
- UCHAR ucNumOfI2CDataRecords; /* number of data block */
- ATOM_I2C_DATA_RECORD asI2CData[1];
-} ATOM_I2C_DEVICE_SETUP_INFO;
-
-/* ========================================================================================== */
-typedef struct _ATOM_ASIC_MVDD_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
-} ATOM_ASIC_MVDD_INFO;
-
-/* ========================================================================================== */
+//Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol
+typedef struct _ATOM_I2C_DATA_RECORD
+{
+ UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, besides "Start" and "Stop"
+ UCHAR ucI2CData[1]; //I2C data in bytes, should be less than 16 bytes usually
+}ATOM_I2C_DATA_RECORD;
+
+
+//Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information
+typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
+{
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //I2C line and HW/SW assisted cap.
+ UCHAR ucSSChipID; //SS chip being used
+ UCHAR ucSSChipSlaveAddr; //Slave Address to set up this SS chip
+ UCHAR ucNumOfI2CDataRecords; //number of data blocks
+ ATOM_I2C_DATA_RECORD asI2CData[1];
+}ATOM_I2C_DEVICE_SETUP_INFO;
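// Sketch (illustrative) of walking the variable-size I2C data records above:
// each record is one length byte followed by that many data bytes.
static void walk_i2c_data_records(const ATOM_I2C_DEVICE_SETUP_INFO *setup)
{
    const unsigned char *p = (const unsigned char *)setup->asI2CData;
    unsigned char i;

    for (i = 0; i < setup->ucNumOfI2CDataRecords; i++) {
        unsigned char len = p[0];    // ucNunberOfBytes of this block
        // p + 1 .. p + len is one block to write between "Start" and "Stop"
        p += 1 + len;
    }
}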
+
+//==========================================================================================
+typedef struct _ATOM_ASIC_MVDD_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
+}ATOM_ASIC_MVDD_INFO;
+
+//==========================================================================================
#define ATOM_MCLK_SS_INFO ATOM_ASIC_MVDD_INFO
-/* ========================================================================================== */
+//==========================================================================================
/**************************************************************************/
-typedef struct _ATOM_ASIC_SS_ASSIGNMENT {
- ULONG ulTargetClockRange; /* Clock Out frequence (VCO ), in unit of 10Khz */
- USHORT usSpreadSpectrumPercentage; /* in unit of 0.01% */
- USHORT usSpreadRateInKhz; /* in unit of kHz, modulation freq */
- UCHAR ucClockIndication; /* Indicate which clock source needs SS */
- UCHAR ucSpreadSpectrumMode; /* Bit1=0 Down Spread,=1 Center Spread. */
- UCHAR ucReserved[2];
-} ATOM_ASIC_SS_ASSIGNMENT;
-
-/* Define ucSpreadSpectrumType */
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT
+{
+ ULONG ulTargetClockRange; //Clock out frequency (VCO), in units of 10kHz
+ USHORT usSpreadSpectrumPercentage; //in units of 0.01%
+ USHORT usSpreadRateInKhz; //modulation frequency, in units of kHz
+ UCHAR ucClockIndication; //Indicates which clock source needs SS
+ UCHAR ucSpreadSpectrumMode; //Bit1=0 Down Spread, =1 Center Spread.
+ UCHAR ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT;
+
+//Define ucClockIndication; SW uses the IDs below to search whether SS is required/enabled on a clock branch/signal type.
+//SS is not required or enabled if a match is not found.
#define ASIC_INTERNAL_MEMORY_SS 1
#define ASIC_INTERNAL_ENGINE_SS 2
-#define ASIC_INTERNAL_UVD_SS 3
+#define ASIC_INTERNAL_UVD_SS 3
+#define ASIC_INTERNAL_SS_ON_TMDS 4
+#define ASIC_INTERNAL_SS_ON_HDMI 5
+#define ASIC_INTERNAL_SS_ON_LVDS 6
+#define ASIC_INTERNAL_SS_ON_DP 7
+#define ASIC_INTERNAL_SS_ON_DCPLL 8
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
+{
+ ULONG ulTargetClockRange; //For mem/engine/uvd, clock out frequency (VCO), in units of 10kHz
+ //For TMDS/HDMI/LVDS, it is the pixel clock; for DP, it is the link clock (27000 or 16200)
+ USHORT usSpreadSpectrumPercentage; //in units of 0.01%
+ USHORT usSpreadRateIn10Hz; //modulation frequency, in units of 10Hz
+ UCHAR ucClockIndication; //Indicates which clock source needs SS
+ UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread, =1 Center Spread; bit1=0: internal SS, =1: external SS
+ UCHAR ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V2;
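// Sketch (illustrative) of decoding ucSpreadSpectrumMode as documented above;
// the 0x01/0x02 masks mirror the commented-out ATOM_SS_* defines below.
static int ss_is_centre_spread(unsigned char mode)
{
    return (mode & 0x01) != 0;    // bit0=1: centre spread, =0: down spread
}

static int ss_is_external(unsigned char mode)
{
    return (mode & 0x02) != 0;    // bit1=1: external SS, =0: internal SS
}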
+
+//ucSpreadSpectrumMode
+//#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
+//#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
+//#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
+//#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001
+//#define ATOM_INTERNAL_SS_MASK 0x00000000
+//#define ATOM_EXTERNAL_SS_MASK 0x00000002
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
+}ATOM_ASIC_INTERNAL_SS_INFO;
-typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
-} ATOM_ASIC_INTERNAL_SS_INFO;
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[1]; //this is a pointer only.
+}ATOM_ASIC_INTERNAL_SS_INFO_V2;
-/* ==============================Scratch Pad Definition Portion=============================== */
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
+{
+ ULONG ulTargetClockRange; //For mem/engine/uvd, clock out frequency (VCO), in units of 10kHz
+ //For TMDS/HDMI/LVDS, it is the pixel clock; for DP, it is the link clock (27000 or 16200)
+ USHORT usSpreadSpectrumPercentage; //in units of 0.01%
+ USHORT usSpreadRateIn10Hz; //modulation frequency, in units of 10Hz
+ UCHAR ucClockIndication; //Indicates which clock source needs SS
+ UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread, =1 Center Spread; bit1=0: internal SS, =1: external SS
+ UCHAR ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V3;
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[1]; //this is a pointer only.
+}ATOM_ASIC_INTERNAL_SS_INFO_V3;
+
+
+//==============================Scratch Pad Definition Portion===============================
#define ATOM_DEVICE_CONNECT_INFO_DEF 0
#define ATOM_ROM_LOCATION_DEF 1
#define ATOM_TV_STANDARD_DEF 2
@@ -2995,7 +3852,8 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_I2C_CHANNEL_STATUS_DEF 8
#define ATOM_I2C_CHANNEL_STATUS1_DEF 9
-/* BIOS_0_SCRATCH Definition */
+
+// BIOS_0_SCRATCH Definition
#define ATOM_S0_CRT1_MONO 0x00000001L
#define ATOM_S0_CRT1_COLOR 0x00000002L
#define ATOM_S0_CRT1_MASK (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
@@ -3008,6 +3866,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S0_CV_DIN_A 0x00000020L
#define ATOM_S0_CV_MASK_A (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)
+
#define ATOM_S0_CRT2_MONO 0x00000100L
#define ATOM_S0_CRT2_COLOR 0x00000200L
#define ATOM_S0_CRT2_MASK (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
@@ -3025,28 +3884,27 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S0_DFP2 0x00020000L
#define ATOM_S0_LCD1 0x00040000L
#define ATOM_S0_LCD2 0x00080000L
-#define ATOM_S0_TV2 0x00100000L
-#define ATOM_S0_DFP3 0x00200000L
-#define ATOM_S0_DFP4 0x00400000L
-#define ATOM_S0_DFP5 0x00800000L
+#define ATOM_S0_DFP6 0x00100000L
+#define ATOM_S0_DFP3 0x00200000L
+#define ATOM_S0_DFP4 0x00400000L
+#define ATOM_S0_DFP5 0x00800000L
-#define ATOM_S0_DFP_MASK \
- (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5)
+#define ATOM_S0_DFP_MASK (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 | ATOM_S0_DFP6)
-#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L /* If set, indicates we are running a PCIE asic with */
- /* the FAD/HDP reg access bug. Bit is read by DAL */
+#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L // If set, indicates we are running a PCIE asic with
+ // the FAD/HDP reg access bug. Bit is read by DAL; obsolete starting with RV5xx
#define ATOM_S0_THERMAL_STATE_MASK 0x1C000000L
#define ATOM_S0_THERMAL_STATE_SHIFT 26
#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L
-#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29
+#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
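// Sketch (illustrative) of extracting the power-source field from
// BIOS_0_SCRATCH with the mask/shift pair defined above.
static int system_power_state_is_ac(unsigned int bios_0_scratch)
{
    unsigned int state = (bios_0_scratch & ATOM_S0_SYSTEM_POWER_STATE_MASK) >>
                         ATOM_S0_SYSTEM_POWER_STATE_SHIFT;

    return state == ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC;
}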
-/* Byte aligned definition for BIOS usage */
+//Byte aligned definition for BIOS usage
#define ATOM_S0_CRT1_MONOb0 0x01
#define ATOM_S0_CRT1_COLORb0 0x02
#define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
@@ -3076,8 +3934,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S0_DFP2b2 0x02
#define ATOM_S0_LCD1b2 0x04
#define ATOM_S0_LCD2b2 0x08
-#define ATOM_S0_TV2b2 0x10
-#define ATOM_S0_DFP3b2 0x20
+#define ATOM_S0_DFP6b2 0x10
+#define ATOM_S0_DFP3b2 0x20
+#define ATOM_S0_DFP4b2 0x40
+#define ATOM_S0_DFP5b2 0x80
+
#define ATOM_S0_THERMAL_STATE_MASKb3 0x1C
#define ATOM_S0_THERMAL_STATE_SHIFTb3 2
@@ -3085,43 +3946,20 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
#define ATOM_S0_LCD1_SHIFT 18
-/* BIOS_1_SCRATCH Definition */
+// BIOS_1_SCRATCH Definition
#define ATOM_S1_ROM_LOCATION_MASK 0x0000FFFFL
#define ATOM_S1_PCI_BUS_DEV_MASK 0xFFFF0000L
-/* BIOS_2_SCRATCH Definition */
+// BIOS_2_SCRATCH Definition
#define ATOM_S2_TV1_STANDARD_MASK 0x0000000FL
#define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L
#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8
-#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
-#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L
-#define ATOM_S2_TV1_DPMS_STATE 0x00040000L
-#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L
-#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L
-#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L
-#define ATOM_S2_TV2_DPMS_STATE 0x00400000L
-#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L
-#define ATOM_S2_CV_DPMS_STATE 0x01000000L
-#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L
-#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L
-#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L
-
-#define ATOM_S2_DFP_DPM_STATE \
- (ATOM_S2_DFP1_DPMS_STATE | ATOM_S2_DFP2_DPMS_STATE | \
- ATOM_S2_DFP3_DPMS_STATE | ATOM_S2_DFP4_DPMS_STATE | \
- ATOM_S2_DFP5_DPMS_STATE)
-
-#define ATOM_S2_DEVICE_DPMS_STATE \
- (ATOM_S2_CRT1_DPMS_STATE + ATOM_S2_LCD1_DPMS_STATE + \
- ATOM_S2_TV1_DPMS_STATE + ATOM_S2_DFP_DPMS_STATE + \
- ATOM_S2_CRT2_DPMS_STATE + ATOM_S2_LCD2_DPMS_STATE + \
- ATOM_S2_TV2_DPMS_STATE + ATOM_S2_CV_DPMS_STATE)
-
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK 0x0C000000L
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE 0x10000000L
+#define ATOM_S2_DEVICE_DPMS_STATE 0x00010000L
#define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L
#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE 0x0
@@ -3131,21 +3969,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L
-/* Byte aligned definition for BIOS usage */
+
+//Byte aligned definition for BIOS usage
#define ATOM_S2_TV1_STANDARD_MASKb0 0x0F
#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
-#define ATOM_S2_CRT1_DPMS_STATEb2 0x01
-#define ATOM_S2_LCD1_DPMS_STATEb2 0x02
-#define ATOM_S2_TV1_DPMS_STATEb2 0x04
-#define ATOM_S2_DFP1_DPMS_STATEb2 0x08
-#define ATOM_S2_CRT2_DPMS_STATEb2 0x10
-#define ATOM_S2_LCD2_DPMS_STATEb2 0x20
-#define ATOM_S2_TV2_DPMS_STATEb2 0x40
-#define ATOM_S2_DFP2_DPMS_STATEb2 0x80
-#define ATOM_S2_CV_DPMS_STATEb3 0x01
-#define ATOM_S2_DFP3_DPMS_STATEb3 0x02
-#define ATOM_S2_DFP4_DPMS_STATEb3 0x04
-#define ATOM_S2_DFP5_DPMS_STATEb3 0x08
+#define ATOM_S2_DEVICE_DPMS_STATEb2 0x01
#define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C
@@ -3153,21 +3981,22 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20
#define ATOM_S2_ROTATION_STATE_MASKb3 0xC0
-/* BIOS_3_SCRATCH Definition */
+
+// BIOS_3_SCRATCH Definition
#define ATOM_S3_CRT1_ACTIVE 0x00000001L
#define ATOM_S3_LCD1_ACTIVE 0x00000002L
#define ATOM_S3_TV1_ACTIVE 0x00000004L
#define ATOM_S3_DFP1_ACTIVE 0x00000008L
#define ATOM_S3_CRT2_ACTIVE 0x00000010L
#define ATOM_S3_LCD2_ACTIVE 0x00000020L
-#define ATOM_S3_TV2_ACTIVE 0x00000040L
+#define ATOM_S3_DFP6_ACTIVE 0x00000040L
#define ATOM_S3_DFP2_ACTIVE 0x00000080L
#define ATOM_S3_CV_ACTIVE 0x00000100L
#define ATOM_S3_DFP3_ACTIVE 0x00000200L
#define ATOM_S3_DFP4_ACTIVE 0x00000400L
#define ATOM_S3_DFP5_ACTIVE 0x00000800L
-#define ATOM_S3_DEVICE_ACTIVE_MASK 0x000003FFL
+#define ATOM_S3_DEVICE_ACTIVE_MASK 0x00000FFFL
#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE 0x00001000L
#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
@@ -3178,7 +4007,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S3_DFP1_CRTC_ACTIVE 0x00080000L
#define ATOM_S3_CRT2_CRTC_ACTIVE 0x00100000L
#define ATOM_S3_LCD2_CRTC_ACTIVE 0x00200000L
-#define ATOM_S3_TV2_CRTC_ACTIVE 0x00400000L
+#define ATOM_S3_DFP6_CRTC_ACTIVE 0x00400000L
#define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L
#define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L
#define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L
@@ -3187,17 +4016,18 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
#define ATOM_S3_ASIC_GUI_ENGINE_HUNG 0x20000000L
+//The two definitions below are not supported in pplib, only in the old powerplay in DAL
#define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L
#define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L
-/* Byte aligned definition for BIOS usage */
+//Byte aligned definition for BIOS usage
#define ATOM_S3_CRT1_ACTIVEb0 0x01
#define ATOM_S3_LCD1_ACTIVEb0 0x02
#define ATOM_S3_TV1_ACTIVEb0 0x04
#define ATOM_S3_DFP1_ACTIVEb0 0x08
#define ATOM_S3_CRT2_ACTIVEb0 0x10
#define ATOM_S3_LCD2_ACTIVEb0 0x20
-#define ATOM_S3_TV2_ACTIVEb0 0x40
+#define ATOM_S3_DFP6_ACTIVEb0 0x40
#define ATOM_S3_DFP2_ACTIVEb0 0x80
#define ATOM_S3_CV_ACTIVEb1 0x01
#define ATOM_S3_DFP3_ACTIVEb1 0x02
@@ -3212,7 +4042,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S3_DFP1_CRTC_ACTIVEb2 0x08
#define ATOM_S3_CRT2_CRTC_ACTIVEb2 0x10
#define ATOM_S3_LCD2_CRTC_ACTIVEb2 0x20
-#define ATOM_S3_TV2_CRTC_ACTIVEb2 0x40
+#define ATOM_S3_DFP6_CRTC_ACTIVEb2 0x40
#define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80
#define ATOM_S3_CV_CRTC_ACTIVEb3 0x01
#define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02
@@ -3221,35 +4051,31 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S3_ACTIVE_CRTC2w1 0xFFF
-#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20
-#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
-#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80
-
-/* BIOS_4_SCRATCH Definition */
+// BIOS_4_SCRATCH Definition
#define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL
#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L
#define ATOM_S4_LCD1_REFRESH_SHIFT 8
-/* Byte aligned definition for BIOS usage */
+//Byte aligned definition for BIOS usage
#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF
#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0
#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0
-/* BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!! */
+// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!!
#define ATOM_S5_DOS_REQ_CRT1b0 0x01
#define ATOM_S5_DOS_REQ_LCD1b0 0x02
#define ATOM_S5_DOS_REQ_TV1b0 0x04
#define ATOM_S5_DOS_REQ_DFP1b0 0x08
#define ATOM_S5_DOS_REQ_CRT2b0 0x10
#define ATOM_S5_DOS_REQ_LCD2b0 0x20
-#define ATOM_S5_DOS_REQ_TV2b0 0x40
+#define ATOM_S5_DOS_REQ_DFP6b0 0x40
#define ATOM_S5_DOS_REQ_DFP2b0 0x80
#define ATOM_S5_DOS_REQ_CVb1 0x01
#define ATOM_S5_DOS_REQ_DFP3b1 0x02
#define ATOM_S5_DOS_REQ_DFP4b1 0x04
#define ATOM_S5_DOS_REQ_DFP5b1 0x08
-#define ATOM_S5_DOS_REQ_DEVICEw0 0x03FF
+#define ATOM_S5_DOS_REQ_DEVICEw0 0x0FFF
#define ATOM_S5_DOS_REQ_CRT1 0x0001
#define ATOM_S5_DOS_REQ_LCD1 0x0002
@@ -3257,22 +4083,21 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S5_DOS_REQ_DFP1 0x0008
#define ATOM_S5_DOS_REQ_CRT2 0x0010
#define ATOM_S5_DOS_REQ_LCD2 0x0020
-#define ATOM_S5_DOS_REQ_TV2 0x0040
+#define ATOM_S5_DOS_REQ_DFP6 0x0040
#define ATOM_S5_DOS_REQ_DFP2 0x0080
#define ATOM_S5_DOS_REQ_CV 0x0100
-#define ATOM_S5_DOS_REQ_DFP3 0x0200
-#define ATOM_S5_DOS_REQ_DFP4 0x0400
-#define ATOM_S5_DOS_REQ_DFP5 0x0800
+#define ATOM_S5_DOS_REQ_DFP3 0x0200
+#define ATOM_S5_DOS_REQ_DFP4 0x0400
+#define ATOM_S5_DOS_REQ_DFP5 0x0800
#define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0
#define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0
#define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0
#define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1
-#define ATOM_S5_DOS_FORCE_DEVICEw1 \
- (ATOM_S5_DOS_FORCE_CRT1b2 + ATOM_S5_DOS_FORCE_TV1b2 + \
- ATOM_S5_DOS_FORCE_CRT2b2 + (ATOM_S5_DOS_FORCE_CVb3 << 8))
+#define ATOM_S5_DOS_FORCE_DEVICEw1 (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\
+ (ATOM_S5_DOS_FORCE_CVb3<<8))
-/* BIOS_6_SCRATCH Definition */
+// BIOS_6_SCRATCH Definition
#define ATOM_S6_DEVICE_CHANGE 0x00000001L
#define ATOM_S6_SCALER_CHANGE 0x00000002L
#define ATOM_S6_LID_CHANGE 0x00000004L
@@ -3285,11 +4110,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L
#define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L
#define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L
-#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L /* Normal expansion Request bit for LCD */
-#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L /* Aspect ratio expansion Request bit for LCD */
+#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L //Normal expansion Request bit for LCD
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L //Aspect ratio expansion Request bit for LCD
-#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L /* This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion */
-#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L /* This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion */
+#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion
+#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L //This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion
#define ATOM_S6_ACC_REQ_CRT1 0x00010000L
#define ATOM_S6_ACC_REQ_LCD1 0x00020000L
@@ -3297,7 +4122,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_ACC_REQ_DFP1 0x00080000L
#define ATOM_S6_ACC_REQ_CRT2 0x00100000L
#define ATOM_S6_ACC_REQ_LCD2 0x00200000L
-#define ATOM_S6_ACC_REQ_TV2 0x00400000L
+#define ATOM_S6_ACC_REQ_DFP6 0x00400000L
#define ATOM_S6_ACC_REQ_DFP2 0x00800000L
#define ATOM_S6_ACC_REQ_CV 0x01000000L
#define ATOM_S6_ACC_REQ_DFP3 0x02000000L
@@ -3310,7 +4135,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L
#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L
-/* Byte aligned definition for BIOS usage */
+//Byte aligned definition for BIOS usage
#define ATOM_S6_DEVICE_CHANGEb0 0x01
#define ATOM_S6_SCALER_CHANGEb0 0x02
#define ATOM_S6_LID_CHANGEb0 0x04
@@ -3320,11 +4145,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_LID_STATEb0 0x40
#define ATOM_S6_DOCK_STATEb0 0x80
#define ATOM_S6_CRITICAL_STATEb1 0x01
-#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02
+#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02
#define ATOM_S6_THERMAL_STATE_CHANGEb1 0x04
#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
-#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10
-#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
+#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
#define ATOM_S6_ACC_REQ_CRT1b2 0x01
#define ATOM_S6_ACC_REQ_LCD1b2 0x02
@@ -3332,12 +4157,12 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_ACC_REQ_DFP1b2 0x08
#define ATOM_S6_ACC_REQ_CRT2b2 0x10
#define ATOM_S6_ACC_REQ_LCD2b2 0x20
-#define ATOM_S6_ACC_REQ_TV2b2 0x40
+#define ATOM_S6_ACC_REQ_DFP6b2 0x40
#define ATOM_S6_ACC_REQ_DFP2b2 0x80
#define ATOM_S6_ACC_REQ_CVb3 0x01
-#define ATOM_S6_ACC_REQ_DFP3b3 0x02
-#define ATOM_S6_ACC_REQ_DFP4b3 0x04
-#define ATOM_S6_ACC_REQ_DFP5b3 0x08
+#define ATOM_S6_ACC_REQ_DFP3b3 0x02
+#define ATOM_S6_ACC_REQ_DFP4b3 0x04
+#define ATOM_S6_ACC_REQ_DFP5b3 0x08
#define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0
#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
@@ -3366,7 +4191,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30
#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31
-/* BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! */
+// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!!
#define ATOM_S7_DOS_MODE_TYPEb0 0x03
#define ATOM_S7_DOS_MODE_VGAb0 0x00
#define ATOM_S7_DOS_MODE_VESAb0 0x01
@@ -3378,220 +4203,194 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8
-/* BIOS_8_SCRATCH Definition */
+// BIOS_8_SCRATCH Definition
#define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x00000FFFF
-#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000
+#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000
#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0
#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16
-/* BIOS_9_SCRATCH Definition */
-#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
+// BIOS_9_SCRATCH Definition
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF
#endif
-#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000
#endif
-#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0
#endif
-#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16
#endif
+
#define ATOM_FLAG_SET 0x20
#define ATOM_FLAG_CLEAR 0
-#define CLEAR_ATOM_S6_ACC_MODE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
-#define SET_ATOM_S6_DEVICE_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_SCALER_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_LID_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
-
-#define SET_ATOM_S6_LID_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) |\
- ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_LID_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
-
-#define SET_ATOM_S6_DOCK_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8)| \
- ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_DOCK_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_DOCK_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
-
-#define SET_ATOM_S6_THERMAL_STATE_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
-
-#define SET_ATOM_S6_CRITICAL_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_CRITICAL_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
-
-#define SET_ATOM_S6_REQ_SCALER \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_REQ_SCALER \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
-
-#define SET_ATOM_S6_REQ_SCALER_ARATIO \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
-#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
-
-#define SET_ATOM_S6_I2C_STATE_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
-
-#define SET_ATOM_S6_DISPLAY_STATE_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
-
-#define SET_ATOM_S6_DEVICE_RECONFIG \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S0_LCD1 \
- ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 ) | \
- ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
-#define SET_ATOM_S7_DOS_8BIT_DAC_EN \
- ((ATOM_DOS_MODE_INFO_DEF << 8) | \
- ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
-#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN \
- ((ATOM_DOS_MODE_INFO_DEF << 8) | \
- ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
+#define CLEAR_ATOM_S6_ACC_MODE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
+#define SET_ATOM_S6_DEVICE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SCALER_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_LID_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
-/****************************************************************************/
-/* Portion II: Definitinos only used in Driver */
+#define SET_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_DOCK_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_THERMAL_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_I2C_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DISPLAY_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DEVICE_RECONFIG ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S0_LCD1 ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )| ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
+#define SET_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
+
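
Each SET_/CLEAR_ request word above packs three fields into one value: the scratch-register info index in bits [15:8], the bit shift in bits [4:0], and ATOM_FLAG_SET (0x20) vs. ATOM_FLAG_CLEAR. A minimal decoding sketch under that assumed layout; ATOM_ACC_CHANGE_INFO_DEF and the shift value below are illustrative stand-ins, not the header's real definitions:

#include <stdio.h>

#define ATOM_FLAG_SET   0x20
#define ATOM_FLAG_CLEAR 0

static void decode_scratch_request(unsigned int req)
{
    unsigned int info_index = req >> 8;   /* which BIOS_x_SCRATCH info block */
    unsigned int bit_shift  = req & 0x1F; /* bit position inside the register */
    int set = (req & ATOM_FLAG_SET) != 0; /* set vs. clear request */

    printf("info=%u shift=%u action=%s\n",
           info_index, bit_shift, set ? "set" : "clear");
}

int main(void)
{
    unsigned int ACC_CHANGE_INFO_DEF = 6; /* hypothetical index for BIOS_6_SCRATCH */
    unsigned int LID_STATE_SHIFT = 6;     /* assumed from ATOM_S6_LID_STATEb0 == 0x40 */

    decode_scratch_request((ACC_CHANGE_INFO_DEF << 8) | LID_STATE_SHIFT | ATOM_FLAG_SET);
    return 0;
}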
+/****************************************************************************/
+//Portion II: Definitions only used in Driver
/****************************************************************************/
-/* Macros used by driver */
+// Macros used by driver
+#ifdef __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) ((reinterpret_cast<char*>(&(static_cast<ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*>(0))->FieldName)-static_cast<char*>(0))/sizeof(USHORT))
-#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char *)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES *)0)->FieldName)-(char *)0)/sizeof(USHORT))
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
+#else // not __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))
#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
+#endif // __cplusplus
#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
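
GetIndexIntoMasterTable() is an offsetof()-style trick: a master list is just an array of USHORT table offsets, so a field's byte offset divided by sizeof(USHORT) is that table's index. A self-contained sketch; the demo struct is a hypothetical stand-in for the real ATOM_MASTER_LIST_OF_*_TABLES:

#include <stdio.h>
#include <stddef.h>

typedef unsigned short USHORT;

typedef struct {                     /* abridged, illustrative master list */
    USHORT UtilityPipeLine;          /* index 0 */
    USHORT MultimediaCapabilityInfo; /* index 1 */
    USHORT StandardVESA_Timing;      /* index 2 */
} MASTER_LIST_DEMO;

#define DEMO_INDEX(FieldName) \
    (offsetof(MASTER_LIST_DEMO, FieldName) / sizeof(USHORT))

int main(void)
{
    printf("StandardVESA_Timing index = %zu\n", (size_t)DEMO_INDEX(StandardVESA_Timing));
    return 0;
}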
-/****************************************************************************/
-/* Portion III: Definitinos only used in VBIOS */
+/****************************************************************************/
+//Portion III: Definitions only used in VBIOS
/****************************************************************************/
#define ATOM_DAC_SRC 0x80
#define ATOM_SRC_DAC1 0
#define ATOM_SRC_DAC2 0x80
-#ifdef UEFI_BUILD
-#define USHORT UTEMP
-#endif
-
-typedef struct _MEMORY_PLLINIT_PARAMETERS {
- ULONG ulTargetMemoryClock; /* In 10Khz unit */
- UCHAR ucAction; /* not define yet */
- UCHAR ucFbDiv_Hi; /* Fbdiv Hi byte */
- UCHAR ucFbDiv; /* FB value */
- UCHAR ucPostDiv; /* Post div */
-} MEMORY_PLLINIT_PARAMETERS;
+typedef struct _MEMORY_PLLINIT_PARAMETERS
+{
+ ULONG ulTargetMemoryClock; //In 10kHz unit
+ UCHAR ucAction; //not defined yet
+ UCHAR ucFbDiv_Hi; //Fbdiv Hi byte
+ UCHAR ucFbDiv; //FB value
+ UCHAR ucPostDiv; //Post div
+}MEMORY_PLLINIT_PARAMETERS;
#define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS
-#define GPIO_PIN_WRITE 0x01
+
+#define GPIO_PIN_WRITE 0x01
#define GPIO_PIN_READ 0x00
-typedef struct _GPIO_PIN_CONTROL_PARAMETERS {
- UCHAR ucGPIO_ID; /* return value, read from GPIO pins */
- UCHAR ucGPIOBitShift; /* define which bit in uGPIOBitVal need to be update */
- UCHAR ucGPIOBitVal; /* Set/Reset corresponding bit defined in ucGPIOBitMask */
- UCHAR ucAction; /* =GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write */
-} GPIO_PIN_CONTROL_PARAMETERS;
-
-typedef struct _ENABLE_SCALER_PARAMETERS {
- UCHAR ucScaler; /* ATOM_SCALER1, ATOM_SCALER2 */
- UCHAR ucEnable; /* ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION */
- UCHAR ucTVStandard; /* */
- UCHAR ucPadding[1];
-} ENABLE_SCALER_PARAMETERS;
-#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
-
-/* ucEnable: */
+typedef struct _GPIO_PIN_CONTROL_PARAMETERS
+{
+ UCHAR ucGPIO_ID; //return value, read from GPIO pins
+ UCHAR ucGPIOBitShift; //defines which bit in ucGPIOBitVal needs to be updated
+ UCHAR ucGPIOBitVal; //Set/Reset corresponding bit defined in ucGPIOBitMask
+ UCHAR ucAction; //=GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write
+}GPIO_PIN_CONTROL_PARAMETERS;
+
+typedef struct _ENABLE_SCALER_PARAMETERS
+{
+ UCHAR ucScaler; // ATOM_SCALER1, ATOM_SCALER2
+ UCHAR ucEnable; // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION
+ UCHAR ucTVStandard; //
+ UCHAR ucPadding[1];
+}ENABLE_SCALER_PARAMETERS;
+#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
+
+//ucEnable:
#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0
#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1
#define SCALER_ENABLE_2TAP_ALPHA_MODE 2
#define SCALER_ENABLE_MULTITAP_MODE 3
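
A sketch of filling ENABLE_SCALER_PARAMETERS for the bypass/auto-center mode above. The struct is redeclared locally so the snippet stands alone, ATOM_SCALER1 is assumed to be 0 (its define is elsewhere in this header), and ucTVStandard stays 0 since the field is undocumented:

#include <stdio.h>

typedef unsigned char UCHAR;

#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0

typedef struct {
    UCHAR ucScaler;      /* which scaler: ATOM_SCALER1 or ATOM_SCALER2 */
    UCHAR ucEnable;      /* one of the SCALER_* modes above */
    UCHAR ucTVStandard;  /* undocumented; left zero here */
    UCHAR ucPadding[1];
} ENABLE_SCALER_PARAMETERS_DEMO;

int main(void)
{
    ENABLE_SCALER_PARAMETERS_DEMO p = {0};

    p.ucScaler = 0; /* assumption: ATOM_SCALER1 == 0 */
    p.ucEnable = SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION;

    printf("scaler=%u enable=%u\n", p.ucScaler, p.ucEnable);
    return 0;
}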
-typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS {
- ULONG usHWIconHorzVertPosn; /* Hardware Icon Vertical position */
- UCHAR ucHWIconVertOffset; /* Hardware Icon Vertical offset */
- UCHAR ucHWIconHorzOffset; /* Hardware Icon Horizontal offset */
- UCHAR ucSelection; /* ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
-} ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
-
-typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION {
- ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon;
- ENABLE_CRTC_PARAMETERS sReserved;
-} ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS {
- USHORT usHight; /* Image Hight */
- USHORT usWidth; /* Image Width */
- UCHAR ucSurface; /* Surface 1 or 2 */
- UCHAR ucPadding[3];
-} ENABLE_GRAPH_SURFACE_PARAMETERS;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 {
- USHORT usHight; /* Image Hight */
- USHORT usWidth; /* Image Width */
- UCHAR ucSurface; /* Surface 1 or 2 */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucPadding[2];
-} ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION {
- ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
- ENABLE_YUV_PS_ALLOCATION sReserved; /* Don't set this one */
-} ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
-
-typedef struct _MEMORY_CLEAN_UP_PARAMETERS {
- USHORT usMemoryStart; /* in 8Kb boundry, offset from memory base address */
- USHORT usMemorySize; /* 8Kb blocks aligned */
-} MEMORY_CLEAN_UP_PARAMETERS;
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS
+{
+ ULONG usHWIconHorzVertPosn; // Hardware Icon Vertical position
+ UCHAR ucHWIconVertOffset; // Hardware Icon Vertical offset
+ UCHAR ucHWIconHorzOffset; // Hardware Icon Horizontal offset
+ UCHAR ucSelection; // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION
+{
+ ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon;
+ ENABLE_CRTC_PARAMETERS sReserved;
+}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS
+{
+ USHORT usHight; // Image Height
+ USHORT usWidth; // Image Width
+ UCHAR ucSurface; // Surface 1 or 2
+ UCHAR ucPadding[3];
+}ENABLE_GRAPH_SURFACE_PARAMETERS;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2
+{
+ USHORT usHight; // Image Height
+ USHORT usWidth; // Image Width
+ UCHAR ucSurface; // Surface 1 or 2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[2];
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
+{
+ USHORT usHight; // Image Height
+ USHORT usWidth; // Image Width
+ UCHAR ucSurface; // Surface 1 or 2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ USHORT usDeviceId; // Active Device Id for this surface. If no device, set to 0.
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
+{
+ ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
+ ENABLE_YUV_PS_ALLOCATION sReserved; // Don't set this one
+}ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
+
+typedef struct _MEMORY_CLEAN_UP_PARAMETERS
+{
+ USHORT usMemoryStart; //on an 8Kb boundary, offset from memory base address
+ USHORT usMemorySize; //aligned to 8Kb blocks
+}MEMORY_CLEAN_UP_PARAMETERS;
#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
-typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS {
- USHORT usX_Size; /* When use as input parameter, usX_Size indicates which CRTC */
- USHORT usY_Size;
-} GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
+typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
+{
+ USHORT usX_Size; //When used as an input parameter, usX_Size indicates which CRTC
+ USHORT usY_Size;
+}GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
-typedef struct _INDIRECT_IO_ACCESS {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR IOAccessSequence[256];
+typedef struct _INDIRECT_IO_ACCESS
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR IOAccessSequence[256];
} INDIRECT_IO_ACCESS;
#define INDIRECT_READ 0x00
@@ -3615,93 +4414,108 @@ typedef struct _INDIRECT_IO_ACCESS {
#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ
#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE
-typedef struct _ATOM_OEM_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
-} ATOM_OEM_INFO;
-
-typedef struct _ATOM_TV_MODE {
- UCHAR ucVMode_Num; /* Video mode number */
- UCHAR ucTV_Mode_Num; /* Internal TV mode number */
-} ATOM_TV_MODE;
-
-typedef struct _ATOM_BIOS_INT_TVSTD_MODE {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usTV_Mode_LUT_Offset; /* Pointer to standard to internal number conversion table */
- USHORT usTV_FIFO_Offset; /* Pointer to FIFO entry table */
- USHORT usNTSC_Tbl_Offset; /* Pointer to SDTV_Mode_NTSC table */
- USHORT usPAL_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */
- USHORT usCV_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */
-} ATOM_BIOS_INT_TVSTD_MODE;
-
-typedef struct _ATOM_TV_MODE_SCALER_PTR {
- USHORT ucFilter0_Offset; /* Pointer to filter format 0 coefficients */
- USHORT usFilter1_Offset; /* Pointer to filter format 0 coefficients */
- UCHAR ucTV_Mode_Num;
-} ATOM_TV_MODE_SCALER_PTR;
-
-typedef struct _ATOM_STANDARD_VESA_TIMING {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_DTD_FORMAT aModeTimings[16]; /* 16 is not the real array number, just for initial allocation */
-} ATOM_STANDARD_VESA_TIMING;
-
-typedef struct _ATOM_STD_FORMAT {
- USHORT usSTD_HDisp;
- USHORT usSTD_VDisp;
- USHORT usSTD_RefreshRate;
- USHORT usReserved;
-} ATOM_STD_FORMAT;
-
-typedef struct _ATOM_VESA_TO_EXTENDED_MODE {
- USHORT usVESA_ModeNumber;
- USHORT usExtendedModeNumber;
-} ATOM_VESA_TO_EXTENDED_MODE;
-
-typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
-} ATOM_VESA_TO_INTENAL_MODE_LUT;
+typedef struct _ATOM_OEM_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+}ATOM_OEM_INFO;
+
+typedef struct _ATOM_TV_MODE
+{
+ UCHAR ucVMode_Num; //Video mode number
+ UCHAR ucTV_Mode_Num; //Internal TV mode number
+}ATOM_TV_MODE;
+
+typedef struct _ATOM_BIOS_INT_TVSTD_MODE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usTV_Mode_LUT_Offset; // Pointer to standard to internal number conversion table
+ USHORT usTV_FIFO_Offset; // Pointer to FIFO entry table
+ USHORT usNTSC_Tbl_Offset; // Pointer to SDTV_Mode_NTSC table
+ USHORT usPAL_Tbl_Offset; // Pointer to SDTV_Mode_PAL table
+ USHORT usCV_Tbl_Offset; // Pointer to SDTV_Mode_PAL table
+}ATOM_BIOS_INT_TVSTD_MODE;
+
+
+typedef struct _ATOM_TV_MODE_SCALER_PTR
+{
+ USHORT ucFilter0_Offset; //Pointer to filter format 0 coefficients
+ USHORT usFilter1_Offset; //Pointer to filter format 0 coefficients
+ UCHAR ucTV_Mode_Num;
+}ATOM_TV_MODE_SCALER_PTR;
+
+typedef struct _ATOM_STANDARD_VESA_TIMING
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT aModeTimings[16]; // 16 is not the real array number, just for initial allocation
+}ATOM_STANDARD_VESA_TIMING;
+
+
+typedef struct _ATOM_STD_FORMAT
+{
+ USHORT usSTD_HDisp;
+ USHORT usSTD_VDisp;
+ USHORT usSTD_RefreshRate;
+ USHORT usReserved;
+}ATOM_STD_FORMAT;
+
+typedef struct _ATOM_VESA_TO_EXTENDED_MODE
+{
+ USHORT usVESA_ModeNumber;
+ USHORT usExtendedModeNumber;
+}ATOM_VESA_TO_EXTENDED_MODE;
+
+typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
+}ATOM_VESA_TO_INTENAL_MODE_LUT;
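
ATOM_VESA_TO_INTENAL_MODE_LUT is a flat array of mode-number pairs, so translating a VESA mode is a linear scan on usVESA_ModeNumber. A minimal sketch with hypothetical entries (the real table holds 76):

#include <stdio.h>

typedef unsigned short USHORT;

typedef struct {
    USHORT usVESA_ModeNumber;
    USHORT usExtendedModeNumber;
} VESA_TO_EXT_DEMO;

static int lookup_extended(const VESA_TO_EXT_DEMO *lut, int n, USHORT vesa_mode)
{
    for (int i = 0; i < n; i++)
        if (lut[i].usVESA_ModeNumber == vesa_mode)
            return lut[i].usExtendedModeNumber;
    return -1; /* not found */
}

int main(void)
{
    VESA_TO_EXT_DEMO lut[] = { { 0x101, 0x1101 }, { 0x103, 0x1103 } }; /* made up */
    printf("0x101 -> 0x%x\n", lookup_extended(lut, 2, 0x101));
    return 0;
}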
/*************** ATOM Memory Related Data Structure ***********************/
-typedef struct _ATOM_MEMORY_VENDOR_BLOCK {
- UCHAR ucMemoryType;
- UCHAR ucMemoryVendor;
- UCHAR ucAdjMCId;
- UCHAR ucDynClkId;
- ULONG ulDllResetClkRange;
-} ATOM_MEMORY_VENDOR_BLOCK;
-
-typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG {
+typedef struct _ATOM_MEMORY_VENDOR_BLOCK{
+ UCHAR ucMemoryType;
+ UCHAR ucMemoryVendor;
+ UCHAR ucAdjMCId;
+ UCHAR ucDynClkId;
+ ULONG ulDllResetClkRange;
+}ATOM_MEMORY_VENDOR_BLOCK;
+
+
+typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG{
#if ATOM_BIG_ENDIAN
- ULONG ucMemBlkId:8;
- ULONG ulMemClockRange:24;
+ ULONG ucMemBlkId:8;
+ ULONG ulMemClockRange:24;
#else
- ULONG ulMemClockRange:24;
- ULONG ucMemBlkId:8;
+ ULONG ulMemClockRange:24;
+ ULONG ucMemBlkId:8;
#endif
-} ATOM_MEMORY_SETTING_ID_CONFIG;
-
-typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS {
- ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
- ULONG ulAccess;
-} ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
-
-typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK {
- ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
- ULONG aulMemData[1];
-} ATOM_MEMORY_SETTING_DATA_BLOCK;
-
-typedef struct _ATOM_INIT_REG_INDEX_FORMAT {
- USHORT usRegIndex; /* MC register index */
- UCHAR ucPreRegDataLength; /* offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf */
-} ATOM_INIT_REG_INDEX_FORMAT;
-
-typedef struct _ATOM_INIT_REG_BLOCK {
- USHORT usRegIndexTblSize; /* size of asRegIndexBuf */
- USHORT usRegDataBlkSize; /* size of ATOM_MEMORY_SETTING_DATA_BLOCK */
- ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1];
- ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
-} ATOM_INIT_REG_BLOCK;
+}ATOM_MEMORY_SETTING_ID_CONFIG;
+
+typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS
+{
+ ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
+ ULONG ulAccess;
+}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
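
The union lets callers treat the 24-bit clock range and the 8-bit block ID either as bitfields or as one raw word, with the ATOM_BIG_ENDIAN branch keeping the packed layout stable across hosts. A packing sketch; the 32-bit ULONG and a little-endian host are assumptions:

#include <stdio.h>

typedef unsigned int ULONG32; /* 32-bit stand-in for this header's ULONG */

typedef struct {
    ULONG32 ulMemClockRange : 24; /* upper memory clock bound, 10kHz units */
    ULONG32 ucMemBlkId      : 8;  /* which ATOM_MEMORY_SETTING_DATA_BLOCK applies */
} MEM_SETTING_ID_DEMO;            /* little-endian layout only */

typedef union {
    MEM_SETTING_ID_DEMO slAccess;
    ULONG32             ulAccess; /* same 32 bits as one word */
} MEM_SETTING_ID_ACCESS_DEMO;

int main(void)
{
    MEM_SETTING_ID_ACCESS_DEMO id;

    id.slAccess.ulMemClockRange = 90000; /* up to 900 MHz in 10kHz units */
    id.slAccess.ucMemBlkId      = 2;

    printf("raw = 0x%08x\n", id.ulAccess);
    return 0;
}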
+
+
+typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{
+ ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
+ ULONG aulMemData[1];
+}ATOM_MEMORY_SETTING_DATA_BLOCK;
+
+
+typedef struct _ATOM_INIT_REG_INDEX_FORMAT{
+ USHORT usRegIndex; // MC register index
+ UCHAR ucPreRegDataLength; // offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf
+}ATOM_INIT_REG_INDEX_FORMAT;
+
+
+typedef struct _ATOM_INIT_REG_BLOCK{
+ USHORT usRegIndexTblSize; //size of asRegIndexBuf
+ USHORT usRegDataBlkSize; //size of ATOM_MEMORY_SETTING_DATA_BLOCK
+ ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1];
+ ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
+}ATOM_INIT_REG_BLOCK;
#define END_OF_REG_INDEX_BLOCK 0x0ffff
#define END_OF_REG_DATA_BLOCK 0x00000000
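
asRegIndexBuf[] carries no element count; it ends at the END_OF_REG_INDEX_BLOCK sentinel, so consumers walk it until they read 0xffff. A sketch with a hypothetical table (the MC register indices are invented):

#include <stdio.h>

typedef unsigned short USHORT;
typedef unsigned char  UCHAR;

#define END_OF_REG_INDEX_BLOCK 0x0ffff

typedef struct {
    USHORT usRegIndex;         /* MC register index */
    UCHAR  ucPreRegDataLength; /* offset into the data buffer */
} INIT_REG_INDEX_DEMO;

int main(void)
{
    INIT_REG_INDEX_DEMO idx[] = {
        { 0x2774, 0 }, { 0x2778, 4 }, { END_OF_REG_INDEX_BLOCK, 0 },
    };

    for (int i = 0; idx[i].usRegIndex != END_OF_REG_INDEX_BLOCK; i++)
        printf("MC reg 0x%04x, data offset %u\n",
               idx[i].usRegIndex, (unsigned)idx[i].ucPreRegDataLength);
    return 0;
}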
@@ -3716,16 +4530,19 @@ typedef struct _ATOM_INIT_REG_BLOCK {
#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1)
#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1)
-typedef struct _ATOM_MC_INIT_PARAM_TABLE {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usAdjustARB_SEQDataOffset;
- USHORT usMCInitMemTypeTblOffset;
- USHORT usMCInitCommonTblOffset;
- USHORT usMCInitPowerDownTblOffset;
- ULONG ulARB_SEQDataBuf[32];
- ATOM_INIT_REG_BLOCK asMCInitMemType;
- ATOM_INIT_REG_BLOCK asMCInitCommon;
-} ATOM_MC_INIT_PARAM_TABLE;
+
+typedef struct _ATOM_MC_INIT_PARAM_TABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usAdjustARB_SEQDataOffset;
+ USHORT usMCInitMemTypeTblOffset;
+ USHORT usMCInitCommonTblOffset;
+ USHORT usMCInitPowerDownTblOffset;
+ ULONG ulARB_SEQDataBuf[32];
+ ATOM_INIT_REG_BLOCK asMCInitMemType;
+ ATOM_INIT_REG_BLOCK asMCInitCommon;
+}ATOM_MC_INIT_PARAM_TABLE;
+
#define _4Mx16 0x2
#define _4Mx32 0x3
@@ -3751,221 +4568,272 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE {
#define QIMONDA INFINEON
#define PROMOS MOSEL
+#define KRETON INFINEON
-/* ///////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// */
+/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
#define UCODE_ROM_START_ADDRESS 0x1c000
-#define UCODE_SIGNATURE 0x4375434d /* 'MCuC' - MC uCode */
-
-/* uCode block header for reference */
-
-typedef struct _MCuCodeHeader {
- ULONG ulSignature;
- UCHAR ucRevision;
- UCHAR ucChecksum;
- UCHAR ucReserved1;
- UCHAR ucReserved2;
- USHORT usParametersLength;
- USHORT usUCodeLength;
- USHORT usReserved1;
- USHORT usReserved2;
+#define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode
+
+//uCode block header for reference
+
+typedef struct _MCuCodeHeader
+{
+ ULONG ulSignature;
+ UCHAR ucRevision;
+ UCHAR ucChecksum;
+ UCHAR ucReserved1;
+ UCHAR ucReserved2;
+ USHORT usParametersLength;
+ USHORT usUCodeLength;
+ USHORT usReserved1;
+ USHORT usReserved2;
} MCuCodeHeader;
-/* //////////////////////////////////////////////////////////////////////////////// */
+//////////////////////////////////////////////////////////////////////////////////
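
A consumer that wants the MC uCode checks for the 'MCuC' signature at UCODE_ROM_START_ADDRESS before trusting the MCuCodeHeader fields. A hedged sketch, assuming a little-endian host and an in-memory copy of the ROM:

#include <stdio.h>
#include <string.h>

#define UCODE_ROM_START_ADDRESS 0x1c000
#define UCODE_SIGNATURE         0x4375434d /* 'MCuC' */

static int mc_ucode_present(const unsigned char *rom, size_t rom_len)
{
    unsigned int sig;

    if (rom_len < UCODE_ROM_START_ADDRESS + sizeof(sig))
        return 0;
    memcpy(&sig, rom + UCODE_ROM_START_ADDRESS, sizeof(sig)); /* unaligned-safe */
    return sig == UCODE_SIGNATURE; /* little-endian host assumed */
}

int main(void)
{
    static unsigned char rom[UCODE_ROM_START_ADDRESS + 16];

    memcpy(rom + UCODE_ROM_START_ADDRESS, "MCuC", 4); /* forge a signature */
    printf("ucode present: %d\n", mc_ucode_present(rom, sizeof(rom)));
    return 0;
}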
#define ATOM_MAX_NUMBER_OF_VRAM_MODULE 16
#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK 0xF
-typedef struct _ATOM_VRAM_MODULE_V1 {
- ULONG ulReserved;
- USHORT usEMRSValue;
- USHORT usMRSValue;
- USHORT usReserved;
- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved; */
- UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender */
- UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */
- UCHAR ucRow; /* Number of Row,in power of 2; */
- UCHAR ucColumn; /* Number of Column,in power of 2; */
- UCHAR ucBank; /* Nunber of Bank; */
- UCHAR ucRank; /* Number of Rank, in power of 2 */
- UCHAR ucChannelNum; /* Number of channel; */
- UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */
- UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */
- UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */
- UCHAR ucReserved[2];
-} ATOM_VRAM_MODULE_V1;
-
-typedef struct _ATOM_VRAM_MODULE_V2 {
- ULONG ulReserved;
- ULONG ulFlags; /* To enable/disable functionalities based on memory type */
- ULONG ulEngineClock; /* Override of default engine clock for particular memory type */
- ULONG ulMemoryClock; /* Override of default memory clock for particular memory type */
- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usEMRSValue;
- USHORT usMRSValue;
- USHORT usReserved;
- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */
- UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed */
- UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */
- UCHAR ucRow; /* Number of Row,in power of 2; */
- UCHAR ucColumn; /* Number of Column,in power of 2; */
- UCHAR ucBank; /* Nunber of Bank; */
- UCHAR ucRank; /* Number of Rank, in power of 2 */
- UCHAR ucChannelNum; /* Number of channel; */
- UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */
- UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */
- UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */
- UCHAR ucRefreshRateFactor;
- UCHAR ucReserved[3];
-} ATOM_VRAM_MODULE_V2;
-
-typedef struct _ATOM_MEMORY_TIMING_FORMAT {
- ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */
- union {
- USHORT usMRS; /* mode register */
- USHORT usDDR3_MR0;
- };
- union {
- USHORT usEMRS; /* extended mode register */
- USHORT usDDR3_MR1;
- };
- UCHAR ucCL; /* CAS latency */
- UCHAR ucWL; /* WRITE Latency */
- UCHAR uctRAS; /* tRAS */
- UCHAR uctRC; /* tRC */
- UCHAR uctRFC; /* tRFC */
- UCHAR uctRCDR; /* tRCDR */
- UCHAR uctRCDW; /* tRCDW */
- UCHAR uctRP; /* tRP */
- UCHAR uctRRD; /* tRRD */
- UCHAR uctWR; /* tWR */
- UCHAR uctWTR; /* tWTR */
- UCHAR uctPDIX; /* tPDIX */
- UCHAR uctFAW; /* tFAW */
- UCHAR uctAOND; /* tAOND */
- union {
- struct {
- UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */
- UCHAR ucReserved;
- };
- USHORT usDDR3_MR2;
- };
-} ATOM_MEMORY_TIMING_FORMAT;
-
-typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1 {
- ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */
- USHORT usMRS; /* mode register */
- USHORT usEMRS; /* extended mode register */
- UCHAR ucCL; /* CAS latency */
- UCHAR ucWL; /* WRITE Latency */
- UCHAR uctRAS; /* tRAS */
- UCHAR uctRC; /* tRC */
- UCHAR uctRFC; /* tRFC */
- UCHAR uctRCDR; /* tRCDR */
- UCHAR uctRCDW; /* tRCDW */
- UCHAR uctRP; /* tRP */
- UCHAR uctRRD; /* tRRD */
- UCHAR uctWR; /* tWR */
- UCHAR uctWTR; /* tWTR */
- UCHAR uctPDIX; /* tPDIX */
- UCHAR uctFAW; /* tFAW */
- UCHAR uctAOND; /* tAOND */
- UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */
-/* ///////////////////////GDDR parameters/////////////////////////////////// */
- UCHAR uctCCDL; /* */
- UCHAR uctCRCRL; /* */
- UCHAR uctCRCWL; /* */
- UCHAR uctCKE; /* */
- UCHAR uctCKRSE; /* */
- UCHAR uctCKRSX; /* */
- UCHAR uctFAW32; /* */
- UCHAR ucReserved1; /* */
- UCHAR ucReserved2; /* */
- UCHAR ucTerminator;
-} ATOM_MEMORY_TIMING_FORMAT_V1;
-
-typedef struct _ATOM_MEMORY_FORMAT {
- ULONG ulDllDisClock; /* memory DLL will be disable when target memory clock is below this clock */
- union {
- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usDDR3_Reserved; /* Not used for DDR3 memory */
- };
- union {
- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usDDR3_MR3; /* Used for DDR3 memory */
- };
- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */
- UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed */
- UCHAR ucRow; /* Number of Row,in power of 2; */
- UCHAR ucColumn; /* Number of Column,in power of 2; */
- UCHAR ucBank; /* Nunber of Bank; */
- UCHAR ucRank; /* Number of Rank, in power of 2 */
- UCHAR ucBurstSize; /* burst size, 0= burst size=4 1= burst size=8 */
- UCHAR ucDllDisBit; /* position of DLL Enable/Disable bit in EMRS ( Extended Mode Register ) */
- UCHAR ucRefreshRateFactor; /* memory refresh rate in unit of ms */
- UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
- UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
- UCHAR ucMemAttrib; /* Memory Device Addribute, like RDBI/WDBI etc */
- ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
-} ATOM_MEMORY_FORMAT;
-
-typedef struct _ATOM_VRAM_MODULE_V3 {
- ULONG ulChannelMapCfg; /* board dependent paramenter:Channel combination */
- USHORT usSize; /* size of ATOM_VRAM_MODULE_V3 */
- USHORT usDefaultMVDDQ; /* board dependent parameter:Default Memory Core Voltage */
- USHORT usDefaultMVDDC; /* board dependent parameter:Default Memory IO Voltage */
- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
- UCHAR ucChannelNum; /* board dependent parameter:Number of channel; */
- UCHAR ucChannelSize; /* board dependent parameter:32bit or 64bit */
- UCHAR ucVREFI; /* board dependnt parameter: EXT or INT +160mv to -140mv */
- UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */
- UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
- ATOM_MEMORY_FORMAT asMemory; /* describ all of video memory parameters from memory spec */
-} ATOM_VRAM_MODULE_V3;
-
-/* ATOM_VRAM_MODULE_V3.ucNPL_RT */
+typedef struct _ATOM_VRAM_MODULE_V1
+{
+ ULONG ulReserved;
+ USHORT usEMRSValue;
+ USHORT usMRSValue;
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell which memory module is currently in use
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved;
+ UCHAR ucMemoryVenderID; // Predefined, never changes across designs or memory type/vendor
+ UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+ UCHAR ucRow; // Number of Rows, in power of 2;
+ UCHAR ucColumn; // Number of Columns, in power of 2;
+ UCHAR ucBank; // Number of Banks;
+ UCHAR ucRank; // Number of Rank, in power of 2
+ UCHAR ucChannelNum; // Number of channel;
+ UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
+ UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+ UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+ UCHAR ucReserved[2];
+}ATOM_VRAM_MODULE_V1;
+
+
+typedef struct _ATOM_VRAM_MODULE_V2
+{
+ ULONG ulReserved;
+ ULONG ulFlags; // To enable/disable functionalities based on memory type
+ ULONG ulEngineClock; // Override of default engine clock for particular memory type
+ ULONG ulMemoryClock; // Override of default memory clock for particular memory type
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRSValue;
+ USHORT usMRSValue;
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell which memory module is currently in use
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+ UCHAR ucMemoryVenderID; // Predefined, never changes across designs or memory type/vendor. If not predefined, the vendor detection table gets executed
+ UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+ UCHAR ucRow; // Number of Rows, in power of 2;
+ UCHAR ucColumn; // Number of Columns, in power of 2;
+ UCHAR ucBank; // Number of Banks;
+ UCHAR ucRank; // Number of Rank, in power of 2
+ UCHAR ucChannelNum; // Number of channel;
+ UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
+ UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+ UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+ UCHAR ucRefreshRateFactor;
+ UCHAR ucReserved[3];
+}ATOM_VRAM_MODULE_V2;
+
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT
+{
+ ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
+ union{
+ USHORT usMRS; // mode register
+ USHORT usDDR3_MR0;
+ };
+ union{
+ USHORT usEMRS; // extended mode register
+ USHORT usDDR3_MR1;
+ };
+ UCHAR ucCL; // CAS latency
+ UCHAR ucWL; // WRITE Latency
+ UCHAR uctRAS; // tRAS
+ UCHAR uctRC; // tRC
+ UCHAR uctRFC; // tRFC
+ UCHAR uctRCDR; // tRCDR
+ UCHAR uctRCDW; // tRCDW
+ UCHAR uctRP; // tRP
+ UCHAR uctRRD; // tRRD
+ UCHAR uctWR; // tWR
+ UCHAR uctWTR; // tWTR
+ UCHAR uctPDIX; // tPDIX
+ UCHAR uctFAW; // tFAW
+ UCHAR uctAOND; // tAOND
+ union
+ {
+ struct {
+ UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+ UCHAR ucReserved;
+ };
+ USHORT usDDR3_MR2;
+ };
+}ATOM_MEMORY_TIMING_FORMAT;
+
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1
+{
+ ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
+ USHORT usMRS; // mode register
+ USHORT usEMRS; // extended mode register
+ UCHAR ucCL; // CAS latency
+ UCHAR ucWL; // WRITE Latency
+ UCHAR uctRAS; // tRAS
+ UCHAR uctRC; // tRC
+ UCHAR uctRFC; // tRFC
+ UCHAR uctRCDR; // tRCDR
+ UCHAR uctRCDW; // tRCDW
+ UCHAR uctRP; // tRP
+ UCHAR uctRRD; // tRRD
+ UCHAR uctWR; // tWR
+ UCHAR uctWTR; // tWTR
+ UCHAR uctPDIX; // tPDIX
+ UCHAR uctFAW; // tFAW
+ UCHAR uctAOND; // tAOND
+ UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+////////////////////////////////////GDDR parameters///////////////////////////////////
+ UCHAR uctCCDL; //
+ UCHAR uctCRCRL; //
+ UCHAR uctCRCWL; //
+ UCHAR uctCKE; //
+ UCHAR uctCKRSE; //
+ UCHAR uctCKRSX; //
+ UCHAR uctFAW32; //
+ UCHAR ucMR5lo; //
+ UCHAR ucMR5hi; //
+ UCHAR ucTerminator;
+}ATOM_MEMORY_TIMING_FORMAT_V1;
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT_V2
+{
+ ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
+ USHORT usMRS; // mode register
+ USHORT usEMRS; // extended mode register
+ UCHAR ucCL; // CAS latency
+ UCHAR ucWL; // WRITE Latency
+ UCHAR uctRAS; // tRAS
+ UCHAR uctRC; // tRC
+ UCHAR uctRFC; // tRFC
+ UCHAR uctRCDR; // tRCDR
+ UCHAR uctRCDW; // tRCDW
+ UCHAR uctRP; // tRP
+ UCHAR uctRRD; // tRRD
+ UCHAR uctWR; // tWR
+ UCHAR uctWTR; // tWTR
+ UCHAR uctPDIX; // tPDIX
+ UCHAR uctFAW; // tFAW
+ UCHAR uctAOND; // tAOND
+ UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+////////////////////////////////////GDDR parameters///////////////////////////////////
+ UCHAR uctCCDL; //
+ UCHAR uctCRCRL; //
+ UCHAR uctCRCWL; //
+ UCHAR uctCKE; //
+ UCHAR uctCKRSE; //
+ UCHAR uctCKRSX; //
+ UCHAR uctFAW32; //
+ UCHAR ucMR4lo; //
+ UCHAR ucMR4hi; //
+ UCHAR ucMR5lo; //
+ UCHAR ucMR5hi; //
+ UCHAR ucTerminator;
+ UCHAR ucReserved;
+}ATOM_MEMORY_TIMING_FORMAT_V2;
+
+typedef struct _ATOM_MEMORY_FORMAT
+{
+ ULONG ulDllDisClock; // memory DLL will be disable when target memory clock is below this clock
+ union{
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_Reserved; // Not used for DDR3 memory
+ };
+ union{
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_MR3; // Used for DDR3 memory
+ };
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+ UCHAR ucMemoryVenderID; // Predefined, never changes across designs or memory type/vendor. If not predefined, the vendor detection table gets executed
+ UCHAR ucRow; // Number of Rows, in power of 2;
+ UCHAR ucColumn; // Number of Columns, in power of 2;
+ UCHAR ucBank; // Number of Banks;
+ UCHAR ucRank; // Number of Rank, in power of 2
+ UCHAR ucBurstSize; // burst size, 0= burst size=4 1= burst size=8
+ UCHAR ucDllDisBit; // position of DLL Enable/Disable bit in EMRS ( Extended Mode Register )
+ UCHAR ucRefreshRateFactor; // memory refresh rate in unit of ms
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucPreamble; //[7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemAttrib; // Memory Device Attribute, like RDBI/WDBI etc
+ ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; //Memory Timing blocks sorted from lower clock to higher clock
+}ATOM_MEMORY_FORMAT;
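
asMemTiming[] is sorted from lower to higher clock and each entry's ulClkRange is an upper bound, so the first entry the target clock falls below is the one to program. A selection sketch (timing fields elided, clock values invented):

#include <stdio.h>

typedef unsigned int ULONG32; /* 32-bit stand-in for ULONG */

typedef struct {
    ULONG32 ulClkRange; /* upper clock bound for this timing set, 10kHz units */
    /* ... timing fields elided ... */
} MEM_TIMING_DEMO;

static int pick_timing(const MEM_TIMING_DEMO *t, int n, ULONG32 target_clk)
{
    for (int i = 0; i < n; i++)
        if (target_clk < t[i].ulClkRange) /* "below this clock" per the comment */
            return i;
    return n - 1; /* fall back to the last (fastest) set */
}

int main(void)
{
    MEM_TIMING_DEMO t[5] = { {40000}, {60000}, {80000}, {100000}, {120000} };
    printf("timing set for 750 MHz: %d\n", pick_timing(t, 5, 75000)); /* -> 2 */
    return 0;
}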
+
+
+typedef struct _ATOM_VRAM_MODULE_V3
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usSize; // size of ATOM_VRAM_MODULE_V3
+ USHORT usDefaultMVDDQ; // board dependent parameter: Default Memory Core Voltage
+ USHORT usDefaultMVDDC; // board dependent parameter: Default Memory IO Voltage
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell which memory module is currently in use
+ UCHAR ucChannelNum; // board dependent parameter: Number of channels;
+ UCHAR ucChannelSize; // board dependent parameter: 32bit or 64bit
+ UCHAR ucVREFI; // board dependent parameter: EXT or INT +160mv to -140mv
+ UCHAR ucNPL_RT; // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ ATOM_MEMORY_FORMAT asMemory; // describes all video memory parameters from the memory spec
+}ATOM_VRAM_MODULE_V3;
+
+
+//ATOM_VRAM_MODULE_V3.ucNPL_RT
#define NPL_RT_MASK 0x0f
#define BATTERY_ODT_MASK 0xc0
#define ATOM_VRAM_MODULE ATOM_VRAM_MODULE_V3
-typedef struct _ATOM_VRAM_MODULE_V4 {
- ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */
- USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */
- USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
- /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */
- USHORT usReserved;
- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */
- UCHAR ucChannelNum; /* Number of channels present in this module config */
- UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */
- UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
- UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
- UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */
- UCHAR ucVREFI; /* board dependent parameter */
- UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */
- UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
- UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
- /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */
- UCHAR ucReserved[3];
-
-/* compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level */
- union {
- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usDDR3_Reserved;
- };
- union {
- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usDDR3_MR3; /* Used for DDR3 memory */
- };
- UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed */
- UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */
- UCHAR ucReserved2[2];
- ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
-} ATOM_VRAM_MODULE_V4;
+typedef struct _ATOM_VRAM_MODULE_V4
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell which memory module is currently in use
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+ UCHAR ucChannelNum; // Number of channels present in this module config
+ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
+ UCHAR ucVREFI; // board dependent parameter
+ UCHAR ucNPL_RT; // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+
+//compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
+ union{
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_Reserved;
+ };
+ union{
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_MR3; // Used for DDR3 memory
+ };
+ UCHAR ucMemoryVenderID; // Predefined. If not predefined, the vendor detection table gets executed
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucReserved2[2];
+ ATOM_MEMORY_TIMING_FORMAT asMemTiming[5];//Memory Timing blocks sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V4;
#define VRAM_MODULE_V4_MISC_RANK_MASK 0x3
#define VRAM_MODULE_V4_MISC_DUAL_RANK 0x1
@@ -3973,96 +4841,139 @@ typedef struct _ATOM_VRAM_MODULE_V4 {
#define VRAM_MODULE_V4_MISC_BL8 0x4
#define VRAM_MODULE_V4_MISC_DUAL_CS 0x10
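
ucMisc packs rank, burst length, and chip-select configuration into one byte, decoded with the masks above. A minimal sketch:

#include <stdio.h>

#define VRAM_MODULE_V4_MISC_RANK_MASK 0x3
#define VRAM_MODULE_V4_MISC_DUAL_RANK 0x1
#define VRAM_MODULE_V4_MISC_BL8       0x4
#define VRAM_MODULE_V4_MISC_DUAL_CS   0x10

static void decode_misc(unsigned char misc)
{
    int dual_rank =
        (misc & VRAM_MODULE_V4_MISC_RANK_MASK) == VRAM_MODULE_V4_MISC_DUAL_RANK;

    printf("%s rank, burst length %d, %s CS\n",
           dual_rank ? "dual" : "single",
           (misc & VRAM_MODULE_V4_MISC_BL8) ? 8 : 4,
           (misc & VRAM_MODULE_V4_MISC_DUAL_CS) ? "dual" : "single");
}

int main(void)
{
    decode_misc(VRAM_MODULE_V4_MISC_DUAL_RANK | VRAM_MODULE_V4_MISC_BL8);
    return 0;
}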
-typedef struct _ATOM_VRAM_MODULE_V5 {
- ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */
- USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */
- USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
- /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */
- USHORT usReserved;
- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */
- UCHAR ucChannelNum; /* Number of channels present in this module config */
- UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */
- UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
- UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
- UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */
- UCHAR ucVREFI; /* board dependent parameter */
- UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */
- UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
- UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
- /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */
- UCHAR ucReserved[3];
+typedef struct _ATOM_VRAM_MODULE_V5
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell which memory module is currently in use
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+ UCHAR ucChannelNum; // Number of channels present in this module config
+ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
+ UCHAR ucVREFI; // board dependent parameter
+ UCHAR ucNPL_RT; // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+
+//compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ UCHAR ucMemoryVenderID; // Predefined. If not predefined, the vendor detection table gets executed
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucFIFODepth; // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth
+ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+ ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5];//Memory Timing blocks sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V5;
+
+typedef struct _ATOM_VRAM_MODULE_V6
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell which memory module is currently in use
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+ UCHAR ucChannelNum; // Number of channels present in this module config
+ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
+ UCHAR ucVREFI; // board dependent parameter
+ UCHAR ucNPL_RT; // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+
+//compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ UCHAR ucMemoryVenderID; // Predefined. If not predefined, the vendor detection table gets executed
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucFIFODepth; // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth
+ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+ ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing blocks sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V6;
+
+
+
+typedef struct _ATOM_VRAM_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucNumOfVRAMModule;
+ ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_V2;
-/* compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level */
- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
- UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed */
- UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */
- UCHAR ucFIFODepth; /* FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth */
- UCHAR ucCDR_Bandwidth; /* [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth */
- ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
-} ATOM_VRAM_MODULE_V5;
-
-typedef struct _ATOM_VRAM_INFO_V2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucNumOfVRAMModule;
- ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
-} ATOM_VRAM_INFO_V2;
-
-typedef struct _ATOM_VRAM_INFO_V3 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */
- USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */
- USHORT usRerseved;
- UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */
- UCHAR ucNumOfVRAMModule;
- ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
- ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */
- /* ATOM_INIT_REG_BLOCK aMemAdjust; */
-} ATOM_VRAM_INFO_V3;
+typedef struct _ATOM_VRAM_INFO_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+ USHORT usRerseved;
+ UCHAR aVID_PinsShift[9]; // 8 bit strap maximum+terminator
+ UCHAR ucNumOfVRAMModule;
+ ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+ ATOM_INIT_REG_BLOCK asMemPatch; // for allocation
+ // ATOM_INIT_REG_BLOCK aMemAdjust;
+}ATOM_VRAM_INFO_V3;
#define ATOM_VRAM_INFO_LAST ATOM_VRAM_INFO_V3
-typedef struct _ATOM_VRAM_INFO_V4 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */
- USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */
- USHORT usRerseved;
- UCHAR ucMemDQ7_0ByteRemap; /* DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3 */
- ULONG ulMemDQ7_0BitRemap; /* each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21] */
- UCHAR ucReservde[4];
- UCHAR ucNumOfVRAMModule;
- ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
- ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */
- /* ATOM_INIT_REG_BLOCK aMemAdjust; */
-} ATOM_VRAM_INFO_V4;
-
-typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */
-} ATOM_VRAM_GPIO_DETECTION_INFO;
-
-typedef struct _ATOM_MEMORY_TRAINING_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucTrainingLoop;
- UCHAR ucReserved[3];
- ATOM_INIT_REG_BLOCK asMemTrainingSetting;
-} ATOM_MEMORY_TRAINING_INFO;
-
-typedef struct SW_I2C_CNTL_DATA_PARAMETERS {
- UCHAR ucControl;
- UCHAR ucData;
- UCHAR ucSatus;
- UCHAR ucTemp;
+typedef struct _ATOM_VRAM_INFO_V4
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+ USHORT usRerseved;
+ UCHAR ucMemDQ7_0ByteRemap; // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
+ ULONG ulMemDQ7_0BitRemap; // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21]
+ UCHAR ucReservde[4];
+ UCHAR ucNumOfVRAMModule;
+ ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+ ATOM_INIT_REG_BLOCK asMemPatch; // for allocation
+ // ATOM_INIT_REG_BLOCK aMemAdjust;
+}ATOM_VRAM_INFO_V4;
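
Because each ATOM_VRAM_MODULE_V4 records its own usModuleSize, the aVramInfo[] entries are walked by advancing that many bytes per module instead of by sizeof(). A sketch against a fabricated blob; the usModuleSize offset of 4 (right after ulChannelMapCfg) follows the V4 layout above, everything else is illustrative:

#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned char blob[64] = {0};      /* stands in for the table's module area */
    unsigned short sz0 = 24, sz1 = 32; /* fake module sizes, back to back */

    memcpy(blob + 4, &sz0, sizeof(sz0));
    memcpy(blob + sz0 + 4, &sz1, sizeof(sz1));

    const unsigned char *p = blob;
    for (unsigned i = 0; i < 2; i++) { /* 2 == ucNumOfVRAMModule here */
        unsigned short sz;
        memcpy(&sz, p + 4, sizeof(sz)); /* read usModuleSize in place */
        printf("module %u at offset %td, size %u\n", i, p - blob, (unsigned)sz);
        p += sz;                        /* next module starts right after */
    }
    return 0;
}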
+
+typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR aVID_PinsShift[9]; //8 bit strap maximum+terminator
+}ATOM_VRAM_GPIO_DETECTION_INFO;
+
+
+typedef struct _ATOM_MEMORY_TRAINING_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucTrainingLoop;
+ UCHAR ucReserved[3];
+ ATOM_INIT_REG_BLOCK asMemTrainingSetting;
+}ATOM_MEMORY_TRAINING_INFO;
+
+
+typedef struct SW_I2C_CNTL_DATA_PARAMETERS
+{
+ UCHAR ucControl;
+ UCHAR ucData;
+ UCHAR ucSatus;
+ UCHAR ucTemp;
} SW_I2C_CNTL_DATA_PARAMETERS;
#define SW_I2C_CNTL_DATA_PS_ALLOCATION SW_I2C_CNTL_DATA_PARAMETERS
-typedef struct _SW_I2C_IO_DATA_PARAMETERS {
- USHORT GPIO_Info;
- UCHAR ucAct;
- UCHAR ucData;
-} SW_I2C_IO_DATA_PARAMETERS;
+typedef struct _SW_I2C_IO_DATA_PARAMETERS
+{
+ USHORT GPIO_Info;
+ UCHAR ucAct;
+ UCHAR ucData;
+} SW_I2C_IO_DATA_PARAMETERS;
#define SW_I2C_IO_DATA_PS_ALLOCATION SW_I2C_IO_DATA_PARAMETERS
@@ -4087,127 +4998,136 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS {
#define SW_I2C_CNTL_CLOSE 5
#define SW_I2C_CNTL_WRITE1BIT 6
-/* ==============================VESA definition Portion=============================== */
+//==============================VESA definition Portion===============================
#define VESA_OEM_PRODUCT_REV '01.00'
-#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB /* refer to VBE spec p.32, no TTY support */
+#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support
#define VESA_MODE_WIN_ATTRIBUTE 7
#define VESA_WIN_SIZE 64
-typedef struct _PTR_32_BIT_STRUCTURE {
- USHORT Offset16;
- USHORT Segment16;
+typedef struct _PTR_32_BIT_STRUCTURE
+{
+ USHORT Offset16;
+ USHORT Segment16;
} PTR_32_BIT_STRUCTURE;
-typedef union _PTR_32_BIT_UNION {
- PTR_32_BIT_STRUCTURE SegmentOffset;
- ULONG Ptr32_Bit;
+typedef union _PTR_32_BIT_UNION
+{
+ PTR_32_BIT_STRUCTURE SegmentOffset;
+ ULONG Ptr32_Bit;
} PTR_32_BIT_UNION;
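
PTR_32_BIT_UNION encodes a real-mode far pointer: the linear address is Segment16 * 16 + Offset16, and Ptr32_Bit views the same 32 bits as one word. A self-contained sketch (types renamed so they don't clash with the header):

#include <stdio.h>

typedef unsigned short USHORT;
typedef unsigned int   ULONG32;

typedef struct { USHORT Offset16; USHORT Segment16; } FARPTR_DEMO;
typedef union  { FARPTR_DEMO SegmentOffset; ULONG32 Ptr32_Bit; } FARPTR_UNION_DEMO;

static unsigned long to_linear(FARPTR_UNION_DEMO p)
{
    return (unsigned long)p.SegmentOffset.Segment16 * 16
         + p.SegmentOffset.Offset16;
}

int main(void)
{
    FARPTR_UNION_DEMO p;

    p.SegmentOffset.Segment16 = 0xC000; /* classic video BIOS segment */
    p.SegmentOffset.Offset16  = 0x0040;
    printf("linear = 0x%05lx\n", to_linear(p)); /* 0xC0040 */
    return 0;
}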
-typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE {
- UCHAR VbeSignature[4];
- USHORT VbeVersion;
- PTR_32_BIT_UNION OemStringPtr;
- UCHAR Capabilities[4];
- PTR_32_BIT_UNION VideoModePtr;
- USHORT TotalMemory;
+typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE
+{
+ UCHAR VbeSignature[4];
+ USHORT VbeVersion;
+ PTR_32_BIT_UNION OemStringPtr;
+ UCHAR Capabilities[4];
+ PTR_32_BIT_UNION VideoModePtr;
+ USHORT TotalMemory;
} VBE_1_2_INFO_BLOCK_UPDATABLE;
-typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE {
- VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock;
- USHORT OemSoftRev;
- PTR_32_BIT_UNION OemVendorNamePtr;
- PTR_32_BIT_UNION OemProductNamePtr;
- PTR_32_BIT_UNION OemProductRevPtr;
+
+typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE
+{
+ VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock;
+ USHORT OemSoftRev;
+ PTR_32_BIT_UNION OemVendorNamePtr;
+ PTR_32_BIT_UNION OemProductNamePtr;
+ PTR_32_BIT_UNION OemProductRevPtr;
} VBE_2_0_INFO_BLOCK_UPDATABLE;
-typedef union _VBE_VERSION_UNION {
- VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock;
- VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock;
+typedef union _VBE_VERSION_UNION
+{
+ VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock;
+ VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock;
} VBE_VERSION_UNION;
-typedef struct _VBE_INFO_BLOCK {
- VBE_VERSION_UNION UpdatableVBE_Info;
- UCHAR Reserved[222];
- UCHAR OemData[256];
+typedef struct _VBE_INFO_BLOCK
+{
+ VBE_VERSION_UNION UpdatableVBE_Info;
+ UCHAR Reserved[222];
+ UCHAR OemData[256];
} VBE_INFO_BLOCK;
-typedef struct _VBE_FP_INFO {
- USHORT HSize;
- USHORT VSize;
- USHORT FPType;
- UCHAR RedBPP;
- UCHAR GreenBPP;
- UCHAR BlueBPP;
- UCHAR ReservedBPP;
- ULONG RsvdOffScrnMemSize;
- ULONG RsvdOffScrnMEmPtr;
- UCHAR Reserved[14];
+typedef struct _VBE_FP_INFO
+{
+ USHORT HSize;
+ USHORT VSize;
+ USHORT FPType;
+ UCHAR RedBPP;
+ UCHAR GreenBPP;
+ UCHAR BlueBPP;
+ UCHAR ReservedBPP;
+ ULONG RsvdOffScrnMemSize;
+ ULONG RsvdOffScrnMEmPtr;
+ UCHAR Reserved[14];
} VBE_FP_INFO;
-typedef struct _VESA_MODE_INFO_BLOCK {
-/* Mandatory information for all VBE revisions */
- USHORT ModeAttributes; /* dw ? ; mode attributes */
- UCHAR WinAAttributes; /* db ? ; window A attributes */
- UCHAR WinBAttributes; /* db ? ; window B attributes */
- USHORT WinGranularity; /* dw ? ; window granularity */
- USHORT WinSize; /* dw ? ; window size */
- USHORT WinASegment; /* dw ? ; window A start segment */
- USHORT WinBSegment; /* dw ? ; window B start segment */
- ULONG WinFuncPtr; /* dd ? ; real mode pointer to window function */
- USHORT BytesPerScanLine; /* dw ? ; bytes per scan line */
-
-/* ; Mandatory information for VBE 1.2 and above */
- USHORT XResolution; /* dw ? ; horizontal resolution in pixels or characters */
- USHORT YResolution; /* dw ? ; vertical resolution in pixels or characters */
- UCHAR XCharSize; /* db ? ; character cell width in pixels */
- UCHAR YCharSize; /* db ? ; character cell height in pixels */
- UCHAR NumberOfPlanes; /* db ? ; number of memory planes */
- UCHAR BitsPerPixel; /* db ? ; bits per pixel */
- UCHAR NumberOfBanks; /* db ? ; number of banks */
- UCHAR MemoryModel; /* db ? ; memory model type */
- UCHAR BankSize; /* db ? ; bank size in KB */
- UCHAR NumberOfImagePages; /* db ? ; number of images */
- UCHAR ReservedForPageFunction; /* db 1 ; reserved for page function */
-
-/* ; Direct Color fields(required for direct/6 and YUV/7 memory models) */
- UCHAR RedMaskSize; /* db ? ; size of direct color red mask in bits */
- UCHAR RedFieldPosition; /* db ? ; bit position of lsb of red mask */
- UCHAR GreenMaskSize; /* db ? ; size of direct color green mask in bits */
- UCHAR GreenFieldPosition; /* db ? ; bit position of lsb of green mask */
- UCHAR BlueMaskSize; /* db ? ; size of direct color blue mask in bits */
- UCHAR BlueFieldPosition; /* db ? ; bit position of lsb of blue mask */
- UCHAR RsvdMaskSize; /* db ? ; size of direct color reserved mask in bits */
- UCHAR RsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask */
- UCHAR DirectColorModeInfo; /* db ? ; direct color mode attributes */
-
-/* ; Mandatory information for VBE 2.0 and above */
- ULONG PhysBasePtr; /* dd ? ; physical address for flat memory frame buffer */
- ULONG Reserved_1; /* dd 0 ; reserved - always set to 0 */
- USHORT Reserved_2; /* dw 0 ; reserved - always set to 0 */
-
-/* ; Mandatory information for VBE 3.0 and above */
- USHORT LinBytesPerScanLine; /* dw ? ; bytes per scan line for linear modes */
- UCHAR BnkNumberOfImagePages; /* db ? ; number of images for banked modes */
- UCHAR LinNumberOfImagPages; /* db ? ; number of images for linear modes */
- UCHAR LinRedMaskSize; /* db ? ; size of direct color red mask(linear modes) */
- UCHAR LinRedFieldPosition; /* db ? ; bit position of lsb of red mask(linear modes) */
- UCHAR LinGreenMaskSize; /* db ? ; size of direct color green mask(linear modes) */
- UCHAR LinGreenFieldPosition; /* db ? ; bit position of lsb of green mask(linear modes) */
- UCHAR LinBlueMaskSize; /* db ? ; size of direct color blue mask(linear modes) */
- UCHAR LinBlueFieldPosition; /* db ? ; bit position of lsb of blue mask(linear modes) */
- UCHAR LinRsvdMaskSize; /* db ? ; size of direct color reserved mask(linear modes) */
- UCHAR LinRsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask(linear modes) */
- ULONG MaxPixelClock; /* dd ? ; maximum pixel clock(in Hz) for graphics mode */
- UCHAR Reserved; /* db 190 dup (0) */
+typedef struct _VESA_MODE_INFO_BLOCK
+{
+// Mandatory information for all VBE revisions
+ USHORT ModeAttributes; // dw ? ; mode attributes
+ UCHAR WinAAttributes; // db ? ; window A attributes
+ UCHAR WinBAttributes; // db ? ; window B attributes
+ USHORT WinGranularity; // dw ? ; window granularity
+ USHORT WinSize; // dw ? ; window size
+ USHORT WinASegment; // dw ? ; window A start segment
+ USHORT WinBSegment; // dw ? ; window B start segment
+ ULONG WinFuncPtr; // dd ? ; real mode pointer to window function
+ USHORT BytesPerScanLine;// dw ? ; bytes per scan line
+
+//; Mandatory information for VBE 1.2 and above
+ USHORT XResolution; // dw ? ; horizontal resolution in pixels or characters
+ USHORT YResolution; // dw ? ; vertical resolution in pixels or characters
+ UCHAR XCharSize; // db ? ; character cell width in pixels
+ UCHAR YCharSize; // db ? ; character cell height in pixels
+ UCHAR NumberOfPlanes; // db ? ; number of memory planes
+ UCHAR BitsPerPixel; // db ? ; bits per pixel
+ UCHAR NumberOfBanks; // db ? ; number of banks
+ UCHAR MemoryModel; // db ? ; memory model type
+ UCHAR BankSize; // db ? ; bank size in KB
+ UCHAR NumberOfImagePages;// db ? ; number of images
+ UCHAR ReservedForPageFunction;//db 1 ; reserved for page function
+
+//; Direct Color fields(required for direct/6 and YUV/7 memory models)
+ UCHAR RedMaskSize; // db ? ; size of direct color red mask in bits
+ UCHAR RedFieldPosition; // db ? ; bit position of lsb of red mask
+ UCHAR GreenMaskSize; // db ? ; size of direct color green mask in bits
+ UCHAR GreenFieldPosition; // db ? ; bit position of lsb of green mask
+ UCHAR BlueMaskSize; // db ? ; size of direct color blue mask in bits
+ UCHAR BlueFieldPosition; // db ? ; bit position of lsb of blue mask
+ UCHAR RsvdMaskSize; // db ? ; size of direct color reserved mask in bits
+ UCHAR RsvdFieldPosition; // db ? ; bit position of lsb of reserved mask
+ UCHAR DirectColorModeInfo;// db ? ; direct color mode attributes
+
+//; Mandatory information for VBE 2.0 and above
+ ULONG PhysBasePtr; // dd ? ; physical address for flat memory frame buffer
+ ULONG Reserved_1; // dd 0 ; reserved - always set to 0
+ USHORT Reserved_2; // dw 0 ; reserved - always set to 0
+
+//; Mandatory information for VBE 3.0 and above
+ USHORT LinBytesPerScanLine; // dw ? ; bytes per scan line for linear modes
+ UCHAR BnkNumberOfImagePages;// db ? ; number of images for banked modes
+ UCHAR LinNumberOfImagPages; // db ? ; number of images for linear modes
+ UCHAR LinRedMaskSize; // db ? ; size of direct color red mask(linear modes)
+ UCHAR LinRedFieldPosition; // db ? ; bit position of lsb of red mask(linear modes)
+ UCHAR LinGreenMaskSize; // db ? ; size of direct color green mask(linear modes)
+ UCHAR LinGreenFieldPosition;// db ? ; bit position of lsb of green mask(linear modes)
+ UCHAR LinBlueMaskSize; // db ? ; size of direct color blue mask(linear modes)
+ UCHAR LinBlueFieldPosition; // db ? ; bit position of lsb of blue mask(linear modes)
+ UCHAR LinRsvdMaskSize; // db ? ; size of direct color reserved mask(linear modes)
+ UCHAR LinRsvdFieldPosition; // db ? ; bit position of lsb of reserved mask(linear modes)
+ ULONG MaxPixelClock; // dd ? ; maximum pixel clock(in Hz) for graphics mode
+ UCHAR Reserved; // db 190 dup (0)
} VESA_MODE_INFO_BLOCK;
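The MaskSize/FieldPosition pairs in the mode info block describe where each color channel sits within a pixel. A small sketch of extracting one channel using those fields (illustrative helper; VBE mask sizes are below 32, so the shift is safe):

/* Extract a color channel from a pixel given its VBE mask size and LSB position. */
static unsigned int vesa_extract_channel(unsigned long pixel,
                                         unsigned char mask_size,
                                         unsigned char field_pos)
{
        return (pixel >> field_pos) & ((1UL << mask_size) - 1);
}

For example, with RedMaskSize=8 and RedFieldPosition=16 (a typical 8:8:8 mode), this returns bits [23:16].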
-/* BIOS function CALLS */
-#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 /* ATI Extended Function code */
+// BIOS function CALLS
+#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 // ATI Extended Function code
#define ATOM_BIOS_FUNCTION_COP_MODE 0x00
#define ATOM_BIOS_FUNCTION_SHORT_QUERY1 0x04
#define ATOM_BIOS_FUNCTION_SHORT_QUERY2 0x05
#define ATOM_BIOS_FUNCTION_SHORT_QUERY3 0x06
-#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B
+#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B
#define ATOM_BIOS_FUNCTION_ASIC_DSTATE 0x0E
#define ATOM_BIOS_FUNCTION_DEBUG_PLAY 0x0F
#define ATOM_BIOS_FUNCTION_STV_STD 0x16
@@ -4217,100 +5137,135 @@ typedef struct _VESA_MODE_INFO_BLOCK {
#define ATOM_BIOS_FUNCTION_PANEL_CONTROL 0x82
#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET 0x83
#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH 0x84
-#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A
+#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A
#define ATOM_BIOS_FUNCTION_SET_CMOS 0x8B
-#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 /* Sub function 80 */
-#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 /* Sub function 80 */
+#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 // Sub function 80
+#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 // Sub function 80
#define ATOM_BIOS_FUNCTION_DISPLAY_INFO 0x8D
#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF 0x8E
-#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F
-#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 /* Sub function 03 */
-#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 /* Sub function 7 */
-#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 /* Notify caller the current thermal state */
-#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 /* Notify caller the current critical state */
-#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 /* Sub function 85 */
-#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900 /* Sub function 89 */
-#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 /* Notify caller that ADC is supported */
-
-#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 /* Set DPMS */
-#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 /* BL: Sub function 01 */
-#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 /* BL: Sub function 02 */
-#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 /* BH Parameter for DPMS ON. */
-#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 /* BH Parameter for DPMS STANDBY */
-#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 /* BH Parameter for DPMS SUSPEND */
-#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 /* BH Parameter for DPMS OFF */
-#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 /* BH Parameter for DPMS REDUCE ON (NOT SUPPORTED) */
+#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F
+#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 // Sub function 03
+#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 // Sub function 7
+#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 // Notify caller the current thermal state
+#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 // Notify caller the current critical state
+#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 // Sub function 85
+#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900// Sub function 89
+#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 // Notify caller that ADC is supported
+
+
+#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 // Set DPMS
+#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 // BL: Sub function 01
+#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 // BL: Sub function 02
+#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 // BH Parameter for DPMS ON.
+#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 // BH Parameter for DPMS STANDBY
+#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 // BH Parameter for DPMS SUSPEND
+#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 // BH Parameter for DPMS OFF
+#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 // BH Parameter for DPMS REDUCE ON (NOT SUPPORTED)
#define ATOM_BIOS_RETURN_CODE_MASK 0x0000FF00L
#define ATOM_BIOS_REG_HIGH_MASK 0x0000FF00L
#define ATOM_BIOS_REG_LOW_MASK 0x000000FFL
-/* structure used for VBIOS only */
+// structure used for VBIOS only
-/* DispOutInfoTable */
-typedef struct _ASIC_TRANSMITTER_INFO {
+//DispOutInfoTable
+typedef struct _ASIC_TRANSMITTER_INFO
+{
USHORT usTransmitterObjId;
USHORT usSupportDevice;
- UCHAR ucTransmitterCmdTblId;
- UCHAR ucConfig;
- UCHAR ucEncoderID; /* available 1st encoder ( default ) */
- UCHAR ucOptionEncoderID; /* available 2nd encoder ( optional ) */
- UCHAR uc2ndEncoderID;
- UCHAR ucReserved;
-} ASIC_TRANSMITTER_INFO;
-
-typedef struct _ASIC_ENCODER_INFO {
+ UCHAR ucTransmitterCmdTblId;
+ UCHAR ucConfig;
+ UCHAR ucEncoderID; //available 1st encoder ( default )
+ UCHAR ucOptionEncoderID; //available 2nd encoder ( optional )
+ UCHAR uc2ndEncoderID;
+ UCHAR ucReserved;
+}ASIC_TRANSMITTER_INFO;
+
+typedef struct _ASIC_ENCODER_INFO
+{
UCHAR ucEncoderID;
UCHAR ucEncoderConfig;
- USHORT usEncoderCmdTblId;
-} ASIC_ENCODER_INFO;
+ USHORT usEncoderCmdTblId;
+}ASIC_ENCODER_INFO;
+
+typedef struct _ATOM_DISP_OUT_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT ptrTransmitterInfo;
+ USHORT ptrEncoderInfo;
+ ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
+ ASIC_ENCODER_INFO asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO;
-typedef struct _ATOM_DISP_OUT_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
+typedef struct _ATOM_DISP_OUT_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
USHORT ptrTransmitterInfo;
USHORT ptrEncoderInfo;
- ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
- ASIC_ENCODER_INFO asEncoderInfo[1];
-} ATOM_DISP_OUT_INFO;
+ USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary.
+ ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
+ ASIC_ENCODER_INFO asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO_V2;
-/* DispDevicePriorityInfo */
-typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
+// DispDevicePriorityInfo
+typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
USHORT asDevicePriority[16];
-} ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
-
-/* ProcessAuxChannelTransactionTable */
-typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS {
- USHORT lpAuxRequest;
- USHORT lpDataOut;
- UCHAR ucChannelID;
- union {
- UCHAR ucReplyStatus;
- UCHAR ucDelay;
+}ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+{
+ USHORT lpAuxRequest;
+ USHORT lpDataOut;
+ UCHAR ucChannelID;
+ union
+ {
+ UCHAR ucReplyStatus;
+ UCHAR ucDelay;
+ };
+ UCHAR ucDataOutLen;
+ UCHAR ucReserved;
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2
+{
+ USHORT lpAuxRequest;
+ USHORT lpDataOut;
+ UCHAR ucChannelID;
+ union
+ {
+ UCHAR ucReplyStatus;
+ UCHAR ucDelay;
};
- UCHAR ucDataOutLen;
- UCHAR ucReserved;
-} PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;
+ UCHAR ucDataOutLen;
+ UCHAR ucHPD_ID; //=0: HPD1, =1: HPD2, =2: HPD3, =3: HPD4, =4: HPD5, =5: HPD6
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2;
#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
-/* GetSinkType */
+//GetSinkType
-typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS
+{
USHORT ucLinkClock;
- union {
- UCHAR ucConfig; /* for DP training command */
- UCHAR ucI2cId; /* use for GET_SINK_TYPE command */
+ union
+ {
+ UCHAR ucConfig; // for DP training command
+ UCHAR ucI2cId; // use for GET_SINK_TYPE command
};
UCHAR ucAction;
UCHAR ucStatus;
UCHAR ucLaneNum;
UCHAR ucReserved[2];
-} DP_ENCODER_SERVICE_PARAMETERS;
+}DP_ENCODER_SERVICE_PARAMETERS;
-/* ucAction */
+// ucAction
#define ATOM_DP_ACTION_GET_SINK_TYPE 0x01
+/* obsolete */
#define ATOM_DP_ACTION_TRAINING_START 0x02
#define ATOM_DP_ACTION_TRAINING_COMPLETE 0x03
#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL 0x04
@@ -4318,7 +5273,7 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
#define ATOM_DP_ACTION_GET_VSWING_PREEMP 0x06
#define ATOM_DP_ACTION_BLANKING 0x07
-/* ucConfig */
+// ucConfig
#define ATOM_DP_CONFIG_ENCODER_SEL_MASK 0x03
#define ATOM_DP_CONFIG_DIG1_ENCODER 0x00
#define ATOM_DP_CONFIG_DIG2_ENCODER 0x01
@@ -4326,14 +5281,14 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
#define ATOM_DP_CONFIG_LINK_SEL_MASK 0x04
#define ATOM_DP_CONFIG_LINK_A 0x00
#define ATOM_DP_CONFIG_LINK_B 0x04
-
+/* /obsolete */
#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
-/* DP_TRAINING_TABLE */
-#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
+// DP_TRAINING_TABLE
+#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 )
-#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16)
-#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24)
+#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16 )
+#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24 )
#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 32)
#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 40)
#define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 48)
@@ -4341,183 +5296,241 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 64)
#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 72)
#define DP_I2C_AUX_DDC_READ_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 76)
-#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80)
+#define DP_I2C_AUX_DDC_WRITE_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80)
+#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 84)
-typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS {
- UCHAR ucI2CSpeed;
- union {
- UCHAR ucRegIndex;
- UCHAR ucStatus;
+typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+{
+ UCHAR ucI2CSpeed;
+ union
+ {
+ UCHAR ucRegIndex;
+ UCHAR ucStatus;
};
- USHORT lpI2CDataOut;
- UCHAR ucFlag;
- UCHAR ucTransBytes;
- UCHAR ucSlaveAddr;
- UCHAR ucLineNumber;
-} PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;
+ USHORT lpI2CDataOut;
+ UCHAR ucFlag;
+ UCHAR ucTransBytes;
+ UCHAR ucSlaveAddr;
+ UCHAR ucLineNumber;
+}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;
#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
-/* ucFlag */
+//ucFlag
#define HW_I2C_WRITE 1
#define HW_I2C_READ 0
+#define I2C_2BYTE_ADDR 0x02
+typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
+{
+ UCHAR ucHWBlkInst; // HW block instance, 0, 1, 2, ...
+ UCHAR ucReserved[3];
+}SET_HWBLOCK_INSTANCE_PARAMETER_V2;
+
+#define HWBLKINST_INSTANCE_MASK 0x07
+#define HWBLKINST_HWBLK_MASK 0xF0
+#define HWBLKINST_HWBLK_SHIFT 0x04
+
+//ucHWBlock
+#define SELECT_DISP_ENGINE 0
+#define SELECT_DISP_PLL 1
+#define SELECT_DCIO_UNIPHY_LINK0 2
+#define SELECT_DCIO_UNIPHY_LINK1 3
+#define SELECT_DCIO_IMPCAL 4
+#define SELECT_DCIO_DIG 6
+#define SELECT_CRTC_PIXEL_RATE 7
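Per the masks above, ucHWBlkInst appears to pack a SELECT_* block id in bits [7:4] and an instance number in the low bits. A hedged sketch of composing the byte (helper name is hypothetical):

/* Build ucHWBlkInst from a SELECT_* block id and an instance number. */
static unsigned char atom_hwblk_inst(unsigned char block, unsigned char instance)
{
        return ((block << HWBLKINST_HWBLK_SHIFT) & HWBLKINST_HWBLK_MASK) |
               (instance & HWBLKINST_INSTANCE_MASK);
}
/* e.g. atom_hwblk_inst(SELECT_DCIO_UNIPHY_LINK0, 1) selects UNIPHY link 0, instance 1 */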
+
+/****************************************************************************/
+//Portion VI: Definitions for VBIOS MC scratch registers that the driver uses
/****************************************************************************/
-/* Portion VI: Definitinos being oboselete */
+
+#define MC_MISC0__MEMORY_TYPE_MASK 0xF0000000
+#define MC_MISC0__MEMORY_TYPE__GDDR1 0x10000000
+#define MC_MISC0__MEMORY_TYPE__DDR2 0x20000000
+#define MC_MISC0__MEMORY_TYPE__GDDR3 0x30000000
+#define MC_MISC0__MEMORY_TYPE__GDDR4 0x40000000
+#define MC_MISC0__MEMORY_TYPE__GDDR5 0x50000000
+#define MC_MISC0__MEMORY_TYPE__DDR3 0xB0000000
+
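Given the values above, the VRAM type occupies the top nibble of the MC scratch word; a minimal decode sketch (function name is illustrative):

/* True if the MC_MISC0 scratch value reports GDDR5 memory. */
static int atom_mem_is_gddr5(unsigned long mc_misc0)
{
        return (mc_misc0 & MC_MISC0__MEMORY_TYPE_MASK) ==
               MC_MISC0__MEMORY_TYPE__GDDR5;
}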
+/****************************************************************************/
+//Portion VI: Definitions being obsolete
/****************************************************************************/
-/* ========================================================================================== */
-/* Remove the definitions below when driver is ready! */
-typedef struct _ATOM_DAC_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMaxFrequency; /* in 10kHz unit */
- USHORT usReserved;
-} ATOM_DAC_INFO;
-
-typedef struct _COMPASSIONATE_DATA {
- ATOM_COMMON_TABLE_HEADER sHeader;
-
- /* ============================== DAC1 portion */
- UCHAR ucDAC1_BG_Adjustment;
- UCHAR ucDAC1_DAC_Adjustment;
- USHORT usDAC1_FORCE_Data;
- /* ============================== DAC2 portion */
- UCHAR ucDAC2_CRT2_BG_Adjustment;
- UCHAR ucDAC2_CRT2_DAC_Adjustment;
- USHORT usDAC2_CRT2_FORCE_Data;
- USHORT usDAC2_CRT2_MUX_RegisterIndex;
- UCHAR ucDAC2_CRT2_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
- UCHAR ucDAC2_NTSC_BG_Adjustment;
- UCHAR ucDAC2_NTSC_DAC_Adjustment;
- USHORT usDAC2_TV1_FORCE_Data;
- USHORT usDAC2_TV1_MUX_RegisterIndex;
- UCHAR ucDAC2_TV1_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
- UCHAR ucDAC2_CV_BG_Adjustment;
- UCHAR ucDAC2_CV_DAC_Adjustment;
- USHORT usDAC2_CV_FORCE_Data;
- USHORT usDAC2_CV_MUX_RegisterIndex;
- UCHAR ucDAC2_CV_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
- UCHAR ucDAC2_PAL_BG_Adjustment;
- UCHAR ucDAC2_PAL_DAC_Adjustment;
- USHORT usDAC2_TV2_FORCE_Data;
-} COMPASSIONATE_DATA;
+//==========================================================================================
+//Remove the definitions below when driver is ready!
+typedef struct _ATOM_DAC_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMaxFrequency; // in 10kHz unit
+ USHORT usReserved;
+}ATOM_DAC_INFO;
+
+
+typedef struct _COMPASSIONATE_DATA
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+
+ //============================== DAC1 portion
+ UCHAR ucDAC1_BG_Adjustment;
+ UCHAR ucDAC1_DAC_Adjustment;
+ USHORT usDAC1_FORCE_Data;
+ //============================== DAC2 portion
+ UCHAR ucDAC2_CRT2_BG_Adjustment;
+ UCHAR ucDAC2_CRT2_DAC_Adjustment;
+ USHORT usDAC2_CRT2_FORCE_Data;
+ USHORT usDAC2_CRT2_MUX_RegisterIndex;
+ UCHAR ucDAC2_CRT2_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+ UCHAR ucDAC2_NTSC_BG_Adjustment;
+ UCHAR ucDAC2_NTSC_DAC_Adjustment;
+ USHORT usDAC2_TV1_FORCE_Data;
+ USHORT usDAC2_TV1_MUX_RegisterIndex;
+ UCHAR ucDAC2_TV1_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+ UCHAR ucDAC2_CV_BG_Adjustment;
+ UCHAR ucDAC2_CV_DAC_Adjustment;
+ USHORT usDAC2_CV_FORCE_Data;
+ USHORT usDAC2_CV_MUX_RegisterIndex;
+ UCHAR ucDAC2_CV_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+ UCHAR ucDAC2_PAL_BG_Adjustment;
+ UCHAR ucDAC2_PAL_DAC_Adjustment;
+ USHORT usDAC2_TV2_FORCE_Data;
+}COMPASSIONATE_DATA;
/****************************Supported Device Info Table Definitions**********************/
-/* ucConnectInfo: */
-/* [7:4] - connector type */
-/* = 1 - VGA connector */
-/* = 2 - DVI-I */
-/* = 3 - DVI-D */
-/* = 4 - DVI-A */
-/* = 5 - SVIDEO */
-/* = 6 - COMPOSITE */
-/* = 7 - LVDS */
-/* = 8 - DIGITAL LINK */
-/* = 9 - SCART */
-/* = 0xA - HDMI_type A */
-/* = 0xB - HDMI_type B */
-/* = 0xE - Special case1 (DVI+DIN) */
-/* Others=TBD */
-/* [3:0] - DAC Associated */
-/* = 0 - no DAC */
-/* = 1 - DACA */
-/* = 2 - DACB */
-/* = 3 - External DAC */
-/* Others=TBD */
-/* */
-
-typedef struct _ATOM_CONNECTOR_INFO {
+// ucConnectInfo:
+// [7:4] - connector type
+// = 1 - VGA connector
+// = 2 - DVI-I
+// = 3 - DVI-D
+// = 4 - DVI-A
+// = 5 - SVIDEO
+// = 6 - COMPOSITE
+// = 7 - LVDS
+// = 8 - DIGITAL LINK
+// = 9 - SCART
+// = 0xA - HDMI_type A
+// = 0xB - HDMI_type B
+// = 0xE - Special case1 (DVI+DIN)
+// Others=TBD
+// [3:0] - DAC Associated
+// = 0 - no DAC
+// = 1 - DACA
+// = 2 - DACB
+// = 3 - External DAC
+// Others=TBD
+//
+
+typedef struct _ATOM_CONNECTOR_INFO
+{
#if ATOM_BIG_ENDIAN
- UCHAR bfConnectorType:4;
- UCHAR bfAssociatedDAC:4;
+ UCHAR bfConnectorType:4;
+ UCHAR bfAssociatedDAC:4;
#else
- UCHAR bfAssociatedDAC:4;
- UCHAR bfConnectorType:4;
+ UCHAR bfAssociatedDAC:4;
+ UCHAR bfConnectorType:4;
#endif
-} ATOM_CONNECTOR_INFO;
+}ATOM_CONNECTOR_INFO;
+
+typedef union _ATOM_CONNECTOR_INFO_ACCESS
+{
+ ATOM_CONNECTOR_INFO sbfAccess;
+ UCHAR ucAccess;
+}ATOM_CONNECTOR_INFO_ACCESS;
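ATOM_CONNECTOR_INFO_ACCESS exists so the packed ucConnectInfo byte documented above can be read through bitfields on either endianness. A short usage sketch (the helper is illustrative):

/* Split a packed connector info byte into its [7:4] type and [3:0] DAC fields. */
static void atom_decode_connector(unsigned char info,
                                  unsigned char *type, unsigned char *dac)
{
        ATOM_CONNECTOR_INFO_ACCESS acc;

        acc.ucAccess = info;
        *type = acc.sbfAccess.bfConnectorType; /* e.g. 1 = VGA, 2 = DVI-I */
        *dac  = acc.sbfAccess.bfAssociatedDAC; /* e.g. 1 = DACA, 2 = DACB */
}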
-typedef union _ATOM_CONNECTOR_INFO_ACCESS {
- ATOM_CONNECTOR_INFO sbfAccess;
- UCHAR ucAccess;
-} ATOM_CONNECTOR_INFO_ACCESS;
+typedef struct _ATOM_CONNECTOR_INFO_I2C
+{
+ ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+}ATOM_CONNECTOR_INFO_I2C;
-typedef struct _ATOM_CONNECTOR_INFO_I2C {
- ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
-} ATOM_CONNECTOR_INFO_I2C;
-typedef struct _ATOM_SUPPORTED_DEVICES_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usDeviceSupport;
- ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
-} ATOM_SUPPORTED_DEVICES_INFO;
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
+}ATOM_SUPPORTED_DEVICES_INFO;
#define NO_INT_SRC_MAPPED 0xFF
-typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP {
- UCHAR ucIntSrcBitmap;
-} ATOM_CONNECTOR_INC_SRC_BITMAP;
-
-typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usDeviceSupport;
- ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
- ATOM_CONNECTOR_INC_SRC_BITMAP
- asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
-} ATOM_SUPPORTED_DEVICES_INFO_2;
-
-typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usDeviceSupport;
- ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
- ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
-} ATOM_SUPPORTED_DEVICES_INFO_2d1;
+typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP
+{
+ UCHAR ucIntSrcBitmap;
+}ATOM_CONNECTOR_INC_SRC_BITMAP;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+ ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+}ATOM_SUPPORTED_DEVICES_INFO_2;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
+ ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_SUPPORTED_DEVICES_INFO_2d1;
#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1
-typedef struct _ATOM_MISC_CONTROL_INFO {
- USHORT usFrequency;
- UCHAR ucPLL_ChargePump; /* PLL charge-pump gain control */
- UCHAR ucPLL_DutyCycle; /* PLL duty cycle control */
- UCHAR ucPLL_VCO_Gain; /* PLL VCO gain control */
- UCHAR ucPLL_VoltageSwing; /* PLL driver voltage swing control */
-} ATOM_MISC_CONTROL_INFO;
+
+
+typedef struct _ATOM_MISC_CONTROL_INFO
+{
+ USHORT usFrequency;
+ UCHAR ucPLL_ChargePump; // PLL charge-pump gain control
+ UCHAR ucPLL_DutyCycle; // PLL duty cycle control
+ UCHAR ucPLL_VCO_Gain; // PLL VCO gain control
+ UCHAR ucPLL_VoltageSwing; // PLL driver voltage swing control
+}ATOM_MISC_CONTROL_INFO;
+
#define ATOM_MAX_MISC_INFO 4
-typedef struct _ATOM_TMDS_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMaxFrequency; /* in 10Khz */
- ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO];
-} ATOM_TMDS_INFO;
+typedef struct _ATOM_TMDS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMaxFrequency; // in 10Khz
+ ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO];
+}ATOM_TMDS_INFO;
+
+
+typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE
+{
+ UCHAR ucTVStandard; //Same as TV standards defined above,
+ UCHAR ucPadding[1];
+}ATOM_ENCODER_ANALOG_ATTRIBUTE;
-typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE {
- UCHAR ucTVStandard; /* Same as TV standards defined above, */
- UCHAR ucPadding[1];
-} ATOM_ENCODER_ANALOG_ATTRIBUTE;
+typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE
+{
+ UCHAR ucAttribute; //Same as other digital encoder attributes defined above
+ UCHAR ucPadding[1];
+}ATOM_ENCODER_DIGITAL_ATTRIBUTE;
-typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE {
- UCHAR ucAttribute; /* Same as other digital encoder attributes defined above */
- UCHAR ucPadding[1];
-} ATOM_ENCODER_DIGITAL_ATTRIBUTE;
+typedef union _ATOM_ENCODER_ATTRIBUTE
+{
+ ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
+ ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
+}ATOM_ENCODER_ATTRIBUTE;
-typedef union _ATOM_ENCODER_ATTRIBUTE {
- ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
- ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
-} ATOM_ENCODER_ATTRIBUTE;
-typedef struct _DVO_ENCODER_CONTROL_PARAMETERS {
- USHORT usPixelClock;
- USHORT usEncoderID;
- UCHAR ucDeviceType; /* Use ATOM_DEVICE_xxx1_Index to indicate device type only. */
- UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */
- ATOM_ENCODER_ATTRIBUTE usDevAttr;
-} DVO_ENCODER_CONTROL_PARAMETERS;
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock;
+ USHORT usEncoderID;
+ UCHAR ucDeviceType; //Use ATOM_DEVICE_xxx1_Index to indicate device type only.
+ UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+ ATOM_ENCODER_ATTRIBUTE usDevAttr;
+}DVO_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION
+{
+ DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
+}DVO_ENCODER_CONTROL_PS_ALLOCATION;
-typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION {
- DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
-} DVO_ENCODER_CONTROL_PS_ALLOCATION;
#define ATOM_XTMDS_ASIC_SI164_ID 1
#define ATOM_XTMDS_ASIC_SI178_ID 2
@@ -4526,27 +5539,30 @@ typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION {
#define ATOM_XTMDS_SUPPORTED_DUALLINK 0x00000002
#define ATOM_XTMDS_MVPU_FPGA 0x00000004
-typedef struct _ATOM_XTMDS_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usSingleLinkMaxFrequency;
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* Point the ID on which I2C is used to control external chip */
- UCHAR ucXtransimitterID;
- UCHAR ucSupportedLink; /* Bit field, bit0=1, single link supported;bit1=1,dual link supported */
- UCHAR ucSequnceAlterID; /* Even with the same external TMDS asic, it's possible that the program seqence alters */
- /* due to design. This ID is used to alert driver that the sequence is not "standard"! */
- UCHAR ucMasterAddress; /* Address to control Master xTMDS Chip */
- UCHAR ucSlaveAddress; /* Address to control Slave xTMDS Chip */
-} ATOM_XTMDS_INFO;
-
-typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
- UCHAR ucEnable; /* ATOM_ENABLE=On or ATOM_DISABLE=Off */
- UCHAR ucDevice; /* ATOM_DEVICE_DFP1_INDEX.... */
- UCHAR ucPadding[2];
-} DFP_DPMS_STATUS_CHANGE_PARAMETERS;
+
+typedef struct _ATOM_XTMDS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usSingleLinkMaxFrequency;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //ID of the I2C bus used to control the external chip
+ UCHAR ucXtransimitterID;
+ UCHAR ucSupportedLink; // Bit field: bit0=1, single link supported; bit1=1, dual link supported
+ UCHAR ucSequnceAlterID; // Even with the same external TMDS ASIC, it's possible that the programming sequence alters
+ // due to design. This ID is used to alert the driver that the sequence is not "standard"!
+ UCHAR ucMasterAddress; // Address to control Master xTMDS Chip
+ UCHAR ucSlaveAddress; // Address to control Slave xTMDS Chip
+}ATOM_XTMDS_INFO;
+
+typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE=On or ATOM_DISABLE=Off
+ UCHAR ucDevice; // ATOM_DEVICE_DFP1_INDEX....
+ UCHAR ucPadding[2];
+}DFP_DPMS_STATUS_CHANGE_PARAMETERS;
/****************************Legacy Power Play Table Definitions **********************/
-/* Definitions for ulPowerPlayMiscInfo */
+//Definitions for ulPowerPlayMiscInfo
#define ATOM_PM_MISCINFO_SPLIT_CLOCK 0x00000000L
#define ATOM_PM_MISCINFO_USING_MCLK_SRC 0x00000001L
#define ATOM_PM_MISCINFO_USING_SCLK_SRC 0x00000002L
@@ -4558,8 +5574,8 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN 0x00000020L
#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN 0x00000040L
-#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L /* When this bit set, ucVoltageDropIndex is not an index for GPIO pin, but a voltage ID that SW needs program */
-
+#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L //When this bit is set, ucVoltageDropIndex is not an index to a GPIO pin but a voltage ID that SW needs to program
+
#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN 0x00000100L
#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN 0x00000200L
#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN 0x00000400L
@@ -4569,22 +5585,22 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE 0x00004000L
#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE 0x00008000L
-#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L
+#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L
#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE 0x00020000L
#define ATOM_PM_MISCINFO_POWER_SAVING_MODE 0x00040000L
#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE 0x00080000L
-#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L /* 0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved */
-#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L //0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20
#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE 0x00400000L
#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2 0x00800000L
#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4 0x01000000L
-#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L /* When set, Dynamic */
-#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L /* When set, Dynamic */
-#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L /* When set, This mode is for acceleated 3D mode */
+#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L //When set, dynamic HDP block is enabled
+#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L //When set, dynamic MC host block is enabled
+#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L //When set, this mode is for accelerated 3D
-#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L /* 1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) */
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L //1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5-Optimal Performance (default state with default clocks)
#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28
#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS 0x80000000L
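The settings group is a 3-bit field at bit 28 of ulMiscInfo; a sketch of pulling it out with the mask/shift pair above (helper name is hypothetical):

/* Returns 1=Optimal Battery Life ... 5=Optimal Performance, per the mask comment. */
static unsigned int atom_pm_settings_group(unsigned long misc_info)
{
        return (misc_info & ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK) >>
               ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT;
}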
@@ -4594,55 +5610,59 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO 0x00000008L
#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE 0x00000010L
#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN 0x00000020L
-#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L /* If this bit is set in multi-pp mode, then driver will pack up one with the minior power consumption. */
- /* If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback */
+#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L //If this bit is set in multi-pp mode, then the driver will pick the one with the lower power consumption.
+ //If it's not set in any pp mode, the driver will use its default logic to pick a pp mode for video playback
#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC 0x00000080L
#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN 0x00000100L
-#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L
-
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=1 */
-typedef struct _ATOM_POWERMODE_INFO {
- ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
- ULONG ulReserved1; /* must set to 0 */
- ULONG ulReserved2; /* must set to 0 */
- USHORT usEngineClock;
- USHORT usMemoryClock;
- UCHAR ucVoltageDropIndex; /* index to GPIO table */
- UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
- UCHAR ucMinTemperature;
- UCHAR ucMaxTemperature;
- UCHAR ucNumPciELanes; /* number of PCIE lanes */
-} ATOM_POWERMODE_INFO;
-
-/* ucTableFormatRevision=2 */
-/* ucTableContentRevision=1 */
-typedef struct _ATOM_POWERMODE_INFO_V2 {
- ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
- ULONG ulMiscInfo2;
- ULONG ulEngineClock;
- ULONG ulMemoryClock;
- UCHAR ucVoltageDropIndex; /* index to GPIO table */
- UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
- UCHAR ucMinTemperature;
- UCHAR ucMaxTemperature;
- UCHAR ucNumPciELanes; /* number of PCIE lanes */
-} ATOM_POWERMODE_INFO_V2;
-
-/* ucTableFormatRevision=2 */
-/* ucTableContentRevision=2 */
-typedef struct _ATOM_POWERMODE_INFO_V3 {
- ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
- ULONG ulMiscInfo2;
- ULONG ulEngineClock;
- ULONG ulMemoryClock;
- UCHAR ucVoltageDropIndex; /* index to Core (VDDC) votage table */
- UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
- UCHAR ucMinTemperature;
- UCHAR ucMaxTemperature;
- UCHAR ucNumPciELanes; /* number of PCIE lanes */
- UCHAR ucVDDCI_VoltageDropIndex; /* index to VDDCI votage table */
-} ATOM_POWERMODE_INFO_V3;
+#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct _ATOM_POWERMODE_INFO
+{
+ ULONG ulMiscInfo; //The power level should be arranged in ascending order
+ ULONG ulReserved1; // must set to 0
+ ULONG ulReserved2; // must set to 0
+ USHORT usEngineClock;
+ USHORT usMemoryClock;
+ UCHAR ucVoltageDropIndex; // index to GPIO table
+ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucNumPciELanes; // number of PCIE lanes
+}ATOM_POWERMODE_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct _ATOM_POWERMODE_INFO_V2
+{
+ ULONG ulMiscInfo; //The power level should be arranged in ascending order
+ ULONG ulMiscInfo2;
+ ULONG ulEngineClock;
+ ULONG ulMemoryClock;
+ UCHAR ucVoltageDropIndex; // index to GPIO table
+ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucNumPciELanes; // number of PCIE lanes
+}ATOM_POWERMODE_INFO_V2;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct _ATOM_POWERMODE_INFO_V3
+{
+ ULONG ulMiscInfo; //The power level should be arranged in ascending order
+ ULONG ulMiscInfo2;
+ ULONG ulEngineClock;
+ ULONG ulMemoryClock;
+ UCHAR ucVoltageDropIndex; // index to Core (VDDC) voltage table
+ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucNumPciELanes; // number of PCIE lanes
+ UCHAR ucVDDCI_VoltageDropIndex; // index to VDDCI voltage table
+}ATOM_POWERMODE_INFO_V3;
+
#define ATOM_MAX_NUMBEROF_POWER_BLOCK 8
@@ -4655,40 +5675,44 @@ typedef struct _ATOM_POWERMODE_INFO_V3 {
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649 0x04
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64 0x05
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375 0x06
-#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 /* Andigilog */
-
-typedef struct _ATOM_POWERPLAY_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucOverdriveThermalController;
- UCHAR ucOverdriveI2cLine;
- UCHAR ucOverdriveIntBitmap;
- UCHAR ucOverdriveControllerAddress;
- UCHAR ucSizeOfPowerModeEntry;
- UCHAR ucNumOfPowerModeEntries;
- ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
-} ATOM_POWERPLAY_INFO;
-
-typedef struct _ATOM_POWERPLAY_INFO_V2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucOverdriveThermalController;
- UCHAR ucOverdriveI2cLine;
- UCHAR ucOverdriveIntBitmap;
- UCHAR ucOverdriveControllerAddress;
- UCHAR ucSizeOfPowerModeEntry;
- UCHAR ucNumOfPowerModeEntries;
- ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
-} ATOM_POWERPLAY_INFO_V2;
-
-typedef struct _ATOM_POWERPLAY_INFO_V3 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucOverdriveThermalController;
- UCHAR ucOverdriveI2cLine;
- UCHAR ucOverdriveIntBitmap;
- UCHAR ucOverdriveControllerAddress;
- UCHAR ucSizeOfPowerModeEntry;
- UCHAR ucNumOfPowerModeEntries;
- ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
-} ATOM_POWERPLAY_INFO_V3;
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 // Andigilog
+
+
+typedef struct _ATOM_POWERPLAY_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucOverdriveThermalController;
+ UCHAR ucOverdriveI2cLine;
+ UCHAR ucOverdriveIntBitmap;
+ UCHAR ucOverdriveControllerAddress;
+ UCHAR ucSizeOfPowerModeEntry;
+ UCHAR ucNumOfPowerModeEntries;
+ ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO;
+
+typedef struct _ATOM_POWERPLAY_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucOverdriveThermalController;
+ UCHAR ucOverdriveI2cLine;
+ UCHAR ucOverdriveIntBitmap;
+ UCHAR ucOverdriveControllerAddress;
+ UCHAR ucSizeOfPowerModeEntry;
+ UCHAR ucNumOfPowerModeEntries;
+ ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V2;
+
+typedef struct _ATOM_POWERPLAY_INFO_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucOverdriveThermalController;
+ UCHAR ucOverdriveI2cLine;
+ UCHAR ucOverdriveIntBitmap;
+ UCHAR ucOverdriveControllerAddress;
+ UCHAR ucSizeOfPowerModeEntry;
+ UCHAR ucNumOfPowerModeEntries;
+ ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V3;
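Each table carries ucSizeOfPowerModeEntry alongside the entry count, presumably so a driver can walk entries written with a different structure size than it was compiled against. A hedged iteration sketch that honors the BIOS-reported stride rather than sizeof():

/* Walk the V3 power modes using the stride the table itself reports. */
static void atom_for_each_powermode(ATOM_POWERPLAY_INFO_V3 *info)
{
        unsigned char *p = (unsigned char *)info->asPowerPlayInfo;
        int i;

        for (i = 0; i < info->ucNumOfPowerModeEntries; i++) {
                ATOM_POWERMODE_INFO_V3 *mode = (ATOM_POWERMODE_INFO_V3 *)p;

                (void)mode; /* ... consume mode->ulEngineClock, mode->ulMemoryClock ... */
                p += info->ucSizeOfPowerModeEntry;
        }
}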
/* New PPlib */
/**************************************************************************/
@@ -4873,40 +5897,42 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
 UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum downstream width could be bigger per display BW requirement.
USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
- ULONG ulFlags;
+ ULONG ulFlags;
} ATOM_PPLIB_RS780_CLOCK_INFO;
-#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
-#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
-#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
-#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
-#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
-#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
-#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
/**************************************************************************/
-/* Following definitions are for compatiblity issue in different SW components. */
+
+// Following definitions are for compatibility issues across different SW components.
#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
-#define Object_Info Object_Header
+#define Object_Info Object_Header
#define AdjustARB_SEQ MC_InitParameter
#define VRAM_GPIO_DetectionInfo VoltageObjectInfo
-#define ASIC_VDDCI_Info ASIC_ProfilingInfo
+#define ASIC_VDDCI_Info ASIC_ProfilingInfo
#define ASIC_MVDDQ_Info MemoryTrainingInfo
-#define SS_Info PPLL_SS_Info
+#define SS_Info PPLL_SS_Info
#define ASIC_MVDDC_Info ASIC_InternalSS_Info
#define DispDevicePriorityInfo SaveRestoreInfo
#define DispOutInfo TV_VideoMode
+
#define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE
#define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE
-/* New device naming, remove them when both DAL/VBIOS is ready */
+//New device naming, remove them when both DAL/VBIOS is ready
#define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS
#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS
@@ -4921,7 +5947,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX
#define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX
-
+
#define ATOM_DEVICE_DFP2I_INDEX 0x00000009
#define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX)
@@ -4939,7 +5965,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define ATOM_S3_DFP2I_ACTIVEb1 0x02
-#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE
+#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE
#define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE
#define ATOM_S3_DFP2I_ACTIVE 0x00000200L
@@ -4958,14 +5984,14 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define ATOM_S6_ACC_REQ_DFP2Ib3 0x02
#define ATOM_S6_ACC_REQ_DFP2I 0x02000000L
-#define TMDS1XEncoderControl DVOEncoderControl
+#define TMDS1XEncoderControl DVOEncoderControl
#define DFP1XOutputControl DVOOutputControl
#define ExternalDFPOutputControl DFP1XOutputControl
#define EnableExternalTMDS_Encoder TMDS1XEncoderControl
#define DFP1IOutputControl TMDSAOutputControl
-#define DFP2IOutputControl LVTMAOutputControl
+#define DFP2IOutputControl LVTMAOutputControl
#define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS
#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
@@ -4974,7 +6000,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
#define ucDac1Standard ucDacStandard
-#define ucDac2Standard ucDacStandard
+#define ucDac2Standard ucDacStandard
#define TMDS1EncoderControl TMDSAEncoderControl
#define TMDS2EncoderControl LVTMAEncoderControl
@@ -4984,12 +6010,56 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define CRT1OutputControl DAC1OutputControl
#define CRT2OutputControl DAC2OutputControl
-/* These two lines will be removed for sure in a few days, will follow up with Michael V. */
+//These two lines will be removed for sure in a few days, will follow up with Michael V.
#define EnableLVDS_SS EnableSpreadSpectrumOnPPLL
-#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL
+#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+//#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
+//#define ATOM_S2_LCD1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_TV1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_DFP1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_CRT2_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+
+#define ATOM_S6_ACC_REQ_TV2 0x00400000L
+#define ATOM_DEVICE_TV2_INDEX 0x00000006
+#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX)
+#define ATOM_S0_TV2 0x00100000L
+#define ATOM_S3_TV2_ACTIVE ATOM_S3_DFP6_ACTIVE
+#define ATOM_S3_TV2_CRTC_ACTIVE ATOM_S3_DFP6_CRTC_ACTIVE
+
+//
+#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
+#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L
+#define ATOM_S2_TV1_DPMS_STATE 0x00040000L
+#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L
+#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L
+#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L
+#define ATOM_S2_TV2_DPMS_STATE 0x00400000L
+#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L
+#define ATOM_S2_CV_DPMS_STATE 0x01000000L
+#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L
+#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L
+#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L
+
+#define ATOM_S2_CRT1_DPMS_STATEb2 0x01
+#define ATOM_S2_LCD1_DPMS_STATEb2 0x02
+#define ATOM_S2_TV1_DPMS_STATEb2 0x04
+#define ATOM_S2_DFP1_DPMS_STATEb2 0x08
+#define ATOM_S2_CRT2_DPMS_STATEb2 0x10
+#define ATOM_S2_LCD2_DPMS_STATEb2 0x20
+#define ATOM_S2_TV2_DPMS_STATEb2 0x40
+#define ATOM_S2_DFP2_DPMS_STATEb2 0x80
+#define ATOM_S2_CV_DPMS_STATEb3 0x01
+#define ATOM_S2_DFP3_DPMS_STATEb3 0x02
+#define ATOM_S2_DFP4_DPMS_STATEb3 0x04
+#define ATOM_S2_DFP5_DPMS_STATEb3 0x08
+
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
+#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80
/*********************************************************************************/
-#pragma pack() /* BIOS data must use byte aligment */
+#pragma pack() // BIOS data must use byte alignment
#endif /* _ATOMBIOS_H */
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index af464e35..dd9fdf5 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -245,21 +245,25 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
- atombios_enable_crtc(crtc, 1);
+ atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
- atombios_enable_crtc_memreq(crtc, 1);
- atombios_blank_crtc(crtc, 0);
- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
+ atombios_blank_crtc(crtc, ATOM_DISABLE);
+ /* XXX re-enable when interrupt support is added */
+ if (!ASIC_IS_DCE4(rdev))
+ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
- atombios_blank_crtc(crtc, 1);
+ /* XXX re-enable when interrupt support is added */
+ if (!ASIC_IS_DCE4(rdev))
+ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
- atombios_enable_crtc_memreq(crtc, 0);
- atombios_enable_crtc(crtc, 0);
+ atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
+ atombios_enable_crtc(crtc, ATOM_DISABLE);
break;
}
}
@@ -349,6 +353,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+union atom_enable_ss {
+ ENABLE_LVDS_SS_PARAMETERS legacy;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
+};
+
static void atombios_set_ss(struct drm_crtc *crtc, int enable)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -358,11 +367,14 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
struct radeon_encoder *radeon_encoder = NULL;
struct radeon_encoder_atom_dig *dig = NULL;
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
- ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION args;
- ENABLE_LVDS_SS_PARAMETERS legacy_args;
+ union atom_enable_ss args;
uint16_t percentage = 0;
uint8_t type = 0, step = 0, delay = 0, range = 0;
+ /* XXX add ss support for DCE4 */
+ if (ASIC_IS_DCE4(rdev))
+ return;
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
@@ -386,29 +398,28 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
if (!radeon_encoder)
return;
+ memset(&args, 0, sizeof(args));
if (ASIC_IS_AVIVO(rdev)) {
- memset(&args, 0, sizeof(args));
- args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
- args.ucSpreadSpectrumType = type;
- args.ucSpreadSpectrumStep = step;
- args.ucSpreadSpectrumDelay = delay;
- args.ucSpreadSpectrumRange = range;
- args.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- args.ucEnable = enable;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
+ args.v1.ucSpreadSpectrumType = type;
+ args.v1.ucSpreadSpectrumStep = step;
+ args.v1.ucSpreadSpectrumDelay = delay;
+ args.v1.ucSpreadSpectrumRange = range;
+ args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ args.v1.ucEnable = enable;
} else {
- memset(&legacy_args, 0, sizeof(legacy_args));
- legacy_args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
- legacy_args.ucSpreadSpectrumType = type;
- legacy_args.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
- legacy_args.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
- legacy_args.ucEnable = enable;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&legacy_args);
+ args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
+ args.legacy.ucSpreadSpectrumType = type;
+ args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
+ args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
+ args.legacy.ucEnable = enable;
}
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
union adjust_pixel_clock {
ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
};
static u32 atombios_adjust_pll(struct drm_crtc *crtc,
@@ -420,10 +431,24 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
u32 adjusted_clock = mode->clock;
+ int encoder_mode = 0;
/* reset the pll flags */
pll->flags = 0;
+ /* select the PLL algo */
+ if (ASIC_IS_AVIVO(rdev)) {
+ if (radeon_new_pll == 0)
+ pll->algo = PLL_ALGO_LEGACY;
+ else
+ pll->algo = PLL_ALGO_NEW;
+ } else {
+ if (radeon_new_pll == 1)
+ pll->algo = PLL_ALGO_NEW;
+ else
+ pll->algo = PLL_ALGO_LEGACY;
+ }
+
if (ASIC_IS_AVIVO(rdev)) {
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
@@ -448,10 +473,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
+ encoder_mode = atombios_get_encoder_mode(encoder);
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
adjusted_clock = mode->clock * 2;
+ /* LVDS PLL quirks */
+ if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ pll->algo = dig->pll_algo;
+ }
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -468,14 +499,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
*/
if (ASIC_IS_DCE3(rdev)) {
union adjust_pixel_clock args;
- struct radeon_encoder_atom_dig *dig;
u8 frev, crev;
int index;
- if (!radeon_encoder->enc_priv)
- return adjusted_clock;
- dig = radeon_encoder->enc_priv;
-
index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev);
@@ -489,12 +515,51 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
case 2:
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
- args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ args.v1.ucEncodeMode = encoder_mode;
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
break;
+ case 3:
+ args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
+ args.v3.sInput.ucEncodeMode = encoder_mode;
+ args.v3.sInput.ucDispPllConfig = 0;
+ if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (encoder_mode == ATOM_ENCODER_MODE_DP)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_COHERENT_MODE;
+ else {
+ if (dig->coherent_mode)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_COHERENT_MODE;
+ if (mode->clock > 165000)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_DUAL_LINK;
+ }
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ /* may want to enable SS on DP/eDP eventually */
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;
+ if (mode->clock > 165000)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_DUAL_LINK;
+ }
+ atom_execute_table(rdev->mode_info.atom_context,
+ index, (uint32_t *)&args);
+ adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
+ if (args.v3.sOutput.ucRefDiv) {
+ pll->flags |= RADEON_PLL_USE_REF_DIV;
+ pll->reference_div = args.v3.sOutput.ucRefDiv;
+ }
+ if (args.v3.sOutput.ucPostDiv) {
+ pll->flags |= RADEON_PLL_USE_POST_DIV;
+ pll->post_div = args.v3.sOutput.ucPostDiv;
+ }
+ break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return adjusted_clock;
@@ -513,9 +578,47 @@ union set_pixel_clock {
PIXEL_CLOCK_PARAMETERS v1;
PIXEL_CLOCK_PARAMETERS_V2 v2;
PIXEL_CLOCK_PARAMETERS_V3 v3;
+ PIXEL_CLOCK_PARAMETERS_V5 v5;
};
-void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ u8 frev, crev;
+ int index;
+ union set_pixel_clock args;
+
+ memset(&args, 0, sizeof(args));
+
+ index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+ atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev);
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 5:
+ /* if the default dcpll clock is specified,
+ * SetPixelClock provides the dividers
+ */
+ args.v5.ucCRTC = ATOM_CRTC_INVALID;
+ args.v5.usPixelClock = rdev->clock.default_dispclk;
+ args.v5.ucPpll = ATOM_DCPLL;
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return;
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
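/* Editor's note: a sketch of the intended ordering, mirrored from
 * atombios_crtc_mode_set() later in this patch. On DCE4 the display
 * engine PLL is programmed once before the per-CRTC pixel PLL:
 *
 *	if (ASIC_IS_DCE4(rdev))
 *		atombios_crtc_set_dcpll(crtc);		// display engine clock
 *	atombios_crtc_set_pll(crtc, adjusted_mode);	// pixel clock
 */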
+
+static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -529,12 +632,14 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
u32 adjusted_clock;
+ int encoder_mode = 0;
memset(&args, 0, sizeof(args));
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
+ encoder_mode = atombios_get_encoder_mode(encoder);
break;
}
}
@@ -542,26 +647,24 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (!radeon_encoder)
return;
- if (radeon_crtc->crtc_id == 0)
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
pll = &rdev->clock.p1pll;
- else
+ break;
+ case ATOM_PPLL2:
pll = &rdev->clock.p2pll;
+ break;
+ case ATOM_DCPLL:
+ case ATOM_PPLL_INVALID:
+ pll = &rdev->clock.dcpll;
+ break;
+ }
/* adjust pixel clock as needed */
adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
- if (ASIC_IS_AVIVO(rdev)) {
- if (radeon_new_pll)
- radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
- &fb_div, &frac_fb_div,
- &ref_div, &post_div);
- else
- radeon_compute_pll(pll, adjusted_clock, &pll_clock,
- &fb_div, &frac_fb_div,
- &ref_div, &post_div);
- } else
- radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
- &ref_div, &post_div);
+ radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+ &ref_div, &post_div);
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -576,8 +679,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
args.v1.usFbDiv = cpu_to_le16(fb_div);
args.v1.ucFracFbDiv = frac_fb_div;
args.v1.ucPostDiv = post_div;
- args.v1.ucPpll =
- radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ args.v1.ucPpll = radeon_crtc->pll_id;
args.v1.ucCRTC = radeon_crtc->crtc_id;
args.v1.ucRefDivSrc = 1;
break;
@@ -587,8 +689,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
args.v2.usFbDiv = cpu_to_le16(fb_div);
args.v2.ucFracFbDiv = frac_fb_div;
args.v2.ucPostDiv = post_div;
- args.v2.ucPpll =
- radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ args.v2.ucPpll = radeon_crtc->pll_id;
args.v2.ucCRTC = radeon_crtc->crtc_id;
args.v2.ucRefDivSrc = 1;
break;
@@ -598,12 +699,22 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
args.v3.usFbDiv = cpu_to_le16(fb_div);
args.v3.ucFracFbDiv = frac_fb_div;
args.v3.ucPostDiv = post_div;
- args.v3.ucPpll =
- radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
+ args.v3.ucPpll = radeon_crtc->pll_id;
+ args.v3.ucMiscInfo = (radeon_crtc->pll_id << 2);
args.v3.ucTransmitterId = radeon_encoder->encoder_id;
- args.v3.ucEncoderMode =
- atombios_get_encoder_mode(encoder);
+ args.v3.ucEncoderMode = encoder_mode;
+ break;
+ case 5:
+ args.v5.ucCRTC = radeon_crtc->crtc_id;
+ args.v5.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v5.ucRefDiv = ref_div;
+ args.v5.usFbDiv = cpu_to_le16(fb_div);
+ args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+ args.v5.ucPostDiv = post_div;
+ args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
+ args.v5.ucTransmitterID = radeon_encoder->encoder_id;
+ args.v5.ucEncoderMode = encoder_mode;
+ args.v5.ucPpll = radeon_crtc->pll_id;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
@@ -618,6 +729,140 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_framebuffer *radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_bo *rbo;
+ uint64_t fb_location;
+ uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+ int r;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ DRM_DEBUG("No FB bound\n");
+ return 0;
+ }
+
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+
+ /* Pin framebuffer & get tiling information */
+ obj = radeon_fb->obj;
+ rbo = obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ return -EINVAL;
+ }
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+ break;
+ case 15:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+ break;
+ case 16:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+ break;
+ case 24:
+ case 32:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+ break;
+ default:
+ DRM_ERROR("Unsupported screen depth %d\n",
+ crtc->fb->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ WREG32(AVIVO_D1VGA_CONTROL, 0);
+ break;
+ case 1:
+ WREG32(AVIVO_D2VGA_CONTROL, 0);
+ break;
+ case 2:
+ WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+ break;
+ case 3:
+ WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+ break;
+ case 4:
+ WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+ break;
+ case 5:
+ WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+ break;
+ default:
+ break;
+ }
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+ WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+
+ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
+ WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
+
+ fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
+ WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+ WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+ WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+ crtc->mode.vdisplay);
+ x &= ~3;
+ y &= ~1;
+ WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
+ (x << 16) | y);
+ WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+ (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+ EVERGREEN_INTERLEAVE_EN);
+ else
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+
+ if (old_fb && old_fb != crtc->fb) {
+ radeon_fb = to_radeon_framebuffer(old_fb);
+ rbo = radeon_fb->obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+
+ /* Bytes per pixel may have changed */
+ radeon_bandwidth_update(rdev);
+
+ return 0;
+}
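/* Editor's illustration (values hypothetical): EVERGREEN_GRPH_PITCH is
 * programmed in pixels, not bytes, hence the division above.  For a
 * 1920x1080 ARGB8888 scanout with a 7680-byte pitch:
 *
 *	fb_pitch_pixels = 7680 / (32 / 8);	// == 1920 pixels per line
 */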
+
static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
@@ -755,7 +1000,9 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_DCE4(rdev))
+ return evergreen_crtc_set_base(crtc, x, y, old_fb);
+ else if (ASIC_IS_AVIVO(rdev))
return avivo_crtc_set_base(crtc, x, y, old_fb);
else
return radeon_crtc_set_base(crtc, x, y, old_fb);
@@ -785,6 +1032,46 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
}
}
+static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *test_encoder;
+ struct drm_crtc *test_crtc;
+ uint32_t pll_in_use = 0;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ /* if crtc is driving DP and we have an ext clock, use that */
+ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+ if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
+ if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
+ if (rdev->clock.dp_extclk)
+ return ATOM_PPLL_INVALID;
+ }
+ }
+ }
+
+ /* otherwise, pick one of the plls */
+ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_test_crtc;
+
+ if (crtc == test_crtc)
+ continue;
+
+ radeon_test_crtc = to_radeon_crtc(test_crtc);
+ if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
+ (radeon_test_crtc->pll_id <= ATOM_PPLL2))
+ pll_in_use |= (1 << radeon_test_crtc->pll_id);
+ }
+ if (!(pll_in_use & 1))
+ return ATOM_PPLL1;
+ return ATOM_PPLL2;
+ } else
+ return radeon_crtc->crtc_id;
+
+}
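/* Editor's illustration of the bookkeeping above: ATOM_PPLL1 occupies
 * bit 0 of pll_in_use, so with another CRTC already holding PPLL1:
 *
 *	pll_in_use == (1 << ATOM_PPLL1);	// bit 0 set
 *	// !(pll_in_use & 1) is false, so this CRTC gets ATOM_PPLL2
 */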
+
int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -796,19 +1083,27 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
/* TODO color tiling */
+ /* pick pll */
+ radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+
atombios_set_ss(crtc, 0);
+ /* always set DCPLL */
+ if (ASIC_IS_DCE4(rdev))
+ atombios_crtc_set_dcpll(crtc);
atombios_crtc_set_pll(crtc, adjusted_mode);
atombios_set_ss(crtc, 1);
- atombios_crtc_set_timing(crtc, adjusted_mode);
- if (ASIC_IS_AVIVO(rdev))
- atombios_crtc_set_base(crtc, x, y, old_fb);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+ else if (ASIC_IS_AVIVO(rdev))
+ atombios_crtc_set_timing(crtc, adjusted_mode);
else {
+ atombios_crtc_set_timing(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0)
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
- atombios_crtc_set_base(crtc, x, y, old_fb);
radeon_legacy_atom_fixup(crtc);
}
+ atombios_crtc_set_base(crtc, x, y, old_fb);
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
return 0;
@@ -825,14 +1120,14 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
- atombios_lock_crtc(crtc, 1);
+ atombios_lock_crtc(crtc, ATOM_ENABLE);
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void atombios_crtc_commit(struct drm_crtc *crtc)
{
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
- atombios_lock_crtc(crtc, 0);
+ atombios_lock_crtc(crtc, ATOM_DISABLE);
}
static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
@@ -848,8 +1143,37 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
void radeon_atombios_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc)
{
- if (radeon_crtc->crtc_id == 1)
- radeon_crtc->crtc_offset =
- AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ default:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+ break;
+ case 1:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+ break;
+ case 2:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+ break;
+ case 3:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+ break;
+ case 4:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+ break;
+ case 5:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+ break;
+ }
+ } else {
+ if (radeon_crtc->crtc_id == 1)
+ radeon_crtc->crtc_offset =
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+ else
+ radeon_crtc->crtc_offset = 0;
+ }
+ radeon_crtc->pll_id = -1;
drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 99915a6..8a133bd 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -321,6 +321,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
train_set[lane] = v | p;
}
+union aux_channel_transaction {
+ PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
+ PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
+};
/* radeon aux chan functions */
bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
@@ -329,7 +333,7 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
{
struct drm_device *dev = chan->dev;
struct radeon_device *rdev = dev->dev_private;
- PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+ union aux_channel_transaction args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
int retry_count = 0;
@@ -341,31 +345,33 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
retry:
memcpy(base, req_bytes, num_bytes);
- args.lpAuxRequest = 0;
- args.lpDataOut = 16;
- args.ucDataOutLen = 0;
- args.ucChannelID = chan->rec.i2c_id;
- args.ucDelay = delay / 10;
+ args.v1.lpAuxRequest = 0;
+ args.v1.lpDataOut = 16;
+ args.v1.ucDataOutLen = 0;
+ args.v1.ucChannelID = chan->rec.i2c_id;
+ args.v1.ucDelay = delay / 10;
+ if (ASIC_IS_DCE4(rdev))
+ args.v2.ucHPD_ID = chan->rec.hpd_id;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- if (args.ucReplyStatus && !args.ucDataOutLen) {
- if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
+ if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) {
+ if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10)
goto retry;
DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
- chan->rec.i2c_id, args.ucReplyStatus, retry_count);
+ chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count);
return false;
}
- if (args.ucDataOutLen && read_byte && read_buf_len) {
- if (read_buf_len < args.ucDataOutLen) {
+ if (args.v1.ucDataOutLen && read_byte && read_buf_len) {
+ if (read_buf_len < args.v1.ucDataOutLen) {
DRM_ERROR("Buffer to small for return answer %d %d\n",
- read_buf_len, args.ucDataOutLen);
+ read_buf_len, args.v1.ucDataOutLen);
return false;
}
{
- int len = min(read_buf_len, args.ucDataOutLen);
+ int len = min(read_buf_len, args.v1.ucDataOutLen);
memcpy(read_byte, base + 16, len);
}
}
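/* Editor's note: filling args.v1 and then only the DCE4-specific HPD id
 * through args.v2 works because the v2 parameter struct is assumed to
 * share its leading layout with v1; a sketch of the pattern:
 *
 *	union aux_channel_transaction args = { };
 *	args.v1.ucChannelID = chan->rec.i2c_id;		// common field
 *	if (ASIC_IS_DCE4(rdev))
 *		args.v2.ucHPD_ID = chan->rec.hpd_id;	// v2-only field
 */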
@@ -626,12 +632,19 @@ void dp_link_train(struct drm_encoder *encoder,
dp_set_link_bw_lanes(radeon_connector, link_configuration);
/* disable downspread on the sink */
dp_set_downspread(radeon_connector, 0);
- /* start training on the source */
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
- dig_connector->dp_clock, enc_id, 0);
- /* set training pattern 1 on the source */
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
- dig_connector->dp_clock, enc_id, 0);
+ if (ASIC_IS_DCE4(rdev)) {
+ /* start training on the source */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START);
+ /* set training pattern 1 on the source */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1);
+ } else {
+ /* start training on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
+ dig_connector->dp_clock, enc_id, 0);
+ /* set training pattern 1 on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+ dig_connector->dp_clock, enc_id, 0);
+ }
/* set initial vs/emph */
memset(train_set, 0, 4);
@@ -691,8 +704,11 @@ void dp_link_train(struct drm_encoder *encoder,
/* set training pattern 2 on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
/* set training pattern 2 on the source */
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
- dig_connector->dp_clock, enc_id, 1);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2);
+ else
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+ dig_connector->dp_clock, enc_id, 1);
/* channel equalization loop */
tries = 0;
@@ -729,7 +745,11 @@ void dp_link_train(struct drm_encoder *encoder,
>> DP_TRAIN_PRE_EMPHASIS_SHIFT);
/* disable the training pattern on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
- dig_connector->dp_clock, enc_id, 0);
+ /* disable the training pattern on the source */
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
+ else
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+ dig_connector->dp_clock, enc_id, 0);
diff --git a/drivers/gpu/drm/radeon/avivod.h b/drivers/gpu/drm/radeon/avivod.h
index d4e6e6e..3c391e7 100644
--- a/drivers/gpu/drm/radeon/avivod.h
+++ b/drivers/gpu/drm/radeon/avivod.h
@@ -30,11 +30,13 @@
#define D1CRTC_CONTROL 0x6080
#define CRTC_EN (1 << 0)
+#define D1CRTC_STATUS 0x609c
#define D1CRTC_UPDATE_LOCK 0x60E8
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
#define D2CRTC_CONTROL 0x6880
+#define D2CRTC_STATUS 0x689c
#define D2CRTC_UPDATE_LOCK 0x68E8
#define D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910
#define D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
new file mode 100644
index 0000000..bd2e7aa
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -0,0 +1,767 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include "drmP.h"
+#include "radeon.h"
+#include "radeon_drm.h"
+#include "rv770d.h"
+#include "atom.h"
+#include "avivod.h"
+#include "evergreen_reg.h"
+
+static void evergreen_gpu_init(struct radeon_device *rdev);
+void evergreen_fini(struct radeon_device *rdev);
+
+bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ bool connected = false;
+ /* XXX */
+ return connected;
+}
+
+void evergreen_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ /* XXX */
+}
+
+void evergreen_hpd_init(struct radeon_device *rdev)
+{
+ /* XXX */
+}
+
+
+void evergreen_bandwidth_update(struct radeon_device *rdev)
+{
+ /* XXX */
+}
+
+void evergreen_hpd_fini(struct radeon_device *rdev)
+{
+ /* XXX */
+}
+
+static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ u32 tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32(SRBM_STATUS) & 0x1F00;
+ if (!tmp)
+ return 0;
+ udelay(1);
+ }
+ return -1;
+}
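/* Editor's note: callers only care whether the poll timed out, e.g.
 * evergreen_mc_program() below:
 *
 *	if (evergreen_mc_wait_for_idle(rdev))
 *		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
 */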
+
+/*
+ * GART
+ */
+int evergreen_pcie_gart_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int r, i;
+
+ if (rdev->gart.table.vram.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
+ radeon_gart_restore(rdev);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+ WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
+ for (i = 1; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+ r600_pcie_gart_tlb_flush(rdev);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+void evergreen_pcie_gart_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i, r;
+
+ /* Disable all tables */
+ for (i = 0; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ if (rdev->gart.table.vram.robj) {
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ }
+}
+
+void evergreen_pcie_gart_fini(struct radeon_device *rdev)
+{
+ evergreen_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+
+void evergreen_agp_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i;
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ for (i = 0; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
+static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+{
+ save->vga_control[0] = RREG32(D1VGA_CONTROL);
+ save->vga_control[1] = RREG32(D2VGA_CONTROL);
+ save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
+ save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
+ save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
+ save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+ save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+
+ /* Stop all video */
+ WREG32(VGA_RENDER_CONTROL, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+ WREG32(D1VGA_CONTROL, 0);
+ WREG32(D2VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+}
+
+static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+{
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+ /* Unlock host access */
+ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+ /* Restore video state */
+ WREG32(D1VGA_CONTROL, save->vga_control[0]);
+ WREG32(D2VGA_CONTROL, save->vga_control[1]);
+ WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
+ WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
+ WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
+ WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+static void evergreen_mc_program(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 tmp;
+ int i, j;
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+ WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ /* Lock out access through VGA aperture */
+ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+ /* Update configuration */
+ if (rdev->flags & RADEON_IS_AGP) {
+ if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+ /* VRAM before AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.gtt_end >> 12);
+ } else {
+ /* VRAM after AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.gtt_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ }
+ } else {
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ }
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+ tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+ tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+ WREG32(MC_VM_FB_LOCATION, tmp);
+ WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+ WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+ WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
+ if (rdev->flags & RADEON_IS_AGP) {
+ WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
+ WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+ WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
+ } else {
+ WREG32(MC_VM_AGP_BASE, 0);
+ WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+ WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+ }
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ evergreen_mc_resume(rdev, &save);
+ /* we need to own VRAM, so turn off the VGA renderer here
+ * to stop it overwriting our objects */
+ rv515_vga_render_disable(rdev);
+}
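/* Editor's worked example of the MC_VM_FB_LOCATION packing above,
 * assuming a hypothetical 1 GB framebuffer at offset 0:
 *
 *	vram_start = 0x00000000; vram_end = 0x3FFFFFFF;
 *	tmp  = ((0x3FFFFFFF >> 24) & 0xFFFF) << 16;	// end   -> 0x003F0000
 *	tmp |= ((0x00000000 >> 24) & 0xFFFF);		// start -> low half
 *	// MC_VM_FB_LOCATION == 0x003F0000
 */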
+
+#if 0
+/*
+ * CP.
+ */
+static void evergreen_cp_stop(struct radeon_device *rdev)
+{
+ /* XXX */
+}
+
+
+static int evergreen_cp_load_microcode(struct radeon_device *rdev)
+{
+ /* XXX */
+
+ return 0;
+}
+
+
+/*
+ * Core functions
+ */
+static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
+ u32 num_backends,
+ u32 backend_disable_mask)
+{
+ u32 backend_map = 0;
+
+ return backend_map;
+}
+#endif
+
+static void evergreen_gpu_init(struct radeon_device *rdev)
+{
+ /* XXX */
+}
+
+int evergreen_mc_init(struct radeon_device *rdev)
+{
+ fixed20_12 a;
+ u32 tmp;
+ int chansize, numchan;
+
+ /* Get VRAM information */
+ rdev->mc.vram_is_ddr = true;
+ tmp = RREG32(MC_ARB_RAMCFG);
+ if (tmp & CHANSIZE_OVERRIDE) {
+ chansize = 16;
+ } else if (tmp & CHANSIZE_MASK) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ }
+ rdev->mc.vram_width = numchan * chansize;
+ /* Could the aperture size report 0? */
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ /* Setup GPU memory space */
+ /* size in MB on evergreen */
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ /* FIXME remove this once we support unmappable VRAM */
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
+ rdev->mc.mc_vram_size = rdev->mc.aper_size;
+ rdev->mc.real_vram_size = rdev->mc.aper_size;
+ }
+ r600_vram_gtt_location(rdev, &rdev->mc);
+ /* FIXME: we should enforce default clock in case GPU is not in
+ * default setup
+ */
+ a.full = rfixed_const(100);
+ rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
+ rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+ return 0;
+}
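/* Editor's illustration of the width computation above: a hypothetical
 * board reporting NOOFCHAN == 3 (8 channels) of 32-bit DRAM gives
 *
 *	rdev->mc.vram_width = 8 * 32;	// 256-bit memory interface
 */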
+
+int evergreen_gpu_reset(struct radeon_device *rdev)
+{
+ /* FIXME: implement for evergreen */
+ return 0;
+}
+
+static int evergreen_startup(struct radeon_device *rdev)
+{
+#if 0
+ int r;
+
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+#endif
+ evergreen_mc_program(rdev);
+#if 0
+ if (rdev->flags & RADEON_IS_AGP) {
+ evergreen_agp_enable(rdev);
+ } else {
+ r = evergreen_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+#endif
+ evergreen_gpu_init(rdev);
+#if 0
+ if (!rdev->r600_blit.shader_obj) {
+ r = r600_blit_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed blitter (%d).\n", r);
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ DRM_ERROR("failed to pin blit object %d\n", r);
+ return r;
+ }
+
+ /* Enable IRQ */
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ r600_irq_set(rdev);
+
+ r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ if (r)
+ return r;
+ r = evergreen_cp_load_microcode(rdev);
+ if (r)
+ return r;
+ r = r600_cp_resume(rdev);
+ if (r)
+ return r;
+ /* write-back buffers are not vital, so don't worry about failure */
+ r600_wb_enable(rdev);
+#endif
+ return 0;
+}
+
+int evergreen_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
+ * posting performs the tasks needed to bring the GPU back into good
+ * shape.
+ */
+ /* post card */
+ atom_asic_init(rdev->mode_info.atom_context);
+ /* Initialize clocks */
+ r = radeon_clocks_init(rdev);
+ if (r) {
+ return r;
+ }
+
+ r = evergreen_startup(rdev);
+ if (r) {
+ DRM_ERROR("r600 startup failed on resume\n");
+ return r;
+ }
+#if 0
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ return r;
+ }
+#endif
+ return r;
+
+}
+
+int evergreen_suspend(struct radeon_device *rdev)
+{
+#if 0
+ int r;
+
+ /* FIXME: we should wait for ring to be empty */
+ r700_cp_stop(rdev);
+ rdev->cp.ready = false;
+ r600_wb_disable(rdev);
+ evergreen_pcie_gart_disable(rdev);
+ /* unpin shaders bo */
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+#endif
+ return 0;
+}
+
+static bool evergreen_card_posted(struct radeon_device *rdev)
+{
+ u32 reg;
+
+ /* first check CRTCs */
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (reg & EVERGREEN_CRTC_MASTER_EN)
+ return true;
+
+ /* then check MEM_SIZE, in case the crtcs are off */
+ if (RREG32(CONFIG_MEMSIZE))
+ return true;
+
+ return false;
+}
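/* Editor's note: "posted" means the vBIOS init tables have already run.
 * The check is cheap: any CRTC master-enable bit, or a non-zero
 * CONFIG_MEMSIZE, implies the asic was initialized, e.g.
 *
 *	if (RREG32(CONFIG_MEMSIZE))	// MC already sized => vBIOS ran
 *		return true;
 */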
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does little more
+ * than call the asic-specific functions. This should also allow
+ * us to remove a bunch of callback functions like vram_info.
+ */
+int evergreen_init(struct radeon_device *rdev)
+{
+ int r;
+
+ r = radeon_dummy_page_init(rdev);
+ if (r)
+ return r;
+ /* This doesn't do much */
+ r = radeon_gem_init(rdev);
+ if (r)
+ return r;
+ /* Read BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ /* Must be an ATOMBIOS */
+ if (!rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
+ return -EINVAL;
+ }
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+ /* Post card if necessary */
+ if (!evergreen_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
+ DRM_INFO("GPU not posted. posting now...\n");
+ atom_asic_init(rdev->mode_info.atom_context);
+ }
+ /* Initialize scratch registers */
+ r600_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ r = radeon_clocks_init(rdev);
+ if (r)
+ return r;
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r)
+ radeon_agp_disable(rdev);
+ }
+ /* initialize memory controller */
+ r = evergreen_mc_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+#if 0
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
+
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
+
+ r = r600_pcie_gart_init(rdev);
+ if (r)
+ return r;
+#endif
+ rdev->accel_working = false;
+ r = evergreen_startup(rdev);
+ if (r) {
+ evergreen_suspend(rdev);
+ /*r600_wb_fini(rdev);*/
+ /*radeon_ring_fini(rdev);*/
+ /*evergreen_pcie_gart_fini(rdev);*/
+ rdev->accel_working = false;
+ }
+ if (rdev->accel_working) {
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ }
+ return 0;
+}
+
+void evergreen_fini(struct radeon_device *rdev)
+{
+ evergreen_suspend(rdev);
+#if 0
+ r600_blit_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ radeon_ring_fini(rdev);
+ r600_wb_fini(rdev);
+ evergreen_pcie_gart_fini(rdev);
+#endif
+ radeon_gem_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_clocks_fini(rdev);
+ radeon_agp_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+ radeon_dummy_page_fini(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
new file mode 100644
index 0000000..f7c7c96
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __EVERGREEN_REG_H__
+#define __EVERGREEN_REG_H__
+
+/* evergreen */
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324
+#define EVERGREEN_D3VGA_CONTROL 0x3e0
+#define EVERGREEN_D4VGA_CONTROL 0x3e4
+#define EVERGREEN_D5VGA_CONTROL 0x3e8
+#define EVERGREEN_D6VGA_CONTROL 0x3ec
+
+#define EVERGREEN_P1PLL_SS_CNTL 0x414
+#define EVERGREEN_P2PLL_SS_CNTL 0x454
+# define EVERGREEN_PxPLL_SS_EN (1 << 12)
+/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
+#define EVERGREEN_GRPH_ENABLE 0x6800
+#define EVERGREEN_GRPH_CONTROL 0x6804
+# define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0)
+# define EVERGREEN_GRPH_DEPTH_8BPP 0
+# define EVERGREEN_GRPH_DEPTH_16BPP 1
+# define EVERGREEN_GRPH_DEPTH_32BPP 2
+# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
+/* 8 BPP */
+# define EVERGREEN_GRPH_FORMAT_INDEXED 0
+/* 16 BPP */
+# define EVERGREEN_GRPH_FORMAT_ARGB1555 0
+# define EVERGREEN_GRPH_FORMAT_ARGB565 1
+# define EVERGREEN_GRPH_FORMAT_ARGB4444 2
+# define EVERGREEN_GRPH_FORMAT_AI88 3
+# define EVERGREEN_GRPH_FORMAT_MONO16 4
+# define EVERGREEN_GRPH_FORMAT_BGRA5551 5
+/* 32 BPP */
+# define EVERGREEN_GRPH_FORMAT_ARGB8888 0
+# define EVERGREEN_GRPH_FORMAT_ARGB2101010 1
+# define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2
+# define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3
+# define EVERGREEN_GRPH_FORMAT_BGRA1010102 4
+# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
+# define EVERGREEN_GRPH_FORMAT_RGB111110 6
+# define EVERGREEN_GRPH_FORMAT_BGR101111 7
+#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
+# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
+# define EVERGREEN_GRPH_ENDIAN_NONE 0
+# define EVERGREEN_GRPH_ENDIAN_8IN16 1
+# define EVERGREEN_GRPH_ENDIAN_8IN32 2
+# define EVERGREEN_GRPH_ENDIAN_8IN64 3
+# define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
+# define EVERGREEN_GRPH_RED_SEL_R 0
+# define EVERGREEN_GRPH_RED_SEL_G 1
+# define EVERGREEN_GRPH_RED_SEL_B 2
+# define EVERGREEN_GRPH_RED_SEL_A 3
+# define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
+# define EVERGREEN_GRPH_GREEN_SEL_G 0
+# define EVERGREEN_GRPH_GREEN_SEL_B 1
+# define EVERGREEN_GRPH_GREEN_SEL_A 2
+# define EVERGREEN_GRPH_GREEN_SEL_R 3
+# define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
+# define EVERGREEN_GRPH_BLUE_SEL_B 0
+# define EVERGREEN_GRPH_BLUE_SEL_A 1
+# define EVERGREEN_GRPH_BLUE_SEL_R 2
+# define EVERGREEN_GRPH_BLUE_SEL_G 3
+# define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
+# define EVERGREEN_GRPH_ALPHA_SEL_A 0
+# define EVERGREEN_GRPH_ALPHA_SEL_R 1
+# define EVERGREEN_GRPH_ALPHA_SEL_G 2
+# define EVERGREEN_GRPH_ALPHA_SEL_B 3
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x6810
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x6814
+# define EVERGREEN_GRPH_DFQ_ENABLE (1 << 0)
+# define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00
+#define EVERGREEN_GRPH_PITCH 0x6818
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x681c
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x6820
+#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x6824
+#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x6828
+#define EVERGREEN_GRPH_X_START 0x682c
+#define EVERGREEN_GRPH_Y_START 0x6830
+#define EVERGREEN_GRPH_X_END 0x6834
+#define EVERGREEN_GRPH_Y_END 0x6838
+
+/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
+#define EVERGREEN_CUR_CONTROL 0x6998
+# define EVERGREEN_CURSOR_EN (1 << 0)
+# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8)
+# define EVERGREEN_CURSOR_MONO 0
+# define EVERGREEN_CURSOR_24_1 1
+# define EVERGREEN_CURSOR_24_8_PRE_MULT 2
+# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3
+# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16)
+# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20)
+# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
+# define EVERGREEN_CURSOR_URGENT_ALWAYS 0
+# define EVERGREEN_CURSOR_URGENT_1_8 1
+# define EVERGREEN_CURSOR_URGENT_1_4 2
+# define EVERGREEN_CURSOR_URGENT_3_8 3
+# define EVERGREEN_CURSOR_URGENT_1_2 4
+#define EVERGREEN_CUR_SURFACE_ADDRESS 0x699c
+# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000
+#define EVERGREEN_CUR_SIZE 0x69a0
+#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x69a4
+#define EVERGREEN_CUR_POSITION 0x69a8
+#define EVERGREEN_CUR_HOT_SPOT 0x69ac
+#define EVERGREEN_CUR_COLOR1 0x69b0
+#define EVERGREEN_CUR_COLOR2 0x69b4
+#define EVERGREEN_CUR_UPDATE 0x69b8
+# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0)
+# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1)
+# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16)
+# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
+/* LUT blocks at 0x69e0, 0x75e0, 0x101e0, 0x10de0, 0x119e0, 0x125e0 */
+#define EVERGREEN_DC_LUT_RW_MODE 0x69e0
+#define EVERGREEN_DC_LUT_RW_INDEX 0x69e4
+#define EVERGREEN_DC_LUT_SEQ_COLOR 0x69e8
+#define EVERGREEN_DC_LUT_PWL_DATA 0x69ec
+#define EVERGREEN_DC_LUT_30_COLOR 0x69f0
+#define EVERGREEN_DC_LUT_VGA_ACCESS_ENABLE 0x69f4
+#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x69f8
+#define EVERGREEN_DC_LUT_AUTOFILL 0x69fc
+#define EVERGREEN_DC_LUT_CONTROL 0x6a00
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x6a04
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x6a08
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x6a0c
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x6a10
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x6a14
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x6a18
+
+#define EVERGREEN_DATA_FORMAT 0x6b00
+# define EVERGREEN_INTERLEAVE_EN (1 << 0)
+#define EVERGREEN_DESKTOP_HEIGHT 0x6b04
+
+#define EVERGREEN_VIEWPORT_START 0x6d70
+#define EVERGREEN_VIEWPORT_SIZE 0x6d74
+
+/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
+#define EVERGREEN_CRTC0_REGISTER_OFFSET (0x6df0 - 0x6df0)
+#define EVERGREEN_CRTC1_REGISTER_OFFSET (0x79f0 - 0x6df0)
+#define EVERGREEN_CRTC2_REGISTER_OFFSET (0x105f0 - 0x6df0)
+#define EVERGREEN_CRTC3_REGISTER_OFFSET (0x111f0 - 0x6df0)
+#define EVERGREEN_CRTC4_REGISTER_OFFSET (0x11df0 - 0x6df0)
+#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0)
+
+/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_CONTROL 0x6e70
+# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
+#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
+
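/* Editor's worked example (derived from the definitions above): a CRTC1
 * register is addressed as its CRTC0 address plus the block offset,
 * so for example
 *
 *	EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET
 *	  == 0x6e70 + (0x79f0 - 0x6df0) == 0x6e70 + 0xc00 == 0x7a70
 */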
+#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
+#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
+#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8
+#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c0d4650..91eb762 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -197,13 +197,13 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
+ radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32(RADEON_AIC_CNTL, tmp);
/* set address range for PCI address translate */
- WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
- WREG32(RADEON_AIC_HI_ADDR, tmp);
+ WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
+ WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
/* set PCI GART page-table base address */
WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -312,9 +312,11 @@ int r100_irq_process(struct radeon_device *rdev)
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 0);
+ wake_up(&rdev->irq.vblank_queue);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 1);
+ wake_up(&rdev->irq.vblank_queue);
}
if (status & RADEON_FP_DETECT_STAT) {
queue_hotplug = true;
@@ -366,8 +368,8 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
- radeon_ring_write(rdev, PACKET0(0x1720, 0));
- radeon_ring_write(rdev, (1 << 16) | (1 << 17));
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
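/* Editor's note: the replacement above is cosmetic; in radeon_reg.h,
 * RADEON_WAIT_UNTIL is 0x1720 and
 *
 *	RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN
 *	  == (1 << 16) | (1 << 17)
 *
 * so the emitted fence packet is unchanged.
 */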
@@ -1701,7 +1703,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev)
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & (1 << 31))) {
+ if (!(tmp & RADEON_RBBM_ACTIVE)) {
return 0;
}
DRM_UDELAY(1);
@@ -1716,8 +1718,8 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
for (i = 0; i < rdev->usec_timeout; i++) {
/* read MC_STATUS */
- tmp = RREG32(0x0150);
- if (tmp & (1 << 2)) {
+ tmp = RREG32(RADEON_MC_STATUS);
+ if (tmp & RADEON_MC_IDLE) {
return 0;
}
DRM_UDELAY(1);
@@ -1790,7 +1792,7 @@ int r100_gpu_reset(struct radeon_device *rdev)
}
/* Check if GPU is idle */
status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 31)) {
+ if (status & RADEON_RBBM_ACTIVE) {
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
return -1;
}
@@ -1800,6 +1802,9 @@ int r100_gpu_reset(struct radeon_device *rdev)
void r100_set_common_regs(struct radeon_device *rdev)
{
+ struct drm_device *dev = rdev->ddev;
+ bool force_dac2 = false;
+
/* set these so they don't interfere with anything */
WREG32(RADEON_OV0_SCALE_CNTL, 0);
WREG32(RADEON_SUBPIC_CNTL, 0);
@@ -1808,6 +1813,68 @@ void r100_set_common_regs(struct radeon_device *rdev)
WREG32(RADEON_DVI_I2C_CNTL_1, 0);
WREG32(RADEON_CAP0_TRIG_CNTL, 0);
WREG32(RADEON_CAP1_TRIG_CNTL, 0);
+
+ /* always set up dac2 on rn50 and some rv100 as lots
+ * of servers seem to wire it up to a VGA port but
+ * don't report it in the bios connector
+ * table.
+ */
+ switch (dev->pdev->device) {
+ /* RN50 */
+ case 0x515e:
+ case 0x5969:
+ force_dac2 = true;
+ break;
+ /* RV100*/
+ case 0x5159:
+ case 0x515a:
+ /* DELL triple head servers */
+ if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
+ ((dev->pdev->subsystem_device == 0x016c) ||
+ (dev->pdev->subsystem_device == 0x016d) ||
+ (dev->pdev->subsystem_device == 0x016e) ||
+ (dev->pdev->subsystem_device == 0x016f) ||
+ (dev->pdev->subsystem_device == 0x0170) ||
+ (dev->pdev->subsystem_device == 0x017d) ||
+ (dev->pdev->subsystem_device == 0x017e) ||
+ (dev->pdev->subsystem_device == 0x0183) ||
+ (dev->pdev->subsystem_device == 0x018a) ||
+ (dev->pdev->subsystem_device == 0x019a)))
+ force_dac2 = true;
+ break;
+ }
+
+ if (force_dac2) {
+ u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+ u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+ u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
+
+ /* For CRT on DAC2, don't turn it on if the BIOS didn't
+ * enable it, even if it's detected.
+ */
+
+ /* force it to crtc0 */
+ dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
+ dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
+ disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+
+ /* set up the TV DAC */
+ tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
+ RADEON_TV_DAC_STD_MASK |
+ RADEON_TV_DAC_RDACPD |
+ RADEON_TV_DAC_GDACPD |
+ RADEON_TV_DAC_BDACPD |
+ RADEON_TV_DAC_BGADJ_MASK |
+ RADEON_TV_DAC_DACADJ_MASK);
+ tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
+ RADEON_TV_DAC_NHOLD |
+ RADEON_TV_DAC_STD_PS2 |
+ (0x58 << 16));
+
+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+ WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+ }
}
/*
@@ -1889,17 +1956,20 @@ static u32 r100_get_accessible_vram(struct radeon_device *rdev)
void r100_vram_init_sizes(struct radeon_device *rdev)
{
u64 config_aper_size;
- u32 accessible;
+ /* work out accessible VRAM */
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
+ /* FIXME: we don't use the second aperture yet, though we could */
+ if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
-
if (rdev->flags & RADEON_IS_IGP) {
uint32_t tom;
/* read NB_TOM to get the amount of ram stolen for the GPU */
tom = RREG32(RADEON_NB_TOM);
rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
- /* for IGPs we need to keep VRAM where it was put by the BIOS */
- rdev->mc.vram_location = (tom & 0xffff) << 16;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
} else {
@@ -1911,30 +1981,19 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
rdev->mc.real_vram_size = 8192 * 1024;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
}
- /* let driver place VRAM */
- rdev->mc.vram_location = 0xFFFFFFFFUL;
- /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
- * Novell bug 204882 + along with lots of ubuntu ones */
+ /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
+ * Novell bug 204882 + along with lots of ubuntu ones
+ */
if (config_aper_size > rdev->mc.real_vram_size)
rdev->mc.mc_vram_size = config_aper_size;
else
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
}
-
- /* work out accessible VRAM */
- accessible = r100_get_accessible_vram(rdev);
-
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
-
- if (accessible > rdev->mc.aper_size)
- accessible = rdev->mc.aper_size;
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ /* FIXME remove this once we support unmappable VRAM */
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;
+ }
}
void r100_vga_set_state(struct radeon_device *rdev, bool state)
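For a concrete reading of the clamps above, consider a hypothetical card reporting 128 MB of VRAM behind a 64 MB PCI aperture:

/* Hypothetical card: 128 MB VRAM, 64 MB BAR.  After the clamps above:
 *   visible_vram_size = min(accessible, aper_size)     = 64 MB
 *   mc_vram_size      = min(mc_vram_size, aper_size)   = 64 MB  (FIXME clamp)
 *   real_vram_size    = min(real_vram_size, aper_size) = 64 MB  (FIXME clamp)
 * i.e. everything is limited to the aperture until unmappable VRAM
 * is supported.
 */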
@@ -1951,11 +2010,18 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
WREG32(RADEON_CONFIG_CNTL, temp);
}
-void r100_vram_info(struct radeon_device *rdev)
+void r100_mc_init(struct radeon_device *rdev)
{
- r100_vram_get_type(rdev);
+ u64 base;
+ r100_vram_get_type(rdev);
r100_vram_init_sizes(rdev);
+ base = rdev->mc.aper_base;
+ if (rdev->flags & RADEON_IS_IGP)
+ base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
}
@@ -3226,10 +3292,9 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
{
/* Update base address for crtc */
- WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_location);
+ WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
- WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR,
- rdev->mc.vram_location);
+ WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
}
/* Restore CRTC registers */
WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
@@ -3390,32 +3455,6 @@ void r100_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
-int r100_mc_init(struct radeon_device *rdev)
-{
- int r;
- u32 tmp;
-
- /* Setup GPU memory space */
- rdev->mc.vram_location = 0xFFFFFFFFUL;
- rdev->mc.gtt_location = 0xFFFFFFFFUL;
- if (rdev->flags & RADEON_IS_IGP) {
- tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
- rdev->mc.vram_location = tmp << 16;
- }
- if (rdev->flags & RADEON_IS_AGP) {
- r = radeon_agp_init(rdev);
- if (r) {
- radeon_agp_disable(rdev);
- } else {
- rdev->mc.gtt_location = rdev->mc.agp_base;
- }
- }
- r = radeon_mc_setup(rdev);
- if (r)
- return r;
- return 0;
-}
-
int r100_init(struct radeon_device *rdev)
{
int r;
@@ -3458,12 +3497,15 @@ int r100_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- r100_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = r100_mc_init(rdev);
- if (r)
- return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize VRAM */
+ r100_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index ff1e0cd..1146c99 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -31,6 +31,7 @@
#include "radeon_reg.h"
#include "radeon.h"
+#include "r100d.h"
#include "r200_reg_safe.h"
#include "r100_track.h"
@@ -79,6 +80,51 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
return vtx_size;
}
+int r200_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
+ struct radeon_fence *fence)
+{
+ uint32_t size;
+ uint32_t cur_size;
+ int i, num_loops;
+ int r = 0;
+
+ /* radeon pitch is /64 */
+ size = num_pages << PAGE_SHIFT;
+ num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
+ r = radeon_ring_lock(rdev, num_loops * 4 + 64);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+ /* Must wait for 2D idle & clean before DMA or hangs might happen */
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, (1 << 16));
+ for (i = 0; i < num_loops; i++) {
+ cur_size = size;
+ if (cur_size > 0x1FFFFF) {
+ cur_size = 0x1FFFFF;
+ }
+ size -= cur_size;
+ radeon_ring_write(rdev, PACKET0(0x720, 2));
+ radeon_ring_write(rdev, src_offset);
+ radeon_ring_write(rdev, dst_offset);
+ radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
+ src_offset += cur_size;
+ dst_offset += cur_size;
+ }
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
+ if (fence) {
+ r = radeon_fence_emit(rdev, fence);
+ }
+ radeon_ring_unlock_commit(rdev);
+ return r;
+}
+
+
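The loop above caps each DMA packet at 0x1FFFFF bytes, which is what the mask suggests the packet's size field can carry (bits 30 and 31 are used as flags). A worked count for a 16 MB copy:

/* size      = 0x1000000 (16 MB)
 * num_loops = DIV_ROUND_UP(0x1000000, 0x1FFFFF) = 9
 * i.e. eight full 0x1FFFFF-byte chunks plus an 8-byte tail.
 */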
static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
{
int vtx_size, i, tex_size;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 43b55a0..4cef90c 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -117,18 +117,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
- WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
+ tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
table_addr = rdev->gart.table_addr;
WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
/* FIXME: setup default page */
- WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
+ WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
/* Clear error */
WREG32_PCIE(0x18, 0);
@@ -174,18 +175,20 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
/* Whoever calls radeon_fence_emit should call ring_lock and ask
* for enough space (today the callers are ib schedule and buffer move) */
/* Write SC register so SC & US assert idle */
- radeon_ring_write(rdev, PACKET0(0x43E0, 0));
+ radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(0x43E4, 0));
+ radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
radeon_ring_write(rdev, 0);
/* Flush 3D cache */
- radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
- radeon_ring_write(rdev, (2 << 0));
- radeon_ring_write(rdev, PACKET0(0x4F18, 0));
- radeon_ring_write(rdev, (1 << 0));
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_ZC_FLUSH);
/* Wait until IDLE & CLEAN */
- radeon_ring_write(rdev, PACKET0(0x1720, 0));
- radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
+ RADEON_WAIT_2D_IDLECLEAN |
+ RADEON_WAIT_DMA_GUI_IDLE));
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
@@ -198,50 +201,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
-int r300_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_pages,
- struct radeon_fence *fence)
-{
- uint32_t size;
- uint32_t cur_size;
- int i, num_loops;
- int r = 0;
-
- /* radeon pitch is /64 */
- size = num_pages << PAGE_SHIFT;
- num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
- r = radeon_ring_lock(rdev, num_loops * 4 + 64);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
- /* Must wait for 2D idle & clean before DMA or hangs might happen */
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 ));
- radeon_ring_write(rdev, (1 << 16));
- for (i = 0; i < num_loops; i++) {
- cur_size = size;
- if (cur_size > 0x1FFFFF) {
- cur_size = 0x1FFFFF;
- }
- size -= cur_size;
- radeon_ring_write(rdev, PACKET0(0x720, 2));
- radeon_ring_write(rdev, src_offset);
- radeon_ring_write(rdev, dst_offset);
- radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
- src_offset += cur_size;
- dst_offset += cur_size;
- }
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
- if (fence) {
- r = radeon_fence_emit(rdev, fence);
- }
- radeon_ring_unlock_commit(rdev);
- return r;
-}
-
void r300_ring_start(struct radeon_device *rdev)
{
unsigned gb_tile_config;
@@ -281,8 +240,8 @@ void r300_ring_start(struct radeon_device *rdev)
radeon_ring_write(rdev,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(0x170C, 0));
- radeon_ring_write(rdev, 1 << 31);
+ radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+ radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
@@ -349,8 +308,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
for (i = 0; i < rdev->usec_timeout; i++) {
/* read MC_STATUS */
- tmp = RREG32(0x0150);
- if (tmp & (1 << 4)) {
+ tmp = RREG32(RADEON_MC_STATUS);
+ if (tmp & R300_MC_IDLE) {
return 0;
}
DRM_UDELAY(1);
@@ -395,8 +354,8 @@ void r300_gpu_init(struct radeon_device *rdev)
"programming pipes. Bad things might happen.\n");
}
- tmp = RREG32(0x170C);
- WREG32(0x170C, tmp | (1 << 31));
+ tmp = RREG32(R300_DST_PIPE_CONFIG);
+ WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
WREG32(R300_RB2D_DSTCACHE_MODE,
R300_DC_AUTOFLUSH_ENABLE |
@@ -437,8 +396,8 @@ int r300_ga_reset(struct radeon_device *rdev)
/* GA still busy soft reset it */
WREG32(0x429C, 0x200);
WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
- WREG32(0x43E0, 0);
- WREG32(0x43E4, 0);
+ WREG32(R300_RE_SCISSORS_TL, 0);
+ WREG32(R300_RE_SCISSORS_BR, 0);
WREG32(0x24AC, 0);
}
/* Wait to prevent race in RBBM_STATUS */
@@ -488,7 +447,7 @@ int r300_gpu_reset(struct radeon_device *rdev)
}
/* Check if GPU is idle */
status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 31)) {
+ if (status & RADEON_RBBM_ACTIVE) {
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
return -1;
}
@@ -500,13 +459,13 @@ int r300_gpu_reset(struct radeon_device *rdev)
/*
* r300,r350,rv350,rv380 VRAM info
*/
-void r300_vram_info(struct radeon_device *rdev)
+void r300_mc_init(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u64 base;
+ u32 tmp;
/* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;
-
tmp = RREG32(RADEON_MEM_CNTL);
tmp &= R300_MEM_NUM_CHANNELS_MASK;
switch (tmp) {
@@ -515,8 +474,13 @@ void r300_vram_info(struct radeon_device *rdev)
case 2: rdev->mc.vram_width = 256; break;
default: rdev->mc.vram_width = 128; break;
}
-
r100_vram_init_sizes(rdev);
+ base = rdev->mc.aper_base;
+ if (rdev->flags & RADEON_IS_IGP)
+ base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
}
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
@@ -578,6 +542,40 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
}
+int rv370_get_pcie_lanes(struct radeon_device *rdev)
+{
+ u32 link_width_cntl;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return 0;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return 0;
+
+ /* FIXME wait for idle */
+
+ if (rdev->family < CHIP_R600)
+ link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ else
+ link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+ switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+ case RADEON_PCIE_LC_LINK_WIDTH_X0:
+ return 0;
+ case RADEON_PCIE_LC_LINK_WIDTH_X1:
+ return 1;
+ case RADEON_PCIE_LC_LINK_WIDTH_X2:
+ return 2;
+ case RADEON_PCIE_LC_LINK_WIDTH_X4:
+ return 4;
+ case RADEON_PCIE_LC_LINK_WIDTH_X8:
+ return 8;
+ case RADEON_PCIE_LC_LINK_WIDTH_X16:
+ default:
+ return 16;
+ }
+}
+
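A hypothetical caller of the new getter, pairing it with the existing rv370_set_pcie_lanes() from this file (illustrative only; such a helper is not part of the patch):

static int rv370_pcie_lanes_throttle(struct radeon_device *rdev)
{
	int lanes = rv370_get_pcie_lanes(rdev);	/* 0 on IGP/non-PCIE */

	if (lanes > 1)
		rv370_set_pcie_lanes(rdev, 1);	/* drop to x1 */
	return lanes;	/* caller restores with rv370_set_pcie_lanes() */
}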
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
@@ -707,6 +705,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
tile_flags |= R300_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_TXO_MICRO_TILE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
tmp |= tile_flags;
@@ -757,6 +757,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
tile_flags |= R300_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
@@ -828,7 +830,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_DEPTHMACROTILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_DEPTHMICROTILE_TILED;;
+ tile_flags |= R300_DEPTHMICROTILE_TILED;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
@@ -1387,12 +1391,15 @@ int r300_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- r300_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
- if (r)
- return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize memory controller */
+ r300_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 34bffa0..ea46d55 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -33,6 +33,7 @@
#include "drmP.h"
#include "drm.h"
+#include "drm_buffer.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "r300_reg.h"
@@ -299,46 +300,42 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
int reg;
int sz;
int i;
- int values[64];
+ u32 *value;
RING_LOCALS;
sz = header.packet0.count;
reg = (header.packet0.reghi << 8) | header.packet0.reglo;
if ((sz > 64) || (sz < 0)) {
- DRM_ERROR
- ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
- reg, sz);
+ DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
+ reg, sz);
return -EINVAL;
}
+
for (i = 0; i < sz; i++) {
- values[i] = ((int *)cmdbuf->buf)[i];
switch (r300_reg_flags[(reg >> 2) + i]) {
case MARK_SAFE:
break;
case MARK_CHECK_OFFSET:
- if (!radeon_check_offset(dev_priv, (u32) values[i])) {
- DRM_ERROR
- ("Offset failed range check (reg=%04x sz=%d)\n",
- reg, sz);
+ value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+ if (!radeon_check_offset(dev_priv, *value)) {
+ DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n",
+ reg, sz);
return -EINVAL;
}
break;
default:
DRM_ERROR("Register %04x failed check as flag=%02x\n",
- reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
+ reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
return -EINVAL;
}
}
BEGIN_RING(1 + sz);
OUT_RING(CP_PACKET0(reg, sz - 1));
- OUT_RING_TABLE(values, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * 4;
- cmdbuf->bufsz -= sz * 4;
-
return 0;
}
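The conversion in this file replaces raw cmdbuf->buf pointer arithmetic with the drm_buffer helpers included above. The general pattern, as used throughout the hunks below (a skeletal sketch; the iterator advance on emit is inferred from the removal of the manual cmdbuf->buf adjustments):

	/* 1. bounds-check against what is left in the buffer */
	if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
		return -EINVAL;
	/* 2. peek at dwords in place, without copying them out */
	value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
	/* 3. emit straight from the buffer; this (or an explicit
	 *    drm_buffer_advance()) moves the iterator forward */
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);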
@@ -362,7 +359,7 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
if (!sz)
return 0;
- if (sz * 4 > cmdbuf->bufsz)
+ if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
if (reg + sz * 4 >= 0x10000) {
@@ -380,12 +377,9 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
BEGIN_RING(1 + sz);
OUT_RING(CP_PACKET0(reg, sz - 1));
- OUT_RING_TABLE((int *)cmdbuf->buf, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * 4;
- cmdbuf->bufsz -= sz * 4;
-
return 0;
}
@@ -407,7 +401,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
if (!sz)
return 0;
- if (sz * 16 > cmdbuf->bufsz)
+ if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
/* VAP is very sensitive so we purge cache before we program it
@@ -426,7 +420,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
BEGIN_RING(3 + sz * 4);
OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
- OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4);
ADVANCE_RING();
BEGIN_RING(2);
@@ -434,9 +428,6 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
OUT_RING(0);
ADVANCE_RING();
- cmdbuf->buf += sz * 16;
- cmdbuf->bufsz -= sz * 16;
-
return 0;
}
@@ -449,14 +440,14 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
{
RING_LOCALS;
- if (8 * 4 > cmdbuf->bufsz)
+ if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
BEGIN_RING(10);
OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
(1 << R300_PRIM_NUM_VERTICES_SHIFT));
- OUT_RING_TABLE((int *)cmdbuf->buf, 8);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8);
ADVANCE_RING();
BEGIN_RING(4);
@@ -468,9 +459,6 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
/* set flush flag */
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
- cmdbuf->buf += 8 * 4;
- cmdbuf->bufsz -= 8 * 4;
-
return 0;
}
@@ -480,28 +468,29 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
{
int count, i, k;
#define MAX_ARRAY_PACKET 64
- u32 payload[MAX_ARRAY_PACKET];
+ u32 *data;
u32 narrays;
RING_LOCALS;
- count = (header >> 16) & 0x3fff;
+ count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16;
if ((count + 1) > MAX_ARRAY_PACKET) {
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
count);
return -EINVAL;
}
- memset(payload, 0, MAX_ARRAY_PACKET * 4);
- memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
-
/* carefully check packet contents */
- narrays = payload[0];
+ /* We have already read the header so advance the buffer. */
+ drm_buffer_advance(cmdbuf->buffer, 4);
+
+ narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
k = 0;
i = 1;
while ((k < narrays) && (i < (count + 1))) {
i++; /* skip attribute field */
- if (!radeon_check_offset(dev_priv, payload[i])) {
+ data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+ if (!radeon_check_offset(dev_priv, *data)) {
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
@@ -512,7 +501,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
if (k == narrays)
break;
/* have one more to process, they come in pairs */
- if (!radeon_check_offset(dev_priv, payload[i])) {
+ data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+ if (!radeon_check_offset(dev_priv, *data)) {
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
@@ -533,30 +523,30 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
BEGIN_RING(count + 2);
OUT_RING(header);
- OUT_RING_TABLE(payload, count + 1);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1);
ADVANCE_RING();
- cmdbuf->buf += (count + 2) * 4;
- cmdbuf->bufsz -= (count + 2) * 4;
-
return 0;
}
static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
- u32 *cmd = (u32 *) cmdbuf->buf;
+ u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
int count, ret;
RING_LOCALS;
- count=(cmd[0]>>16) & 0x3fff;
- if (cmd[0] & 0x8000) {
- u32 offset;
+ count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
- if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+ if (*cmd & 0x8000) {
+ u32 offset;
+ u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
- offset = cmd[2] << 10;
+
+ u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ offset = *cmd2 << 10;
ret = !radeon_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
@@ -564,9 +554,10 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
}
}
- if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
- (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
- offset = cmd[3] << 10;
+ if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+ (*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+ u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+ offset = *cmd3 << 10;
ret = !radeon_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
@@ -577,28 +568,25 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
}
BEGIN_RING(count+2);
- OUT_RING(cmd[0]);
- OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
ADVANCE_RING();
- cmdbuf->buf += (count+2)*4;
- cmdbuf->bufsz -= (count+2)*4;
-
return 0;
}
static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
- u32 *cmd;
+ u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+ u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
int count;
int expected_count;
RING_LOCALS;
- cmd = (u32 *) cmdbuf->buf;
- count = (cmd[0]>>16) & 0x3fff;
- expected_count = cmd[1] >> 16;
- if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
+ count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+
+ expected_count = *cmd1 >> 16;
+ if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
expected_count = (expected_count+1)/2;
if (count && count != expected_count) {
@@ -608,55 +596,53 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
}
BEGIN_RING(count+2);
- OUT_RING(cmd[0]);
- OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
ADVANCE_RING();
- cmdbuf->buf += (count+2)*4;
- cmdbuf->bufsz -= (count+2)*4;
-
if (!count) {
- drm_r300_cmd_header_t header;
+ drm_r300_cmd_header_t stack_header, *header;
+ u32 *cmd1, *cmd2, *cmd3;
- if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
+ if (drm_buffer_unprocessed(cmdbuf->buffer)
+ < 4*4 + sizeof(stack_header)) {
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
return -EINVAL;
}
- header.u = *(unsigned int *)cmdbuf->buf;
+ header = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_header), &stack_header);
- cmdbuf->buf += sizeof(header);
- cmdbuf->bufsz -= sizeof(header);
- cmd = (u32 *) cmdbuf->buf;
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+ cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
- if (header.header.cmd_type != R300_CMD_PACKET3 ||
- header.packet3.packet != R300_CMD_PACKET3_RAW ||
- cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
+ if (header->header.cmd_type != R300_CMD_PACKET3 ||
+ header->packet3.packet != R300_CMD_PACKET3_RAW ||
+ *cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
return -EINVAL;
}
- if ((cmd[1] & 0x8000ffff) != 0x80000810) {
- DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
+ if ((*cmd1 & 0x8000ffff) != 0x80000810) {
+ DRM_ERROR("Invalid indx_buffer reg address %08X\n",
+ *cmd1);
return -EINVAL;
}
- if (!radeon_check_offset(dev_priv, cmd[2])) {
- DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+ if (!radeon_check_offset(dev_priv, *cmd2)) {
+ DRM_ERROR("Invalid indx_buffer offset is %08X\n",
+ *cmd2);
return -EINVAL;
}
- if (cmd[3] != expected_count) {
+ if (*cmd3 != expected_count) {
DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
- cmd[3], expected_count);
+ *cmd3, expected_count);
return -EINVAL;
}
BEGIN_RING(4);
- OUT_RING(cmd[0]);
- OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4);
ADVANCE_RING();
-
- cmdbuf->buf += 4*4;
- cmdbuf->bufsz -= 4*4;
}
return 0;
@@ -665,39 +651,39 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
- u32 header;
+ u32 *header;
int count;
RING_LOCALS;
- if (4 > cmdbuf->bufsz)
+ if (4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
/* Fixme !! This simply emits a packet without much checking.
We need to be smarter. */
/* obtain first word - actual packet3 header */
- header = *(u32 *) cmdbuf->buf;
+ header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
/* Is it packet 3 ? */
- if ((header >> 30) != 0x3) {
- DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
+ if ((*header >> 30) != 0x3) {
+ DRM_ERROR("Not a packet3 header (0x%08x)\n", *header);
return -EINVAL;
}
- count = (header >> 16) & 0x3fff;
+ count = (*header >> 16) & 0x3fff;
/* Check again now that we know how much data to expect */
- if ((count + 2) * 4 > cmdbuf->bufsz) {
+ if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) {
DRM_ERROR
("Expected packet3 of length %d but have only %d bytes left\n",
- (count + 2) * 4, cmdbuf->bufsz);
+ (count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer));
return -EINVAL;
}
/* Is it a packet type we know about ? */
- switch (header & 0xff00) {
+ switch (*header & 0xff00) {
case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
- return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
+ return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header);
case RADEON_CNTL_BITBLT_MULTI:
return r300_emit_bitblt_multi(dev_priv, cmdbuf);
@@ -723,18 +709,14 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
/* these packets are safe */
break;
default:
- DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
+ DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header);
return -EINVAL;
}
BEGIN_RING(count + 2);
- OUT_RING(header);
- OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
ADVANCE_RING();
- cmdbuf->buf += (count + 2) * 4;
- cmdbuf->bufsz -= (count + 2) * 4;
-
return 0;
}
@@ -748,8 +730,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
{
int n;
int ret;
- char *orig_buf = cmdbuf->buf;
- int orig_bufsz = cmdbuf->bufsz;
+ int orig_iter = cmdbuf->buffer->iterator;
/* This is a do-while-loop so that we run the interior at least once,
* even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
@@ -761,8 +742,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
if (ret)
return ret;
- cmdbuf->buf = orig_buf;
- cmdbuf->bufsz = orig_bufsz;
+ cmdbuf->buffer->iterator = orig_iter;
}
switch (header.packet3.packet) {
@@ -785,9 +765,9 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
break;
default:
- DRM_ERROR("bad packet3 type %i at %p\n",
+ DRM_ERROR("bad packet3 type %i at byte %d\n",
header.packet3.packet,
- cmdbuf->buf - sizeof(header));
+ cmdbuf->buffer->iterator - (int)sizeof(header));
return -EINVAL;
}
@@ -923,12 +903,13 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
drm_r300_cmd_header_t header)
{
u32 *ref_age_base;
- u32 i, buf_idx, h_pending;
- u64 ptr_addr;
+ u32 i, *buf_idx, h_pending;
+ u64 *ptr_addr;
+ u64 stack_ptr_addr;
RING_LOCALS;
- if (cmdbuf->bufsz <
- (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
+ if (drm_buffer_unprocessed(cmdbuf->buffer) <
+ (sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) {
return -EINVAL;
}
@@ -938,36 +919,35 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
dev_priv->scratch_ages[header.scratch.reg]++;
- ptr_addr = get_unaligned((u64 *)cmdbuf->buf);
- ref_age_base = (u32 *)(unsigned long)ptr_addr;
-
- cmdbuf->buf += sizeof(u64);
- cmdbuf->bufsz -= sizeof(u64);
+ ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_ptr_addr), &stack_ptr_addr);
+ ref_age_base = (u32 *)(unsigned long)*ptr_addr;
for (i=0; i < header.scratch.n_bufs; i++) {
- buf_idx = *(u32 *)cmdbuf->buf;
- buf_idx *= 2; /* 8 bytes per buf */
+ buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+ *buf_idx *= 2; /* 8 bytes per buf */
- if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
+ if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
+ &dev_priv->scratch_ages[header.scratch.reg],
+ sizeof(u32)))
return -EINVAL;
- }
- if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
+ if (DRM_COPY_FROM_USER(&h_pending,
+ ref_age_base + *buf_idx + 1,
+ sizeof(u32)))
return -EINVAL;
- }
- if (h_pending == 0) {
+ if (h_pending == 0)
return -EINVAL;
- }
h_pending--;
- if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
+ if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
+ &h_pending,
+ sizeof(u32)))
return -EINVAL;
- }
- cmdbuf->buf += sizeof(buf_idx);
- cmdbuf->bufsz -= sizeof(buf_idx);
+ drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx));
}
BEGIN_RING(2);
@@ -1009,19 +989,16 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
if (!sz)
return 0;
- if (sz * stride * 4 > cmdbuf->bufsz)
+ if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
BEGIN_RING(3 + sz * stride);
OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
- OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride);
ADVANCE_RING();
- cmdbuf->buf += sz * stride * 4;
- cmdbuf->bufsz -= sz * stride * 4;
-
return 0;
}
@@ -1053,19 +1030,18 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
goto cleanup;
}
- while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
+ while (drm_buffer_unprocessed(cmdbuf->buffer)
+ >= sizeof(drm_r300_cmd_header_t)) {
int idx;
- drm_r300_cmd_header_t header;
-
- header.u = *(unsigned int *)cmdbuf->buf;
+ drm_r300_cmd_header_t *header, stack_header;
- cmdbuf->buf += sizeof(header);
- cmdbuf->bufsz -= sizeof(header);
+ header = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_header), &stack_header);
- switch (header.header.cmd_type) {
+ switch (header->header.cmd_type) {
case R300_CMD_PACKET0:
DRM_DEBUG("R300_CMD_PACKET0\n");
- ret = r300_emit_packet0(dev_priv, cmdbuf, header);
+ ret = r300_emit_packet0(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_packet0 failed\n");
goto cleanup;
@@ -1074,7 +1050,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_VPU:
DRM_DEBUG("R300_CMD_VPU\n");
- ret = r300_emit_vpu(dev_priv, cmdbuf, header);
+ ret = r300_emit_vpu(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_vpu failed\n");
goto cleanup;
@@ -1083,7 +1059,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_PACKET3:
DRM_DEBUG("R300_CMD_PACKET3\n");
- ret = r300_emit_packet3(dev_priv, cmdbuf, header);
+ ret = r300_emit_packet3(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_packet3 failed\n");
goto cleanup;
@@ -1117,8 +1093,8 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
int i;
RING_LOCALS;
- BEGIN_RING(header.delay.count);
- for (i = 0; i < header.delay.count; i++)
+ BEGIN_RING(header->delay.count);
+ for (i = 0; i < header->delay.count; i++)
OUT_RING(RADEON_CP_PACKET2);
ADVANCE_RING();
}
@@ -1126,7 +1102,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_DMA_DISCARD:
DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
- idx = header.dma.buf_idx;
+ idx = header->dma.buf_idx;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
@@ -1149,12 +1125,12 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_WAIT:
DRM_DEBUG("R300_CMD_WAIT\n");
- r300_cmd_wait(dev_priv, header);
+ r300_cmd_wait(dev_priv, *header);
break;
case R300_CMD_SCRATCH:
DRM_DEBUG("R300_CMD_SCRATCH\n");
- ret = r300_scratch(dev_priv, cmdbuf, header);
+ ret = r300_scratch(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_scratch failed\n");
goto cleanup;
@@ -1168,16 +1144,16 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
goto cleanup;
}
DRM_DEBUG("R300_CMD_R500FP\n");
- ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
+ ret = r300_emit_r500fp(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_r500fp failed\n");
goto cleanup;
}
break;
default:
- DRM_ERROR("bad cmd_type %i at %p\n",
- header.header.cmd_type,
- cmdbuf->buf - sizeof(header));
+ DRM_ERROR("bad cmd_type %i at byte %d\n",
+ header->header.cmd_type,
+ cmdbuf->buffer->iterator - (int)sizeof(*header));
ret = -EINVAL;
goto cleanup;
}
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 1735a2b..1a0d536 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -952,6 +952,7 @@
# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
# define R300_TXO_MACRO_TILE (1 << 2)
# define R300_TXO_MICRO_TILE (1 << 3)
+# define R300_TXO_MICRO_TILE_SQUARE (2 << 3)
# define R300_TXO_OFFSET_MASK 0xffffffe0
# define R300_TXO_OFFSET_SHIFT 5
/* END: Guess from R200 */
@@ -1360,6 +1361,7 @@
# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
+# define R300_COLOR_MICROTILE_SQUARE_ENABLE (2 << 17)
# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index d937324..c7593b8 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -40,28 +40,6 @@ static void r420_set_reg_safe(struct radeon_device *rdev)
rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
}
-int r420_mc_init(struct radeon_device *rdev)
-{
- int r;
-
- /* Setup GPU memory space */
- rdev->mc.vram_location = 0xFFFFFFFFUL;
- rdev->mc.gtt_location = 0xFFFFFFFFUL;
- if (rdev->flags & RADEON_IS_AGP) {
- r = radeon_agp_init(rdev);
- if (r) {
- radeon_agp_disable(rdev);
- } else {
- rdev->mc.gtt_location = rdev->mc.agp_base;
- }
- }
- r = radeon_mc_setup(rdev);
- if (r) {
- return r;
- }
- return 0;
-}
-
void r420_pipes_init(struct radeon_device *rdev)
{
unsigned tmp;
@@ -69,7 +47,8 @@ void r420_pipes_init(struct radeon_device *rdev)
unsigned num_pipes;
/* GA_ENHANCE workaround TCL deadlock issue */
- WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
+ WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
+ (1 << 2) | (1 << 3));
/* add idle wait as per freedesktop.org bug 24041 */
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
@@ -97,17 +76,17 @@ void r420_pipes_init(struct radeon_device *rdev)
tmp = (7 << 1);
break;
}
- WREG32(0x42C8, (1 << num_pipes) - 1);
+ WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
/* Sub pixel 1/12 so we can have 4K rendering according to doc */
- tmp |= (1 << 4) | (1 << 0);
- WREG32(0x4018, tmp);
+ tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
+ WREG32(R300_GB_TILE_CONFIG, tmp);
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"programming pipes. Bad things might happen.\n");
}
- tmp = RREG32(0x170C);
- WREG32(0x170C, tmp | (1 << 31));
+ tmp = RREG32(R300_DST_PIPE_CONFIG);
+ WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
WREG32(R300_RB2D_DSTCACHE_MODE,
RREG32(R300_RB2D_DSTCACHE_MODE) |
@@ -348,13 +327,15 @@ int r420_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- r300_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
- if (r) {
- return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
}
+ /* initialize memory controller */
+ r300_mc_init(rdev);
r420_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 74ad89b..0cf2ad2 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -717,54 +717,62 @@
#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
#define AVIVO_DC_GPIO_HPD_A 0x7e94
-
-#define AVIVO_GPIO_0 0x7e30
-#define AVIVO_GPIO_1 0x7e40
-#define AVIVO_GPIO_2 0x7e50
-#define AVIVO_GPIO_3 0x7e60
-
#define AVIVO_DC_GPIO_HPD_Y 0x7e9c
-#define AVIVO_I2C_STATUS 0x7d30
-# define AVIVO_I2C_STATUS_DONE (1 << 0)
-# define AVIVO_I2C_STATUS_NACK (1 << 1)
-# define AVIVO_I2C_STATUS_HALT (1 << 2)
-# define AVIVO_I2C_STATUS_GO (1 << 3)
-# define AVIVO_I2C_STATUS_MASK 0x7
-/* If radeon_mm_i2c is to be believed, this is HALT, NACK, and maybe
- * DONE? */
-# define AVIVO_I2C_STATUS_CMD_RESET 0x7
-# define AVIVO_I2C_STATUS_CMD_WAIT (1 << 3)
-#define AVIVO_I2C_STOP 0x7d34
-#define AVIVO_I2C_START_CNTL 0x7d38
-# define AVIVO_I2C_START (1 << 8)
-# define AVIVO_I2C_CONNECTOR0 (0 << 16)
-# define AVIVO_I2C_CONNECTOR1 (1 << 16)
-#define R520_I2C_START (1<<0)
-#define R520_I2C_STOP (1<<1)
-#define R520_I2C_RX (1<<2)
-#define R520_I2C_EN (1<<8)
-#define R520_I2C_DDC1 (0<<16)
-#define R520_I2C_DDC2 (1<<16)
-#define R520_I2C_DDC3 (2<<16)
-#define R520_I2C_DDC_MASK (3<<16)
-#define AVIVO_I2C_CONTROL2 0x7d3c
-# define AVIVO_I2C_7D3C_SIZE_SHIFT 8
-# define AVIVO_I2C_7D3C_SIZE_MASK (0xf << 8)
-#define AVIVO_I2C_CONTROL3 0x7d40
-/* Reading is done 4 bytes at a time: read the bottom 8 bits from
- * 7d44, four times in a row.
- * Writing is a little more complex. First write DATA with
- * 0xnnnnnnzz, then 0xnnnnnnyy, where nnnnnn is some non-deterministic
- * magic number, zz is, I think, the slave address, and yy is the byte
- * you want to write. */
-#define AVIVO_I2C_DATA 0x7d44
-#define R520_I2C_ADDR_COUNT_MASK (0x7)
-#define R520_I2C_DATA_COUNT_SHIFT (8)
-#define R520_I2C_DATA_COUNT_MASK (0xF00)
-#define AVIVO_I2C_CNTL 0x7d50
-# define AVIVO_I2C_EN (1 << 0)
-# define AVIVO_I2C_RESET (1 << 8)
+#define AVIVO_DC_I2C_STATUS1 0x7d30
+# define AVIVO_DC_I2C_DONE (1 << 0)
+# define AVIVO_DC_I2C_NACK (1 << 1)
+# define AVIVO_DC_I2C_HALT (1 << 2)
+# define AVIVO_DC_I2C_GO (1 << 3)
+#define AVIVO_DC_I2C_RESET 0x7d34
+# define AVIVO_DC_I2C_SOFT_RESET (1 << 0)
+# define AVIVO_DC_I2C_ABORT (1 << 8)
+#define AVIVO_DC_I2C_CONTROL1 0x7d38
+# define AVIVO_DC_I2C_START (1 << 0)
+# define AVIVO_DC_I2C_STOP (1 << 1)
+# define AVIVO_DC_I2C_RECEIVE (1 << 2)
+# define AVIVO_DC_I2C_EN (1 << 8)
+# define AVIVO_DC_I2C_PIN_SELECT(x) ((x) << 16)
+# define AVIVO_SEL_DDC1 0
+# define AVIVO_SEL_DDC2 1
+# define AVIVO_SEL_DDC3 2
+#define AVIVO_DC_I2C_CONTROL2 0x7d3c
+# define AVIVO_DC_I2C_ADDR_COUNT(x) ((x) << 0)
+# define AVIVO_DC_I2C_DATA_COUNT(x) ((x) << 8)
+#define AVIVO_DC_I2C_CONTROL3 0x7d40
+# define AVIVO_DC_I2C_DATA_DRIVE_EN (1 << 0)
+# define AVIVO_DC_I2C_DATA_DRIVE_SEL (1 << 1)
+# define AVIVO_DC_I2C_CLK_DRIVE_EN (1 << 7)
+# define AVIVO_DC_I2C_RD_INTRA_BYTE_DELAY(x) ((x) << 8)
+# define AVIVO_DC_I2C_WR_INTRA_BYTE_DELAY(x) ((x) << 16)
+# define AVIVO_DC_I2C_TIME_LIMIT(x) ((x) << 24)
+#define AVIVO_DC_I2C_DATA 0x7d44
+#define AVIVO_DC_I2C_INTERRUPT_CONTROL 0x7d48
+# define AVIVO_DC_I2C_INTERRUPT_STATUS (1 << 0)
+# define AVIVO_DC_I2C_INTERRUPT_AK (1 << 8)
+# define AVIVO_DC_I2C_INTERRUPT_ENABLE (1 << 16)
+#define AVIVO_DC_I2C_ARBITRATION 0x7d50
+# define AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C (1 << 0)
+# define AVIVO_DC_I2C_SW_CAN_USE_I2C (1 << 1)
+# define AVIVO_DC_I2C_SW_DONE_USING_I2C (1 << 8)
+# define AVIVO_DC_I2C_HW_NEEDS_I2C (1 << 9)
+# define AVIVO_DC_I2C_ABORT_HDCP_I2C (1 << 16)
+# define AVIVO_DC_I2C_HW_USING_I2C (1 << 17)
+
+#define AVIVO_DC_GPIO_DDC1_MASK 0x7e40
+#define AVIVO_DC_GPIO_DDC1_A 0x7e44
+#define AVIVO_DC_GPIO_DDC1_EN 0x7e48
+#define AVIVO_DC_GPIO_DDC1_Y 0x7e4c
+
+#define AVIVO_DC_GPIO_DDC2_MASK 0x7e50
+#define AVIVO_DC_GPIO_DDC2_A 0x7e54
+#define AVIVO_DC_GPIO_DDC2_EN 0x7e58
+#define AVIVO_DC_GPIO_DDC2_Y 0x7e5c
+
+#define AVIVO_DC_GPIO_DDC3_MASK 0x7e60
+#define AVIVO_DC_GPIO_DDC3_A 0x7e64
+#define AVIVO_DC_GPIO_DDC3_EN 0x7e68
+#define AVIVO_DC_GPIO_DDC3_Y 0x7e6c
#define AVIVO_DISP_INTERRUPT_STATUS 0x7edc
# define AVIVO_D1_VBLANK_INTERRUPT (1 << 4)
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index ddf5731..2b8a5dd 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -119,13 +119,15 @@ static void r520_vram_get_type(struct radeon_device *rdev)
rdev->mc.vram_width *= 2;
}
-void r520_vram_info(struct radeon_device *rdev)
+void r520_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
r520_vram_get_type(rdev);
-
r100_vram_init_sizes(rdev);
+ radeon_vram_location(rdev, &rdev->mc, 0);
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
@@ -267,12 +269,15 @@ int r520_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- r520_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
- if (r)
- return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize memory controller */
+ r520_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2ffcf5a..c522901 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -353,23 +353,14 @@ void r600_hpd_fini(struct radeon_device *rdev)
/*
* R600 PCIE GART
*/
-int r600_gart_clear_page(struct radeon_device *rdev, int i)
-{
- void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
- u64 pte;
-
- if (i < 0 || i > rdev->gart.num_gpu_pages)
- return -EINVAL;
- pte = 0;
- writeq(pte, ((void __iomem *)ptr) + (i * 8));
- return 0;
-}
-
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
unsigned i;
u32 tmp;
+ /* flush hdp cache so updates hit vram */
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
@@ -416,6 +407,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -619,6 +611,68 @@ static void r600_mc_program(struct radeon_device *rdev)
rv515_vga_render_disable(rdev);
}
+/**
+ * r600_vram_gtt_location - try to find VRAM & GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * This function tries to place VRAM at the same address as in the CPU
+ * (PCI) address space, as some GPUs seem to have issues when VRAM is
+ * reprogrammed to a different address.
+ *
+ * If there is not enough space to fit the non-visible VRAM after the
+ * aperture, the VRAM size is limited to the aperture.
+ *
+ * If AGP is in use, VRAM is placed adjacent to the AGP aperture, as the
+ * two need to form one contiguous range from the GPU's point of view so
+ * that the GPU can be programmed to catch accesses outside them (weird
+ * GPU policy, see ??).
+ *
+ * This function never fails; the worst case is a limited VRAM or GTT size.
+ *
+ * Note: GTT start, end, size should be initialized before calling this
+ * function on AGP platform.
+ */
+void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+ u64 size_bf, size_af;
+
+ if (mc->mc_vram_size > 0xE0000000) {
+ /* leave room for at least 512M GTT */
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = 0xE0000000;
+ mc->mc_vram_size = 0xE0000000;
+ }
+ if (rdev->flags & RADEON_IS_AGP) {
+ size_bf = mc->gtt_start;
+ size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+ if (size_bf > size_af) {
+ if (mc->mc_vram_size > size_bf) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_bf;
+ mc->mc_vram_size = size_bf;
+ }
+ mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+ } else {
+ if (mc->mc_vram_size > size_af) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_af;
+ mc->mc_vram_size = size_af;
+ }
+ mc->vram_start = mc->gtt_end;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+ } else {
+ u64 base = 0;
+ if (rdev->flags & RADEON_IS_IGP)
+ base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ radeon_gtt_location(rdev, mc);
+ }
+}
+
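A worked example of the AGP branch above, with hypothetical numbers:

/* GTT aperture at [0xD0000000, 0xDFFFFFFF], 512 MB VRAM:
 *   size_bf = 0xD0000000                               (room below)
 *   size_af = 0xFFFFFFFF - 0xDFFFFFFF + 1 = 0x20000000 (room above)
 * size_bf > size_af, so VRAM goes immediately below the aperture:
 *   vram_start = 0xD0000000 - 0x20000000 = 0xB0000000
 *   vram_end   = 0xCFFFFFFF
 * leaving VRAM and GTT contiguous in the GPU address space.
 */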
int r600_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
@@ -658,75 +712,21 @@ int r600_mc_init(struct radeon_device *rdev)
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ /* FIXME remove this once we support unmappable VRAM */
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;
-
- if (rdev->flags & RADEON_IS_AGP) {
- /* gtt_size is setup by radeon_agp_init */
- rdev->mc.gtt_location = rdev->mc.agp_base;
- tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
- /* Try to put vram before or after AGP because we
- * we want SYSTEM_APERTURE to cover both VRAM and
- * AGP so that GPU can catch out of VRAM/AGP access
- */
- if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
- /* Enough place before */
- rdev->mc.vram_location = rdev->mc.gtt_location -
- rdev->mc.mc_vram_size;
- } else if (tmp > rdev->mc.mc_vram_size) {
- /* Enough place after */
- rdev->mc.vram_location = rdev->mc.gtt_location +
- rdev->mc.gtt_size;
- } else {
- /* Try to setup VRAM then AGP might not
- * not work on some card
- */
- rdev->mc.vram_location = 0x00000000UL;
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- }
- } else {
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
- 0xFFFF) << 24;
- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
- if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
- /* Enough place after vram */
- rdev->mc.gtt_location = tmp;
- } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
- /* Enough place before vram */
- rdev->mc.gtt_location = 0;
- } else {
- /* Not enough place after or before shrink
- * gart size
- */
- if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
- rdev->mc.gtt_location = 0;
- rdev->mc.gtt_size = rdev->mc.vram_location;
- } else {
- rdev->mc.gtt_location = tmp;
- rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
- }
- }
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
- rdev->mc.vram_start = rdev->mc.vram_location;
- rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
- rdev->mc.gtt_start = rdev->mc.gtt_location;
- rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ r600_vram_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-
if (rdev->flags & RADEON_IS_IGP)
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
-
return 0;
}
@@ -981,6 +981,9 @@ void r600_gpu_init(struct radeon_device *rdev)
{
u32 tiling_config;
u32 ramcfg;
+ u32 backend_map;
+ u32 cc_rb_backend_disable;
+ u32 cc_gc_shader_pipe_config;
u32 tmp;
int i, j;
u32 sq_config;
@@ -1090,8 +1093,11 @@ void r600_gpu_init(struct radeon_device *rdev)
default:
break;
}
+ rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
+ rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= GROUP_SIZE(0);
+ rdev->config.r600.tiling_group_size = 256;
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
tiling_config |= ROW_TILING(3);
@@ -1101,24 +1107,33 @@ void r600_gpu_init(struct radeon_device *rdev)
tiling_config |= SAMPLE_SPLIT(tmp);
}
tiling_config |= BANK_SWAPS(1);
- tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
- rdev->config.r600.max_backends,
- (0xff << rdev->config.r600.max_backends) & 0xff);
- tiling_config |= BACKEND_MAP(tmp);
+
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ cc_rb_backend_disable |=
+ BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ cc_gc_shader_pipe_config |=
+ INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
+ cc_gc_shader_pipe_config |=
+ INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
+
+ backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
+ (R6XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable &
+ R6XX_MAX_BACKENDS_MASK) >> 16)),
+ (cc_rb_backend_disable >> 16));
+
+ tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
- tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
- WREG32(CC_RB_BACKEND_DISABLE, tmp);
-
/* Setup pipes */
- tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
- tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
- WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);
+ WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
+ tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
@@ -1783,12 +1798,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
/* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
+ radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
+ /* wait for 3D idle clean */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
- radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
- radeon_ring_write(rdev, 1);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(rdev, RB_INT_STAT);
@@ -2745,6 +2765,7 @@ restart_ih:
case 0: /* D1 vblank */
if (disp_int & LB_D1_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 0);
+ wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
@@ -2765,6 +2786,7 @@ restart_ih:
case 0: /* D2 vblank */
if (disp_int & LB_D2_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 1);
+ wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 0dcb690..db92801 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
+ return rdev->family >= CHIP_R600
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
@@ -147,15 +147,23 @@ static void r600_audio_update_hdmi(unsigned long param)
}
/*
+ * turn on/off audio engine
+ */
+static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
+{
+ DRM_INFO("%s audio support", enable ? "Enabling" : "Disabling");
+ WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
+}
+
+/*
* initialize the audio vars and register the update timer
*/
int r600_audio_init(struct radeon_device *rdev)
{
- if (!r600_audio_chipset_supported(rdev))
+ if (!radeon_audio || !r600_audio_chipset_supported(rdev))
return 0;
- DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling");
- WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
+ r600_audio_engine_enable(rdev, true);
rdev->audio_channels = -1;
rdev->audio_rate = -1;
@@ -258,9 +266,10 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
*/
void r600_audio_fini(struct radeon_device *rdev)
{
- if (!r600_audio_chipset_supported(rdev))
+ if (!radeon_audio || !r600_audio_chipset_supported(rdev))
return;
del_timer(&rdev->audio_timer);
- WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
+
+ r600_audio_engine_enable(rdev, false);
}
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 5ea4323..f4fb88e 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -49,7 +49,7 @@ set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64
RING_LOCALS;
DRM_DEBUG("\n");
- h = (h + 7) & ~7;
+ h = ALIGN(h, 8);
if (h < 8)
h = 8;
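The ALIGN() conversions in this and the following file are behavior-preserving for power-of-two alignments, since the kernel macro expands to the same round-up:

/* (h + 7) & ~7  ==  ALIGN(h, 8)  ==  ((h + 8 - 1) & ~(8 - 1))
 * e.g. h = 13 -> 16, h = 16 -> 16
 */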
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 446b765..f6c6c77 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -25,7 +25,7 @@ set_render_target(struct radeon_device *rdev, int format,
u32 cb_color_info;
int pitch, slice;
- h = (h + 7) & ~7;
+ h = ALIGN(h, 8);
if (h < 8)
h = 8;
@@ -396,15 +396,13 @@ set_default_state(struct radeon_device *rdev)
NUM_ES_STACK_ENTRIES(num_es_stack_entries));
/* emit an IB pointing at default state */
- dwords = (rdev->r600_blit.state_len + 0xf) & ~0xf;
+ dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(rdev, dwords);
- radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
- radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
/* SQ config */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
@@ -578,9 +576,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
ring_size += 40; /* shaders + def state */
- ring_size += 7; /* fence emit for VB IB */
+ ring_size += 10; /* fence emit for VB IB */
ring_size += 5; /* done copy */
- ring_size += 7; /* fence emit for done copy */
+ ring_size += 10; /* fence emit for done copy */
r = radeon_ring_lock(rdev, ring_size);
if (r)
return r;
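The 7 -> 10 bump above matches the dword budget of the reworked fence emit earlier in this diff:

/* old r600_fence_ring_emit: 3 (scratch SET_CONFIG_REG) + 2 (HDP flush
 *                           PACKET0) + 2 (CP_INT PACKET0)         = 7
 * new r600_fence_ring_emit: 2 (EVENT_WRITE) + 3 (3D idle-clean
 *                           SET_CONFIG_REG) + 3 (scratch) + 2     = 10
 */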
@@ -594,13 +592,6 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
int r;
- radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
- radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
- /* wait for 3D idle clean */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
-
if (rdev->r600_blit.vb_ib)
r600_vb_ib_put(rdev);
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index d745e81..a112c59 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -9,11 +9,6 @@ const u32 r6xx_default_state[] =
0xc0012800,
0x80000000,
0x80000000,
- 0xc0004600,
- 0x00000016,
- 0xc0016800,
- 0x00000010,
- 0x00028000,
0xc0016800,
0x00000010,
0x00008000,
@@ -531,11 +526,6 @@ const u32 r7xx_default_state[] =
0xc0012800,
0x80000000,
0x80000000,
- 0xc0004600,
- 0x00000016,
- 0xc0016800,
- 0x00000010,
- 0x00028000,
0xc0016800,
0x00000010,
0x00008000,
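
The five dwords removed from each default-state array decode to the same flush/wait pair dropped from set_default_state() in the r600_blit_kms.c hunk above. Assuming the standard r600 type-3 packet header layout 0xC0000000 | (count << 16) | (opcode << 8):

/* 0xc0004600  PACKET3_EVENT_WRITE (opcode 0x46), count 0
 * 0x00000016    CACHE_FLUSH_AND_INV_EVENT
 * 0xc0016800  PACKET3_SET_CONFIG_REG (opcode 0x68), count 1
 * 0x00000010    (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2
 * 0x00028000    WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit
 */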
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 75bcf35..40416c0 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -734,8 +734,8 @@ static void r600_gfx_init(struct drm_device *dev,
u32 hdp_host_path_cntl;
u32 backend_map;
u32 gb_tiling_config = 0;
- u32 cc_rb_backend_disable = 0;
- u32 cc_gc_shader_pipe_config = 0;
+ u32 cc_rb_backend_disable;
+ u32 cc_gc_shader_pipe_config;
u32 ramcfg;
/* setup chip specs */
@@ -857,29 +857,44 @@ static void r600_gfx_init(struct drm_device *dev,
gb_tiling_config |= R600_BANK_SWAPS(1);
- backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
- dev_priv->r600_max_backends,
- (0xff << dev_priv->r600_max_backends) & 0xff);
- gb_tiling_config |= R600_BACKEND_MAP(backend_map);
+ cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ cc_rb_backend_disable |=
+ R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);
- cc_gc_shader_pipe_config =
+ cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ cc_gc_shader_pipe_config |=
R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK);
- cc_rb_backend_disable =
- R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);
+ backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+ (R6XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable &
+ R6XX_MAX_BACKENDS_MASK) >> 16)),
+ (cc_rb_backend_disable >> 16));
+ gb_tiling_config |= R600_BACKEND_MAP(backend_map);
RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ if (gb_tiling_config & 0xc0) {
+ dev_priv->r600_group_size = 512;
+ } else {
+ dev_priv->r600_group_size = 256;
+ }
+ dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
+ if (gb_tiling_config & 0x30) {
+ dev_priv->r600_nbanks = 8;
+ } else {
+ dev_priv->r600_nbanks = 4;
+ }
RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
num_qd_pipes =
- R6XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK);
+ R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
@@ -1151,7 +1166,8 @@ static void r600_gfx_init(struct drm_device *dev,
}
-static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
+static u32 r700_get_tile_pipe_to_backend_map(drm_radeon_private_t *dev_priv,
+ u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
@@ -1162,6 +1178,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
u32 swizzle_pipe[R7XX_MAX_PIPES];
u32 cur_backend;
u32 i;
+ bool force_no_swizzle;
if (num_tile_pipes > R7XX_MAX_PIPES)
num_tile_pipes = R7XX_MAX_PIPES;
@@ -1191,6 +1208,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+ case CHIP_RV770:
+ case CHIP_RV730:
+ force_no_swizzle = false;
+ break;
+ case CHIP_RV710:
+ case CHIP_RV740:
+ default:
+ force_no_swizzle = true;
+ break;
+ }
+
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
@@ -1201,49 +1230,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
swizzle_pipe[1] = 1;
break;
case 3:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 1;
+ }
break;
case 4:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 3;
- swizzle_pipe[3] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 3;
+ swizzle_pipe[3] = 1;
+ }
break;
case 5:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 1;
+ swizzle_pipe[4] = 3;
+ }
break;
case 6:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 5;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 5;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ }
break;
case 7:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 5;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ swizzle_pipe[6] = 5;
+ }
break;
case 8:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 7;
- swizzle_pipe[7] = 5;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ swizzle_pipe[7] = 7;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ swizzle_pipe[6] = 7;
+ swizzle_pipe[7] = 5;
+ }
break;
}
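
After this switch the function assigns the next enabled render backend to each tile pipe in swizzle_pipe[] order, so force_no_swizzle only changes the visiting order, not which backends are used. The assignment loop (unchanged by this patch) is roughly:

	cur_backend = 0;
	for (i = 0; i < num_tile_pipes; ++i) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[i] * 2)));

		cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
	}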
@@ -1264,8 +1344,10 @@ static void r700_gfx_init(struct drm_device *dev,
drm_radeon_private_t *dev_priv)
{
int i, j, num_qd_pipes;
+ u32 ta_aux_cntl;
u32 sx_debug_1;
u32 smx_dc_ctl0;
+ u32 db_debug3;
u32 num_gs_verts_per_thread;
u32 vgt_gs_per_es;
u32 gs_prim_buffer_depth = 0;
@@ -1276,8 +1358,8 @@ static void r700_gfx_init(struct drm_device *dev,
u32 sq_dyn_gpr_size_simd_ab_0;
u32 backend_map;
u32 gb_tiling_config = 0;
- u32 cc_rb_backend_disable = 0;
- u32 cc_gc_shader_pipe_config = 0;
+ u32 cc_rb_backend_disable;
+ u32 cc_gc_shader_pipe_config;
u32 mc_arb_ramcfg;
u32 db_debug4;
@@ -1428,38 +1510,51 @@ static void r700_gfx_init(struct drm_device *dev,
gb_tiling_config |= R600_BANK_SWAPS(1);
- if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
- backend_map = 0x28;
- else
- backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
- dev_priv->r600_max_backends,
- (0xff << dev_priv->r600_max_backends) & 0xff);
- gb_tiling_config |= R600_BACKEND_MAP(backend_map);
+ cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ cc_rb_backend_disable |=
+ R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);
- cc_gc_shader_pipe_config =
+ cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ cc_gc_shader_pipe_config |=
R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK);
- cc_rb_backend_disable =
- R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
+ backend_map = 0x28;
+ else
+ backend_map = r700_get_tile_pipe_to_backend_map(dev_priv,
+ dev_priv->r600_max_tile_pipes,
+ (R7XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable &
+ R7XX_MAX_BACKENDS_MASK) >> 16)),
+ (cc_rb_backend_disable >> 16));
+ gb_tiling_config |= R600_BACKEND_MAP(backend_map);
RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ if (gb_tiling_config & 0xc0) {
+ dev_priv->r600_group_size = 512;
+ } else {
+ dev_priv->r600_group_size = 256;
+ }
+ dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
+ if (gb_tiling_config & 0x30) {
+ dev_priv->r600_nbanks = 8;
+ } else {
+ dev_priv->r600_nbanks = 4;
+ }
RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
- RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
- RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
num_qd_pipes =
- R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK);
+ R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
@@ -1469,10 +1564,8 @@ static void r700_gfx_init(struct drm_device *dev,
RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30));
- RADEON_WRITE(R600_TA_CNTL_AUX, (R600_DISABLE_CUBE_ANISO |
- R600_SYNC_GRADIENT |
- R600_SYNC_WALKER |
- R600_SYNC_ALIGNER));
+ ta_aux_cntl = RADEON_READ(R600_TA_CNTL_AUX);
+ RADEON_WRITE(R600_TA_CNTL_AUX, ta_aux_cntl | R600_DISABLE_CUBE_ANISO);
sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1);
sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS;
@@ -1483,14 +1576,28 @@ static void r700_gfx_init(struct drm_device *dev,
smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1);
RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0);
- RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
- R700_GS_FLUSH_CTL(4) |
- R700_ACK_FLUSH_CTL(3) |
- R700_SYNC_FLUSH_CTL));
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV740)
+ RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
+ R700_GS_FLUSH_CTL(4) |
+ R700_ACK_FLUSH_CTL(3) |
+ R700_SYNC_FLUSH_CTL));
- if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)
- RADEON_WRITE(R700_DB_DEBUG3, R700_DB_CLK_OFF_DELAY(0x1f));
- else {
+ db_debug3 = RADEON_READ(R700_DB_DEBUG3);
+ db_debug3 &= ~R700_DB_CLK_OFF_DELAY(0x1f);
+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+ case CHIP_RV770:
+ case CHIP_RV740:
+ db_debug3 |= R700_DB_CLK_OFF_DELAY(0x1f);
+ break;
+ case CHIP_RV710:
+ case CHIP_RV730:
+ default:
+ db_debug3 |= R700_DB_CLK_OFF_DELAY(2);
+ break;
+ }
+ RADEON_WRITE(R700_DB_DEBUG3, db_debug3);
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV770) {
db_debug4 = RADEON_READ(RV700_DB_DEBUG4);
db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER;
RADEON_WRITE(RV700_DB_DEBUG4, db_debug4);
@@ -1519,10 +1626,10 @@ static void r700_gfx_init(struct drm_device *dev,
R600_ALU_UPDATE_FIFO_HIWATER(0x8));
switch (dev_priv->flags & RADEON_FAMILY_MASK) {
case CHIP_RV770:
- sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
- break;
case CHIP_RV730:
case CHIP_RV710:
+ sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
+ break;
case CHIP_RV740:
default:
sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
@@ -2529,3 +2636,12 @@ out:
mutex_unlock(&dev_priv->cs_mutex);
return r;
}
+
+void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size)
+{
+ struct drm_radeon_private *dev_priv = dev->dev_private;
+
+ *npipes = dev_priv->r600_npipes;
+ *nbanks = dev_priv->r600_nbanks;
+ *group_size = dev_priv->r600_group_size;
+}
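
The three new dev_priv fields cache the tiling geometry the CS checker needs; a self-contained sketch of the decode that both r600_gfx_init() and r700_gfx_init() now perform on gb_tiling_config (field positions as in the hunks above):

static void decode_tiling_conf(u32 gb_tiling_config,
			       u32 *npipes, u32 *nbanks, u32 *group_size)
{
	*group_size = (gb_tiling_config & 0xc0) ? 512 : 256; /* bits 7:6 */
	*npipes     = 1 << ((gb_tiling_config >> 1) & 0x7);  /* bits 3:1 */
	*nbanks     = (gb_tiling_config & 0x30) ? 8 : 4;     /* bits 5:4 */
}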
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index e4c45ec..cd2c63b 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,6 +28,7 @@
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
+#include "r600_reg_safe.h"
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
@@ -35,11 +36,313 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
+extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
+
struct r600_cs_track {
- u32 cb_color0_base_last;
+ /* configuration we mirror so that we can use the same code between kms/ums */
+ u32 group_size;
+ u32 nbanks;
+ u32 npipes;
+ /* values we track */
+ u32 nsamples;
+ u32 cb_color_base_last[8];
+ struct radeon_bo *cb_color_bo[8];
+ u32 cb_color_bo_offset[8];
+ struct radeon_bo *cb_color_frag_bo[8];
+ struct radeon_bo *cb_color_tile_bo[8];
+ u32 cb_color_info[8];
+ u32 cb_color_size_idx[8];
+ u32 cb_target_mask;
+ u32 cb_shader_mask;
+ u32 cb_color_size[8];
+ u32 vgt_strmout_en;
+ u32 vgt_strmout_buffer_en;
+ u32 db_depth_control;
+ u32 db_depth_info;
+ u32 db_depth_size_idx;
+ u32 db_depth_view;
+ u32 db_depth_size;
+ u32 db_offset;
+ struct radeon_bo *db_bo;
};
+static inline int r600_bpe_from_format(u32 *bpe, u32 format)
+{
+ switch (format) {
+ case V_038004_COLOR_8:
+ case V_038004_COLOR_4_4:
+ case V_038004_COLOR_3_3_2:
+ case V_038004_FMT_1:
+ *bpe = 1;
+ break;
+ case V_038004_COLOR_16:
+ case V_038004_COLOR_16_FLOAT:
+ case V_038004_COLOR_8_8:
+ case V_038004_COLOR_5_6_5:
+ case V_038004_COLOR_6_5_5:
+ case V_038004_COLOR_1_5_5_5:
+ case V_038004_COLOR_4_4_4_4:
+ case V_038004_COLOR_5_5_5_1:
+ *bpe = 2;
+ break;
+ case V_038004_FMT_8_8_8:
+ *bpe = 3;
+ break;
+ case V_038004_COLOR_32:
+ case V_038004_COLOR_32_FLOAT:
+ case V_038004_COLOR_16_16:
+ case V_038004_COLOR_16_16_FLOAT:
+ case V_038004_COLOR_8_24:
+ case V_038004_COLOR_8_24_FLOAT:
+ case V_038004_COLOR_24_8:
+ case V_038004_COLOR_24_8_FLOAT:
+ case V_038004_COLOR_10_11_11:
+ case V_038004_COLOR_10_11_11_FLOAT:
+ case V_038004_COLOR_11_11_10:
+ case V_038004_COLOR_11_11_10_FLOAT:
+ case V_038004_COLOR_2_10_10_10:
+ case V_038004_COLOR_8_8_8_8:
+ case V_038004_COLOR_10_10_10_2:
+ case V_038004_FMT_5_9_9_9_SHAREDEXP:
+ case V_038004_FMT_32_AS_8:
+ case V_038004_FMT_32_AS_8_8:
+ *bpe = 4;
+ break;
+ case V_038004_COLOR_X24_8_32_FLOAT:
+ case V_038004_COLOR_32_32:
+ case V_038004_COLOR_32_32_FLOAT:
+ case V_038004_COLOR_16_16_16_16:
+ case V_038004_COLOR_16_16_16_16_FLOAT:
+ *bpe = 8;
+ break;
+ case V_038004_FMT_16_16_16:
+ case V_038004_FMT_16_16_16_FLOAT:
+ *bpe = 6;
+ break;
+ case V_038004_FMT_32_32_32:
+ case V_038004_FMT_32_32_32_FLOAT:
+ *bpe = 12;
+ break;
+ case V_038004_COLOR_32_32_32_32:
+ case V_038004_COLOR_32_32_32_32_FLOAT:
+ *bpe = 16;
+ break;
+ case V_038004_FMT_GB_GR:
+ case V_038004_FMT_BG_RG:
+ case V_038004_COLOR_INVALID:
+ *bpe = 16;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void r600_cs_track_init(struct r600_cs_track *track)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ track->cb_color_base_last[i] = 0;
+ track->cb_color_size[i] = 0;
+ track->cb_color_size_idx[i] = 0;
+ track->cb_color_info[i] = 0;
+ track->cb_color_bo[i] = NULL;
+ track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+ }
+ track->cb_target_mask = 0xFFFFFFFF;
+ track->cb_shader_mask = 0xFFFFFFFF;
+ track->db_bo = NULL;
+ /* assume the biggest format and that htile is enabled */
+ track->db_depth_info = 7 | (1 << 25);
+ track->db_depth_view = 0xFFFFC000;
+ track->db_depth_size = 0xFFFFFFFF;
+ track->db_depth_size_idx = 0;
+ track->db_depth_control = 0xFFFFFFFF;
+}
+
+static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+{
+ struct r600_cs_track *track = p->track;
+ u32 bpe = 0, pitch, slice_tile_max, size, tmp, height;
+ volatile u32 *ib = p->ib->ptr;
+
+ if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
+ dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
+ return -EINVAL;
+ }
+ size = radeon_bo_size(track->cb_color_bo[i]);
+ if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
+ dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
+ __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
+ i, track->cb_color_info[i]);
+ return -EINVAL;
+ }
+ pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) << 3;
+ slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
+ if (!pitch) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) for %d invalid (0x%08X)\n",
+ __func__, __LINE__, pitch, i, track->cb_color_size[i]);
+ return -EINVAL;
+ }
+ height = size / (pitch * bpe);
+ if (height > 8192)
+ height = 8192;
+ switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
+ case V_0280A0_ARRAY_LINEAR_GENERAL:
+ case V_0280A0_ARRAY_LINEAR_ALIGNED:
+ if (pitch & 0x3f) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d x %d = %d) invalid\n",
+ __func__, __LINE__, pitch, bpe, pitch * bpe);
+ return -EINVAL;
+ }
+ if ((pitch * bpe) & (track->group_size - 1)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ break;
+ case V_0280A0_ARRAY_1D_TILED_THIN1:
+ if ((pitch * 8 * bpe * track->nsamples) & (track->group_size - 1)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ height &= ~0x7;
+ if (!height)
+ height = 8;
+ break;
+ case V_0280A0_ARRAY_2D_TILED_THIN1:
+ if (pitch & ((8 * track->nbanks) - 1)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ tmp = pitch * 8 * bpe * track->nsamples;
+ tmp = tmp / track->nbanks;
+ if (tmp & (track->group_size - 1)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ height &= ~((16 * track->npipes) - 1);
+ if (!height)
+ height = 16 * track->npipes;
+ break;
+ default:
+ dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+ G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+ track->cb_color_info[i]);
+ return -EINVAL;
+ }
+ /* check offset */
+ tmp = height * pitch;
+ if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
+ dev_warn(p->dev, "%s offset[%d] %d to big\n", __func__, i, track->cb_color_bo_offset[i]);
+ return -EINVAL;
+ }
+ /* limit max tile */
+ tmp = (height * pitch) >> 6;
+ if (tmp < slice_tile_max)
+ slice_tile_max = tmp;
+ tmp = S_028060_PITCH_TILE_MAX((pitch >> 3) - 1) |
+ S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
+ ib[track->cb_color_size_idx[i]] = tmp;
+ return 0;
+}
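
To make the pitch bookkeeping concrete, a worked pass under assumed values:

/* Hypothetical 1024-pixel-wide, 32-bit (bpe = 4) color buffer:
 * PITCH_TILE_MAX = 1024/8 - 1 = 127, so pitch = (127 + 1) << 3 = 1024.
 * For V_0280A0_ARRAY_1D_TILED_THIN1 with nsamples = 1 and
 * group_size = 256, the check requires pitch * 8 * bpe * nsamples =
 * 1024 * 8 * 4 = 32768 to be a multiple of 256 (it is), and height
 * is rounded down to a multiple of 8. */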
+
+static int r600_cs_track_check(struct radeon_cs_parser *p)
+{
+ struct r600_cs_track *track = p->track;
+ u32 tmp;
+ int r, i;
+ volatile u32 *ib = p->ib->ptr;
+
+ /* on legacy kernels we don't perform the advanced checks */
+ if (p->rdev == NULL)
+ return 0;
+ /* we don't support output buffers yet */
+ if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
+ dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
+ return -EINVAL;
+ }
+ /* check that we have a cb for each enabled target; we don't check
+ * shader_mask because it seems mesa isn't always setting it :(
+ */
+ tmp = track->cb_target_mask;
+ for (i = 0; i < 8; i++) {
+ if ((tmp >> (i * 4)) & 0xF) {
+ /* at least one component is enabled */
+ if (track->cb_color_bo[i] == NULL) {
+ dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+ __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+ return -EINVAL;
+ }
+ /* perform rewrite of CB_COLOR[0-7]_SIZE */
+ r = r600_cs_track_validate_cb(p, i);
+ if (r)
+ return r;
+ }
+ }
+ /* Check depth buffer */
+ if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+ G_028800_Z_ENABLE(track->db_depth_control)) {
+ u32 nviews, bpe, ntiles;
+ if (track->db_bo == NULL) {
+ dev_warn(p->dev, "z/stencil with no depth buffer\n");
+ return -EINVAL;
+ }
+ if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
+ dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
+ return -EINVAL;
+ }
+ switch (G_028010_FORMAT(track->db_depth_info)) {
+ case V_028010_DEPTH_16:
+ bpe = 2;
+ break;
+ case V_028010_DEPTH_X8_24:
+ case V_028010_DEPTH_8_24:
+ case V_028010_DEPTH_X8_24_FLOAT:
+ case V_028010_DEPTH_8_24_FLOAT:
+ case V_028010_DEPTH_32_FLOAT:
+ bpe = 4;
+ break;
+ case V_028010_DEPTH_X24_8_32_FLOAT:
+ bpe = 8;
+ break;
+ default:
+ dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
+ return -EINVAL;
+ }
+ if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+ if (!track->db_depth_size_idx) {
+ dev_warn(p->dev, "z/stencil buffer size not set\n");
+ return -EINVAL;
+ }
+ printk_once(KERN_WARNING "You have old & broken userspace, please consider updating mesa\n");
+ tmp = radeon_bo_size(track->db_bo) - track->db_offset;
+ tmp = (tmp / bpe) >> 6;
+ if (!tmp) {
+ dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
+ track->db_depth_size, bpe, track->db_offset,
+ radeon_bo_size(track->db_bo));
+ return -EINVAL;
+ }
+ ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
+ } else {
+ ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+ nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
+ tmp = ntiles * bpe * 64 * nviews;
+ if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
+ dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n",
+ track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+ radeon_bo_size(track->db_bo));
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
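
The factor of 64 in the depth-size formula is the pixel count of one 8x8 tile; a worked example with assumed values:

/* Hypothetical 1024x768 DEPTH_32_FLOAT buffer (bpe = 4):
 * 1024 * 768 / 64 = 12288 tiles, so SLICE_TILE_MAX = 12287 and
 * ntiles = 12288.  With nviews = 1 the required size is
 * ntiles * bpe * 64 * nviews = 3145728 bytes = 1024 * 768 * 4. */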
+
/**
* r600_cs_packet_parse() - parse cp packet and point ib index to next packet
* @parser: parser structure holding parsing context.
@@ -359,6 +662,334 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
return 0;
}
+/**
+ * r600_cs_check_reg() - check if register is authorized or not
+ * @p: parser structure holding parsing context
+ * @reg: register we are testing
+ * @idx: index into the cs buffer
+ *
+ * This function will test the register against r600_reg_safe_bm and
+ * return 0 if it is safe. Registers flagged in the bitmap are then
+ * checked against a list of registers needing special handling.
+ */
+static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+ struct r600_cs_track *track = (struct r600_cs_track *)p->track;
+ struct radeon_cs_reloc *reloc;
+ u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
+ u32 m, i, tmp, *ib;
+ int r;
+
+ i = (reg >> 7);
+ if (i >= last_reg) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ m = 1 << ((reg >> 2) & 31);
+ if (!(r600_reg_safe_bm[i] & m))
+ return 0;
+ ib = p->ib->ptr;
+ switch (reg) {
+ /* force the following regs to 0 in an attempt to disable the output
+ * buffer; we will need to understand how it works better before we
+ * can perform security checks on it (Jerome)
+ */
+ case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
+ case R_008C44_SQ_ESGS_RING_SIZE:
+ case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
+ case R_008C54_SQ_ESTMP_RING_SIZE:
+ case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
+ case R_008C74_SQ_FBUF_RING_SIZE:
+ case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
+ case R_008C5C_SQ_GSTMP_RING_SIZE:
+ case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
+ case R_008C4C_SQ_GSVS_RING_SIZE:
+ case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
+ case R_008C6C_SQ_PSTMP_RING_SIZE:
+ case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
+ case R_008C7C_SQ_REDUC_RING_SIZE:
+ case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
+ case R_008C64_SQ_VSTMP_RING_SIZE:
+ case R_0288C8_SQ_GS_VERT_ITEMSIZE:
+ /* get value to populate the IB don't remove */
+ tmp = radeon_get_ib_value(p, idx);
+ ib[idx] = 0;
+ break;
+ case R_028800_DB_DEPTH_CONTROL:
+ track->db_depth_control = radeon_get_ib_value(p, idx);
+ break;
+ case R_028010_DB_DEPTH_INFO:
+ track->db_depth_info = radeon_get_ib_value(p, idx);
+ break;
+ case R_028004_DB_DEPTH_VIEW:
+ track->db_depth_view = radeon_get_ib_value(p, idx);
+ break;
+ case R_028000_DB_DEPTH_SIZE:
+ track->db_depth_size = radeon_get_ib_value(p, idx);
+ track->db_depth_size_idx = idx;
+ break;
+ case R_028AB0_VGT_STRMOUT_EN:
+ track->vgt_strmout_en = radeon_get_ib_value(p, idx);
+ break;
+ case R_028B20_VGT_STRMOUT_BUFFER_EN:
+ track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
+ break;
+ case R_028238_CB_TARGET_MASK:
+ track->cb_target_mask = radeon_get_ib_value(p, idx);
+ break;
+ case R_02823C_CB_SHADER_MASK:
+ track->cb_shader_mask = radeon_get_ib_value(p, idx);
+ break;
+ case R_028C04_PA_SC_AA_CONFIG:
+ tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
+ track->nsamples = 1 << tmp;
+ break;
+ case R_0280A0_CB_COLOR0_INFO:
+ case R_0280A4_CB_COLOR1_INFO:
+ case R_0280A8_CB_COLOR2_INFO:
+ case R_0280AC_CB_COLOR3_INFO:
+ case R_0280B0_CB_COLOR4_INFO:
+ case R_0280B4_CB_COLOR5_INFO:
+ case R_0280B8_CB_COLOR6_INFO:
+ case R_0280BC_CB_COLOR7_INFO:
+ tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case R_028060_CB_COLOR0_SIZE:
+ case R_028064_CB_COLOR1_SIZE:
+ case R_028068_CB_COLOR2_SIZE:
+ case R_02806C_CB_COLOR3_SIZE:
+ case R_028070_CB_COLOR4_SIZE:
+ case R_028074_CB_COLOR5_SIZE:
+ case R_028078_CB_COLOR6_SIZE:
+ case R_02807C_CB_COLOR7_SIZE:
+ tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
+ track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_size_idx[tmp] = idx;
+ break;
+ /* These registers were added late; there is userspace
+ * which does provide relocations for them but sets a
+ * 0 offset. In order to avoid breaking old userspace
+ * we detect this and set the address to point to the
+ * last CB_COLOR0_BASE. Note that if userspace doesn't
+ * set CB_COLOR0_BASE before these registers we will
+ * report an error. Old userspace always sets
+ * CB_COLOR0_BASE before any of this.
+ */
+ case R_0280E0_CB_COLOR0_FRAG:
+ case R_0280E4_CB_COLOR1_FRAG:
+ case R_0280E8_CB_COLOR2_FRAG:
+ case R_0280EC_CB_COLOR3_FRAG:
+ case R_0280F0_CB_COLOR4_FRAG:
+ case R_0280F4_CB_COLOR5_FRAG:
+ case R_0280F8_CB_COLOR6_FRAG:
+ case R_0280FC_CB_COLOR7_FRAG:
+ tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
+ if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!track->cb_color_base_last[tmp]) {
+ dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] = track->cb_color_base_last[tmp];
+ printk_once(KERN_WARNING "You have old & broken userspace, "
+ "please consider updating mesa & xf86-video-ati\n");
+ track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
+ } else {
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_frag_bo[tmp] = reloc->robj;
+ }
+ break;
+ case R_0280C0_CB_COLOR0_TILE:
+ case R_0280C4_CB_COLOR1_TILE:
+ case R_0280C8_CB_COLOR2_TILE:
+ case R_0280CC_CB_COLOR3_TILE:
+ case R_0280D0_CB_COLOR4_TILE:
+ case R_0280D4_CB_COLOR5_TILE:
+ case R_0280D8_CB_COLOR6_TILE:
+ case R_0280DC_CB_COLOR7_TILE:
+ tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
+ if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!track->cb_color_base_last[tmp]) {
+ dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] = track->cb_color_base_last[tmp];
+ printk_once(KERN_WARNING "You have old & broken userspace, "
+ "please consider updating mesa & xf86-video-ati\n");
+ track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
+ } else {
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_tile_bo[tmp] = reloc->robj;
+ }
+ break;
+ case CB_COLOR0_BASE:
+ case CB_COLOR1_BASE:
+ case CB_COLOR2_BASE:
+ case CB_COLOR3_BASE:
+ case CB_COLOR4_BASE:
+ case CB_COLOR5_BASE:
+ case CB_COLOR6_BASE:
+ case CB_COLOR7_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - CB_COLOR0_BASE) / 4;
+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_base_last[tmp] = ib[idx];
+ track->cb_color_bo[tmp] = reloc->robj;
+ break;
+ case DB_DEPTH_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_bo = reloc->robj;
+ break;
+ case DB_HTILE_DATA_BASE:
+ case SQ_PGM_START_FS:
+ case SQ_PGM_START_ES:
+ case SQ_PGM_START_VS:
+ case SQ_PGM_START_GS:
+ case SQ_PGM_START_PS:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ default:
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
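
The bitmap lookup at the top packs one bit per 32-bit register: a set bit means the register needs the special handling in the switch, a clear bit means it is always safe. A sketch of the same query as a standalone helper (r600_reg_safe_bm comes from the generated r600_reg_safe.h):

static bool r600_reg_needs_check(u32 reg)
{
	u32 i = reg >> 7;		/* one u32 covers 128 bytes of regs */
	u32 m = 1 << ((reg >> 2) & 31);	/* one bit per dword register */

	if (i >= ARRAY_SIZE(r600_reg_safe_bm))
		return true;		/* out of range: treat as forbidden */
	return (r600_reg_safe_bm[i] & m) != 0;
}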
+
+static inline unsigned minify(unsigned size, unsigned levels)
+{
+ size = size >> levels;
+ if (size < 1)
+ size = 1;
+ return size;
+}
+
+static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
+ unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
+ unsigned *l0_size, unsigned *mipmap_size)
+{
+ unsigned offset, i, level, face;
+ unsigned width, height, depth, rowstride, size;
+
+ w0 = minify(w0, 0);
+ h0 = minify(h0, 0);
+ d0 = minify(d0, 0);
+ for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
+ width = minify(w0, i);
+ height = minify(h0, i);
+ depth = minify(d0, i);
+ for (face = 0; face < nfaces; face++) {
+ rowstride = ((width * bpe) + 255) & ~255;
+ size = height * rowstride * depth;
+ offset += size;
+ offset = (offset + 0x1f) & ~0x1f;
+ }
+ }
+ *l0_size = (((w0 * bpe) + 255) & ~255) * h0 * d0;
+ *mipmap_size = offset;
+ if (!blevel)
+ *mipmap_size -= *l0_size;
+ if (!nlevels)
+ *mipmap_size = *l0_size;
+}
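
A worked pass through r600_texture_size() under assumed inputs:

/* Hypothetical 2D texture, 100x64, bpe = 4, nfaces = 1, blevel = 0:
 * level 0: rowstride = ALIGN(100 * 4, 256) = 512, size = 64 * 512 = 32768
 * level 1: 50x32 -> rowstride = ALIGN(50 * 4, 256) = 256, size = 8192
 * level 2: 25x16 -> rowstride = 256, size = 4096
 * After each face the running offset is rounded up to 32 bytes;
 * l0_size is reported separately and subtracted when blevel == 0. */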
+
+/**
+ * r600_check_texture_resource() - check texture resource validity
+ * @p: parser structure holding parsing context
+ * @idx: index into the cs buffer
+ * @texture: texture's bo structure
+ * @mipmap: mipmap's bo structure
+ *
+ * This function will check that the resource has valid fields and that
+ * the texture and mipmap bo objects are big enough to cover this resource.
+ */
+static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
+ struct radeon_bo *texture,
+ struct radeon_bo *mipmap)
+{
+ u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
+ u32 word0, word1, l0_size, mipmap_size;
+
+ /* on legacy kernels we don't perform the advanced checks */
+ if (p->rdev == NULL)
+ return 0;
+ word0 = radeon_get_ib_value(p, idx + 0);
+ word1 = radeon_get_ib_value(p, idx + 1);
+ w0 = G_038000_TEX_WIDTH(word0) + 1;
+ h0 = G_038004_TEX_HEIGHT(word1) + 1;
+ d0 = G_038004_TEX_DEPTH(word1);
+ nfaces = 1;
+ switch (G_038000_DIM(word0)) {
+ case V_038000_SQ_TEX_DIM_1D:
+ case V_038000_SQ_TEX_DIM_2D:
+ case V_038000_SQ_TEX_DIM_3D:
+ break;
+ case V_038000_SQ_TEX_DIM_CUBEMAP:
+ nfaces = 6;
+ break;
+ case V_038000_SQ_TEX_DIM_1D_ARRAY:
+ case V_038000_SQ_TEX_DIM_2D_ARRAY:
+ case V_038000_SQ_TEX_DIM_2D_MSAA:
+ case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+ default:
+ dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
+ return -EINVAL;
+ }
+ if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) {
+ dev_warn(p->dev, "%s:%d texture invalid format %d\n",
+ __func__, __LINE__, G_038004_DATA_FORMAT(word1));
+ return -EINVAL;
+ }
+ word0 = radeon_get_ib_value(p, idx + 4);
+ word1 = radeon_get_ib_value(p, idx + 5);
+ blevel = G_038010_BASE_LEVEL(word0);
+ nlevels = G_038014_LAST_LEVEL(word1);
+ r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe, &l0_size, &mipmap_size);
+ /* using get ib will give us the offset into the texture bo */
+ word0 = radeon_get_ib_value(p, idx + 2);
+ if ((l0_size + word0) > radeon_bo_size(texture)) {
+ dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
+ w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
+ return -EINVAL;
+ }
+ /* using get ib will give us the offset into the mipmap bo */
+ word0 = radeon_get_ib_value(p, idx + 3);
+ if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
+ dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+ w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(mipmap));
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int r600_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
@@ -408,12 +1039,22 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ r = r600_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
break;
case PACKET3_DRAW_INDEX_AUTO:
if (pkt->count != 1) {
DRM_ERROR("bad DRAW_INDEX_AUTO\n");
return -EINVAL;
}
+ r = r600_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ return r;
+ }
break;
case PACKET3_DRAW_INDEX_IMMD_BE:
case PACKET3_DRAW_INDEX_IMMD:
@@ -421,6 +1062,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX_IMMD\n");
return -EINVAL;
}
+ r = r600_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
break;
case PACKET3_WAIT_REG_MEM:
if (pkt->count != 5) {
@@ -493,30 +1139,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
for (i = 0; i < pkt->count; i++) {
reg = start_reg + (4 * i);
- switch (reg) {
- case SQ_ESGS_RING_BASE:
- case SQ_GSVS_RING_BASE:
- case SQ_ESTMP_RING_BASE:
- case SQ_GSTMP_RING_BASE:
- case SQ_VSTMP_RING_BASE:
- case SQ_PSTMP_RING_BASE:
- case SQ_FBUF_RING_BASE:
- case SQ_REDUC_RING_BASE:
- case SX_MEMORY_EXPORT_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad SET_CONFIG_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- break;
- case CP_COHER_BASE:
- /* use PACKET3_SURFACE_SYNC */
- return -EINVAL;
- default:
- break;
- }
+ r = r600_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
}
break;
case PACKET3_SET_CONTEXT_REG:
@@ -530,106 +1155,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
for (i = 0; i < pkt->count; i++) {
reg = start_reg + (4 * i);
- switch (reg) {
- /* This register were added late, there is userspace
- * which does provide relocation for those but set
- * 0 offset. In order to avoid breaking old userspace
- * we detect this and set address to point to last
- * CB_COLOR0_BASE, note that if userspace doesn't set
- * CB_COLOR0_BASE before this register we will report
- * error. Old userspace always set CB_COLOR0_BASE
- * before any of this.
- */
- case R_0280E0_CB_COLOR0_FRAG:
- case R_0280E4_CB_COLOR1_FRAG:
- case R_0280E8_CB_COLOR2_FRAG:
- case R_0280EC_CB_COLOR3_FRAG:
- case R_0280F0_CB_COLOR4_FRAG:
- case R_0280F4_CB_COLOR5_FRAG:
- case R_0280F8_CB_COLOR6_FRAG:
- case R_0280FC_CB_COLOR7_FRAG:
- case R_0280C0_CB_COLOR0_TILE:
- case R_0280C4_CB_COLOR1_TILE:
- case R_0280C8_CB_COLOR2_TILE:
- case R_0280CC_CB_COLOR3_TILE:
- case R_0280D0_CB_COLOR4_TILE:
- case R_0280D4_CB_COLOR5_TILE:
- case R_0280D8_CB_COLOR6_TILE:
- case R_0280DC_CB_COLOR7_TILE:
- if (!r600_cs_packet_next_is_pkt3_nop(p)) {
- if (!track->cb_color0_base_last) {
- dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
- return -EINVAL;
- }
- ib[idx+1+i] = track->cb_color0_base_last;
- printk_once(KERN_WARNING "radeon: You have old & broken userspace "
- "please consider updating mesa & xf86-video-ati\n");
- } else {
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- }
- break;
- case DB_DEPTH_BASE:
- case DB_HTILE_DATA_BASE:
- case CB_COLOR0_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- track->cb_color0_base_last = ib[idx+1+i];
- break;
- case CB_COLOR1_BASE:
- case CB_COLOR2_BASE:
- case CB_COLOR3_BASE:
- case CB_COLOR4_BASE:
- case CB_COLOR5_BASE:
- case CB_COLOR6_BASE:
- case CB_COLOR7_BASE:
- case SQ_PGM_START_FS:
- case SQ_PGM_START_ES:
- case SQ_PGM_START_VS:
- case SQ_PGM_START_GS:
- case SQ_PGM_START_PS:
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- break;
- case VGT_DMA_BASE:
- case VGT_DMA_BASE_HI:
- /* These should be handled by DRAW_INDEX packet 3 */
- case VGT_STRMOUT_BASE_OFFSET_0:
- case VGT_STRMOUT_BASE_OFFSET_1:
- case VGT_STRMOUT_BASE_OFFSET_2:
- case VGT_STRMOUT_BASE_OFFSET_3:
- case VGT_STRMOUT_BASE_OFFSET_HI_0:
- case VGT_STRMOUT_BASE_OFFSET_HI_1:
- case VGT_STRMOUT_BASE_OFFSET_HI_2:
- case VGT_STRMOUT_BASE_OFFSET_HI_3:
- case VGT_STRMOUT_BUFFER_BASE_0:
- case VGT_STRMOUT_BUFFER_BASE_1:
- case VGT_STRMOUT_BUFFER_BASE_2:
- case VGT_STRMOUT_BUFFER_BASE_3:
- case VGT_STRMOUT_BUFFER_OFFSET_0:
- case VGT_STRMOUT_BUFFER_OFFSET_1:
- case VGT_STRMOUT_BUFFER_OFFSET_2:
- case VGT_STRMOUT_BUFFER_OFFSET_3:
- /* These should be handled by STRMOUT_BUFFER packet 3 */
- DRM_ERROR("bad context reg: 0x%08x\n", reg);
- return -EINVAL;
- default:
- break;
- }
+ r = r600_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
}
break;
case PACKET3_SET_RESOURCE:
@@ -646,6 +1174,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
for (i = 0; i < (pkt->count / 7); i++) {
+ struct radeon_bo *texture, *mipmap;
+ u32 size, offset;
+
switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
case SQ_TEX_VTX_VALID_TEXTURE:
/* tex base */
@@ -655,6 +1186,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ texture = reloc->robj;
/* tex mip base */
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -662,6 +1194,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ mipmap = reloc->robj;
+ r = r600_check_texture_resource(p, idx+(i*7)+1,
+ texture, mipmap);
+ if (r)
+ return r;
break;
case SQ_TEX_VTX_VALID_BUFFER:
/* vtx base */
@@ -670,6 +1207,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
}
+ offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
+ size = radeon_get_ib_value(p, idx+1+(i*7)+1);
+ if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
+ /* force size to size of the buffer */
+ dev_warn(p->dev, "vbo resource seems too big for the bo\n");
+ ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
+ }
ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
break;
@@ -760,11 +1304,28 @@ int r600_cs_parse(struct radeon_cs_parser *p)
struct r600_cs_track *track;
int r;
- track = kzalloc(sizeof(*track), GFP_KERNEL);
- p->track = track;
+ if (p->track == NULL) {
+ /* initialize tracker, we are in kms */
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (track == NULL)
+ return -ENOMEM;
+ r600_cs_track_init(track);
+ if (p->rdev->family < CHIP_RV770) {
+ track->npipes = p->rdev->config.r600.tiling_npipes;
+ track->nbanks = p->rdev->config.r600.tiling_nbanks;
+ track->group_size = p->rdev->config.r600.tiling_group_size;
+ } else if (p->rdev->family <= CHIP_RV740) {
+ track->npipes = p->rdev->config.rv770.tiling_npipes;
+ track->nbanks = p->rdev->config.rv770.tiling_nbanks;
+ track->group_size = p->rdev->config.rv770.tiling_group_size;
+ }
+ p->track = track;
+ }
do {
r = r600_cs_packet_parse(p, &pkt, p->idx);
if (r) {
+ kfree(p->track);
+ p->track = NULL;
return r;
}
p->idx += pkt.count + 2;
@@ -779,9 +1340,13 @@ int r600_cs_parse(struct radeon_cs_parser *p)
break;
default:
DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ kfree(p->track);
+ p->track = NULL;
return -EINVAL;
}
if (r) {
+ kfree(p->track);
+ p->track = NULL;
return r;
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
@@ -791,6 +1356,8 @@ int r600_cs_parse(struct radeon_cs_parser *p)
mdelay(1);
}
#endif
+ kfree(p->track);
+ p->track = NULL;
return 0;
}
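
One subtlety worth spelling out: p->track now has two possible owners. A sketch of the lifetime after this change:

/* KMS:  r600_cs_parse() sees p->track == NULL, allocates the tracker
 *       and seeds npipes/nbanks/group_size from rdev->config.
 * UMS:  r600_cs_legacy() allocates it up front and seeds it via
 *       r600_cs_legacy_get_tiling_conf(); parser.track carries it in.
 * Both: r600_cs_parse() kfree()s it and clears p->track on every
 *       return path, success or error. */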
@@ -833,9 +1400,16 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
{
struct radeon_cs_parser parser;
struct radeon_cs_chunk *ib_chunk;
- struct radeon_ib fake_ib;
+ struct radeon_ib fake_ib;
+ struct r600_cs_track *track;
int r;
+ /* initialize tracker */
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (track == NULL)
+ return -ENOMEM;
+ r600_cs_track_init(track);
+ r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
@@ -843,6 +1417,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
parser.rdev = NULL;
parser.family = family;
parser.ib = &fake_ib;
+ parser.track = track;
fake_ib.ptr = ib;
r = radeon_cs_parser_init(&parser, data);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 3048088..5b2e4d4 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -883,6 +883,16 @@
#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
+#define R_028C04_PA_SC_AA_CONFIG 0x028C04
+#define S_028C04_MSAA_NUM_SAMPLES(x) (((x) & 0x3) << 0)
+#define G_028C04_MSAA_NUM_SAMPLES(x) (((x) >> 0) & 0x3)
+#define C_028C04_MSAA_NUM_SAMPLES 0xFFFFFFFC
+#define S_028C04_AA_MASK_CENTROID_DTMN(x) (((x) & 0x1) << 4)
+#define G_028C04_AA_MASK_CENTROID_DTMN(x) (((x) >> 4) & 0x1)
+#define C_028C04_AA_MASK_CENTROID_DTMN 0xFFFFFFEF
+#define S_028C04_MAX_SAMPLE_DIST(x) (((x) & 0xF) << 13)
+#define G_028C04_MAX_SAMPLE_DIST(x) (((x) >> 13) & 0xF)
+#define C_028C04_MAX_SAMPLE_DIST 0xFFFE1FFF
#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
@@ -905,6 +915,461 @@
#define R_0280D4_CB_COLOR5_TILE 0x0280D4
#define R_0280D8_CB_COLOR6_TILE 0x0280D8
#define R_0280DC_CB_COLOR7_TILE 0x0280DC
-
+#define R_0280A0_CB_COLOR0_INFO 0x0280A0
+#define S_0280A0_ENDIAN(x) (((x) & 0x3) << 0)
+#define G_0280A0_ENDIAN(x) (((x) >> 0) & 0x3)
+#define C_0280A0_ENDIAN 0xFFFFFFFC
+#define S_0280A0_FORMAT(x) (((x) & 0x3F) << 2)
+#define G_0280A0_FORMAT(x) (((x) >> 2) & 0x3F)
+#define C_0280A0_FORMAT 0xFFFFFF03
+#define V_0280A0_COLOR_INVALID 0x00000000
+#define V_0280A0_COLOR_8 0x00000001
+#define V_0280A0_COLOR_4_4 0x00000002
+#define V_0280A0_COLOR_3_3_2 0x00000003
+#define V_0280A0_COLOR_16 0x00000005
+#define V_0280A0_COLOR_16_FLOAT 0x00000006
+#define V_0280A0_COLOR_8_8 0x00000007
+#define V_0280A0_COLOR_5_6_5 0x00000008
+#define V_0280A0_COLOR_6_5_5 0x00000009
+#define V_0280A0_COLOR_1_5_5_5 0x0000000A
+#define V_0280A0_COLOR_4_4_4_4 0x0000000B
+#define V_0280A0_COLOR_5_5_5_1 0x0000000C
+#define V_0280A0_COLOR_32 0x0000000D
+#define V_0280A0_COLOR_32_FLOAT 0x0000000E
+#define V_0280A0_COLOR_16_16 0x0000000F
+#define V_0280A0_COLOR_16_16_FLOAT 0x00000010
+#define V_0280A0_COLOR_8_24 0x00000011
+#define V_0280A0_COLOR_8_24_FLOAT 0x00000012
+#define V_0280A0_COLOR_24_8 0x00000013
+#define V_0280A0_COLOR_24_8_FLOAT 0x00000014
+#define V_0280A0_COLOR_10_11_11 0x00000015
+#define V_0280A0_COLOR_10_11_11_FLOAT 0x00000016
+#define V_0280A0_COLOR_11_11_10 0x00000017
+#define V_0280A0_COLOR_11_11_10_FLOAT 0x00000018
+#define V_0280A0_COLOR_2_10_10_10 0x00000019
+#define V_0280A0_COLOR_8_8_8_8 0x0000001A
+#define V_0280A0_COLOR_10_10_10_2 0x0000001B
+#define V_0280A0_COLOR_X24_8_32_FLOAT 0x0000001C
+#define V_0280A0_COLOR_32_32 0x0000001D
+#define V_0280A0_COLOR_32_32_FLOAT 0x0000001E
+#define V_0280A0_COLOR_16_16_16_16 0x0000001F
+#define V_0280A0_COLOR_16_16_16_16_FLOAT 0x00000020
+#define V_0280A0_COLOR_32_32_32_32 0x00000022
+#define V_0280A0_COLOR_32_32_32_32_FLOAT 0x00000023
+#define S_0280A0_ARRAY_MODE(x) (((x) & 0xF) << 8)
+#define G_0280A0_ARRAY_MODE(x) (((x) >> 8) & 0xF)
+#define C_0280A0_ARRAY_MODE 0xFFFFF0FF
+#define V_0280A0_ARRAY_LINEAR_GENERAL 0x00000000
+#define V_0280A0_ARRAY_LINEAR_ALIGNED 0x00000001
+#define V_0280A0_ARRAY_1D_TILED_THIN1 0x00000002
+#define V_0280A0_ARRAY_2D_TILED_THIN1 0x00000004
+#define S_0280A0_NUMBER_TYPE(x) (((x) & 0x7) << 12)
+#define G_0280A0_NUMBER_TYPE(x) (((x) >> 12) & 0x7)
+#define C_0280A0_NUMBER_TYPE 0xFFFF8FFF
+#define S_0280A0_READ_SIZE(x) (((x) & 0x1) << 15)
+#define G_0280A0_READ_SIZE(x) (((x) >> 15) & 0x1)
+#define C_0280A0_READ_SIZE 0xFFFF7FFF
+#define S_0280A0_COMP_SWAP(x) (((x) & 0x3) << 16)
+#define G_0280A0_COMP_SWAP(x) (((x) >> 16) & 0x3)
+#define C_0280A0_COMP_SWAP 0xFFFCFFFF
+#define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18)
+#define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3)
+#define C_0280A0_TILE_MODE 0xFFF3FFFF
+#define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20)
+#define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1)
+#define C_0280A0_BLEND_CLAMP 0xFFEFFFFF
+#define S_0280A0_CLEAR_COLOR(x) (((x) & 0x1) << 21)
+#define G_0280A0_CLEAR_COLOR(x) (((x) >> 21) & 0x1)
+#define C_0280A0_CLEAR_COLOR 0xFFDFFFFF
+#define S_0280A0_BLEND_BYPASS(x) (((x) & 0x1) << 22)
+#define G_0280A0_BLEND_BYPASS(x) (((x) >> 22) & 0x1)
+#define C_0280A0_BLEND_BYPASS 0xFFBFFFFF
+#define S_0280A0_BLEND_FLOAT32(x) (((x) & 0x1) << 23)
+#define G_0280A0_BLEND_FLOAT32(x) (((x) >> 23) & 0x1)
+#define C_0280A0_BLEND_FLOAT32 0xFF7FFFFF
+#define S_0280A0_SIMPLE_FLOAT(x) (((x) & 0x1) << 24)
+#define G_0280A0_SIMPLE_FLOAT(x) (((x) >> 24) & 0x1)
+#define C_0280A0_SIMPLE_FLOAT 0xFEFFFFFF
+#define S_0280A0_ROUND_MODE(x) (((x) & 0x1) << 25)
+#define G_0280A0_ROUND_MODE(x) (((x) >> 25) & 0x1)
+#define C_0280A0_ROUND_MODE 0xFDFFFFFF
+#define S_0280A0_TILE_COMPACT(x) (((x) & 0x1) << 26)
+#define G_0280A0_TILE_COMPACT(x) (((x) >> 26) & 0x1)
+#define C_0280A0_TILE_COMPACT 0xFBFFFFFF
+#define S_0280A0_SOURCE_FORMAT(x) (((x) & 0x1) << 27)
+#define G_0280A0_SOURCE_FORMAT(x) (((x) >> 27) & 0x1)
+#define C_0280A0_SOURCE_FORMAT 0xF7FFFFFF
+#define R_0280A4_CB_COLOR1_INFO 0x0280A4
+#define R_0280A8_CB_COLOR2_INFO 0x0280A8
+#define R_0280AC_CB_COLOR3_INFO 0x0280AC
+#define R_0280B0_CB_COLOR4_INFO 0x0280B0
+#define R_0280B4_CB_COLOR5_INFO 0x0280B4
+#define R_0280B8_CB_COLOR6_INFO 0x0280B8
+#define R_0280BC_CB_COLOR7_INFO 0x0280BC
+#define R_028060_CB_COLOR0_SIZE 0x028060
+#define S_028060_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
+#define G_028060_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
+#define C_028060_PITCH_TILE_MAX 0xFFFFFC00
+#define S_028060_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
+#define G_028060_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
+#define C_028060_SLICE_TILE_MAX 0xC00003FF
+#define R_028064_CB_COLOR1_SIZE 0x028064
+#define R_028068_CB_COLOR2_SIZE 0x028068
+#define R_02806C_CB_COLOR3_SIZE 0x02806C
+#define R_028070_CB_COLOR4_SIZE 0x028070
+#define R_028074_CB_COLOR5_SIZE 0x028074
+#define R_028078_CB_COLOR6_SIZE 0x028078
+#define R_02807C_CB_COLOR7_SIZE 0x02807C
+#define R_028238_CB_TARGET_MASK 0x028238
+#define S_028238_TARGET0_ENABLE(x) (((x) & 0xF) << 0)
+#define G_028238_TARGET0_ENABLE(x) (((x) >> 0) & 0xF)
+#define C_028238_TARGET0_ENABLE 0xFFFFFFF0
+#define S_028238_TARGET1_ENABLE(x) (((x) & 0xF) << 4)
+#define G_028238_TARGET1_ENABLE(x) (((x) >> 4) & 0xF)
+#define C_028238_TARGET1_ENABLE 0xFFFFFF0F
+#define S_028238_TARGET2_ENABLE(x) (((x) & 0xF) << 8)
+#define G_028238_TARGET2_ENABLE(x) (((x) >> 8) & 0xF)
+#define C_028238_TARGET2_ENABLE 0xFFFFF0FF
+#define S_028238_TARGET3_ENABLE(x) (((x) & 0xF) << 12)
+#define G_028238_TARGET3_ENABLE(x) (((x) >> 12) & 0xF)
+#define C_028238_TARGET3_ENABLE 0xFFFF0FFF
+#define S_028238_TARGET4_ENABLE(x) (((x) & 0xF) << 16)
+#define G_028238_TARGET4_ENABLE(x) (((x) >> 16) & 0xF)
+#define C_028238_TARGET4_ENABLE 0xFFF0FFFF
+#define S_028238_TARGET5_ENABLE(x) (((x) & 0xF) << 20)
+#define G_028238_TARGET5_ENABLE(x) (((x) >> 20) & 0xF)
+#define C_028238_TARGET5_ENABLE 0xFF0FFFFF
+#define S_028238_TARGET6_ENABLE(x) (((x) & 0xF) << 24)
+#define G_028238_TARGET6_ENABLE(x) (((x) >> 24) & 0xF)
+#define C_028238_TARGET6_ENABLE 0xF0FFFFFF
+#define S_028238_TARGET7_ENABLE(x) (((x) & 0xF) << 28)
+#define G_028238_TARGET7_ENABLE(x) (((x) >> 28) & 0xF)
+#define C_028238_TARGET7_ENABLE 0x0FFFFFFF
+#define R_02823C_CB_SHADER_MASK 0x02823C
+#define S_02823C_OUTPUT0_ENABLE(x) (((x) & 0xF) << 0)
+#define G_02823C_OUTPUT0_ENABLE(x) (((x) >> 0) & 0xF)
+#define C_02823C_OUTPUT0_ENABLE 0xFFFFFFF0
+#define S_02823C_OUTPUT1_ENABLE(x) (((x) & 0xF) << 4)
+#define G_02823C_OUTPUT1_ENABLE(x) (((x) >> 4) & 0xF)
+#define C_02823C_OUTPUT1_ENABLE 0xFFFFFF0F
+#define S_02823C_OUTPUT2_ENABLE(x) (((x) & 0xF) << 8)
+#define G_02823C_OUTPUT2_ENABLE(x) (((x) >> 8) & 0xF)
+#define C_02823C_OUTPUT2_ENABLE 0xFFFFF0FF
+#define S_02823C_OUTPUT3_ENABLE(x) (((x) & 0xF) << 12)
+#define G_02823C_OUTPUT3_ENABLE(x) (((x) >> 12) & 0xF)
+#define C_02823C_OUTPUT3_ENABLE 0xFFFF0FFF
+#define S_02823C_OUTPUT4_ENABLE(x) (((x) & 0xF) << 16)
+#define G_02823C_OUTPUT4_ENABLE(x) (((x) >> 16) & 0xF)
+#define C_02823C_OUTPUT4_ENABLE 0xFFF0FFFF
+#define S_02823C_OUTPUT5_ENABLE(x) (((x) & 0xF) << 20)
+#define G_02823C_OUTPUT5_ENABLE(x) (((x) >> 20) & 0xF)
+#define C_02823C_OUTPUT5_ENABLE 0xFF0FFFFF
+#define S_02823C_OUTPUT6_ENABLE(x) (((x) & 0xF) << 24)
+#define G_02823C_OUTPUT6_ENABLE(x) (((x) >> 24) & 0xF)
+#define C_02823C_OUTPUT6_ENABLE 0xF0FFFFFF
+#define S_02823C_OUTPUT7_ENABLE(x) (((x) & 0xF) << 28)
+#define G_02823C_OUTPUT7_ENABLE(x) (((x) >> 28) & 0xF)
+#define C_02823C_OUTPUT7_ENABLE 0x0FFFFFFF
+#define R_028AB0_VGT_STRMOUT_EN 0x028AB0
+#define S_028AB0_STREAMOUT(x) (((x) & 0x1) << 0)
+#define G_028AB0_STREAMOUT(x) (((x) >> 0) & 0x1)
+#define C_028AB0_STREAMOUT 0xFFFFFFFE
+#define R_028B20_VGT_STRMOUT_BUFFER_EN 0x028B20
+#define S_028B20_BUFFER_0_EN(x) (((x) & 0x1) << 0)
+#define G_028B20_BUFFER_0_EN(x) (((x) >> 0) & 0x1)
+#define C_028B20_BUFFER_0_EN 0xFFFFFFFE
+#define S_028B20_BUFFER_1_EN(x) (((x) & 0x1) << 1)
+#define G_028B20_BUFFER_1_EN(x) (((x) >> 1) & 0x1)
+#define C_028B20_BUFFER_1_EN 0xFFFFFFFD
+#define S_028B20_BUFFER_2_EN(x) (((x) & 0x1) << 2)
+#define G_028B20_BUFFER_2_EN(x) (((x) >> 2) & 0x1)
+#define C_028B20_BUFFER_2_EN 0xFFFFFFFB
+#define S_028B20_BUFFER_3_EN(x) (((x) & 0x1) << 3)
+#define G_028B20_BUFFER_3_EN(x) (((x) >> 3) & 0x1)
+#define C_028B20_BUFFER_3_EN 0xFFFFFFF7
+#define S_028B20_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_028B20_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_028B20_SIZE 0x00000000
+#define R_038000_SQ_TEX_RESOURCE_WORD0_0 0x038000
+#define S_038000_DIM(x) (((x) & 0x7) << 0)
+#define G_038000_DIM(x) (((x) >> 0) & 0x7)
+#define C_038000_DIM 0xFFFFFFF8
+#define V_038000_SQ_TEX_DIM_1D 0x00000000
+#define V_038000_SQ_TEX_DIM_2D 0x00000001
+#define V_038000_SQ_TEX_DIM_3D 0x00000002
+#define V_038000_SQ_TEX_DIM_CUBEMAP 0x00000003
+#define V_038000_SQ_TEX_DIM_1D_ARRAY 0x00000004
+#define V_038000_SQ_TEX_DIM_2D_ARRAY 0x00000005
+#define V_038000_SQ_TEX_DIM_2D_MSAA 0x00000006
+#define V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007
+#define S_038000_TILE_MODE(x) (((x) & 0xF) << 3)
+#define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF)
+#define C_038000_TILE_MODE 0xFFFFFF87
+#define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7)
+#define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1)
+#define C_038000_TILE_TYPE 0xFFFFFF7F
+#define S_038000_PITCH(x) (((x) & 0x7FF) << 8)
+#define G_038000_PITCH(x) (((x) >> 8) & 0x7FF)
+#define C_038000_PITCH 0xFFF800FF
+#define S_038000_TEX_WIDTH(x) (((x) & 0x1FFF) << 19)
+#define G_038000_TEX_WIDTH(x) (((x) >> 19) & 0x1FFF)
+#define C_038000_TEX_WIDTH 0x0007FFFF
+#define R_038004_SQ_TEX_RESOURCE_WORD1_0 0x038004
+#define S_038004_TEX_HEIGHT(x) (((x) & 0x1FFF) << 0)
+#define G_038004_TEX_HEIGHT(x) (((x) >> 0) & 0x1FFF)
+#define C_038004_TEX_HEIGHT 0xFFFFE000
+#define S_038004_TEX_DEPTH(x) (((x) & 0x1FFF) << 13)
+#define G_038004_TEX_DEPTH(x) (((x) >> 13) & 0x1FFF)
+#define C_038004_TEX_DEPTH 0xFC001FFF
+#define S_038004_DATA_FORMAT(x) (((x) & 0x3F) << 26)
+#define G_038004_DATA_FORMAT(x) (((x) >> 26) & 0x3F)
+#define C_038004_DATA_FORMAT 0x03FFFFFF
+#define V_038004_COLOR_INVALID 0x00000000
+#define V_038004_COLOR_8 0x00000001
+#define V_038004_COLOR_4_4 0x00000002
+#define V_038004_COLOR_3_3_2 0x00000003
+#define V_038004_COLOR_16 0x00000005
+#define V_038004_COLOR_16_FLOAT 0x00000006
+#define V_038004_COLOR_8_8 0x00000007
+#define V_038004_COLOR_5_6_5 0x00000008
+#define V_038004_COLOR_6_5_5 0x00000009
+#define V_038004_COLOR_1_5_5_5 0x0000000A
+#define V_038004_COLOR_4_4_4_4 0x0000000B
+#define V_038004_COLOR_5_5_5_1 0x0000000C
+#define V_038004_COLOR_32 0x0000000D
+#define V_038004_COLOR_32_FLOAT 0x0000000E
+#define V_038004_COLOR_16_16 0x0000000F
+#define V_038004_COLOR_16_16_FLOAT 0x00000010
+#define V_038004_COLOR_8_24 0x00000011
+#define V_038004_COLOR_8_24_FLOAT 0x00000012
+#define V_038004_COLOR_24_8 0x00000013
+#define V_038004_COLOR_24_8_FLOAT 0x00000014
+#define V_038004_COLOR_10_11_11 0x00000015
+#define V_038004_COLOR_10_11_11_FLOAT 0x00000016
+#define V_038004_COLOR_11_11_10 0x00000017
+#define V_038004_COLOR_11_11_10_FLOAT 0x00000018
+#define V_038004_COLOR_2_10_10_10 0x00000019
+#define V_038004_COLOR_8_8_8_8 0x0000001A
+#define V_038004_COLOR_10_10_10_2 0x0000001B
+#define V_038004_COLOR_X24_8_32_FLOAT 0x0000001C
+#define V_038004_COLOR_32_32 0x0000001D
+#define V_038004_COLOR_32_32_FLOAT 0x0000001E
+#define V_038004_COLOR_16_16_16_16 0x0000001F
+#define V_038004_COLOR_16_16_16_16_FLOAT 0x00000020
+#define V_038004_COLOR_32_32_32_32 0x00000022
+#define V_038004_COLOR_32_32_32_32_FLOAT 0x00000023
+#define V_038004_FMT_1 0x00000025
+#define V_038004_FMT_GB_GR 0x00000027
+#define V_038004_FMT_BG_RG 0x00000028
+#define V_038004_FMT_32_AS_8 0x00000029
+#define V_038004_FMT_32_AS_8_8 0x0000002A
+#define V_038004_FMT_5_9_9_9_SHAREDEXP 0x0000002B
+#define V_038004_FMT_8_8_8 0x0000002C
+#define V_038004_FMT_16_16_16 0x0000002D
+#define V_038004_FMT_16_16_16_FLOAT 0x0000002E
+#define V_038004_FMT_32_32_32 0x0000002F
+#define V_038004_FMT_32_32_32_FLOAT 0x00000030
+#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
+#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
+#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
+#define C_038010_FORMAT_COMP_X 0xFFFFFFFC
+#define S_038010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2)
+#define G_038010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3)
+#define C_038010_FORMAT_COMP_Y 0xFFFFFFF3
+#define S_038010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4)
+#define G_038010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3)
+#define C_038010_FORMAT_COMP_Z 0xFFFFFFCF
+#define S_038010_FORMAT_COMP_W(x) (((x) & 0x3) << 6)
+#define G_038010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3)
+#define C_038010_FORMAT_COMP_W 0xFFFFFF3F
+#define S_038010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8)
+#define G_038010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3)
+#define C_038010_NUM_FORMAT_ALL 0xFFFFFCFF
+#define S_038010_SRF_MODE_ALL(x) (((x) & 0x1) << 10)
+#define G_038010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1)
+#define C_038010_SRF_MODE_ALL 0xFFFFFBFF
+#define S_038010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11)
+#define G_038010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1)
+#define C_038010_FORCE_DEGAMMA 0xFFFFF7FF
+#define S_038010_ENDIAN_SWAP(x) (((x) & 0x3) << 12)
+#define G_038010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3)
+#define C_038010_ENDIAN_SWAP 0xFFFFCFFF
+#define S_038010_REQUEST_SIZE(x) (((x) & 0x3) << 14)
+#define G_038010_REQUEST_SIZE(x) (((x) >> 14) & 0x3)
+#define C_038010_REQUEST_SIZE 0xFFFF3FFF
+#define S_038010_DST_SEL_X(x) (((x) & 0x7) << 16)
+#define G_038010_DST_SEL_X(x) (((x) >> 16) & 0x7)
+#define C_038010_DST_SEL_X 0xFFF8FFFF
+#define S_038010_DST_SEL_Y(x) (((x) & 0x7) << 19)
+#define G_038010_DST_SEL_Y(x) (((x) >> 19) & 0x7)
+#define C_038010_DST_SEL_Y 0xFFC7FFFF
+#define S_038010_DST_SEL_Z(x) (((x) & 0x7) << 22)
+#define G_038010_DST_SEL_Z(x) (((x) >> 22) & 0x7)
+#define C_038010_DST_SEL_Z 0xFE3FFFFF
+#define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25)
+#define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7)
+#define C_038010_DST_SEL_W 0xF1FFFFFF
+#define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28)
+#define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
+#define C_038010_BASE_LEVEL 0x0FFFFFFF
+#define R_038014_SQ_TEX_RESOURCE_WORD5_0 0x038014
+#define S_038014_LAST_LEVEL(x) (((x) & 0xF) << 0)
+#define G_038014_LAST_LEVEL(x) (((x) >> 0) & 0xF)
+#define C_038014_LAST_LEVEL 0xFFFFFFF0
+#define S_038014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4)
+#define G_038014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF)
+#define C_038014_BASE_ARRAY 0xFFFE000F
+#define S_038014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17)
+#define G_038014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF)
+#define C_038014_LAST_ARRAY 0xC001FFFF
+#define R_0288A8_SQ_ESGS_RING_ITEMSIZE 0x0288A8
+#define S_0288A8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288A8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288A8_ITEMSIZE 0xFFFF8000
+#define R_008C44_SQ_ESGS_RING_SIZE 0x008C44
+#define S_008C44_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C44_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C44_MEM_SIZE 0x00000000
+#define R_0288B0_SQ_ESTMP_RING_ITEMSIZE 0x0288B0
+#define S_0288B0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288B0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288B0_ITEMSIZE 0xFFFF8000
+#define R_008C54_SQ_ESTMP_RING_SIZE 0x008C54
+#define S_008C54_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C54_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C54_MEM_SIZE 0x00000000
+#define R_0288C0_SQ_FBUF_RING_ITEMSIZE 0x0288C0
+#define S_0288C0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288C0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288C0_ITEMSIZE 0xFFFF8000
+#define R_008C74_SQ_FBUF_RING_SIZE 0x008C74
+#define S_008C74_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C74_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C74_MEM_SIZE 0x00000000
+#define R_0288B4_SQ_GSTMP_RING_ITEMSIZE 0x0288B4
+#define S_0288B4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288B4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288B4_ITEMSIZE 0xFFFF8000
+#define R_008C5C_SQ_GSTMP_RING_SIZE 0x008C5C
+#define S_008C5C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C5C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C5C_MEM_SIZE 0x00000000
+#define R_0288AC_SQ_GSVS_RING_ITEMSIZE 0x0288AC
+#define S_0288AC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288AC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288AC_ITEMSIZE 0xFFFF8000
+#define R_008C4C_SQ_GSVS_RING_SIZE 0x008C4C
+#define S_008C4C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C4C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C4C_MEM_SIZE 0x00000000
+#define R_0288BC_SQ_PSTMP_RING_ITEMSIZE 0x0288BC
+#define S_0288BC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288BC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288BC_ITEMSIZE 0xFFFF8000
+#define R_008C6C_SQ_PSTMP_RING_SIZE 0x008C6C
+#define S_008C6C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C6C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C6C_MEM_SIZE 0x00000000
+#define R_0288C4_SQ_REDUC_RING_ITEMSIZE 0x0288C4
+#define S_0288C4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288C4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288C4_ITEMSIZE 0xFFFF8000
+#define R_008C7C_SQ_REDUC_RING_SIZE 0x008C7C
+#define S_008C7C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C7C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C7C_MEM_SIZE 0x00000000
+#define R_0288B8_SQ_VSTMP_RING_ITEMSIZE 0x0288B8
+#define S_0288B8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288B8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288B8_ITEMSIZE 0xFFFF8000
+#define R_008C64_SQ_VSTMP_RING_SIZE 0x008C64
+#define S_008C64_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C64_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C64_MEM_SIZE 0x00000000
+#define R_0288C8_SQ_GS_VERT_ITEMSIZE 0x0288C8
+#define S_0288C8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288C8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288C8_ITEMSIZE 0xFFFF8000
+#define R_028010_DB_DEPTH_INFO 0x028010
+#define S_028010_FORMAT(x) (((x) & 0x7) << 0)
+#define G_028010_FORMAT(x) (((x) >> 0) & 0x7)
+#define C_028010_FORMAT 0xFFFFFFF8
+#define V_028010_DEPTH_INVALID 0x00000000
+#define V_028010_DEPTH_16 0x00000001
+#define V_028010_DEPTH_X8_24 0x00000002
+#define V_028010_DEPTH_8_24 0x00000003
+#define V_028010_DEPTH_X8_24_FLOAT 0x00000004
+#define V_028010_DEPTH_8_24_FLOAT 0x00000005
+#define V_028010_DEPTH_32_FLOAT 0x00000006
+#define V_028010_DEPTH_X24_8_32_FLOAT 0x00000007
+#define S_028010_READ_SIZE(x) (((x) & 0x1) << 3)
+#define G_028010_READ_SIZE(x) (((x) >> 3) & 0x1)
+#define C_028010_READ_SIZE 0xFFFFFFF7
+#define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15)
+#define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF)
+#define C_028010_ARRAY_MODE 0xFFF87FFF
+#define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25)
+#define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1)
+#define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF
+#define S_028010_TILE_COMPACT(x) (((x) & 0x1) << 26)
+#define G_028010_TILE_COMPACT(x) (((x) >> 26) & 0x1)
+#define C_028010_TILE_COMPACT 0xFBFFFFFF
+#define S_028010_ZRANGE_PRECISION(x) (((x) & 0x1) << 31)
+#define G_028010_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1)
+#define C_028010_ZRANGE_PRECISION 0x7FFFFFFF
+#define R_028000_DB_DEPTH_SIZE 0x028000
+#define S_028000_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
+#define G_028000_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
+#define C_028000_PITCH_TILE_MAX 0xFFFFFC00
+#define S_028000_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
+#define G_028000_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
+#define C_028000_SLICE_TILE_MAX 0xC00003FF
+#define R_028004_DB_DEPTH_VIEW 0x028004
+#define S_028004_SLICE_START(x) (((x) & 0x7FF) << 0)
+#define G_028004_SLICE_START(x) (((x) >> 0) & 0x7FF)
+#define C_028004_SLICE_START 0xFFFFF800
+#define S_028004_SLICE_MAX(x) (((x) & 0x7FF) << 13)
+#define G_028004_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
+#define C_028004_SLICE_MAX 0xFF001FFF
+#define R_028800_DB_DEPTH_CONTROL 0x028800
+#define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0)
+#define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1)
+#define C_028800_STENCIL_ENABLE 0xFFFFFFFE
+#define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1)
+#define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1)
+#define C_028800_Z_ENABLE 0xFFFFFFFD
+#define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2)
+#define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1)
+#define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB
+#define S_028800_ZFUNC(x) (((x) & 0x7) << 4)
+#define G_028800_ZFUNC(x) (((x) >> 4) & 0x7)
+#define C_028800_ZFUNC 0xFFFFFF8F
+#define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7)
+#define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1)
+#define C_028800_BACKFACE_ENABLE 0xFFFFFF7F
+#define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8)
+#define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7)
+#define C_028800_STENCILFUNC 0xFFFFF8FF
+#define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11)
+#define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7)
+#define C_028800_STENCILFAIL 0xFFFFC7FF
+#define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14)
+#define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7)
+#define C_028800_STENCILZPASS 0xFFFE3FFF
+#define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17)
+#define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7)
+#define C_028800_STENCILZFAIL 0xFFF1FFFF
+#define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20)
+#define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7)
+#define C_028800_STENCILFUNC_BF 0xFF8FFFFF
+#define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23)
+#define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7)
+#define C_028800_STENCILFAIL_BF 0xFC7FFFFF
+#define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26)
+#define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7)
+#define C_028800_STENCILZPASS_BF 0xE3FFFFFF
+#define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29)
+#define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7)
+#define C_028800_STENCILZFAIL_BF 0x1FFFFFFF
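+
+/*
+ * Usage sketch for the field helpers above ('zfunc' is a hypothetical
+ * value): S_* shifts a value into its field, C_* is the complement mask
+ * that clears the field, and G_* extracts it again, e.g. a
+ * read-modify-write of ZFUNC in DB_DEPTH_CONTROL:
+ *
+ * tmp = RREG32(R_028800_DB_DEPTH_CONTROL);
+ * tmp &= C_028800_ZFUNC;
+ * tmp |= S_028800_ZFUNC(zfunc);
+ * WREG32(R_028800_DB_DEPTH_CONTROL, tmp);
+ */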
#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c0356bb..829e26e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -89,6 +89,7 @@ extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_new_pll;
+extern int radeon_dynpm;
extern int radeon_audio;
/*
@@ -118,6 +119,21 @@ struct radeon_device;
/*
* BIOS.
*/
+#define ATRM_BIOS_PAGE 4096
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_atrm_supported(struct pci_dev *pdev);
+int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
+#else
+static inline bool radeon_atrm_supported(struct pci_dev *pdev)
+{
+ return false;
+}
+
+static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
+{
+ return -EINVAL;
+}
+#endif
bool radeon_get_bios(struct radeon_device *rdev);
@@ -138,17 +154,23 @@ void radeon_dummy_page_fini(struct radeon_device *rdev);
struct radeon_clock {
struct radeon_pll p1pll;
struct radeon_pll p2pll;
+ struct radeon_pll dcpll;
struct radeon_pll spll;
struct radeon_pll mpll;
/* 10 Khz units */
uint32_t default_mclk;
uint32_t default_sclk;
+ uint32_t default_dispclk;
+ uint32_t dp_extclk;
};
/*
* Power management
*/
int radeon_pm_init(struct radeon_device *rdev);
+void radeon_pm_compute_clocks(struct radeon_device *rdev);
+void radeon_combios_get_power_modes(struct radeon_device *rdev);
+void radeon_atombios_get_power_modes(struct radeon_device *rdev);
/*
* Fences.
@@ -275,6 +297,7 @@ union radeon_gart_table {
};
#define RADEON_GPU_PAGE_SIZE 4096
+#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
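+/* usage sketch: addr & ~RADEON_GPU_PAGE_MASK rounds an address down to a GPU page */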
struct radeon_gart {
dma_addr_t table_addr;
@@ -309,21 +332,19 @@ struct radeon_mc {
/* for some chips with <= 32MB we need to lie
* about vram size near mc fb location */
u64 mc_vram_size;
- u64 gtt_location;
+ u64 visible_vram_size;
u64 gtt_size;
u64 gtt_start;
u64 gtt_end;
- u64 vram_location;
u64 vram_start;
u64 vram_end;
unsigned vram_width;
u64 real_vram_size;
int vram_mtrr;
bool vram_is_ddr;
- bool igp_sideport_enabled;
+ bool igp_sideport_enabled;
};
-int radeon_mc_setup(struct radeon_device *rdev);
bool radeon_combios_sideport_present(struct radeon_device *rdev);
bool radeon_atombios_sideport_present(struct radeon_device *rdev);
@@ -348,6 +369,7 @@ struct radeon_irq {
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
bool crtc_vblank_int[2];
+ wait_queue_head_t vblank_queue;
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
spinlock_t sw_lock;
@@ -379,6 +401,7 @@ struct radeon_ib {
struct radeon_ib_pool {
struct mutex mutex;
struct radeon_bo *robj;
+ struct list_head bogus_ib;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
unsigned head_id;
@@ -433,6 +456,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_test(struct radeon_device *rdev);
+extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
/* Ring access between begin & end cannot sleep */
void radeon_ring_free_size(struct radeon_device *rdev);
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
@@ -570,7 +594,99 @@ struct radeon_wb {
* Equation between gpu/memory clock and available bandwidth is hw dependent
* (type of memory, bus size, efficiency, ...)
*/
+enum radeon_pm_state {
+ PM_STATE_DISABLED,
+ PM_STATE_MINIMUM,
+ PM_STATE_PAUSED,
+ PM_STATE_ACTIVE
+};
+enum radeon_pm_action {
+ PM_ACTION_NONE,
+ PM_ACTION_MINIMUM,
+ PM_ACTION_DOWNCLOCK,
+ PM_ACTION_UPCLOCK
+};
+
+enum radeon_voltage_type {
+ VOLTAGE_NONE = 0,
+ VOLTAGE_GPIO,
+ VOLTAGE_VDDC,
+ VOLTAGE_SW
+};
+
+enum radeon_pm_state_type {
+ POWER_STATE_TYPE_DEFAULT,
+ POWER_STATE_TYPE_POWERSAVE,
+ POWER_STATE_TYPE_BATTERY,
+ POWER_STATE_TYPE_BALANCED,
+ POWER_STATE_TYPE_PERFORMANCE,
+};
+
+enum radeon_pm_clock_mode_type {
+ POWER_MODE_TYPE_DEFAULT,
+ POWER_MODE_TYPE_LOW,
+ POWER_MODE_TYPE_MID,
+ POWER_MODE_TYPE_HIGH,
+};
+
+struct radeon_voltage {
+ enum radeon_voltage_type type;
+ /* gpio voltage */
+ struct radeon_gpio_rec gpio;
+ u32 delay; /* delay in usec from voltage drop to sclk change */
+ bool active_high; /* voltage drop is active when bit is high */
+ /* VDDC voltage */
+ u8 vddc_id; /* index into vddc voltage table */
+ u8 vddci_id; /* index into vddci voltage table */
+ bool vddci_enabled;
+ /* r6xx+ sw */
+ u32 voltage;
+};
+
+struct radeon_pm_non_clock_info {
+ /* pcie lanes */
+ int pcie_lanes;
+ /* standardized non-clock flags */
+ u32 flags;
+};
+
+struct radeon_pm_clock_info {
+ /* memory clock */
+ u32 mclk;
+ /* engine clock */
+ u32 sclk;
+ /* voltage info */
+ struct radeon_voltage voltage;
+ /* standardized clock flags - not sure we'll need these */
+ u32 flags;
+};
+
+struct radeon_power_state {
+ enum radeon_pm_state_type type;
+ /* XXX: use a define for num clock modes */
+ struct radeon_pm_clock_info clock_info[8];
+ /* number of valid clock modes in this power state */
+ int num_clock_modes;
+ struct radeon_pm_clock_info *default_clock_mode;
+ /* non clock info about this state */
+ struct radeon_pm_non_clock_info non_clock_info;
+ bool voltage_drop_active;
+};
+
+/*
+ * Some modes are overclocked by a very small amount; accept them
+ */
+#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
+
struct radeon_pm {
+ struct mutex mutex;
+ struct delayed_work idle_work;
+ enum radeon_pm_state state;
+ enum radeon_pm_action planned_action;
+ unsigned long action_timeout;
+ bool downclocked;
+ int active_crtcs;
+ int req_vblank;
fixed20_12 max_bandwidth;
fixed20_12 igp_sideport_mclk;
fixed20_12 igp_system_mclk;
@@ -582,6 +698,15 @@ struct radeon_pm {
fixed20_12 core_bandwidth;
fixed20_12 sclk;
fixed20_12 needed_bandwidth;
+ /* XXX: use a define for num power modes */
+ struct radeon_power_state power_state[8];
+ /* number of valid power states */
+ int num_power_states;
+ struct radeon_power_state *current_power_state;
+ struct radeon_pm_clock_info *current_clock_mode;
+ struct radeon_power_state *requested_power_state;
+ struct radeon_pm_clock_info *requested_clock_mode;
+ struct radeon_power_state *default_power_state;
};
@@ -651,6 +776,7 @@ struct radeon_asic {
void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
uint32_t (*get_memory_clock)(struct radeon_device *rdev);
void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
+ int (*get_pcie_lanes)(struct radeon_device *rdev);
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
int (*set_surface_reg)(struct radeon_device *rdev, int reg,
@@ -701,6 +827,9 @@ struct r600_asic {
unsigned sx_max_export_pos_size;
unsigned sx_max_export_smx_size;
unsigned sq_num_cf_insts;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
};
struct rv770_asic {
@@ -721,6 +850,9 @@ struct rv770_asic {
unsigned sc_prim_fifo_size;
unsigned sc_hiz_tile_fifo_size;
unsigned sc_earlyz_tile_fifo_fize;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
};
union radeon_asic_config {
@@ -830,6 +962,8 @@ struct radeon_device {
struct r600_ih ih; /* r6/700 interrupt ring */
struct workqueue_struct *wq;
struct work_struct hotplug_work;
+ int num_crtc; /* number of crtcs */
+ struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
/* audio stuff */
struct timer_list audio_timer;
@@ -838,6 +972,8 @@ struct radeon_device {
int audio_bits_per_sample;
uint8_t audio_status_bits;
uint8_t audio_category_code;
+
+ bool powered_down;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -895,6 +1031,8 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
+#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
+#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32(reg); \
@@ -956,7 +1094,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
-
+#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
/*
* BIOS helpers.
@@ -1015,6 +1153,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
+#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
@@ -1029,6 +1168,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
/* AGP */
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
+extern void radeon_gart_restore(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
@@ -1042,6 +1182,10 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
+extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
+extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+extern int radeon_resume_kms(struct drm_device *dev);
+extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
struct r100_mc_save {
@@ -1096,7 +1240,7 @@ extern void r200_set_safe_registers(struct radeon_device *rdev);
/* r300,r350,rv350,rv370,rv380 */
extern void r300_set_reg_safe(struct radeon_device *rdev);
extern void r300_mc_program(struct radeon_device *rdev);
-extern void r300_vram_info(struct radeon_device *rdev);
+extern void r300_mc_init(struct radeon_device *rdev);
extern void r300_clock_startup(struct radeon_device *rdev);
extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
extern int rv370_pcie_gart_init(struct radeon_device *rdev);
@@ -1105,7 +1249,6 @@ extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
/* r420,r423,rv410 */
-extern int r420_mc_init(struct radeon_device *rdev);
extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
@@ -1147,13 +1290,13 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode2);
/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
+extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
extern void r600_cp_fini(struct radeon_device *rdev);
extern int r600_count_pipe_bits(uint32_t val);
-extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
extern int r600_pcie_gart_init(struct radeon_device *rdev);
extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
@@ -1189,6 +1332,14 @@ extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
uint8_t status_bits,
uint8_t category_code);
+/* evergreen */
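+/* display controller state saved/restored around MC reprogramming (cf. r100_mc_save above) */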
+struct evergreen_mc_save {
+ u32 vga_control[6];
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+ u32 crtc_control[6];
+};
+
#include "radeon_object.h"
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index c0681a55..c445779 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -237,6 +237,10 @@ int radeon_agp_init(struct radeon_device *rdev)
rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
+ rdev->mc.gtt_start = rdev->mc.agp_base;
+ rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1;
+ dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
+ rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end);
/* workaround some hw issues */
if (rdev->family < CHIP_R200) {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 05ee1ae..d3a157b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -43,7 +43,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
/*
- * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+ * r100,rv100,rs100,rv200,rs200
*/
extern int r100_init(struct radeon_device *rdev);
extern void r100_fini(struct radeon_device *rdev);
@@ -108,6 +108,52 @@ static struct radeon_asic r100_asic = {
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+};
+
+/*
+ * r200,rv250,rs300,rv280
+ */
+extern int r200_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
+ struct radeon_fence *fence);
+static struct radeon_asic r200_asic = {
+ .init = &r100_init,
+ .fini = &r100_fini,
+ .suspend = &r100_suspend,
+ .resume = &r100_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_reset = &r100_gpu_reset,
+ .gart_tlb_flush = &r100_pci_gart_tlb_flush,
+ .gart_set_page = &r100_pci_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r100_fence_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
@@ -138,11 +184,8 @@ extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t
extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
-extern int r300_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_pages,
- struct radeon_fence *fence);
+extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
+
static struct radeon_asic r300_asic = {
.init = &r300_init,
.fini = &r300_fini,
@@ -162,7 +205,46 @@ static struct radeon_asic r300_asic = {
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic r300_asic_pcie = {
+ .init = &r300_init,
+ .fini = &r300_fini,
+ .suspend = &r300_suspend,
+ .resume = &r300_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_reset = &r300_gpu_reset,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
@@ -206,12 +288,13 @@ static struct radeon_asic r420_asic = {
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
+ .copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
@@ -255,12 +338,13 @@ static struct radeon_asic rs400_asic = {
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
+ .copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
@@ -314,14 +398,17 @@ static struct radeon_asic rs600_asic = {
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
+ .copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs600_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
@@ -360,12 +447,13 @@ static struct radeon_asic rs690_asic = {
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
- .copy = &r300_copy_dma,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r200_copy_dma,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
@@ -412,12 +500,13 @@ static struct radeon_asic rv515_asic = {
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
+ .copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
@@ -455,12 +544,13 @@ static struct radeon_asic r520_asic = {
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
+ .copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
@@ -538,8 +628,9 @@ static struct radeon_asic r600_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
@@ -583,6 +674,7 @@ static struct radeon_asic rv770_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r600_set_surface_reg,
@@ -595,4 +687,54 @@ static struct radeon_asic rv770_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
};
+/*
+ * evergreen
+ */
+int evergreen_init(struct radeon_device *rdev);
+void evergreen_fini(struct radeon_device *rdev);
+int evergreen_suspend(struct radeon_device *rdev);
+int evergreen_resume(struct radeon_device *rdev);
+int evergreen_gpu_reset(struct radeon_device *rdev);
+void evergreen_bandwidth_update(struct radeon_device *rdev);
+void evergreen_hpd_init(struct radeon_device *rdev);
+void evergreen_hpd_fini(struct radeon_device *rdev);
+bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void evergreen_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
+
+static struct radeon_asic evergreen_asic = {
+ .init = &evergreen_init,
+ .fini = &evergreen_fini,
+ .suspend = &evergreen_suspend,
+ .resume = &evergreen_resume,
+ .cp_commit = NULL,
+ .gpu_reset = &evergreen_gpu_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = NULL,
+ .ring_ib_execute = NULL,
+ .irq_set = NULL,
+ .irq_process = NULL,
+ .get_vblank_counter = NULL,
+ .fence_ring_emit = NULL,
+ .cs_parse = NULL,
+ .copy_blit = NULL,
+ .copy_dma = NULL,
+ .copy = NULL,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
+};
+
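+/*
+ * Vtable entries left NULL are optional; callers are expected to check
+ * before dispatching, as the power-table parser does for get_pcie_lanes
+ * ('lanes' below is a hypothetical local):
+ *
+ * if (rdev->asic->get_pcie_lanes)
+ * lanes = radeon_get_pcie_lanes(rdev);
+ */
+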
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 4d88315..93783b1 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -159,8 +159,15 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device
struct radeon_gpio_rec *gpio)
{
struct radeon_hpd hpd;
+ u32 reg;
+
+ if (ASIC_IS_DCE4(rdev))
+ reg = EVERGREEN_DC_GPIO_HPD_A;
+ else
+ reg = AVIVO_DC_GPIO_HPD_A;
+
hpd.gpio = *gpio;
- if (gpio->reg == AVIVO_DC_GPIO_HPD_A) {
+ if (gpio->reg == reg) {
switch(gpio->mask) {
case (1 << 0):
hpd.hpd = RADEON_HPD_1;
@@ -574,6 +581,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
ddc_bus.valid = false;
}
+ /* needed for aux chan transactions */
+ ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0;
+
conn_id = le16_to_cpu(path->usConnObjectId);
if (!radeon_atom_apply_quirks
@@ -838,6 +848,7 @@ union firmware_info {
ATOM_FIRMWARE_INFO_V1_2 info_12;
ATOM_FIRMWARE_INFO_V1_3 info_13;
ATOM_FIRMWARE_INFO_V1_4 info_14;
+ ATOM_FIRMWARE_INFO_V2_1 info_21;
};
bool radeon_atom_get_clock_info(struct drm_device *dev)
@@ -849,6 +860,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
uint8_t frev, crev;
struct radeon_pll *p1pll = &rdev->clock.p1pll;
struct radeon_pll *p2pll = &rdev->clock.p2pll;
+ struct radeon_pll *dcpll = &rdev->clock.dcpll;
struct radeon_pll *spll = &rdev->clock.spll;
struct radeon_pll *mpll = &rdev->clock.mpll;
uint16_t data_offset;
@@ -951,8 +963,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
rdev->clock.default_mclk =
le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
+ if (ASIC_IS_DCE4(rdev)) {
+ rdev->clock.default_dispclk =
+ le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
+ if (rdev->clock.default_dispclk == 0)
+ rdev->clock.default_dispclk = 60000; /* 60000 * 10 kHz = 600 MHz */
+ rdev->clock.dp_extclk =
+ le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
+ }
+ *dcpll = *p1pll;
+
return true;
}
+
return false;
}
@@ -1091,6 +1114,30 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
return ss;
}
+static void radeon_atom_apply_lvds_quirks(struct drm_device *dev,
+ struct radeon_encoder_atom_dig *lvds)
+{
+ /* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */
+ if ((dev->pdev->device == 0x95c4) &&
+ (dev->pdev->subsystem_vendor == 0x1179) &&
+ (dev->pdev->subsystem_device == 0xff50)) {
+ if ((lvds->native_mode.hdisplay == 1280) &&
+ (lvds->native_mode.vdisplay == 800))
+ lvds->pll_algo = PLL_ALGO_LEGACY;
+ }
+
+ /* Dell Studio 15 laptop panel doesn't like new pll divider algo */
+ if ((dev->pdev->device == 0x95c4) &&
+ (dev->pdev->subsystem_vendor == 0x1028) &&
+ (dev->pdev->subsystem_device == 0x029f)) {
+ if ((lvds->native_mode.hdisplay == 1280) &&
+ (lvds->native_mode.vdisplay == 800))
+ lvds->pll_algo = PLL_ALGO_LEGACY;
+ }
+}
+
union lvds_info {
struct _ATOM_LVDS_INFO info;
struct _ATOM_LVDS_INFO_V12 info_12;
@@ -1161,6 +1208,21 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
+ if (ASIC_IS_AVIVO(rdev)) {
+ if (radeon_new_pll == 0)
+ lvds->pll_algo = PLL_ALGO_LEGACY;
+ else
+ lvds->pll_algo = PLL_ALGO_NEW;
+ } else {
+ if (radeon_new_pll == 1)
+ lvds->pll_algo = PLL_ALGO_NEW;
+ else
+ lvds->pll_algo = PLL_ALGO_LEGACY;
+ }
+
+ /* LVDS quirks */
+ radeon_atom_apply_lvds_quirks(dev, lvds);
+
encoder->native_mode = lvds->native_mode;
}
return lvds;
@@ -1385,20 +1447,375 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
return tv_dac;
}
-void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE info_4;
+};
+
+void radeon_atombios_get_power_modes(struct radeon_device *rdev)
{
- DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ u32 misc, misc2 = 0, sclk, mclk;
+ union power_info *power_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ struct _ATOM_PPLIB_STATE *power_state;
+ int num_modes = 0, i, j;
+ int state_index = 0, mode_index = 0;
- args.ucEnable = enable;
+ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ rdev->pm.default_power_state = NULL;
+
+ if (power_info) {
+ if (frev < 4) {
+ num_modes = power_info->info.ucNumOfPowerModeEntries;
+ if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+ num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ for (i = 0; i < num_modes; i++) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ switch (frev) {
+ case 1:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ /* skip overclock modes for now */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
+ rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk >
+ rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
+ continue;
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
+ }
+ /* order matters! */
+ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_POWERSAVE;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ }
+ state_index++;
+ break;
+ case 2:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ /* skip overclock modes for now */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
+ rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk >
+ rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
+ continue;
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
+ misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
+ }
+ /* order matters! */
+ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_POWERSAVE;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ }
+ state_index++;
+ break;
+ case 3:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ /* skip overclock modes for now */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
+ rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk >
+ rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
+ continue;
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
+ misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
+ if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
+ true;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
+ power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
+ }
+ }
+ /* order matters! */
+ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_POWERSAVE;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ }
+ state_index++;
+ break;
+ }
+ }
+ } else if (frev == 4) {
+ for (i = 0; i < power_info->info_4.ucNumStates; i++) {
+ mode_index = 0;
+ power_state = (struct _ATOM_PPLIB_STATE *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usStateArrayOffset) +
+ i * power_info->info_4.ucStateEntrySize);
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) +
+ (power_state->ucNonClockStateIndex *
+ power_info->info_4.ucNonClockSize));
+ for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) {
+ if (rdev->flags & RADEON_IS_IGP) {
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info =
+ (struct _ATOM_PPLIB_RS780_CLOCK_INFO *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+ (power_state->ucClockStateIndices[j] *
+ power_info->info_4.ucClockInfoSize));
+ sclk = le16_to_cpu(clock_info->usLowEngineClockLow);
+ sclk |= clock_info->ucLowEngineClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ /* skip invalid modes */
+ if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
+ continue;
+ /* skip overclock modes for now */
+ if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
+ rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)
+ continue;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ le16_to_cpu(clock_info->usVDDC);
+ mode_index++;
+ } else {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
+ (struct _ATOM_PPLIB_R600_CLOCK_INFO *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+ (power_state->ucClockStateIndices[j] *
+ power_info->info_4.ucClockInfoSize));
+ sclk = le16_to_cpu(clock_info->usEngineClockLow);
+ sclk |= clock_info->ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->usMemoryClockLow);
+ mclk |= clock_info->ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
+ continue;
+ /* skip overclock modes for now */
+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
+ rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
+ rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
+ continue;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ le16_to_cpu(clock_info->usVDDC);
+ mode_index++;
+ }
+ }
+ rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+ if (mode_index) {
+ misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ misc2 = le16_to_cpu(non_clock_info->usClassification);
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+ ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+ case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ }
+ if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+ }
+ state_index++;
+ }
+ }
+ }
+ } else {
+ /* XXX figure out some good default low power mode for cards w/out power tables */
+ }
+
+ if (rdev->pm.default_power_state == NULL) {
+ /* add the default mode */
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ if (rdev->asic->get_pcie_lanes)
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
+ else
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
+ rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ state_index++;
+ }
+ rdev->pm.num_power_states = state_index;
+
+ rdev->pm.current_power_state = rdev->pm.default_power_state;
+ rdev->pm.current_clock_mode =
+ rdev->pm.default_power_state->default_clock_mode;
}
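+
+/*
+ * Illustrative sketch of how a consumer might walk the parsed table (the
+ * actual selection policy lives in the pm code; 'best' is a hypothetical
+ * struct radeon_pm_clock_info pointer):
+ *
+ * for (i = 0; i < rdev->pm.num_power_states; i++) {
+ * struct radeon_power_state *ps = &rdev->pm.power_state[i];
+ * for (j = 0; j < ps->num_clock_modes; j++)
+ * if (ps->clock_info[j].sclk < best->sclk)
+ * best = &ps->clock_info[j];
+ * }
+ */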
-void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable)
+void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
{
- ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt);
+ DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
args.ucEnable = enable;
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
new file mode 100644
index 0000000..3f557c4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2010 Red Hat Inc.
+ * Author : Dave Airlie <airlied@redhat.com>
+ *
+ * Licensed under GPLv2
+ *
+ * ATPX support for hybrid Intel/ATI switchable graphics
+ */
+#include <linux/vga_switcheroo.h>
+#include <acpi/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <linux/pci.h>
+
+#define ATPX_VERSION 0
+#define ATPX_GPU_PWR 2
+#define ATPX_MUX_SELECT 3
+
+#define ATPX_INTEGRATED 0
+#define ATPX_DISCRETE 1
+
+#define ATPX_MUX_IGD 0
+#define ATPX_MUX_DISCRETE 1
+
+static struct radeon_atpx_priv {
+ bool atpx_detected;
+ /* handle for device - and atpx */
+ acpi_handle dhandle;
+ acpi_handle atpx_handle;
+ acpi_handle atrm_handle;
+} radeon_atpx_priv;
+
+/* retrieve the ROM in 4k blocks */
+static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
+ int offset, int len)
+{
+ acpi_status status;
+ union acpi_object atrm_arg_elements[2], *obj;
+ struct acpi_object_list atrm_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+ atrm_arg.count = 2;
+ atrm_arg.pointer = &atrm_arg_elements[0];
+
+ atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atrm_arg_elements[0].integer.value = offset;
+
+ atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atrm_arg_elements[1].integer.value = len;
+
+ status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR "failed to evaluate ATRM, got %s\n", acpi_format_exception(status));
+ return -ENODEV;
+ }
+
+ obj = (union acpi_object *)buffer.pointer;
+ memcpy(bios+offset, obj->buffer.pointer, len);
+ kfree(buffer.pointer);
+ return len;
+}
+
+bool radeon_atrm_supported(struct pci_dev *pdev)
+{
+ /* get the discrete ROM only via ATRM */
+ if (!radeon_atpx_priv.atpx_detected)
+ return false;
+
+ if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ return false;
+ return true;
+}
+
+
+int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
+{
+ return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
+}
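+
+/*
+ * Callers fetch the whole ROM in ATRM_BIOS_PAGE-sized chunks, e.g.
+ * (sketch; 'bios' and 'size' are the caller's):
+ *
+ * for (offset = 0; offset < size; offset += ATRM_BIOS_PAGE)
+ * if (radeon_atrm_get_bios_chunk(bios, offset, ATRM_BIOS_PAGE) <= 0)
+ * break;
+ */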
+
+static int radeon_atpx_get_version(acpi_handle handle)
+{
+ acpi_status status;
+ union acpi_object atpx_arg_elements[2], *obj;
+ struct acpi_object_list atpx_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ atpx_arg.count = 2;
+ atpx_arg.pointer = &atpx_arg_elements[0];
+
+ atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atpx_arg_elements[0].integer.value = ATPX_VERSION;
+
+ atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atpx_arg_elements[1].integer.value = ATPX_VERSION;
+
+ status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR "%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
+ return -ENOSYS;
+ }
+ obj = (union acpi_object *)buffer.pointer;
+ if (obj && (obj->type == ACPI_TYPE_BUFFER))
+ printk(KERN_INFO "radeon atpx: version is %d\n", *((u8 *)(obj->buffer.pointer) + 2));
+ kfree(buffer.pointer);
+ return 0;
+}
+
+static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value)
+{
+ acpi_status status;
+ union acpi_object atpx_arg_elements[2];
+ struct acpi_object_list atpx_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ uint8_t buf[4] = {0};
+
+ if (!handle)
+ return -EINVAL;
+
+ atpx_arg.count = 2;
+ atpx_arg.pointer = &atpx_arg_elements[0];
+
+ atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atpx_arg_elements[0].integer.value = cmd_id;
+
+ buf[2] = value & 0xff;
+ buf[3] = (value >> 8) & 0xff;
+
+ atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
+ atpx_arg_elements[1].buffer.length = 4;
+ atpx_arg_elements[1].buffer.pointer = buf;
+
+ status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
+ return -ENOSYS;
+ }
+ kfree(buffer.pointer);
+
+ return 0;
+}
+
+static int radeon_atpx_set_discrete_state(acpi_handle handle, int state)
+{
+ return radeon_atpx_execute(handle, ATPX_GPU_PWR, state);
+}
+
+static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id)
+{
+ return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id);
+}
+
+
+static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
+{
+ if (id == VGA_SWITCHEROO_IGD)
+ radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 0);
+ else
+ radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 1);
+ return 0;
+}
+
+static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
+ enum vga_switcheroo_state state)
+{
+ /* on w500 ACPI can't change intel gpu state */
+ if (id == VGA_SWITCHEROO_IGD)
+ return 0;
+
+ radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state);
+ return 0;
+}
+
+static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
+{
+ acpi_handle dhandle, atpx_handle, atrm_handle;
+ acpi_status status;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+
+ status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ radeon_atpx_priv.dhandle = dhandle;
+ radeon_atpx_priv.atpx_handle = atpx_handle;
+ radeon_atpx_priv.atrm_handle = atrm_handle;
+ return true;
+}
+
+static int radeon_atpx_init(void)
+{
+ /* set up the ATPX handle */
+
+ radeon_atpx_get_version(radeon_atpx_priv.atpx_handle);
+ return 0;
+}
+
+static int radeon_atpx_get_client_id(struct pci_dev *pdev)
+{
+ if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ return VGA_SWITCHEROO_IGD;
+ else
+ return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler radeon_atpx_handler = {
+ .switchto = radeon_atpx_switchto,
+ .power_state = radeon_atpx_power_state,
+ .init = radeon_atpx_init,
+ .get_client_id = radeon_atpx_get_client_id,
+};
+
+static bool radeon_atpx_detect(void)
+{
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+ struct pci_dev *pdev = NULL;
+ bool has_atpx = false;
+ int vga_count = 0;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+ }
+
+ if (has_atpx && vga_count == 2) {
+ acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer);
+ printk(KERN_INFO "VGA switcheroo: detected switching method at handle %s\n",
+ acpi_method_name);
+ radeon_atpx_priv.atpx_detected = true;
+ return true;
+ }
+ return false;
+}
+
+void radeon_register_atpx_handler(void)
+{
+ bool r;
+
+ /* detect whether we have ATPX plus two VGA-class devices in the system */
+ r = radeon_atpx_detect();
+ if (!r)
+ return;
+
+ vga_switcheroo_register_handler(&radeon_atpx_handler);
+}
+
+void radeon_unregister_atpx_handler(void)
+{
+ vga_switcheroo_unregister_handler();
+}
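
The new file above talks to two ACPI methods: ATPX for mux and power control, and ATRM for fetching the discrete GPU's ROM image in 4 KiB pages. A minimal sketch of the paged-fetch pattern that radeon_atrm_get_bios_chunk() serves, assuming a hypothetical read_page() callback with the same offset/length contract as radeon_atrm_call():

#include <stdint.h>

#define ROM_PAGE 4096	/* same granularity as ATRM_BIOS_PAGE */

/* Accumulate a ROM image page by page; read_page() returns the number
 * of bytes copied, or <= 0 once the ROM is exhausted (hypothetical). */
static int fetch_rom(uint8_t *rom, int rom_len,
		     int (*read_page)(uint8_t *dst, int off, int len))
{
	int off;

	for (off = 0; off + ROM_PAGE <= rom_len; off += ROM_PAGE)
		if (read_page(rom, off, ROM_PAGE) <= 0)
			break;
	return off;	/* bytes actually fetched */
}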
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 9069217..5572404 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -30,6 +30,7 @@
#include "radeon.h"
#include "atom.h"
+#include <linux/vga_switcheroo.h>
/*
* BIOS.
*/
@@ -62,7 +63,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
iounmap(bios);
return false;
}
- memcpy(rdev->bios, bios, size);
+ memcpy_fromio(rdev->bios, bios, size);
iounmap(bios);
return true;
}
@@ -93,6 +94,38 @@ static bool radeon_read_bios(struct radeon_device *rdev)
return true;
}
+/* ATRM is used to get the BIOS on the discrete cards in
+ * dual-gpu systems.
+ */
+static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+ int ret;
+ int size = 64 * 1024;
+ int i;
+
+ if (!radeon_atrm_supported(rdev->pdev))
+ return false;
+
+ rdev->bios = kmalloc(size, GFP_KERNEL);
+ if (!rdev->bios) {
+ DRM_ERROR("Unable to allocate bios\n");
+ return false;
+ }
+
+ for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
+ ret = radeon_atrm_get_bios_chunk(rdev->bios,
+ (i * ATRM_BIOS_PAGE),
+ ATRM_BIOS_PAGE);
+ if (ret <= 0)
+ break;
+ }
+
+ if (i == 0 || rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+ kfree(rdev->bios);
+ return false;
+ }
+ return true;
+}
static bool r700_read_disabled_bios(struct radeon_device *rdev)
{
uint32_t viph_control;
@@ -388,16 +421,16 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
return legacy_read_disabled_bios(rdev);
}
+
bool radeon_get_bios(struct radeon_device *rdev)
{
bool r;
uint16_t tmp;
- if (rdev->flags & RADEON_IS_IGP) {
+ r = radeon_atrm_get_bios(rdev);
+ if (r == false)
r = igp_read_bios_from_vram(rdev);
- if (r == false)
- r = radeon_read_bios(rdev);
- } else
+ if (r == false)
r = radeon_read_bios(rdev);
if (r == false) {
r = radeon_read_disabled_bios(rdev);
@@ -408,6 +441,13 @@ bool radeon_get_bios(struct radeon_device *rdev)
return false;
}
if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+ printk("BIOS signature incorrect %x %x\n", rdev->bios[0], rdev->bios[1]);
+ goto free_bios;
+ }
+
+ tmp = RBIOS16(0x18);
+ if (RBIOS8(tmp + 0x14) != 0x0) {
+ DRM_INFO("Not an x86 BIOS ROM, not using.\n");
goto free_bios;
}
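
radeon_get_bios() now applies two sanity checks to whatever image it obtained: the PC expansion-ROM signature 0x55 0xAA in the first two bytes, and the code-type byte at offset 0x14 of the PCI Data Structure, whose location is itself read from the 16-bit pointer at offset 0x18 (code type 0x00 means an x86 image). A self-contained sketch of the same checks, assuming rom[] is already loaded in memory:

#include <stdbool.h>
#include <stdint.h>

static bool rom_is_x86_image(const uint8_t *rom)
{
	uint16_t pcir;

	if (rom[0] != 0x55 || rom[1] != 0xaa)
		return false;			/* not an expansion ROM */
	pcir = rom[0x18] | (rom[0x19] << 8);	/* PCI Data Structure offset */
	return rom[pcir + 0x14] == 0x00;	/* 0x00 = x86 code type */
}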
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 73c4405..f64936c 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -96,6 +96,7 @@ void radeon_get_clock_info(struct drm_device *dev)
struct radeon_device *rdev = dev->dev_private;
struct radeon_pll *p1pll = &rdev->clock.p1pll;
struct radeon_pll *p2pll = &rdev->clock.p2pll;
+ struct radeon_pll *dcpll = &rdev->clock.dcpll;
struct radeon_pll *spll = &rdev->clock.spll;
struct radeon_pll *mpll = &rdev->clock.mpll;
int ret;
@@ -204,6 +205,17 @@ void radeon_get_clock_info(struct drm_device *dev)
p2pll->max_frac_feedback_div = 0;
}
+ /* dcpll is DCE4 only */
+ dcpll->min_post_div = 2;
+ dcpll->max_post_div = 0x7f;
+ dcpll->min_frac_feedback_div = 0;
+ dcpll->max_frac_feedback_div = 9;
+ dcpll->min_ref_div = 2;
+ dcpll->max_ref_div = 0x3ff;
+ dcpll->min_feedback_div = 4;
+ dcpll->max_feedback_div = 0xfff;
+ dcpll->best_vco = 0;
+
p1pll->min_ref_div = 2;
p1pll->max_ref_div = 0x3ff;
p1pll->min_feedback_div = 4;
@@ -846,8 +858,10 @@ int radeon_static_clocks_init(struct drm_device *dev)
/* XXX make sure engine is idle */
if (radeon_dynclks != -1) {
- if (radeon_dynclks)
- radeon_set_clock_gating(rdev, 1);
+ if (radeon_dynclks) {
+ if (rdev->asic->set_clock_gating)
+ radeon_set_clock_gating(rdev, 1);
+ }
}
radeon_apply_clock_quirks(rdev);
return 0;
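
The gating fix above only dereferences the per-ASIC hook when the ops table actually provides one, since some parts leave it unset. A sketch of that guard pattern, with hypothetical names standing in for the radeon asic table:

struct asic_ops {
	void (*set_clock_gating)(void *dev, int enable);	/* may be NULL */
};

static void enable_dynamic_gating(const struct asic_ops *ops, void *dev)
{
	if (ops->set_clock_gating)		/* hook is optional */
		ops->set_clock_gating(dev, 1);
}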
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 22d4761..e9ea38ec 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -150,6 +150,9 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
int rev;
uint16_t offset = 0, check_offset;
+ if (!rdev->bios)
+ return 0;
+
switch (table) {
/* absolute offset tables */
case COMBIOS_ASIC_INIT_1_TABLE:
@@ -443,6 +446,39 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
}
+bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
+{
+ int edid_info;
+ struct edid *edid;
+ edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
+ if (!edid_info)
+ return false;
+
+ edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
+ GFP_KERNEL);
+ if (edid == NULL)
+ return false;
+
+ memcpy((unsigned char *)edid,
+ (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH);
+
+ if (!drm_edid_is_valid(edid)) {
+ kfree(edid);
+ return false;
+ }
+
+ rdev->mode_info.bios_hardcoded_edid = edid;
+ return true;
+}
+
+struct edid *
+radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
+{
+ if (rdev->mode_info.bios_hardcoded_edid)
+ return rdev->mode_info.bios_hardcoded_edid;
+ return NULL;
+}
+
static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
int ddc_line)
{
@@ -486,9 +522,65 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
i2c.y_data_reg = ddc_line;
}
- if (rdev->family < CHIP_R200)
- i2c.hw_capable = false;
- else {
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_RS300:
+ switch (ddc_line) {
+ case RADEON_GPIO_DVI_DDC:
+ /* in theory this should be hw capable,
+ * but it doesn't seem to work
+ */
+ i2c.hw_capable = false;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_R200:
+ switch (ddc_line) {
+ case RADEON_GPIO_DVI_DDC:
+ case RADEON_GPIO_MONID:
+ i2c.hw_capable = true;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_RV250:
+ case CHIP_RV280:
+ switch (ddc_line) {
+ case RADEON_GPIO_VGA_DDC:
+ case RADEON_GPIO_DVI_DDC:
+ case RADEON_GPIO_CRT2_DDC:
+ i2c.hw_capable = true;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_R300:
+ case CHIP_R350:
+ switch (ddc_line) {
+ case RADEON_GPIO_VGA_DDC:
+ case RADEON_GPIO_DVI_DDC:
+ i2c.hw_capable = true;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_RV350:
+ case CHIP_RV380:
+ case CHIP_RS400:
+ case CHIP_RS480:
switch (ddc_line) {
case RADEON_GPIO_VGA_DDC:
case RADEON_GPIO_DVI_DDC:
@@ -504,9 +596,14 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
i2c.hw_capable = false;
break;
}
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
}
i2c.mm_i2c = false;
i2c.i2c_id = 0;
+ i2c.hpd_id = 0;
if (ddc_line)
i2c.valid = true;
@@ -527,9 +624,6 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
int8_t rev;
uint16_t sclk, mclk;
- if (rdev->bios == NULL)
- return false;
-
pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
if (pll_info) {
rev = RBIOS8(pll_info);
@@ -654,9 +748,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
if (!p_dac)
return NULL;
- if (rdev->bios == NULL)
- goto out;
-
/* check CRT table */
dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
if (dac_info) {
@@ -673,7 +764,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
found = 1;
}
-out:
if (!found) /* fallback to defaults */
radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
@@ -687,9 +777,6 @@ radeon_combios_get_tv_info(struct radeon_device *rdev)
uint16_t tv_info;
enum radeon_tv_std tv_std = TV_STD_NTSC;
- if (rdev->bios == NULL)
- return tv_std;
-
tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
if (tv_info) {
if (RBIOS8(tv_info + 6) == 'T') {
@@ -793,9 +880,6 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
if (!tv_dac)
return NULL;
- if (rdev->bios == NULL)
- goto out;
-
/* first check TV table */
dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
if (dac_info) {
@@ -857,7 +941,6 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
}
}
-out:
if (!found) /* fallback to defaults */
radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac);
@@ -945,11 +1028,6 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
int tmp, i;
struct radeon_encoder_lvds *lvds = NULL;
- if (rdev->bios == NULL) {
- lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
- goto out;
- }
-
lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
if (lcd_info) {
@@ -1050,7 +1128,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
DRM_INFO("No panel info found in BIOS\n");
lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
}
-out:
+
if (lvds)
encoder->native_mode = lvds->native_mode;
return lvds;
@@ -1102,9 +1180,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
int i, n;
uint8_t ver;
- if (rdev->bios == NULL)
- return false;
-
tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
if (tmds_info) {
@@ -1184,9 +1259,6 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
enum radeon_combios_ddc gpio;
struct radeon_i2c_bus_rec i2c_bus;
- if (rdev->bios == NULL)
- return false;
-
tmds->i2c_bus = NULL;
if (rdev->flags & RADEON_IS_IGP) {
offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
@@ -1253,7 +1325,10 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
break;
case DDC_LCD: /* MM i2c */
- DRM_ERROR("MM i2c requires hw i2c engine\n");
+ i2c_bus.valid = true;
+ i2c_bus.hw_capable = true;
+ i2c_bus.mm_i2c = true;
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
break;
default:
DRM_ERROR("Unsupported gpio %d\n", gpio);
@@ -1909,9 +1984,6 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
struct radeon_i2c_bus_rec ddc_i2c;
struct radeon_hpd hpd;
- if (rdev->bios == NULL)
- return false;
-
conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE);
if (conn_info) {
for (i = 0; i < 4; i++) {
@@ -2278,6 +2350,115 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
return true;
}
+void radeon_combios_get_power_modes(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ u16 offset, misc, misc2 = 0;
+ u8 rev, blocks, tmp;
+ int state_index = 0;
+
+ rdev->pm.default_power_state = NULL;
+
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
+ if (offset) {
+ rev = RBIOS8(offset);
+ blocks = RBIOS8(offset + 0x2);
+ /* power mode 0 tends to be the only valid one */
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2);
+ rdev->pm.power_state[state_index].clock_info[0].sclk = RBIOS32(offset + 0x5 + 0x6);
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ goto default_mode;
+ /* skip overclock modes for now */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
+ rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk >
+ rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
+ goto default_mode;
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ misc = RBIOS16(offset + 0x5 + 0x0);
+ if (rev > 4)
+ misc2 = RBIOS16(offset + 0x5 + 0xe);
+ if (misc & 0x4) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
+ if (misc & 0x8)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = true;
+ if (rev < 6) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
+ RBIOS16(offset + 0x5 + 0xb) * 4;
+ tmp = RBIOS8(offset + 0x5 + 0xd);
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
+ } else {
+ u8 entries = RBIOS8(offset + 0x5 + 0xb);
+ u16 voltage_table_offset = RBIOS16(offset + 0x5 + 0xc);
+ if (entries && voltage_table_offset) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
+ RBIOS16(voltage_table_offset) * 4;
+ tmp = RBIOS8(voltage_table_offset + 0x2);
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
+ } else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = false;
+ }
+ switch ((misc2 & 0x700) >> 8) {
+ case 0:
+ default:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 0;
+ break;
+ case 1:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 33;
+ break;
+ case 2:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 66;
+ break;
+ case 3:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 99;
+ break;
+ case 4:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 132;
+ break;
+ }
+ } else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ if (rev > 6)
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ RBIOS8(offset + 0x5 + 0x10);
+ state_index++;
+ } else {
+ /* XXX figure out some good default low power mode for mobility cards w/out power tables */
+ }
+ } else {
+ /* XXX figure out some good default low power mode for desktop cards */
+ }
+
+default_mode:
+ /* add the default mode */
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+ rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ if (rdev->asic->get_pcie_lanes)
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
+ else
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
+ rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.num_power_states = state_index + 1;
+
+ rdev->pm.current_power_state = rdev->pm.default_power_state;
+ rdev->pm.current_clock_mode =
+ rdev->pm.default_power_state->default_clock_mode;
+}
+
void radeon_external_tmds_setup(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -2289,23 +2470,21 @@ void radeon_external_tmds_setup(struct drm_encoder *encoder)
switch (tmds->dvo_chip) {
case DVO_SIL164:
/* sil 164 */
- radeon_i2c_do_lock(tmds->i2c_bus, 1);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
- tmds->slave_addr,
- 0x08, 0x30);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x08, 0x30);
+ radeon_i2c_put_byte(tmds->i2c_bus,
tmds->slave_addr,
0x09, 0x00);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
- tmds->slave_addr,
- 0x0a, 0x90);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
- tmds->slave_addr,
- 0x0c, 0x89);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0a, 0x90);
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0c, 0x89);
+ radeon_i2c_put_byte(tmds->i2c_bus,
tmds->slave_addr,
0x08, 0x3b);
- radeon_i2c_do_lock(tmds->i2c_bus, 0);
break;
case DVO_SIL1178:
/* sil 1178 - untested */
@@ -2338,9 +2517,6 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
uint32_t reg, val, and_mask, or_mask;
struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
- if (rdev->bios == NULL)
- return false;
-
if (!tmds)
return false;
@@ -2390,11 +2566,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
index++;
val = RBIOS8(index);
index++;
- radeon_i2c_do_lock(tmds->i2c_bus, 1);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
- slave_addr,
- reg, val);
- radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ slave_addr,
+ reg, val);
break;
default:
DRM_ERROR("Unknown id %d\n", id >> 13);
@@ -2447,11 +2621,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
reg = id & 0x1fff;
val = RBIOS8(index);
index += 1;
- radeon_i2c_do_lock(tmds->i2c_bus, 1);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
- tmds->slave_addr,
- reg, val);
- radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ reg, val);
break;
default:
DRM_ERROR("Unknown id %d\n", id >> 13);
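
In the power-table parser added above, bits 10:8 of misc2 select the voltage switching delay in 33-unit steps. A sketch of the same decode as a standalone helper (values outside the table fall back to 0, exactly as the default case does):

#include <stdint.h>

static int voltage_delay(uint16_t misc2)
{
	int sel = (misc2 & 0x700) >> 8;		/* 3-bit field, 0..7 */

	if (sel > 4)
		sel = 0;			/* unknown values decode as 0 */
	return sel * 33;			/* 0, 33, 66, 99 or 132 */
}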
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 65f8194..ee0083f 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -479,10 +479,8 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
ret = connector_status_connected;
else {
if (radeon_connector->ddc_bus) {
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (radeon_connector->edid)
ret = connector_status_connected;
}
@@ -587,19 +585,14 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
if (!encoder)
ret = connector_status_disconnected;
- if (radeon_connector->ddc_bus) {
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
- }
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -744,19 +737,14 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false;
- if (radeon_connector->ddc_bus) {
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
- }
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -952,7 +940,7 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
if (radeon_connector->edid)
kfree(radeon_connector->edid);
if (radeon_dig_connector->dp_i2c_bus)
- radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
+ radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus);
kfree(radeon_connector->con_priv);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -988,12 +976,10 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
ret = connector_status_connected;
}
} else {
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
if (radeon_ddc_probe(radeon_connector)) {
radeon_dig_connector->dp_sink_type = sink_type;
ret = connector_status_connected;
}
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 06123ba..dc6eba6 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1644,6 +1644,7 @@ static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_pri
radeon_cp_load_microcode(dev_priv);
radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
+ dev_priv->have_z_offset = 0;
radeon_do_engine_reset(dev);
radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index e9d0850..70ba02e 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -194,11 +194,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
}
radeon_bo_list_unreserve(&parser->validated);
for (i = 0; i < parser->nrelocs; i++) {
- if (parser->relocs[i].gobj) {
- mutex_lock(&parser->rdev->ddev->struct_mutex);
- drm_gem_object_unreference(parser->relocs[i].gobj);
- mutex_unlock(&parser->rdev->ddev->struct_mutex);
- }
+ if (parser->relocs[i].gobj)
+ drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
}
kfree(parser->track);
kfree(parser->relocs);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 28772a3..b7023ff 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -36,7 +36,14 @@ static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
uint32_t cur_lock;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset);
+ if (lock)
+ cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+ else
+ cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
+ WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
+ } else if (ASIC_IS_AVIVO(rdev)) {
cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
if (lock)
cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
@@ -58,7 +65,10 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
+ WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+ } else if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
@@ -81,10 +91,14 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
+ WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
+ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+ } else if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
- (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+ (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
switch (radeon_crtc->crtc_id) {
case 0:
@@ -109,7 +123,10 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
+ } else if (ASIC_IS_AVIVO(rdev)) {
if (rdev->family >= CHIP_RV770) {
if (radeon_crtc->crtc_id)
WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0);
@@ -169,17 +186,13 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
unpin:
if (radeon_crtc->cursor_bo) {
radeon_gem_object_unpin(radeon_crtc->cursor_bo);
- mutex_lock(&crtc->dev->struct_mutex);
- drm_gem_object_unreference(radeon_crtc->cursor_bo);
- mutex_unlock(&crtc->dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
}
radeon_crtc->cursor_bo = obj;
return 0;
fail:
- mutex_lock(&crtc->dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&crtc->dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
return 0;
}
@@ -201,7 +214,20 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
yorigin = CURSOR_HEIGHT - 1;
radeon_lock_cursor(crtc, true);
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ /* cursors are offset into the total surface */
+ x += crtc->x;
+ y += crtc->y;
+ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+ /* XXX: check if evergreen has the same issues as avivo chips */
+ WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
+ ((xorigin ? 0 : x) << 16) |
+ (yorigin ? 0 : y));
+ WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
+ ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1));
+ } else if (ASIC_IS_AVIVO(rdev)) {
int w = radeon_crtc->cursor_width;
int i = 0;
struct drm_crtc *crtc_p;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 768b150..e28e4ed 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -30,6 +30,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
@@ -100,80 +101,103 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
}
}
-/*
- * MC common functions
+/**
+ * radeon_vram_location - try to find VRAM location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ * @base: base address at which to put VRAM
+ *
+ * Function will try to place VRAM at the base address provided
+ * as a parameter (which is so far either the PCI aperture address or,
+ * for IGP, the TOM base address).
+ *
+ * If there is not enough space to fit the invisible VRAM in the 32-bit
+ * address space, then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP and the AGP aperture doesn't allow us room
+ * for all the VRAM, then we restrict the VRAM to the PCI aperture
+ * size and print a warning.
+ *
+ * This function never fails; the worst case is limiting VRAM.
+ *
+ * Note: GTT start, end, and size should be initialized before calling this
+ * function on an AGP platform.
+ *
+ * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size;
+ * this shouldn't be a problem as we are using the PCI aperture as a reference.
+ * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
+ * not IGP.
+ *
+ * Note: we use mc_vram_size because on some boards we need to program the mc
+ * to cover the whole aperture even if VRAM size is smaller than the aperture
+ * size (Novell bug 204882, along with lots of Ubuntu ones).
+ *
+ * Note: when limiting VRAM it's safe to overwrite real_vram_size because
+ * we are not in the case where real_vram_size is smaller than mc_vram_size
+ * (i.e. not affected by the bogus hw of Novell bug 204882 and the Ubuntu
+ * ones).
+ *
+ * Note: the IGP TOM addr should be the same as the aperture addr; we don't
+ * explicitly check for that, though.
+ *
+ * FIXME: when reducing VRAM size, align the new size on a power of 2.
+ */
+void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
+{
+ mc->vram_start = base;
+ if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
+ dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+ mc->real_vram_size = mc->aper_size;
+ mc->mc_vram_size = mc->aper_size;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
+ dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+ mc->real_vram_size = mc->aper_size;
+ mc->mc_vram_size = mc->aper_size;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+}
+
+/**
+ * radeon_gtt_location - try to find GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * Function will try to place GTT before or after VRAM.
+ *
+ * If the GTT size is bigger than the space left, then we adjust the GTT size.
+ * Thus this function never fails.
+ *
+ * FIXME: when reducing GTT size, align the new size on a power of 2.
*/
-int radeon_mc_setup(struct radeon_device *rdev)
+void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
- uint32_t tmp;
+ u64 size_af, size_bf;
- /* Some chips have an "issue" with the memory controller, the
- * location must be aligned to the size. We just align it down,
- * too bad if we walk over the top of system memory, we don't
- * use DMA without a remapped anyway.
- * Affected chips are rv280, all r3xx, and all r4xx, but not IGP
- */
- /* FGLRX seems to setup like this, VRAM a 0, then GART.
- */
- /*
- * Note: from R6xx the address space is 40bits but here we only
- * use 32bits (still have to see a card which would exhaust 4G
- * address space).
- */
- if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
- /* vram location was already setup try to put gtt after
- * if it fits */
- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
- tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
- if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
- rdev->mc.gtt_location = tmp;
- } else {
- if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
- printk(KERN_ERR "[drm] GTT too big to fit "
- "before or after vram location.\n");
- return -EINVAL;
- }
- rdev->mc.gtt_location = 0;
- }
- } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
- /* gtt location was already setup try to put vram before
- * if it fits */
- if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
- rdev->mc.vram_location = 0;
- } else {
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
- tmp += (rdev->mc.mc_vram_size - 1);
- tmp &= ~(rdev->mc.mc_vram_size - 1);
- if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
- rdev->mc.vram_location = tmp;
- } else {
- printk(KERN_ERR "[drm] vram too big to fit "
- "before or after GTT location.\n");
- return -EINVAL;
- }
+ size_af = 0xFFFFFFFF - mc->vram_end;
+ size_bf = mc->vram_start;
+ if (size_bf > size_af) {
+ if (mc->gtt_size > size_bf) {
+ dev_warn(rdev->dev, "limiting GTT\n");
+ mc->gtt_size = size_bf;
}
+ mc->gtt_start = mc->vram_start - mc->gtt_size;
} else {
- rdev->mc.vram_location = 0;
- tmp = rdev->mc.mc_vram_size;
- tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
- rdev->mc.gtt_location = tmp;
- }
- rdev->mc.vram_start = rdev->mc.vram_location;
- rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
- rdev->mc.gtt_start = rdev->mc.gtt_location;
- rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
- DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
- DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
- (unsigned)rdev->mc.vram_location,
- (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
- DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
- DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
- (unsigned)rdev->mc.gtt_location,
- (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
- return 0;
+ if (mc->gtt_size > size_af) {
+ dev_warn(rdev->dev, "limiting GTT\n");
+ mc->gtt_size = size_af;
+ }
+ mc->gtt_start = mc->vram_end + 1;
+ }
+ mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+ dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
+ mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
-
/*
* GPU helpers function.
*/
@@ -182,7 +206,16 @@ bool radeon_card_posted(struct radeon_device *rdev)
uint32_t reg;
/* first check CRTCs */
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (reg & EVERGREEN_CRTC_MASTER_EN)
+ return true;
+ } else if (ASIC_IS_AVIVO(rdev)) {
reg = RREG32(AVIVO_D1CRTC_CONTROL) |
RREG32(AVIVO_D2CRTC_CONTROL);
if (reg & AVIVO_CRTC_EN) {
@@ -229,6 +262,8 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev)
int radeon_dummy_page_init(struct radeon_device *rdev)
{
+ if (rdev->dummy_page.page)
+ return 0;
rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
if (rdev->dummy_page.page == NULL)
return -ENOMEM;
@@ -310,7 +345,7 @@ void radeon_register_accessor_init(struct radeon_device *rdev)
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
- if (rdev->family >= CHIP_R600) {
+ if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
}
@@ -329,21 +364,22 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_RS100:
case CHIP_RV200:
case CHIP_RS200:
+ rdev->asic = &r100_asic;
+ break;
case CHIP_R200:
case CHIP_RV250:
case CHIP_RS300:
case CHIP_RV280:
- rdev->asic = &r100_asic;
+ rdev->asic = &r200_asic;
break;
case CHIP_R300:
case CHIP_R350:
case CHIP_RV350:
case CHIP_RV380:
- rdev->asic = &r300_asic;
- if (rdev->flags & RADEON_IS_PCIE) {
- rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
- rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
- }
+ if (rdev->flags & RADEON_IS_PCIE)
+ rdev->asic = &r300_asic_pcie;
+ else
+ rdev->asic = &r300_asic;
break;
case CHIP_R420:
case CHIP_R423:
@@ -387,6 +423,13 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_RV740:
rdev->asic = &rv770_asic;
break;
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ case CHIP_JUNIPER:
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ rdev->asic = &evergreen_asic;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -613,6 +656,36 @@ void radeon_check_arguments(struct radeon_device *rdev)
}
}
+static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct radeon_device *rdev = dev->dev_private;
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+ if (state == VGA_SWITCHEROO_ON) {
+ printk(KERN_INFO "radeon: switched on\n");
+ /* don't suspend or resume card normally */
+ rdev->powered_down = false;
+ radeon_resume_kms(dev);
+ } else {
+ printk(KERN_INFO "radeon: switched off\n");
+ radeon_suspend_kms(dev, pmm);
+ /* don't suspend or resume card normally */
+ rdev->powered_down = true;
+ }
+}
+
+static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+ can_switch = (dev->open_count == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+}
+
+
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
struct pci_dev *pdev,
@@ -638,11 +711,14 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ib_pool.mutex);
mutex_init(&rdev->cp.mutex);
+ mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600)
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
+ mutex_init(&rdev->pm.mutex);
rwlock_init(&rdev->fence_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
+ init_waitqueue_head(&rdev->irq.vblank_queue);
/* setup workqueue */
rdev->wq = create_workqueue("radeon");
@@ -692,6 +768,9 @@ int radeon_device_init(struct radeon_device *rdev,
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
+ vga_switcheroo_register_client(rdev->pdev,
+ radeon_switcheroo_set_state,
+ radeon_switcheroo_can_switch);
r = radeon_init(rdev);
if (r)
@@ -723,6 +802,7 @@ void radeon_device_fini(struct radeon_device *rdev)
rdev->shutdown = true;
radeon_fini(rdev);
destroy_workqueue(rdev->wq);
+ vga_switcheroo_unregister_client(rdev->pdev);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
@@ -746,6 +826,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
}
rdev = dev->dev_private;
+ if (rdev->powered_down)
+ return 0;
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
@@ -791,6 +873,9 @@ int radeon_resume_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
+ if (rdev->powered_down)
+ return 0;
+
acquire_console_sem();
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
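
radeon_gtt_location() above compares the space below vram_start with the space between vram_end and the 4 GiB boundary, places the GTT on the larger side, and shrinks it if it still does not fit. A runnable sketch of that rule with illustrative numbers (256 MiB of VRAM at 0, 512 MiB of GTT requested):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vram_start = 0, vram_end = 0x0fffffff;
	uint64_t gtt_size = 512ULL << 20, gtt_start;
	uint64_t size_af = 0xffffffffULL - vram_end;	/* space after VRAM */
	uint64_t size_bf = vram_start;			/* space before VRAM */

	if (size_bf > size_af) {
		if (gtt_size > size_bf)
			gtt_size = size_bf;		/* shrink to fit */
		gtt_start = vram_start - gtt_size;
	} else {
		if (gtt_size > size_af)
			gtt_size = size_af;
		gtt_start = vram_end + 1;		/* directly after VRAM */
	}
	printf("GTT: %lluM at 0x%08llx\n",
	       (unsigned long long)(gtt_size >> 20),
	       (unsigned long long)gtt_start);
	return 0;
}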
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7e17a36..ba8d806 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -68,6 +68,36 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
}
+static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int i;
+
+ DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
+ WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+ WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id);
+ WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007);
+
+ WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0);
+ for (i = 0; i < 256; i++) {
+ WREG32(EVERGREEN_DC_LUT_30_COLOR,
+ (radeon_crtc->lut_r[i] << 20) |
+ (radeon_crtc->lut_g[i] << 10) |
+ (radeon_crtc->lut_b[i] << 0));
+ }
+}
+
static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -100,7 +130,9 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
if (!crtc->enabled)
return;
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_DCE4(rdev))
+ evergreen_crtc_load_lut(crtc);
+ else if (ASIC_IS_AVIVO(rdev))
avivo_crtc_load_lut(crtc);
else
legacy_crtc_load_lut(crtc);
@@ -361,6 +393,8 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
int ret = 0;
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
@@ -373,11 +407,11 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
if (!radeon_connector->ddc_bus)
return -1;
if (!radeon_connector->edid) {
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
-
+ /* some servers provide a hardcoded EDID in ROM for KVMs */
+ if (!radeon_connector->edid)
+ radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev);
if (radeon_connector->edid) {
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
@@ -395,9 +429,7 @@ static int radeon_ddc_dump(struct drm_connector *connector)
if (!radeon_connector->ddc_bus)
return -1;
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (edid) {
kfree(edid);
}
@@ -414,13 +446,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
return n;
}
-void radeon_compute_pll(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p)
+static void radeon_compute_pll_legacy(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p)
{
uint32_t min_ref_div = pll->min_ref_div;
uint32_t max_ref_div = pll->max_ref_div;
@@ -580,95 +612,194 @@ void radeon_compute_pll(struct radeon_pll *pll,
*post_div_p = best_post_div;
}
-void radeon_compute_pll_avivo(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p)
+static bool
+calc_fb_div(struct radeon_pll *pll,
+ uint32_t freq,
+ uint32_t post_div,
+ uint32_t ref_div,
+ uint32_t *fb_div,
+ uint32_t *fb_div_frac)
{
- fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
- fixed20_12 pll_out_max, pll_out_min;
- fixed20_12 pll_in_max, pll_in_min;
- fixed20_12 reference_freq;
- fixed20_12 error, ffreq, a, b;
-
- pll_out_max.full = rfixed_const(pll->pll_out_max);
- pll_out_min.full = rfixed_const(pll->pll_out_min);
- pll_in_max.full = rfixed_const(pll->pll_in_max);
- pll_in_min.full = rfixed_const(pll->pll_in_min);
- reference_freq.full = rfixed_const(pll->reference_freq);
- do_div(freq, 10);
+ fixed20_12 feedback_divider, a, b;
+ u32 vco_freq;
+
+ vco_freq = freq * post_div;
+ /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
+ a.full = rfixed_const(pll->reference_freq);
+ feedback_divider.full = rfixed_const(vco_freq);
+ feedback_divider.full = rfixed_div(feedback_divider, a);
+ a.full = rfixed_const(ref_div);
+ feedback_divider.full = rfixed_mul(feedback_divider, a);
+
+ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+ /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
+ a.full = rfixed_const(10);
+ feedback_divider.full = rfixed_mul(feedback_divider, a);
+ feedback_divider.full += rfixed_const_half(0);
+ feedback_divider.full = rfixed_floor(feedback_divider);
+ feedback_divider.full = rfixed_div(feedback_divider, a);
+
+ /* *fb_div = floor(feedback_divider); */
+ a.full = rfixed_floor(feedback_divider);
+ *fb_div = rfixed_trunc(a);
+ /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
+ a.full = rfixed_const(10);
+ b.full = rfixed_mul(feedback_divider, a);
+
+ feedback_divider.full = rfixed_floor(feedback_divider);
+ feedback_divider.full = rfixed_mul(feedback_divider, a);
+ feedback_divider.full = b.full - feedback_divider.full;
+ *fb_div_frac = rfixed_trunc(feedback_divider);
+ } else {
+ /* *fb_div = floor(feedback_divider + 0.5); */
+ feedback_divider.full += rfixed_const_half(0);
+ feedback_divider.full = rfixed_floor(feedback_divider);
+
+ *fb_div = rfixed_trunc(feedback_divider);
+ *fb_div_frac = 0;
+ }
+
+ if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div))
+ return false;
+ else
+ return true;
+}
+
+static bool
+calc_fb_ref_div(struct radeon_pll *pll,
+ uint32_t freq,
+ uint32_t post_div,
+ uint32_t *fb_div,
+ uint32_t *fb_div_frac,
+ uint32_t *ref_div)
+{
+ fixed20_12 ffreq, max_error, error, pll_out, a;
+ u32 vco;
+
ffreq.full = rfixed_const(freq);
- error.full = rfixed_const(100 * 100);
+ /* max_error = ffreq * 0.0025; */
+ a.full = rfixed_const(400);
+ max_error.full = rfixed_div(ffreq, a);
- /* max p */
- p.full = rfixed_div(pll_out_max, ffreq);
- p.full = rfixed_floor(p);
+ for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
+ if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
+ vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
+ vco = vco / ((*ref_div) * 10);
- /* min m */
- m.full = rfixed_div(reference_freq, pll_in_max);
- m.full = rfixed_ceil(m);
+ if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max))
+ continue;
- while (1) {
- n.full = rfixed_div(ffreq, reference_freq);
- n.full = rfixed_mul(n, m);
- n.full = rfixed_mul(n, p);
+ /* pll_out = vco / post_div; */
+ a.full = rfixed_const(post_div);
+ pll_out.full = rfixed_const(vco);
+ pll_out.full = rfixed_div(pll_out, a);
- f_vco.full = rfixed_div(n, m);
- f_vco.full = rfixed_mul(f_vco, reference_freq);
+ if (pll_out.full >= ffreq.full) {
+ error.full = pll_out.full - ffreq.full;
+ if (error.full <= max_error.full)
+ return true;
+ }
+ }
+ }
+ return false;
+}
- f_pclk.full = rfixed_div(f_vco, p);
+static void radeon_compute_pll_new(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p)
+{
+ u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
+ u32 best_freq = 0, vco_frequency;
- if (f_pclk.full > ffreq.full)
- error.full = f_pclk.full - ffreq.full;
- else
- error.full = ffreq.full - f_pclk.full;
- error.full = rfixed_div(error, f_pclk);
- a.full = rfixed_const(100 * 100);
- error.full = rfixed_mul(error, a);
-
- a.full = rfixed_mul(m, p);
- a.full = rfixed_div(n, a);
- best_freq.full = rfixed_mul(reference_freq, a);
-
- if (rfixed_trunc(error) < 25)
- break;
-
- a.full = rfixed_const(1);
- m.full = m.full + a.full;
- a.full = rfixed_div(reference_freq, m);
- if (a.full >= pll_in_min.full)
- continue;
+ /* freq = freq / 10; */
+ do_div(freq, 10);
- m.full = rfixed_div(reference_freq, pll_in_max);
- m.full = rfixed_ceil(m);
- a.full= rfixed_const(1);
- p.full = p.full - a.full;
- a.full = rfixed_mul(p, ffreq);
- if (a.full >= pll_out_min.full)
- continue;
- else {
- DRM_ERROR("Unable to find pll dividers\n");
- break;
+ if (pll->flags & RADEON_PLL_USE_POST_DIV) {
+ post_div = pll->post_div;
+ if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div))
+ goto done;
+
+ vco_frequency = freq * post_div;
+ if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
+ goto done;
+
+ if (pll->flags & RADEON_PLL_USE_REF_DIV) {
+ ref_div = pll->reference_div;
+ if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
+ goto done;
+ if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
+ goto done;
+ }
+ } else {
+ for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) {
+ if (pll->flags & RADEON_PLL_LEGACY) {
+ if ((post_div == 5) ||
+ (post_div == 7) ||
+ (post_div == 9) ||
+ (post_div == 10) ||
+ (post_div == 11))
+ continue;
+ }
+
+ if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+ continue;
+
+ vco_frequency = freq * post_div;
+ if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
+ continue;
+ if (pll->flags & RADEON_PLL_USE_REF_DIV) {
+ ref_div = pll->reference_div;
+ if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
+ goto done;
+ if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
+ break;
+ } else {
+ if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div))
+ break;
+ }
}
}
- a.full = rfixed_const(10);
- b.full = rfixed_mul(n, a);
+ best_freq = pll->reference_freq * 10 * fb_div;
+ best_freq += pll->reference_freq * fb_div_frac;
+ best_freq = best_freq / (ref_div * post_div);
- frac_n.full = rfixed_floor(n);
- frac_n.full = rfixed_mul(frac_n, a);
- frac_n.full = b.full - frac_n.full;
+done:
+ if (best_freq == 0)
+ DRM_ERROR("Couldn't find valid PLL dividers\n");
- *dot_clock_p = rfixed_trunc(best_freq);
- *fb_div_p = rfixed_trunc(n);
- *frac_fb_div_p = rfixed_trunc(frac_n);
- *ref_div_p = rfixed_trunc(m);
- *post_div_p = rfixed_trunc(p);
+ *dot_clock_p = best_freq / 10;
+ *fb_div_p = fb_div;
+ *frac_fb_div_p = fb_div_frac;
+ *ref_div_p = ref_div;
+ *post_div_p = post_div;
- DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+ DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+}
+
+void radeon_compute_pll(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p)
+{
+ switch (pll->algo) {
+ case PLL_ALGO_NEW:
+ radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p,
+ frac_fb_div_p, ref_div_p, post_div_p);
+ break;
+ case PLL_ALGO_LEGACY:
+ default:
+ radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p,
+ frac_fb_div_p, ref_div_p, post_div_p);
+ break;
+ }
}
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -679,11 +810,8 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
if (fb->fbdev)
radeonfb_remove(dev, fb);
- if (radeon_fb->obj) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(radeon_fb->obj);
- mutex_unlock(&dev->struct_mutex);
- }
+ if (radeon_fb->obj)
+ drm_gem_object_unreference_unlocked(radeon_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(radeon_fb);
}
@@ -819,7 +947,7 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
int radeon_modeset_init(struct radeon_device *rdev)
{
- int num_crtc = 2, i;
+ int i;
int ret;
drm_mode_config_init(rdev->ddev);
@@ -842,11 +970,23 @@ int radeon_modeset_init(struct radeon_device *rdev)
return ret;
}
+ /* check combios for a valid hardcoded EDID - Sun servers */
+ if (!rdev->is_atom_bios) {
+ /* check for hardcoded EDID in BIOS */
+ radeon_combios_check_hardcoded_edid(rdev);
+ }
+
if (rdev->flags & RADEON_SINGLE_CRTC)
- num_crtc = 1;
+ rdev->num_crtc = 1;
+ else {
+ if (ASIC_IS_DCE4(rdev))
+ rdev->num_crtc = 6;
+ else
+ rdev->num_crtc = 2;
+ }
/* allocate crtcs */
- for (i = 0; i < num_crtc; i++) {
+ for (i = 0; i < rdev->num_crtc; i++) {
radeon_crtc_init(rdev->ddev, i);
}
@@ -863,6 +1003,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
void radeon_modeset_fini(struct radeon_device *rdev)
{
+ kfree(rdev->mode_info.bios_hardcoded_edid);
+
if (rdev->mode_info.mode_config_initialized) {
radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
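
The new PLL path is built around the relation in calc_fb_div(): fb_div = vco * ref_div / reference_freq, with every clock in the driver's 10 kHz units and vco = target * post_div. A worked check with illustrative values (27 MHz reference, 135 MHz pixel clock):

#include <assert.h>

int main(void)
{
	unsigned ref_freq = 2700;	/* 27 MHz reference */
	unsigned target = 13500;	/* 135 MHz pixel clock */
	unsigned post_div = 8, ref_div = 2;
	unsigned vco = target * post_div;		/* 108000 = 1080 MHz */
	unsigned fb_div = vco * ref_div / ref_freq;	/* 80, no remainder */

	/* reconstructing the dot clock from the dividers gets back the target */
	assert(ref_freq * fb_div / (ref_div * post_div) == target);
	return 0;
}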
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8ba3de7..6eec0ec 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -40,9 +40,11 @@
/*
* KMS wrapper.
+ * - 2.0.0 - initial interface
+ * - 2.1.0 - add square tiling interface
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 0
+#define KMS_DRIVER_MINOR 1
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -86,7 +88,8 @@ int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
-int radeon_new_pll = 1;
+int radeon_new_pll = -1;
+int radeon_dynpm = -1;
int radeon_audio = 1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
@@ -122,9 +125,12 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
module_param_named(tv, radeon_tv, int, 0444);
-MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
+MODULE_PARM_DESC(new_pll, "Select new PLL code");
module_param_named(new_pll, radeon_new_pll, int, 0444);
+MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)");
+module_param_named(dynpm, radeon_dynpm, int, 0444);
+
MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
module_param_named(audio, radeon_audio, int, 0444);
@@ -339,6 +345,7 @@ static int __init radeon_init(void)
driver = &kms_driver;
driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = radeon_max_kms_ioctl;
+ radeon_register_atpx_handler();
}
/* if the vga console setting is enabled still
* let modprobe override it */
@@ -348,6 +355,7 @@ static int __init radeon_init(void)
static void __exit radeon_exit(void)
{
drm_exit(driver);
+ radeon_unregister_atpx_handler();
}
module_init(radeon_init);
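
Both new knobs are ordinary module parameters, so they can be set at load time, e.g. modprobe radeon dynpm=1 new_pll=-1; the -1 defaults presumably leave the choice to the per-ASIC logic, since the driver now initializes both variables to -1 rather than to a fixed policy.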
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index c57ad60..ec55f2b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -268,6 +268,8 @@ typedef struct drm_radeon_private {
u32 scratch_ages[5];
+ int have_z_offset;
+
/* starting from here on, data is preserved accross an open */
uint32_t flags; /* see radeon_chip_flags */
resource_size_t fb_aper_offset;
@@ -295,6 +297,9 @@ typedef struct drm_radeon_private {
int r700_sc_prim_fifo_size;
int r700_sc_hiz_tile_fifo_size;
int r700_sc_earlyz_tile_fifo_fize;
+ int r600_group_size;
+ int r600_npipes;
+ int r600_nbanks;
struct mutex cs_mutex;
u32 cs_id_scnt;
@@ -310,9 +315,11 @@ typedef struct drm_radeon_buf_priv {
u32 age;
} drm_radeon_buf_priv_t;
+struct drm_buffer;
+
typedef struct drm_radeon_kcmd_buffer {
int bufsz;
- char *buf;
+ struct drm_buffer *buffer;
int nbox;
struct drm_clip_rect __user *boxes;
} drm_radeon_kcmd_buffer_t;
@@ -455,6 +462,15 @@ extern void r600_blit_swap(struct drm_device *dev,
int sx, int sy, int dx, int dy,
int w, int h, int src_pitch, int dst_pitch, int cpp);
+/* atpx handler */
+#if defined(CONFIG_VGA_SWITCHEROO)
+void radeon_register_atpx_handler(void);
+void radeon_unregister_atpx_handler(void);
+#else
+static inline void radeon_register_atpx_handler(void) {}
+static inline void radeon_unregister_atpx_handler(void) {}
+#endif
+
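The empty inline stubs above are the usual kernel idiom for compiling out optional support: with CONFIG_VGA_SWITCHEROO disabled, call sites still compile and the calls collapse to nothing, so no #ifdef is needed at each caller. A minimal standalone sketch of the idiom, with hypothetical "demo" names:

#include <stdio.h>

#ifdef CONFIG_DEMO_FEATURE
static void demo_register_handler(void) { printf("handler registered\n"); }
#else
static inline void demo_register_handler(void) {} /* compiles away */
#endif

int main(void)
{
	demo_register_handler(); /* valid either way, no #ifdef at the call site */
	return 0;
}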
/* Flags for stats.boxes
*/
#define RADEON_BOX_DMA_IDLE 0x1
@@ -2122,4 +2138,32 @@ extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
write &= mask; \
} while (0)
+/**
+ * Copy the given number of dwords from the drm buffer to the ring buffer.
+ */
+#define OUT_RING_DRM_BUFFER(buf, sz) do { \
+ int _size = (sz) * 4; \
+ struct drm_buffer *_buf = (buf); \
+ int _part_size; \
+ while (_size > 0) { \
+ _part_size = _size; \
+ \
+ if (write + _part_size/4 > mask) \
+ _part_size = ((mask + 1) - write)*4; \
+ \
+ if (drm_buffer_index(_buf) + _part_size > PAGE_SIZE) \
+ _part_size = PAGE_SIZE - drm_buffer_index(_buf);\
+ \
+ memcpy(ring + write, &_buf->data[drm_buffer_page(_buf)] \
+ [drm_buffer_index(_buf)], _part_size); \
+ \
+ _size -= _part_size; \
+ write = (write + _part_size/4) & mask; \
+ drm_buffer_advance(_buf, _part_size); \
+ } \
+} while (0)
+
+
#endif /* __RADEON_DRV_H__ */
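OUT_RING_DRM_BUFFER above splits each copy at two boundaries: the ring wrap (when write would run past mask) and the drm_buffer page boundary. A minimal user-space model of just the ring-wrap clamp (page splitting omitted), as a sketch:

#include <stdio.h>

/* copy `size` bytes into a ring of (mask + 1) dwords starting at
 * dword `write`, splitting at the wrap point like the macro above */
static unsigned ring_copy(unsigned write, unsigned mask, int size)
{
	while (size > 0) {
		int part = size;

		if (write + part / 4 > mask)			/* would run past the end */
			part = ((mask + 1) - write) * 4;	/* clamp to the wrap */
		printf("copy %d bytes at dword %u\n", part, write);
		size -= part;
		write = (write + part / 4) & mask;		/* wrap back to the start */
	}
	return write;
}

int main(void)
{
	ring_copy(14, 15, 12);	/* 8 bytes at dword 14, then 4 bytes at dword 0 */
	return 0;
}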
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 3c91724..bc926ea 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -53,7 +53,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
/* DVO requires 2x ppll clocks depending on tmds chip */
if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
return index_mask;
-
+
count = -1;
list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
@@ -228,6 +228,32 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
+static struct radeon_connector_atom_dig *
+radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ if (!rdev->is_atom_bios)
+ return NULL;
+
+ connector = radeon_get_connector_for_encoder(encoder);
+ if (!connector)
+ return NULL;
+
+ radeon_connector = to_radeon_connector(connector);
+
+ if (!radeon_connector->con_priv)
+ return NULL;
+
+ dig_connector = radeon_connector->con_priv;
+
+ return dig_connector;
+}
+
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -236,6 +262,9 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
+ /* adjust pm to upcoming mode change */
+ radeon_pm_compute_clocks(rdev);
+
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
@@ -458,34 +487,20 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
union lvds_encoder_control args;
int index = 0;
int hdmi_detected = 0;
uint8_t frev, crev;
- struct radeon_encoder_atom_dig *dig;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
+ if (!dig || !dig_connector)
return;
- radeon_connector = to_radeon_connector(connector);
-
- if (!radeon_encoder->enc_priv)
- return;
-
- dig = radeon_encoder->enc_priv;
-
- if (!radeon_connector->con_priv)
- return;
-
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
hdmi_detected = 1;
- dig_connector = radeon_connector->con_priv;
-
memset(&args, 0, sizeof(args));
switch (radeon_encoder->encoder_id) {
@@ -586,7 +601,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *radeon_dig_connector;
+ struct radeon_connector_atom_dig *dig_connector;
connector = radeon_get_connector_for_encoder(encoder);
if (!connector)
@@ -617,9 +632,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
break;
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- radeon_dig_connector = radeon_connector->con_priv;
- if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
- (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+ dig_connector = radeon_connector->con_priv;
+ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
@@ -656,6 +671,18 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
* - 2 DIG encoder blocks.
* DIG1/2 can drive UNIPHY0/1/2 link A or link B
*
+ * DCE 4.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 6 DIG encoder blocks.
+ * - DIG to PHY mapping is hardcoded
+ * DIG1 drives UNIPHY0 link A, A+B
+ * DIG2 drives UNIPHY0 link B
+ * DIG3 drives UNIPHY1 link A, A+B
+ * DIG4 drives UNIPHY1 link B
+ * DIG5 drives UNIPHY2 link A, A+B
+ * DIG6 drives UNIPHY2 link B
+ *
* Routing
* crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
* Examples:
@@ -664,88 +691,78 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
* crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
* crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
*/
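Because the DCE 4.0 DIG-to-PHY mapping in the comment above is hardcoded, the 0-based DIG index can also be derived arithmetically; a hypothetical condensation of the table (the driver itself uses an explicit switch in radeon_atom_pick_dig_encoder() further below):

#include <stdio.h>

/* DIG index for a DCE4 UNIPHY transmitter (0..2) and link (A = 0, B = 1) */
static int dce4_dig_for_uniphy(int uniphy, int link_b)
{
	return uniphy * 2 + link_b;
}

int main(void)
{
	/* UNIPHY0 link A -> DIG1 (index 0), UNIPHY2 link B -> DIG6 (index 5) */
	printf("%d %d\n", dce4_dig_for_uniphy(0, 0), dce4_dig_for_uniphy(2, 1));
	return 0;
}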
-static void
+
+union dig_encoder_control {
+ DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
+ DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+};
+
+void
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- DIG_ENCODER_CONTROL_PS_ALLOCATION args;
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
+ union dig_encoder_control args;
int index = 0, num = 0;
uint8_t frev, crev;
- struct radeon_encoder_atom_dig *dig;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
+ if (!dig || !dig_connector)
return;
- radeon_connector = to_radeon_connector(connector);
-
- if (!radeon_connector->con_priv)
- return;
-
- dig_connector = radeon_connector->con_priv;
-
- if (!radeon_encoder->enc_priv)
- return;
-
- dig = radeon_encoder->enc_priv;
-
memset(&args, 0, sizeof(args));
- if (dig->dig_encoder)
- index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ if (ASIC_IS_DCE4(rdev))
+ index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
+ else {
+ if (dig->dig_encoder)
+ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ }
num = dig->dig_encoder + 1;
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
- args.ucAction = action;
- args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v1.ucAction = action;
+ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
- if (ASIC_IS_DCE32(rdev)) {
+ if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+ if (dig_connector->dp_clock == 270000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ args.v1.ucLaneNum = dig_connector->dp_lane_count;
+ } else if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucLaneNum = 8;
+ else
+ args.v1.ucLaneNum = 4;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ args.v3.acConfig.ucDigSel = dig->dig_encoder;
+ args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ } else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
- break;
- }
- } else {
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER2;
+ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
break;
}
+ if (dig_connector->linkb)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+ else
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
}
- args.ucEncoderMode = atombios_get_encoder_mode(encoder);
-
- if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
- if (dig_connector->dp_clock == 270000)
- args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
- args.ucLaneNum = dig_connector->dp_lane_count;
- } else if (radeon_encoder->pixel_clock > 165000)
- args.ucLaneNum = 8;
- else
- args.ucLaneNum = 4;
-
- if (dig_connector->linkb)
- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
- else
- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
-
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -753,6 +770,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
};
void
@@ -761,37 +779,29 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
union dig_transmitter_control args;
int index = 0, num = 0;
uint8_t frev, crev;
- struct radeon_encoder_atom_dig *dig;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
bool is_dp = false;
+ int pll_id = 0;
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
+ if (!dig || !dig_connector)
return;
+ connector = radeon_get_connector_for_encoder(encoder);
radeon_connector = to_radeon_connector(connector);
- if (!radeon_encoder->enc_priv)
- return;
-
- dig = radeon_encoder->enc_priv;
-
- if (!radeon_connector->con_priv)
- return;
-
- dig_connector = radeon_connector->con_priv;
-
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
is_dp = true;
memset(&args, 0, sizeof(args));
- if (ASIC_IS_DCE32(rdev))
+ if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev))
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
else {
switch (radeon_encoder->encoder_id) {
@@ -821,7 +831,54 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
else
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
- if (ASIC_IS_DCE32(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ if (is_dp)
+ args.v3.ucLaneNum = dig_connector->dp_lane_count;
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v3.ucLaneNum = 8;
+ else
+ args.v3.ucLaneNum = 4;
+
+ if (dig_connector->linkb) {
+ args.v3.acConfig.ucLinkSel = 1;
+ args.v3.acConfig.ucEncoderSel = 1;
+ }
+
+ /* Select the PLL for the PHY
+ * DP PHY should be clocked from external src if there is
+ * one.
+ */
+ if (encoder->crtc) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ pll_id = radeon_crtc->pll_id;
+ }
+ if (is_dp && rdev->clock.dp_extclk)
+ args.v3.acConfig.ucRefClkSource = 2; /* external src */
+ else
+ args.v3.acConfig.ucRefClkSource = pll_id;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v3.acConfig.ucTransmitterSel = 0;
+ num = 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ args.v3.acConfig.ucTransmitterSel = 1;
+ num = 1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.v3.acConfig.ucTransmitterSel = 2;
+ num = 2;
+ break;
+ }
+
+ if (is_dp)
+ args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v3.acConfig.fCoherentMode = 1;
+ }
+ } else if (ASIC_IS_DCE32(rdev)) {
if (dig->dig_encoder == 1)
args.v2.acConfig.ucEncoderSel = 1;
if (dig_connector->linkb)
@@ -849,7 +906,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v2.acConfig.fCoherentMode = 1;
}
} else {
-
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
if (dig->dig_encoder)
@@ -1024,9 +1080,12 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+ /* adjust pm to dpms change */
+ radeon_pm_compute_clocks(rdev);
}
-union crtc_sourc_param {
+union crtc_source_param {
SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
};
@@ -1038,7 +1097,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- union crtc_sourc_param args;
+ union crtc_source_param args;
int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
uint8_t frev, crev;
struct radeon_encoder_atom_dig *dig;
@@ -1107,10 +1166,26 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
dig = radeon_encoder->enc_priv;
- if (dig->dig_encoder)
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- else
+ switch (dig->dig_encoder) {
+ case 0:
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ break;
+ case 1:
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ break;
+ case 2:
+ args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
+ break;
+ case 3:
+ args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
+ break;
+ case 4:
+ args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
+ break;
+ case 5:
+ args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
+ break;
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
@@ -1167,6 +1242,7 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
}
/* set scaler clears this on some chips */
+ /* XXX check DCE4 */
if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
@@ -1183,6 +1259,33 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
struct drm_encoder *test_encoder;
struct radeon_encoder_atom_dig *dig;
uint32_t dig_enc_in_use = 0;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig_connector->linkb)
+ return 1;
+ else
+ return 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig_connector->linkb)
+ return 3;
+ else
+ return 2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig_connector->linkb)
+ return 5;
+ else
+ return 4;
+ break;
+ }
+ }
+
/* on DCE32 an encoder can drive any block, so just use the crtc id */
if (ASIC_IS_DCE32(rdev)) {
return radeon_crtc->crtc_id;
@@ -1254,15 +1357,26 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- /* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
-
- /* setup and enable the encoder and transmitter */
- atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ if (ASIC_IS_DCE4(rdev)) {
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ /* setup and enable the encoder */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP);
+
+ /* init and enable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ } else {
+ /* disable the encoder and transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
+
+ /* setup and enable the encoder and transmitter */
+ atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
atombios_ddia_setup(encoder, ATOM_ENABLE);
@@ -1282,7 +1396,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
- r600_hdmi_setmode(encoder, adjusted_mode);
+ /* XXX */
+ if (!ASIC_IS_DCE4(rdev))
+ r600_hdmi_setmode(encoder, adjusted_mode);
}
static bool
@@ -1480,10 +1596,18 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
return;
encoder = &radeon_encoder->base;
- if (rdev->flags & RADEON_SINGLE_CRTC)
+ switch (rdev->num_crtc) {
+ case 1:
encoder->possible_crtcs = 0x1;
- else
+ break;
+ case 2:
+ default:
encoder->possible_crtcs = 0x3;
+ break;
+ case 6:
+ encoder->possible_crtcs = 0x3f;
+ break;
+ }
radeon_encoder->enc_priv = NULL;
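possible_crtcs is a bitmask with one bit per CRTC the encoder may drive; since these encoders can reach any CRTC, the switch above simply fills the low num_crtc bits. A standalone sketch of the mask values:

#include <stdio.h>

int main(void)
{
	/* one bit per reachable CRTC: 1 -> 0x1, 2 -> 0x3, 6 -> 0x3f */
	for (int num_crtc = 1; num_crtc <= 6; num_crtc++)
		printf("%d crtcs -> possible_crtcs 0x%x\n",
		       num_crtc, (1u << num_crtc) - 1);
	return 0;
}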
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 797972e..93c7d5d 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -75,6 +75,11 @@ enum radeon_family {
CHIP_RV730,
CHIP_RV710,
CHIP_RV740,
+ CHIP_CEDAR,
+ CHIP_REDWOOD,
+ CHIP_JUNIPER,
+ CHIP_CYPRESS,
+ CHIP_HEMLOCK,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index d71e346..8fccbf2 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -39,6 +39,8 @@
#include "drm_fb_helper.h"
+#include <linux/vga_switcheroo.h>
+
struct radeon_fb_device {
struct drm_fb_helper helper;
struct radeon_framebuffer *rfb;
@@ -148,7 +150,6 @@ int radeonfb_create(struct drm_device *dev,
unsigned long tmp;
bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0;
- int crtc_count;
mode_cmd.width = surface_width;
mode_cmd.height = surface_height;
@@ -239,11 +240,7 @@ int radeonfb_create(struct drm_device *dev,
rfbdev = info->par;
rfbdev->helper.funcs = &radeon_fb_helper_funcs;
rfbdev->helper.dev = dev;
- if (rdev->flags & RADEON_SINGLE_CRTC)
- crtc_count = 1;
- else
- crtc_count = 2;
- ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count,
+ ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
RADEONFB_CONN_LIMIT);
if (ret)
goto out_unref;
@@ -257,7 +254,7 @@ int radeonfb_create(struct drm_device *dev,
info->flags = FBINFO_DEFAULT;
info->fbops = &radeonfb_ops;
- tmp = fb_gpuaddr - rdev->mc.vram_location;
+ tmp = fb_gpuaddr - rdev->mc.vram_start;
info->fix.smem_start = rdev->mc.aper_base + tmp;
info->fix.smem_len = size;
info->screen_base = fbptr;
@@ -291,6 +288,7 @@ int radeonfb_create(struct drm_device *dev,
rfbdev->rdev = rdev;
mutex_unlock(&rdev->ddev->struct_mutex);
+ vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
return 0;
out_unref:
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index e73d56e..1770d3c 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -139,6 +139,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
unsigned t;
unsigned p;
int i, j;
+ u64 page_base;
if (!rdev->gart.ready) {
WARN(1, "trying to unbind memory from uninitialized GART!\n");
@@ -151,9 +152,11 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL;
- rdev->gart.pages_addr[p] = 0;
+ rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
+ page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, 0);
+ radeon_gart_set_page(rdev, t, page_base);
+ page_base += RADEON_GPU_PAGE_SIZE;
}
}
}
@@ -199,8 +202,26 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
return 0;
}
+void radeon_gart_restore(struct radeon_device *rdev)
+{
+ int i, j, t;
+ u64 page_base;
+
+ for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
+ page_base = rdev->gart.pages_addr[i];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ radeon_gart_set_page(rdev, t, page_base);
+ page_base += RADEON_GPU_PAGE_SIZE;
+ }
+ }
+ mb();
+ radeon_gart_tlb_flush(rdev);
+}
+
int radeon_gart_init(struct radeon_device *rdev)
{
+ int r, i;
+
if (rdev->gart.pages) {
return 0;
}
@@ -209,6 +230,9 @@ int radeon_gart_init(struct radeon_device *rdev)
DRM_ERROR("Page size is smaller than GPU page size!\n");
return -EINVAL;
}
+ r = radeon_dummy_page_init(rdev);
+ if (r)
+ return r;
/* Compute table size */
rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
@@ -227,6 +251,10 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
+ /* set GART entry to point to the dummy page by default */
+ for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
+ rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
+ }
return 0;
}
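Two related changes here: unbound and freshly initialized GART entries now point at a driver-owned dummy page instead of bus address 0, so stray GPU accesses land in harmless memory, and the new radeon_gart_restore() can rebuild the whole table from pages_addr[] (e.g. after resume). Each CPU page fans out to PAGE_SIZE / RADEON_GPU_PAGE_SIZE consecutive GART entries; a minimal model of that fan-out, assuming a 64 KiB CPU page and a hypothetical dummy-page address:

#include <stdio.h>

#define GPU_PAGE_SIZE 4096UL	/* RADEON_GPU_PAGE_SIZE */

int main(void)
{
	unsigned long page_size = 65536;	/* 64 KiB kernel page */
	unsigned long page_base = 0x1000;	/* hypothetical dummy page bus addr */

	/* one CPU page covers 16 GART entries here */
	for (unsigned long j = 0; j < page_size / GPU_PAGE_SIZE; j++) {
		printf("gart entry %lu -> 0x%lx\n", j, page_base);
		page_base += GPU_PAGE_SIZE;
	}
	return 0;
}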
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index db8e9a3..ef92d14 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -69,9 +69,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
- mutex_lock(&rdev->ddev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&rdev->ddev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
gobj->driver_private = robj;
@@ -202,14 +200,10 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
}
r = drm_gem_handle_create(filp, gobj, &handle);
if (r) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference_unlocked(gobj);
args->handle = handle;
return 0;
}
@@ -236,9 +230,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
@@ -255,9 +247,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
}
robj = gobj->driver_private;
args->addr_ptr = radeon_bo_mmap_offset(robj);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return 0;
}
@@ -288,9 +278,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
default:
break;
}
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
@@ -311,9 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
/* callback hw specific functions if any */
if (robj->rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
@@ -331,9 +317,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
robj = gobj->driver_private;
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
@@ -356,8 +340,6 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
radeon_bo_unreserve(rbo);
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index da3da1e..4ae50c1 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -26,6 +26,7 @@
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
+#include "atom.h"
/**
* radeon_ddc_probe
@@ -59,7 +60,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
}
-void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
+static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
{
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
@@ -71,13 +72,25 @@ void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
*/
if (rec->hw_capable) {
if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
- if (rec->a_clk_reg == RADEON_GPIO_MONID) {
+ u32 reg;
+
+ if (rdev->family >= CHIP_RV350)
+ reg = RADEON_GPIO_MONID;
+ else if ((rdev->family == CHIP_R300) ||
+ (rdev->family == CHIP_R350))
+ reg = RADEON_GPIO_DVI_DDC;
+ else
+ reg = RADEON_GPIO_CRT2_DDC;
+
+ mutex_lock(&rdev->dc_hw_i2c_mutex);
+ if (rec->a_clk_reg == reg) {
WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
} else {
WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
}
+ mutex_unlock(&rdev->dc_hw_i2c_mutex);
}
}
@@ -168,6 +181,692 @@ static void set_data(void *i2c_priv, int data)
WREG32(rec->en_data_reg, val);
}
+static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
+{
+ struct radeon_pll *spll = &rdev->clock.spll;
+ u32 sclk = radeon_get_engine_clock(rdev);
+ u32 prescale = 0;
+ u32 n, m;
+ u8 loop;
+ int i2c_clock;
+
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_R200:
+ case CHIP_RV250:
+ case CHIP_RS300:
+ case CHIP_RV280:
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_RV350:
+ n = (spll->reference_freq) / (4 * 6);
+ for (loop = 1; loop < 255; loop++) {
+ if ((loop * (loop - 1)) > n)
+ break;
+ }
+ m = loop - 1;
+ prescale = m | (loop << 8);
+ break;
+ case CHIP_RV380:
+ case CHIP_RS400:
+ case CHIP_RS480:
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ sclk = radeon_get_engine_clock(rdev);
+ prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
+ break;
+ case CHIP_RS600:
+ case CHIP_RS690:
+ case CHIP_RS740:
+ /* todo */
+ break;
+ case CHIP_RV515:
+ case CHIP_R520:
+ case CHIP_RV530:
+ case CHIP_RV560:
+ case CHIP_RV570:
+ case CHIP_R580:
+ i2c_clock = 50;
+ sclk = radeon_get_engine_clock(rdev);
+ if (rdev->family == CHIP_R520)
+ prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
+ else
+ prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
+ break;
+ case CHIP_R600:
+ case CHIP_RV610:
+ case CHIP_RV630:
+ case CHIP_RV670:
+ /* todo */
+ break;
+ case CHIP_RV620:
+ case CHIP_RV635:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV710:
+ case CHIP_RV740:
+ /* todo */
+ break;
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ case CHIP_JUNIPER:
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ /* todo */
+ break;
+ default:
+ DRM_ERROR("i2c: unhandled radeon chip\n");
+ break;
+ }
+ return prescale;
+}
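For the r1xx-r3xx branch above, the loop finds the smallest pair with loop * (loop - 1) > n. As a worked example, assuming reference_freq holds 2700 (a 27 MHz reference in 10 kHz units, an assumption here): n = 2700 / 24 = 112, the loop stops at 12 (12 * 11 = 132 > 112), so m = 11 and prescale = 11 | (12 << 8) = 0x0c0b. A standalone check:

#include <stdio.h>

int main(void)
{
	unsigned n = 2700 / (4 * 6);	/* assumed 27 MHz ref in 10 kHz units */
	unsigned loop;

	for (loop = 1; loop < 255; loop++)
		if (loop * (loop - 1) > n)
			break;
	printf("prescale = 0x%04x\n", (loop - 1) | (loop << 8));	/* 0x0c0b */
	return 0;
}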
+
+
+/* hw i2c engine for r1xx-4xx hardware
+ * hw can buffer up to 15 bytes
+ */
+static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ struct i2c_msg *p;
+ int i, j, k, ret = num;
+ u32 prescale;
+ u32 i2c_cntl_0, i2c_cntl_1, i2c_data;
+ u32 tmp, reg;
+
+ mutex_lock(&rdev->dc_hw_i2c_mutex);
+ /* take the pm lock since we need a constant sclk */
+ mutex_lock(&rdev->pm.mutex);
+
+ prescale = radeon_get_i2c_prescale(rdev);
+
+ reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
+ RADEON_I2C_START |
+ RADEON_I2C_STOP |
+ RADEON_I2C_GO);
+
+ if (rdev->is_atom_bios) {
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
+ }
+
+ if (rec->mm_i2c) {
+ i2c_cntl_0 = RADEON_I2C_CNTL_0;
+ i2c_cntl_1 = RADEON_I2C_CNTL_1;
+ i2c_data = RADEON_I2C_DATA;
+ } else {
+ i2c_cntl_0 = RADEON_DVI_I2C_CNTL_0;
+ i2c_cntl_1 = RADEON_DVI_I2C_CNTL_1;
+ i2c_data = RADEON_DVI_I2C_DATA;
+
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_RS300:
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_DVI_DDC:
+ /* no gpio select bit */
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_R200:
+ /* only bit 4 on r200 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_MONID:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_RV250:
+ case CHIP_RV280:
+ /* bits 3 and 4 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_VGA_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
+ break;
+ case RADEON_GPIO_CRT2_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_R300:
+ case CHIP_R350:
+ /* only bit 4 on r300/r350 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_VGA_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_RV350:
+ case CHIP_RV380:
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ case CHIP_RS400:
+ case CHIP_RS480:
+ /* bits 3 and 4 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_VGA_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
+ break;
+ case RADEON_GPIO_MONID:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ default:
+ DRM_ERROR("unsupported asic\n");
+ ret = -EINVAL;
+ goto done;
+ break;
+ }
+ }
+
+ /* check for bus probe */
+ p = &msgs[0];
+ if ((num == 1) && (p->len == 0)) {
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+ WREG32(i2c_data, (p->addr << 1) & 0xff);
+ WREG32(i2c_data, 0);
+ WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+ (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+ RADEON_I2C_EN |
+ (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+ WREG32(i2c_cntl_0, reg);
+ for (k = 0; k < 32; k++) {
+ udelay(10);
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_GO)
+ continue;
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ goto done;
+ }
+
+ for (i = 0; i < num; i++) {
+ p = &msgs[i];
+ for (j = 0; j < p->len; j++) {
+ if (p->flags & I2C_M_RD) {
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+ WREG32(i2c_data, ((p->addr << 1) & 0xff) | 0x1);
+ WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+ (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+ RADEON_I2C_EN |
+ (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+ WREG32(i2c_cntl_0, reg | RADEON_I2C_RECEIVE);
+ for (k = 0; k < 32; k++) {
+ udelay(10);
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_GO)
+ continue;
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c read error 0x%08x\n", tmp);
+ WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ p->buf[j] = RREG32(i2c_data) & 0xff;
+ } else {
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+ WREG32(i2c_data, (p->addr << 1) & 0xff);
+ WREG32(i2c_data, p->buf[j]);
+ WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+ (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+ RADEON_I2C_EN |
+ (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+ WREG32(i2c_cntl_0, reg);
+ for (k = 0; k < 32; k++) {
+ udelay(10);
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_GO)
+ continue;
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ }
+ }
+ }
+
+done:
+ WREG32(i2c_cntl_0, 0);
+ WREG32(i2c_cntl_1, 0);
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+
+ if (rdev->is_atom_bios) {
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp);
+ }
+
+ mutex_unlock(&rdev->pm.mutex);
+ mutex_unlock(&rdev->dc_hw_i2c_mutex);
+
+ return ret;
+}
+
+/* hw i2c engine for r5xx hardware
+ * hw can buffer up to 15 bytes
+ */
+static int r500_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ struct i2c_msg *p;
+ int i, j, remaining, current_count, buffer_offset, ret = num;
+ u32 prescale;
+ u32 tmp, reg;
+ u32 saved1, saved2;
+
+ mutex_lock(&rdev->dc_hw_i2c_mutex);
+ /* take the pm lock since we need a constant sclk */
+ mutex_lock(&rdev->pm.mutex);
+
+ prescale = radeon_get_i2c_prescale(rdev);
+
+ /* clear gpio mask bits */
+ tmp = RREG32(rec->mask_clk_reg);
+ tmp &= ~rec->mask_clk_mask;
+ WREG32(rec->mask_clk_reg, tmp);
+ tmp = RREG32(rec->mask_clk_reg);
+
+ tmp = RREG32(rec->mask_data_reg);
+ tmp &= ~rec->mask_data_mask;
+ WREG32(rec->mask_data_reg, tmp);
+ tmp = RREG32(rec->mask_data_reg);
+
+ /* clear pin values */
+ tmp = RREG32(rec->a_clk_reg);
+ tmp &= ~rec->a_clk_mask;
+ WREG32(rec->a_clk_reg, tmp);
+ tmp = RREG32(rec->a_clk_reg);
+
+ tmp = RREG32(rec->a_data_reg);
+ tmp &= ~rec->a_data_mask;
+ WREG32(rec->a_data_reg, tmp);
+ tmp = RREG32(rec->a_data_reg);
+
+ /* set the pins to input */
+ tmp = RREG32(rec->en_clk_reg);
+ tmp &= ~rec->en_clk_mask;
+ WREG32(rec->en_clk_reg, tmp);
+ tmp = RREG32(rec->en_clk_reg);
+
+ tmp = RREG32(rec->en_data_reg);
+ tmp &= ~rec->en_data_mask;
+ WREG32(rec->en_data_reg, tmp);
+ tmp = RREG32(rec->en_data_reg);
+
+ /* flag the hw i2c engine busy and save the controller state */
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
+ saved1 = RREG32(AVIVO_DC_I2C_CONTROL1);
+ saved2 = RREG32(0x494);
+ WREG32(0x494, saved2 | 0x1);
+
+ WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C);
+ for (i = 0; i < 50; i++) {
+ udelay(1);
+ if (RREG32(AVIVO_DC_I2C_ARBITRATION) & AVIVO_DC_I2C_SW_CAN_USE_I2C)
+ break;
+ }
+ if (i == 50) {
+ DRM_ERROR("failed to get i2c bus\n");
+ ret = -EBUSY;
+ goto done;
+ }
+
+ reg = AVIVO_DC_I2C_START | AVIVO_DC_I2C_STOP | AVIVO_DC_I2C_EN;
+ switch (rec->mask_clk_reg) {
+ case AVIVO_DC_GPIO_DDC1_MASK:
+ reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC1);
+ break;
+ case AVIVO_DC_GPIO_DDC2_MASK:
+ reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC2);
+ break;
+ case AVIVO_DC_GPIO_DDC3_MASK:
+ reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* check for bus probe */
+ p = &msgs[0];
+ if ((num == 1) && (p->len == 0)) {
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
+ WREG32(AVIVO_DC_I2C_DATA, 0);
+
+ WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+ WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+ AVIVO_DC_I2C_DATA_COUNT(1) |
+ (prescale << 16)));
+ WREG32(AVIVO_DC_I2C_CONTROL1, reg);
+ WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+ for (j = 0; j < 200; j++) {
+ udelay(50);
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_GO)
+ continue;
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ goto done;
+ }
+
+ for (i = 0; i < num; i++) {
+ p = &msgs[i];
+ remaining = p->len;
+ buffer_offset = 0;
+ if (p->flags & I2C_M_RD) {
+ while (remaining) {
+ if (remaining > 15)
+ current_count = 15;
+ else
+ current_count = remaining;
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_DATA, ((p->addr << 1) & 0xff) | 0x1);
+ WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+ WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+ AVIVO_DC_I2C_DATA_COUNT(current_count) |
+ (prescale << 16)));
+ WREG32(AVIVO_DC_I2C_CONTROL1, reg | AVIVO_DC_I2C_RECEIVE);
+ WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+ for (j = 0; j < 200; j++) {
+ udelay(50);
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_GO)
+ continue;
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c read error 0x%08x\n", tmp);
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ for (j = 0; j < current_count; j++)
+ p->buf[buffer_offset + j] = RREG32(AVIVO_DC_I2C_DATA) & 0xff;
+ remaining -= current_count;
+ buffer_offset += current_count;
+ }
+ } else {
+ while (remaining) {
+ if (remaining > 15)
+ current_count = 15;
+ else
+ current_count = remaining;
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
+ for (j = 0; j < current_count; j++)
+ WREG32(AVIVO_DC_I2C_DATA, p->buf[buffer_offset + j]);
+
+ WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+ WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+ AVIVO_DC_I2C_DATA_COUNT(current_count) |
+ (prescale << 16)));
+ WREG32(AVIVO_DC_I2C_CONTROL1, reg);
+ WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+ for (j = 0; j < 200; j++) {
+ udelay(50);
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_GO)
+ continue;
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ remaining -= current_count;
+ buffer_offset += current_count;
+ }
+ }
+ }
+
+done:
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_DONE_USING_I2C);
+ WREG32(AVIVO_DC_I2C_CONTROL1, saved1);
+ WREG32(0x494, saved2);
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp);
+
+ mutex_unlock(&rdev->pm.mutex);
+ mutex_unlock(&rdev->dc_hw_i2c_mutex);
+
+ return ret;
+}
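Both hardware engines buffer at most 15 data bytes per transaction, so the loops above walk each i2c_msg in 15-byte chunks: a 128-byte EDID block, for instance, goes out as eight 15-byte transfers plus one of 8. A minimal model of the chunking:

#include <stdio.h>

int main(void)
{
	int remaining = 128;	/* e.g. one EDID block */

	while (remaining) {
		int current_count = remaining > 15 ? 15 : remaining; /* fifo limit */
		printf("transfer %d bytes\n", current_count);
		remaining -= current_count;
	}
	return 0;
}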
+
+static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ int ret;
+
+ radeon_i2c_do_lock(i2c, 1);
+ ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num);
+ radeon_i2c_do_lock(i2c, 0);
+
+ return ret;
+}
+
+static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ int ret;
+
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_R200:
+ case CHIP_RV250:
+ case CHIP_RS300:
+ case CHIP_RV280:
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_RV350:
+ case CHIP_RV380:
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ case CHIP_RS400:
+ case CHIP_RS480:
+ if (rec->hw_capable)
+ ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
+ else
+ ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ case CHIP_RS600:
+ case CHIP_RS690:
+ case CHIP_RS740:
+ /* XXX fill in hw i2c implementation */
+ ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ case CHIP_RV515:
+ case CHIP_R520:
+ case CHIP_RV530:
+ case CHIP_RV560:
+ case CHIP_RV570:
+ case CHIP_R580:
+ if (rec->hw_capable) {
+ if (rec->mm_i2c)
+ ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
+ else
+ ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
+ } else
+ ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ case CHIP_R600:
+ case CHIP_RV610:
+ case CHIP_RV630:
+ case CHIP_RV670:
+ /* XXX fill in hw i2c implementation */
+ ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ case CHIP_RV620:
+ case CHIP_RV635:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV710:
+ case CHIP_RV740:
+ /* XXX fill in hw i2c implementation */
+ ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ case CHIP_JUNIPER:
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ /* XXX fill in hw i2c implementation */
+ ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ default:
+ DRM_ERROR("i2c: unhandled radeon chip\n");
+ ret = -EIO;
+ break;
+ }
+
+ return ret;
+}
+
+static u32 radeon_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm radeon_i2c_algo = {
+ .master_xfer = radeon_i2c_xfer,
+ .functionality = radeon_i2c_func,
+};
+
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name)
@@ -179,23 +878,36 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
if (i2c == NULL)
return NULL;
- i2c->adapter.owner = THIS_MODULE;
- i2c->dev = dev;
- i2c_set_adapdata(&i2c->adapter, i2c);
- i2c->adapter.algo_data = &i2c->algo.bit;
- i2c->algo.bit.setsda = set_data;
- i2c->algo.bit.setscl = set_clock;
- i2c->algo.bit.getsda = get_data;
- i2c->algo.bit.getscl = get_clock;
- i2c->algo.bit.udelay = 20;
+ /* set the internal bit adapter */
+ i2c->algo.radeon.bit_adapter.owner = THIS_MODULE;
+ i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c);
+ sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name);
+ i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data;
+ i2c->algo.radeon.bit_data.setsda = set_data;
+ i2c->algo.radeon.bit_data.setscl = set_clock;
+ i2c->algo.radeon.bit_data.getsda = get_data;
+ i2c->algo.radeon.bit_data.getscl = get_clock;
+ i2c->algo.radeon.bit_data.udelay = 20;
/* vesa says 2.2 ms is enough, but 1 jiffy doesn't always
* cover that, so 2 jiffies is a lot more reliable */
- i2c->algo.bit.timeout = 2;
- i2c->algo.bit.data = i2c;
+ i2c->algo.radeon.bit_data.timeout = 2;
+ i2c->algo.radeon.bit_data.data = i2c;
+ ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter);
+ if (ret) {
+ DRM_ERROR("Failed to register internal bit i2c %s\n", name);
+ goto out_free;
+ }
+ /* set the radeon i2c adapter */
+ i2c->dev = dev;
i2c->rec = *rec;
- ret = i2c_bit_add_bus(&i2c->adapter);
+ i2c->adapter.owner = THIS_MODULE;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ sprintf(i2c->adapter.name, "Radeon i2c %s", name);
+ i2c->adapter.algo_data = &i2c->algo.radeon;
+ i2c->adapter.algo = &radeon_i2c_algo;
+ ret = i2c_add_adapter(&i2c->adapter);
if (ret) {
- DRM_INFO("Failed to register i2c %s\n", name);
+ DRM_ERROR("Failed to register i2c %s\n", name);
goto out_free;
}
@@ -237,11 +949,19 @@ out_free:
}
-
void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{
if (!i2c)
return;
+ i2c_del_adapter(&i2c->algo.radeon.bit_adapter);
+ i2c_del_adapter(&i2c->adapter);
+ kfree(i2c);
+}
+
+void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c)
+{
+ if (!i2c)
+ return;
i2c_del_adapter(&i2c->adapter);
kfree(i2c);
@@ -252,10 +972,10 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
return NULL;
}
-void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
- u8 slave_addr,
- u8 addr,
- u8 *val)
+void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val)
{
u8 out_buf[2];
u8 in_buf[2];
@@ -286,10 +1006,10 @@ void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
}
}
-void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus,
- u8 slave_addr,
- u8 addr,
- u8 val)
+void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 val)
{
uint8_t out_buf[2];
struct i2c_msg msg = {
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f23b056..20ec276 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -30,6 +30,8 @@
#include "radeon.h"
#include "radeon_drm.h"
+#include <linux/vga_switcheroo.h>
+
int radeon_driver_unload_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -136,6 +138,7 @@ int radeon_driver_firstopen_kms(struct drm_device *dev)
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
+ vga_switcheroo_process_delayed_switch();
}
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
@@ -276,17 +279,17 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
/* KMS */
- DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index b6d8081..df23d6a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -403,7 +403,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
/* if scanout was in GTT this really wouldn't work */
/* crtc offset is from display base addr not FB location */
- radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
+ radeon_crtc->legacy_display_base_addr = rdev->mc.vram_start;
base -= radeon_crtc->legacy_display_base_addr;
@@ -582,29 +582,6 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
? RADEON_CRTC_V_SYNC_POL
: 0));
- /* TODO -> Dell Server */
- if (0) {
- uint32_t disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
- uint32_t tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
- uint32_t dac2_cntl = RREG32(RADEON_DAC_CNTL2);
- uint32_t crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
-
- dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
- dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
-
- /* For CRT on DAC2, don't turn it on if BIOS didn't
- enable it, even it's detected.
- */
- disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
- tv_dac_cntl &= ~((1<<2) | (3<<8) | (7<<24) | (0xff<<16));
- tv_dac_cntl |= (0x03 | (2<<8) | (0x58<<16));
-
- WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
- WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
- WREG32(RADEON_DAC_CNTL2, dac2_cntl);
- WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
- }
-
if (radeon_crtc->crtc_id) {
uint32_t crtc2_gen_cntl;
uint32_t disp2_merge_cntl;
@@ -726,6 +703,10 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
pll = &rdev->clock.p1pll;
pll->flags = RADEON_PLL_LEGACY;
+ if (radeon_new_pll == 1)
+ pll->algo = PLL_ALGO_NEW;
+ else
+ pll->algo = PLL_ALGO_LEGACY;
if (mode->clock > 200000) /* range limits??? */
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 38e45e2..cf389ce 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -115,6 +115,9 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+ /* adjust pm to dpms change */
+ radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
@@ -214,6 +217,11 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* adjust pm to upcoming mode change */
+ radeon_pm_compute_clocks(rdev);
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
@@ -285,6 +293,9 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+ /* adjust pm to dpms change */
+ radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
@@ -470,6 +481,9 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+ /* adjust pm to dpms change */
+ radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
@@ -635,6 +649,9 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+ /* adjust pm to dpms change */
+ radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
@@ -842,6 +859,9 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+ /* adjust pm to dpms change */
+ radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e81b2ae..1702b82 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -83,6 +83,8 @@ struct radeon_i2c_bus_rec {
bool valid;
/* id used by atom */
uint8_t i2c_id;
+ /* id used by atom */
+ uint8_t hpd_id;
/* can be used with hw i2c engine */
bool hw_capable;
/* uses multi-media i2c engine */
@@ -113,6 +115,7 @@ struct radeon_tmds_pll {
#define RADEON_MAX_BIOS_CONNECTOR 16
+/* pll flags */
#define RADEON_PLL_USE_BIOS_DIVS (1 << 0)
#define RADEON_PLL_NO_ODD_POST_DIV (1 << 1)
#define RADEON_PLL_USE_REF_DIV (1 << 2)
@@ -127,6 +130,12 @@ struct radeon_tmds_pll {
#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
#define RADEON_PLL_USE_POST_DIV (1 << 12)
+/* pll algo */
+enum radeon_pll_algo {
+ PLL_ALGO_LEGACY,
+ PLL_ALGO_NEW
+};
+
struct radeon_pll {
/* reference frequency */
uint32_t reference_freq;
@@ -157,6 +166,13 @@ struct radeon_pll {
/* pll id */
uint32_t id;
+ /* pll algo */
+ enum radeon_pll_algo algo;
+};
+
+struct i2c_algo_radeon_data {
+ struct i2c_adapter bit_adapter;
+ struct i2c_algo_bit_data bit_data;
};
struct radeon_i2c_chan {
@@ -164,7 +180,7 @@ struct radeon_i2c_chan {
struct drm_device *dev;
union {
struct i2c_algo_dp_aux_data dp;
- struct i2c_algo_bit_data bit;
+ struct i2c_algo_radeon_data radeon;
} algo;
struct radeon_i2c_bus_rec rec;
};
@@ -193,7 +209,7 @@ struct radeon_mode_info {
struct card_info *atom_card_info;
enum radeon_connector_table connector_table;
bool mode_config_initialized;
- struct radeon_crtc *crtcs[2];
+ struct radeon_crtc *crtcs[6];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
@@ -202,7 +218,8 @@ struct radeon_mode_info {
struct drm_property *tv_std_property;
/* legacy TMDS PLL detect */
struct drm_property *tmds_pll_property;
-
+ /* hardcoded DFP edid from BIOS */
+ struct edid *bios_hardcoded_edid;
};
#define MAX_H_CODE_TIMING_LEN 32
@@ -237,6 +254,7 @@ struct radeon_crtc {
fixed20_12 vsc;
fixed20_12 hsc;
struct drm_display_mode native_mode;
+ int pll_id;
};
struct radeon_encoder_primary_dac {
@@ -303,6 +321,7 @@ struct radeon_encoder_atom_dig {
/* atom lvds */
uint32_t lvds_misc;
uint16_t panel_pwr_delay;
+ enum radeon_pll_algo pll_algo;
struct radeon_atom_ss *ss;
/* panel mode */
struct drm_display_mode native_mode;
@@ -398,6 +417,7 @@ extern void dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector);
extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
@@ -411,14 +431,15 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
-extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
- u8 slave_addr,
- u8 addr,
- u8 *val);
-extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c,
- u8 slave_addr,
- u8 addr,
- u8 val);
+extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c);
+extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val);
+extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
+ u8 slave_addr,
+ u8 addr,
+ u8 val);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
@@ -432,14 +453,6 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *ref_div_p,
uint32_t *post_div_p);
-extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p);
-
extern void radeon_setup_encoder_clones(struct drm_device *dev);
struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
@@ -473,6 +486,9 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
+extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
+extern struct edid *
+radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
extern bool radeon_atom_get_clock_info(struct drm_device *dev);
extern bool radeon_combios_get_clock_info(struct drm_device *dev);
extern struct radeon_encoder_atom_dig *
@@ -531,7 +547,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
void radeon_legacy_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
-extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
void radeon_get_clock_info(struct drm_device *dev);
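The new radeon_pll_algo enum lets each PLL (and, via pll_algo in the digital encoder struct, LVDS) choose between the legacy divider search and the newer one, with radeon_compute_pll() branching on pll->algo. A hedged sketch of such a dispatch; the two compute helpers are assumptions standing in for code not shown in this hunk:

/* Sketch: dispatch on the per-PLL algorithm choice. compute_pll_legacy()
 * and compute_pll_new() are illustrative names, not in-tree symbols. */
void example_compute_pll(struct radeon_pll *pll, uint64_t freq,
			 uint32_t *dot_clock_p, uint32_t *fb_div_p,
			 uint32_t *frac_fb_div_p, uint32_t *ref_div_p,
			 uint32_t *post_div_p)
{
	switch (pll->algo) {
	case PLL_ALGO_NEW:
		compute_pll_new(pll, freq, dot_clock_p, fb_div_p,
				frac_fb_div_p, ref_div_p, post_div_p);
		break;
	case PLL_ALGO_LEGACY:
	default:
		compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p,
				   frac_fb_div_p, ref_div_p, post_div_p);
		break;
	}
}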
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index f1da370..fc9d00a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -178,7 +178,6 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
int r, i;
- radeon_ttm_placement_from_domain(bo, domain);
if (bo->pin_count) {
bo->pin_count++;
if (gpu_addr)
@@ -186,6 +185,8 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
return 0;
}
radeon_ttm_placement_from_domain(bo, domain);
+ /* force to pin into visible video ram */
+ bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
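Pinned buffers must stay CPU-reachable, so the pin path now clamps the allowed page range to the visible VRAM aperture before validation. A short sketch of the clamp, under the same assumptions as the hunk above (the wrapper name is illustrative):

/* Sketch: restrict a BO's placement to the CPU-visible part of VRAM.
 * lpfn is the last allowed page frame number within the domain;
 * visible_vram_size is in bytes, hence the shift by PAGE_SHIFT. */
static void clamp_to_visible_vram(struct radeon_bo *bo)
{
	bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
}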
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8bce64c..d4d1c39 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -18,21 +18,413 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Rafał Miłecki <zajec5@gmail.com>
+ * Alex Deucher <alexdeucher@gmail.com>
*/
#include "drmP.h"
#include "radeon.h"
+#include "avivod.h"
-int radeon_debugfs_pm_init(struct radeon_device *rdev);
+#define RADEON_IDLE_LOOP_MS 100
+#define RADEON_RECLOCK_DELAY_MS 200
+#define RADEON_WAIT_VBLANK_TIMEOUT 200
+
+static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
+static void radeon_pm_set_clocks(struct radeon_device *rdev);
+static void radeon_pm_idle_work_handler(struct work_struct *work);
+static int radeon_debugfs_pm_init(struct radeon_device *rdev);
+
+static const char *pm_state_names[4] = {
+ "PM_STATE_DISABLED",
+ "PM_STATE_MINIMUM",
+ "PM_STATE_PAUSED",
+ "PM_STATE_ACTIVE"
+};
+
+static const char *pm_state_types[5] = {
+ "Default",
+ "Powersave",
+ "Battery",
+ "Balanced",
+ "Performance",
+};
+
+static void radeon_print_power_mode_info(struct radeon_device *rdev)
+{
+ int i, j;
+ bool is_default;
+
+ DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
+ is_default = true;
+ else
+ is_default = false;
+ DRM_INFO("State %d %s %s\n", i,
+ pm_state_types[rdev->pm.power_state[i].type],
+ is_default ? "(default)" : "");
+ if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
+ DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
+ DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
+ for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
+ if (rdev->flags & RADEON_IS_IGP)
+ DRM_INFO("\t\t%d engine: %d\n",
+ j,
+ rdev->pm.power_state[i].clock_info[j].sclk * 10);
+ else
+ DRM_INFO("\t\t%d engine/memory: %d/%d\n",
+ j,
+ rdev->pm.power_state[i].clock_info[j].sclk * 10,
+ rdev->pm.power_state[i].clock_info[j].mclk * 10);
+ }
+ }
+}
+
+static struct radeon_power_state *radeon_pick_power_state(struct radeon_device *rdev,
+							   enum radeon_pm_state_type type)
+{
+ int i, j;
+ enum radeon_pm_state_type wanted_types[2];
+ int wanted_count;
+
+ switch (type) {
+ case POWER_STATE_TYPE_DEFAULT:
+ default:
+ return rdev->pm.default_power_state;
+ case POWER_STATE_TYPE_POWERSAVE:
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
+ wanted_types[1] = POWER_STATE_TYPE_BATTERY;
+ wanted_count = 2;
+ } else {
+ wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
+ wanted_count = 1;
+ }
+ break;
+ case POWER_STATE_TYPE_BATTERY:
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ wanted_types[0] = POWER_STATE_TYPE_BATTERY;
+ wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
+ wanted_count = 2;
+ } else {
+ wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
+ wanted_count = 1;
+ }
+ break;
+ case POWER_STATE_TYPE_BALANCED:
+ case POWER_STATE_TYPE_PERFORMANCE:
+ wanted_types[0] = type;
+ wanted_count = 1;
+ break;
+ }
+
+ for (i = 0; i < wanted_count; i++) {
+ for (j = 0; j < rdev->pm.num_power_states; j++) {
+ if (rdev->pm.power_state[j].type == wanted_types[i])
+ return &rdev->pm.power_state[j];
+ }
+ }
+
+ return rdev->pm.default_power_state;
+}
+
+static struct radeon_pm_clock_info *radeon_pick_clock_mode(struct radeon_device *rdev,
+							    struct radeon_power_state *power_state,
+							    enum radeon_pm_clock_mode_type type)
+{
+ switch (type) {
+ case POWER_MODE_TYPE_DEFAULT:
+ default:
+ return power_state->default_clock_mode;
+ case POWER_MODE_TYPE_LOW:
+ return &power_state->clock_info[0];
+ case POWER_MODE_TYPE_MID:
+ if (power_state->num_clock_modes > 2)
+ return &power_state->clock_info[1];
+ else
+ return &power_state->clock_info[0];
+ case POWER_MODE_TYPE_HIGH:
+ return &power_state->clock_info[power_state->num_clock_modes - 1];
+ }
+}
+
+static void radeon_get_power_state(struct radeon_device *rdev,
+ enum radeon_pm_action action)
+{
+ switch (action) {
+ case PM_ACTION_MINIMUM:
+ rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
+ rdev->pm.requested_clock_mode =
+ radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
+ break;
+ case PM_ACTION_DOWNCLOCK:
+ rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
+ rdev->pm.requested_clock_mode =
+ radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
+ break;
+ case PM_ACTION_UPCLOCK:
+ rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
+ rdev->pm.requested_clock_mode =
+ radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
+ break;
+ case PM_ACTION_NONE:
+ default:
+		DRM_ERROR("Requested mode for undefined action\n");
+ return;
+ }
+ DRM_INFO("Requested: e: %d m: %d p: %d\n",
+ rdev->pm.requested_clock_mode->sclk,
+ rdev->pm.requested_clock_mode->mclk,
+ rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
+}
+
+static void radeon_set_power_state(struct radeon_device *rdev)
+{
+ /* if *_clock_mode are the same, *_power_state are as well */
+ if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)
+ return;
+
+ DRM_INFO("Setting: e: %d m: %d p: %d\n",
+ rdev->pm.requested_clock_mode->sclk,
+ rdev->pm.requested_clock_mode->mclk,
+ rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
+ /* set pcie lanes */
+ /* set voltage */
+ /* set engine clock */
+ radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
+ /* set memory clock */
+
+ rdev->pm.current_power_state = rdev->pm.requested_power_state;
+ rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
+}
int radeon_pm_init(struct radeon_device *rdev)
{
+ rdev->pm.state = PM_STATE_DISABLED;
+ rdev->pm.planned_action = PM_ACTION_NONE;
+ rdev->pm.downclocked = false;
+
+ if (rdev->bios) {
+ if (rdev->is_atom_bios)
+ radeon_atombios_get_power_modes(rdev);
+ else
+ radeon_combios_get_power_modes(rdev);
+ radeon_print_power_mode_info(rdev);
+ }
+
if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for PM!\n");
}
+ INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
+
+ if (radeon_dynpm != -1 && radeon_dynpm) {
+ rdev->pm.state = PM_STATE_PAUSED;
+ DRM_INFO("radeon: dynamic power management enabled\n");
+ }
+
+ DRM_INFO("radeon: power management initialized\n");
+
return 0;
}
+void radeon_pm_compute_clocks(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_connector *connector;
+ struct radeon_crtc *radeon_crtc;
+ int count = 0;
+
+ if (rdev->pm.state == PM_STATE_DISABLED)
+ return;
+
+ mutex_lock(&rdev->pm.mutex);
+
+ rdev->pm.active_crtcs = 0;
+ list_for_each_entry(connector,
+ &ddev->mode_config.connector_list, head) {
+ if (connector->encoder &&
+ connector->dpms != DRM_MODE_DPMS_OFF) {
+ radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
+ rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
+ ++count;
+ }
+ }
+
+ if (count > 1) {
+ if (rdev->pm.state == PM_STATE_ACTIVE) {
+ cancel_delayed_work(&rdev->pm.idle_work);
+
+ rdev->pm.state = PM_STATE_PAUSED;
+ rdev->pm.planned_action = PM_ACTION_UPCLOCK;
+ if (rdev->pm.downclocked)
+ radeon_pm_set_clocks(rdev);
+
+ DRM_DEBUG("radeon: dynamic power management deactivated\n");
+ }
+ } else if (count == 1) {
+ /* TODO: Increase clocks if needed for current mode */
+
+ if (rdev->pm.state == PM_STATE_MINIMUM) {
+ rdev->pm.state = PM_STATE_ACTIVE;
+ rdev->pm.planned_action = PM_ACTION_UPCLOCK;
+ radeon_pm_set_clocks(rdev);
+
+ queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+		} else if (rdev->pm.state == PM_STATE_PAUSED) {
+ rdev->pm.state = PM_STATE_ACTIVE;
+ queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ DRM_DEBUG("radeon: dynamic power management activated\n");
+ }
+	} else { /* count == 0 */
+ if (rdev->pm.state != PM_STATE_MINIMUM) {
+ cancel_delayed_work(&rdev->pm.idle_work);
+
+ rdev->pm.state = PM_STATE_MINIMUM;
+ rdev->pm.planned_action = PM_ACTION_MINIMUM;
+ radeon_pm_set_clocks(rdev);
+ }
+ }
+
+ mutex_unlock(&rdev->pm.mutex);
+}
+
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
+{
+ u32 stat_crtc1 = 0, stat_crtc2 = 0;
+ bool in_vbl = true;
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->pm.active_crtcs & (1 << 0)) {
+ stat_crtc1 = RREG32(D1CRTC_STATUS);
+ if (!(stat_crtc1 & 1))
+ in_vbl = false;
+ }
+ if (rdev->pm.active_crtcs & (1 << 1)) {
+ stat_crtc2 = RREG32(D2CRTC_STATUS);
+ if (!(stat_crtc2 & 1))
+ in_vbl = false;
+ }
+ }
+	if (!in_vbl)
+ DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
+ stat_crtc2, finish ? "exit" : "entry");
+ return in_vbl;
+}
+
+static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
+{
+ /*radeon_fence_wait_last(rdev);*/
+ switch (rdev->pm.planned_action) {
+ case PM_ACTION_UPCLOCK:
+ rdev->pm.downclocked = false;
+ break;
+ case PM_ACTION_DOWNCLOCK:
+ rdev->pm.downclocked = true;
+ break;
+ case PM_ACTION_MINIMUM:
+ break;
+ case PM_ACTION_NONE:
+ DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
+ break;
+ }
+
+ /* check if we are in vblank */
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_power_state(rdev);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.planned_action = PM_ACTION_NONE;
+}
+
+static void radeon_pm_set_clocks(struct radeon_device *rdev)
+{
+ radeon_get_power_state(rdev, rdev->pm.planned_action);
+ mutex_lock(&rdev->cp.mutex);
+
+ if (rdev->pm.active_crtcs & (1 << 0)) {
+ rdev->pm.req_vblank |= (1 << 0);
+ drm_vblank_get(rdev->ddev, 0);
+ }
+ if (rdev->pm.active_crtcs & (1 << 1)) {
+ rdev->pm.req_vblank |= (1 << 1);
+ drm_vblank_get(rdev->ddev, 1);
+ }
+ if (rdev->pm.active_crtcs)
+ wait_event_interruptible_timeout(
+ rdev->irq.vblank_queue, 0,
+ msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
+ if (rdev->pm.req_vblank & (1 << 0)) {
+ rdev->pm.req_vblank &= ~(1 << 0);
+ drm_vblank_put(rdev->ddev, 0);
+ }
+ if (rdev->pm.req_vblank & (1 << 1)) {
+ rdev->pm.req_vblank &= ~(1 << 1);
+ drm_vblank_put(rdev->ddev, 1);
+ }
+
+ radeon_pm_set_clocks_locked(rdev);
+ mutex_unlock(&rdev->cp.mutex);
+}
+
+static void radeon_pm_idle_work_handler(struct work_struct *work)
+{
+ struct radeon_device *rdev;
+ rdev = container_of(work, struct radeon_device,
+ pm.idle_work.work);
+
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.state == PM_STATE_ACTIVE) {
+ unsigned long irq_flags;
+ int not_processed = 0;
+
+ read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ if (!list_empty(&rdev->fence_drv.emited)) {
+ struct list_head *ptr;
+ list_for_each(ptr, &rdev->fence_drv.emited) {
+				/* count up to 3, that's enough info */
+ if (++not_processed >= 3)
+ break;
+ }
+ }
+ read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+
+ if (not_processed >= 3) { /* should upclock */
+ if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
+ rdev->pm.planned_action = PM_ACTION_NONE;
+ } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
+ rdev->pm.downclocked) {
+ rdev->pm.planned_action =
+ PM_ACTION_UPCLOCK;
+ rdev->pm.action_timeout = jiffies +
+ msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+ }
+ } else if (not_processed == 0) { /* should downclock */
+ if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
+ rdev->pm.planned_action = PM_ACTION_NONE;
+ } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
+ !rdev->pm.downclocked) {
+ rdev->pm.planned_action =
+ PM_ACTION_DOWNCLOCK;
+ rdev->pm.action_timeout = jiffies +
+ msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+ }
+ }
+
+		if (rdev->pm.planned_action != PM_ACTION_NONE &&
+		    time_after(jiffies, rdev->pm.action_timeout)) {
+ radeon_pm_set_clocks(rdev);
+ }
+ }
+ mutex_unlock(&rdev->pm.mutex);
+
+ queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+}
+
/*
* Debugfs info
*/
@@ -44,11 +436,14 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
+ seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
if (rdev->asic->get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+ if (rdev->asic->get_pcie_lanes)
+ seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
return 0;
}
@@ -58,7 +453,7 @@ static struct drm_info_list radeon_pm_info_list[] = {
};
#endif
-int radeon_debugfs_pm_init(struct radeon_device *rdev)
+static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
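The idle worker's heuristic reduces to: three or more unprocessed fences means the GPU is busy (upclock), zero means it is idle (downclock), and either decision only takes effect after RADEON_RECLOCK_DELAY_MS so the clocks do not thrash. A condensed sketch of that decision, with a hypothetical wrapper name around the planned_action/action_timeout bookkeeping shown in the patch:

/* Sketch: the reclocking decision distilled from the idle work handler.
 * plan_reclock() is illustrative; the real logic lives inline above. */
static void plan_reclock(struct radeon_device *rdev, int not_processed)
{
	enum radeon_pm_action want = PM_ACTION_NONE;

	if (not_processed >= 3 && rdev->pm.downclocked)
		want = PM_ACTION_UPCLOCK;	/* GPU busy while downclocked */
	else if (not_processed == 0 && !rdev->pm.downclocked)
		want = PM_ACTION_DOWNCLOCK;	/* GPU idle at full clocks */

	if (want != PM_ACTION_NONE && rdev->pm.planned_action == PM_ACTION_NONE) {
		rdev->pm.planned_action = want;
		rdev->pm.action_timeout = jiffies +
			msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
	}
}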
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 6d0a009..5c0dc08 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -54,7 +54,7 @@
#include "r300_reg.h"
#include "r500_reg.h"
#include "r600_reg.h"
-
+#include "evergreen_reg.h"
#define RADEON_MC_AGP_LOCATION 0x014c
#define RADEON_MC_AGP_START_MASK 0x0000FFFF
@@ -1060,32 +1060,38 @@
/* Multimedia I2C bus */
#define RADEON_I2C_CNTL_0 0x0090
-#define RADEON_I2C_DONE (1 << 0)
-#define RADEON_I2C_NACK (1 << 1)
-#define RADEON_I2C_HALT (1 << 2)
-#define RADEON_I2C_SOFT_RST (1 << 5)
-#define RADEON_I2C_DRIVE_EN (1 << 6)
-#define RADEON_I2C_DRIVE_SEL (1 << 7)
-#define RADEON_I2C_START (1 << 8)
-#define RADEON_I2C_STOP (1 << 9)
-#define RADEON_I2C_RECEIVE (1 << 10)
-#define RADEON_I2C_ABORT (1 << 11)
-#define RADEON_I2C_GO (1 << 12)
-#define RADEON_I2C_PRESCALE_SHIFT 16
+# define RADEON_I2C_DONE (1 << 0)
+# define RADEON_I2C_NACK (1 << 1)
+# define RADEON_I2C_HALT (1 << 2)
+# define RADEON_I2C_SOFT_RST (1 << 5)
+# define RADEON_I2C_DRIVE_EN (1 << 6)
+# define RADEON_I2C_DRIVE_SEL (1 << 7)
+# define RADEON_I2C_START (1 << 8)
+# define RADEON_I2C_STOP (1 << 9)
+# define RADEON_I2C_RECEIVE (1 << 10)
+# define RADEON_I2C_ABORT (1 << 11)
+# define RADEON_I2C_GO (1 << 12)
+# define RADEON_I2C_PRESCALE_SHIFT 16
#define RADEON_I2C_CNTL_1 0x0094
-#define RADEON_I2C_DATA_COUNT_SHIFT 0
-#define RADEON_I2C_ADDR_COUNT_SHIFT 4
-#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
-#define RADEON_I2C_SEL (1 << 16)
-#define RADEON_I2C_EN (1 << 17)
-#define RADEON_I2C_TIME_LIMIT_SHIFT 24
+# define RADEON_I2C_DATA_COUNT_SHIFT 0
+# define RADEON_I2C_ADDR_COUNT_SHIFT 4
+# define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
+# define RADEON_I2C_SEL (1 << 16)
+# define RADEON_I2C_EN (1 << 17)
+# define RADEON_I2C_TIME_LIMIT_SHIFT 24
#define RADEON_I2C_DATA 0x0098
#define RADEON_DVI_I2C_CNTL_0 0x02e0
# define R200_DVI_I2C_PIN_SEL(x) ((x) << 3)
-# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */
-# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */
-# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */
+# define R200_SEL_DDC1 0 /* depends on asic */
+# define R200_SEL_DDC2 1 /* depends on asic */
+# define R200_SEL_DDC3 2 /* depends on asic */
+# define RADEON_SW_WANTS_TO_USE_DVI_I2C (1 << 13)
+# define RADEON_SW_CAN_USE_DVI_I2C (1 << 13)
+# define RADEON_SW_DONE_USING_DVI_I2C (1 << 14)
+# define RADEON_HW_NEEDS_DVI_I2C (1 << 14)
+# define RADEON_ABORT_HW_DVI_I2C (1 << 15)
+# define RADEON_HW_USING_DVI_I2C (1 << 15)
#define RADEON_DVI_I2C_CNTL_1 0x02e4
#define RADEON_DVI_I2C_DATA 0x02e8
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 6579eb4..e50513a 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -34,6 +34,36 @@
int radeon_debugfs_ib_init(struct radeon_device *rdev);
+void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
+{
+ struct radeon_ib *ib, *n;
+
+ list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
+ list_del(&ib->list);
+ vfree(ib->ptr);
+ kfree(ib);
+ }
+}
+
+void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ib *bib;
+
+ bib = kmalloc(sizeof(*bib), GFP_KERNEL);
+ if (bib == NULL)
+ return;
+ bib->ptr = vmalloc(ib->length_dw * 4);
+ if (bib->ptr == NULL) {
+ kfree(bib);
+ return;
+ }
+ memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
+ bib->length_dw = ib->length_dw;
+ mutex_lock(&rdev->ib_pool.mutex);
+ list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
+ mutex_unlock(&rdev->ib_pool.mutex);
+}
+
/*
* IB.
*/
@@ -142,6 +172,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
if (rdev->ib_pool.robj)
return 0;
+ INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
/* Allocate 1M object buffer */
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
true, RADEON_GEM_DOMAIN_GTT,
@@ -192,6 +223,8 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
return;
}
mutex_lock(&rdev->ib_pool.mutex);
+ radeon_ib_bogus_cleanup(rdev);
+
if (rdev->ib_pool.robj) {
r = radeon_bo_reserve(rdev->ib_pool.robj, false);
if (likely(r == 0)) {
@@ -349,15 +382,49 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
return 0;
}
+static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct radeon_device *rdev = node->info_ent->data;
+ struct radeon_ib *ib;
+ unsigned i;
+
+ mutex_lock(&rdev->ib_pool.mutex);
+ if (list_empty(&rdev->ib_pool.bogus_ib)) {
+ mutex_unlock(&rdev->ib_pool.mutex);
+ seq_printf(m, "no bogus IB recorded\n");
+ return 0;
+ }
+ ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
+ list_del_init(&ib->list);
+ mutex_unlock(&rdev->ib_pool.mutex);
+ seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
+ for (i = 0; i < ib->length_dw; i++) {
+ seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
+ }
+ vfree(ib->ptr);
+ kfree(ib);
+ return 0;
+}
+
static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
+
+static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
+ {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
+};
#endif
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned i;
+ int r;
+ radeon_debugfs_ib_bogus_info_list[0].data = rdev;
+ r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
+ if (r)
+ return r;
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
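radeon_ib_bogus_add() is meant to be called from an IB scheduling error path: the failing IB is duplicated into the bogus_ib list, and the oldest entry can later be dumped (and consumed) through the radeon_ib_bogus debugfs file. A hedged sketch of such a call site, which this hunk does not itself show:

/* Sketch: capture an IB that could not be scheduled so it can be
 * inspected later via debugfs. The surrounding function is illustrative,
 * not the in-tree scheduler. */
int example_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int r = radeon_ring_lock(rdev, 64);

	if (r) {
		/* keep a copy of the offending command stream */
		radeon_ib_bogus_add(rdev, ib);
		return r;
	}
	/* ... emit the IB, then unlock and commit the ring ... */
	radeon_ring_unlock_commit(rdev);
	return 0;
}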
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 067167c..3c32f84 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -29,6 +29,7 @@
#include "drmP.h"
#include "drm.h"
+#include "drm_buffer.h"
#include "drm_sarea.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
@@ -91,21 +92,27 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
dev_priv,
struct drm_file *file_priv,
- int id, u32 *data)
+ int id, struct drm_buffer *buf)
{
+ u32 *data;
switch (id) {
case RADEON_EMIT_PP_MISC:
- if (radeon_check_and_fixup_offset(dev_priv, file_priv,
- &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
+ data = drm_buffer_pointer_to_dword(buf,
+ (RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4);
+
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
DRM_ERROR("Invalid depth buffer offset\n");
return -EINVAL;
}
+ dev_priv->have_z_offset = 1;
break;
case RADEON_EMIT_PP_CNTL:
- if (radeon_check_and_fixup_offset(dev_priv, file_priv,
- &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
+ data = drm_buffer_pointer_to_dword(buf,
+ (RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4);
+
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
DRM_ERROR("Invalid colour buffer offset\n");
return -EINVAL;
}
@@ -117,8 +124,8 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case R200_EMIT_PP_TXOFFSET_3:
case R200_EMIT_PP_TXOFFSET_4:
case R200_EMIT_PP_TXOFFSET_5:
- if (radeon_check_and_fixup_offset(dev_priv, file_priv,
- &data[0])) {
+ data = drm_buffer_pointer_to_dword(buf, 0);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
DRM_ERROR("Invalid R200 texture offset\n");
return -EINVAL;
}
@@ -127,8 +134,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case RADEON_EMIT_PP_TXFILTER_0:
case RADEON_EMIT_PP_TXFILTER_1:
case RADEON_EMIT_PP_TXFILTER_2:
- if (radeon_check_and_fixup_offset(dev_priv, file_priv,
- &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
+ data = drm_buffer_pointer_to_dword(buf,
+ (RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
DRM_ERROR("Invalid R100 texture offset\n");
return -EINVAL;
}
@@ -142,9 +150,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case R200_EMIT_PP_CUBIC_OFFSETS_5:{
int i;
for (i = 0; i < 5; i++) {
+ data = drm_buffer_pointer_to_dword(buf, i);
if (radeon_check_and_fixup_offset(dev_priv,
file_priv,
- &data[i])) {
+ data)) {
DRM_ERROR
("Invalid R200 cubic texture offset\n");
return -EINVAL;
@@ -158,9 +167,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
int i;
for (i = 0; i < 5; i++) {
+ data = drm_buffer_pointer_to_dword(buf, i);
if (radeon_check_and_fixup_offset(dev_priv,
file_priv,
- &data[i])) {
+ data)) {
DRM_ERROR
("Invalid R100 cubic texture offset\n");
return -EINVAL;
@@ -269,23 +279,24 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
cmdbuf,
unsigned int *cmdsz)
{
- u32 *cmd = (u32 *) cmdbuf->buf;
+ u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
u32 offset, narrays;
int count, i, k;
- *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16);
+ count = ((*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16);
+ *cmdsz = 2 + count;
- if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
+ if ((*cmd & 0xc0000000) != RADEON_CP_PACKET3) {
DRM_ERROR("Not a type 3 packet\n");
return -EINVAL;
}
- if (4 * *cmdsz > cmdbuf->bufsz) {
+ if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) {
DRM_ERROR("Packet size larger than size of data provided\n");
return -EINVAL;
}
- switch(cmd[0] & 0xff00) {
+ switch (*cmd & 0xff00) {
/* XXX Are there old drivers needing other packets? */
case RADEON_3D_DRAW_IMMD:
@@ -312,7 +323,6 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
break;
case RADEON_3D_LOAD_VBPNTR:
- count = (cmd[0] >> 16) & 0x3fff;
if (count > 18) { /* 12 arrays max */
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
@@ -321,13 +331,16 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
}
/* carefully check packet contents */
- narrays = cmd[1] & ~0xc000;
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+
+ narrays = *cmd & ~0xc000;
k = 0;
i = 2;
while ((k < narrays) && (i < (count + 2))) {
i++; /* skip attribute field */
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
if (radeon_check_and_fixup_offset(dev_priv, file_priv,
- &cmd[i])) {
+ cmd)) {
DRM_ERROR
("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
k, i);
@@ -338,8 +351,10 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
if (k == narrays)
break;
/* have one more to process, they come in pairs */
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+
if (radeon_check_and_fixup_offset(dev_priv,
- file_priv, &cmd[i]))
+ file_priv, cmd))
{
DRM_ERROR
("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
@@ -363,7 +378,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
DRM_ERROR("Invalid 3d packet for r200-class chip\n");
return -EINVAL;
}
- if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) {
+
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
DRM_ERROR("Invalid rndr_gen_indx offset\n");
return -EINVAL;
}
@@ -374,12 +391,15 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
DRM_ERROR("Invalid 3d packet for r100-class chip\n");
return -EINVAL;
}
- if ((cmd[1] & 0x8000ffff) != 0x80000810) {
- DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
+
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if ((*cmd & 0x8000ffff) != 0x80000810) {
+ DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd);
return -EINVAL;
}
- if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) {
- DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
+ DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd);
return -EINVAL;
}
break;
@@ -388,31 +408,34 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
case RADEON_CNTL_PAINT_MULTI:
case RADEON_CNTL_BITBLT_MULTI:
/* MSB of opcode: next DWORD GUI_CNTL */
- if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if (*cmd & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
- offset = cmd[2] << 10;
+ u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ offset = *cmd2 << 10;
if (radeon_check_and_fixup_offset
(dev_priv, file_priv, &offset)) {
DRM_ERROR("Invalid first packet offset\n");
return -EINVAL;
}
- cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
+ *cmd2 = (*cmd2 & 0xffc00000) | offset >> 10;
}
- if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
- (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
- offset = cmd[3] << 10;
+ if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+ (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+ u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+			offset = *cmd3 << 10;
if (radeon_check_and_fixup_offset
(dev_priv, file_priv, &offset)) {
DRM_ERROR("Invalid second packet offset\n");
return -EINVAL;
}
- cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
+ *cmd3 = (*cmd3 & 0xffc00000) | offset >> 10;
}
break;
default:
- DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
+ DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00);
return -EINVAL;
}
@@ -876,6 +899,11 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
if (tmp & RADEON_BACK)
flags |= RADEON_FRONT;
}
+ if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
+ if (!dev_priv->have_z_offset)
+ printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
+ flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+ }
if (flags & (RADEON_FRONT | RADEON_BACK)) {
@@ -2611,7 +2639,6 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
{
int id = (int)header.packet.packet_id;
int sz, reg;
- int *data = (int *)cmdbuf->buf;
RING_LOCALS;
if (id >= RADEON_MAX_STATE_PACKETS)
@@ -2620,23 +2647,22 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
sz = packet[id].len;
reg = packet[id].start;
- if (sz * sizeof(int) > cmdbuf->bufsz) {
+ if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) {
DRM_ERROR("Packet size provided larger than data provided\n");
return -EINVAL;
}
- if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) {
+ if (radeon_check_and_fixup_packets(dev_priv, file_priv, id,
+ cmdbuf->buffer)) {
DRM_ERROR("Packet verification failed\n");
return -EINVAL;
}
BEGIN_RING(sz + 1);
OUT_RING(CP_PACKET0(reg, (sz - 1)));
- OUT_RING_TABLE(data, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * sizeof(int);
- cmdbuf->bufsz -= sz * sizeof(int);
return 0;
}
@@ -2653,10 +2679,8 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
- OUT_RING_TABLE(cmdbuf->buf, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * sizeof(int);
- cmdbuf->bufsz -= sz * sizeof(int);
return 0;
}
@@ -2675,10 +2699,8 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
- OUT_RING_TABLE(cmdbuf->buf, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * sizeof(int);
- cmdbuf->bufsz -= sz * sizeof(int);
return 0;
}
@@ -2696,11 +2718,9 @@ static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
- OUT_RING_TABLE(cmdbuf->buf, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * sizeof(int);
- cmdbuf->bufsz -= sz * sizeof(int);
return 0;
}
@@ -2714,7 +2734,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
if (!sz)
return 0;
- if (sz * 4 > cmdbuf->bufsz)
+ if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
BEGIN_RING(5 + sz);
@@ -2722,11 +2742,9 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
- OUT_RING_TABLE(cmdbuf->buf, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * sizeof(int);
- cmdbuf->bufsz -= sz * sizeof(int);
return 0;
}
@@ -2748,11 +2766,9 @@ static int radeon_emit_packet3(struct drm_device * dev,
}
BEGIN_RING(cmdsz);
- OUT_RING_TABLE(cmdbuf->buf, cmdsz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
ADVANCE_RING();
- cmdbuf->buf += cmdsz * 4;
- cmdbuf->bufsz -= cmdsz * 4;
return 0;
}
@@ -2805,16 +2821,16 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev,
}
BEGIN_RING(cmdsz);
- OUT_RING_TABLE(cmdbuf->buf, cmdsz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
ADVANCE_RING();
} while (++i < cmdbuf->nbox);
if (cmdbuf->nbox == 1)
cmdbuf->nbox = 0;
+ return 0;
out:
- cmdbuf->buf += cmdsz * 4;
- cmdbuf->bufsz -= cmdsz * 4;
+ drm_buffer_advance(cmdbuf->buffer, cmdsz * 4);
return 0;
}
@@ -2847,16 +2863,16 @@ static int radeon_emit_wait(struct drm_device * dev, int flags)
return 0;
}
-static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf = NULL;
+ drm_radeon_cmd_header_t stack_header;
int idx;
drm_radeon_kcmd_buffer_t *cmdbuf = data;
- drm_radeon_cmd_header_t header;
- int orig_nbox, orig_bufsz;
- char *kbuf = NULL;
+ int orig_nbox;
LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -2871,17 +2887,16 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
* races between checking values and using those values in other code,
* and simply to avoid a lot of function calls to copy in data.
*/
- orig_bufsz = cmdbuf->bufsz;
- if (orig_bufsz != 0) {
- kbuf = kmalloc(cmdbuf->bufsz, GFP_KERNEL);
- if (kbuf == NULL)
- return -ENOMEM;
- if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf,
- cmdbuf->bufsz)) {
- kfree(kbuf);
- return -EFAULT;
- }
- cmdbuf->buf = kbuf;
+ if (cmdbuf->bufsz != 0) {
+ int rv;
+ void __user *buffer = cmdbuf->buffer;
+ rv = drm_buffer_alloc(&cmdbuf->buffer, cmdbuf->bufsz);
+ if (rv)
+ return rv;
+ rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
+ cmdbuf->bufsz);
+ if (rv)
+ return rv;
}
orig_nbox = cmdbuf->nbox;
@@ -2890,24 +2905,24 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
int temp;
temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
- if (orig_bufsz != 0)
- kfree(kbuf);
+ if (cmdbuf->bufsz != 0)
+ drm_buffer_free(cmdbuf->buffer);
return temp;
}
/* microcode_version != r300 */
- while (cmdbuf->bufsz >= sizeof(header)) {
+ while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {
- header.i = *(int *)cmdbuf->buf;
- cmdbuf->buf += sizeof(header);
- cmdbuf->bufsz -= sizeof(header);
+ drm_radeon_cmd_header_t *header;
+ header = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_header), &stack_header);
- switch (header.header.cmd_type) {
+ switch (header->header.cmd_type) {
case RADEON_CMD_PACKET:
DRM_DEBUG("RADEON_CMD_PACKET\n");
if (radeon_emit_packets
- (dev_priv, file_priv, header, cmdbuf)) {
+ (dev_priv, file_priv, *header, cmdbuf)) {
DRM_ERROR("radeon_emit_packets failed\n");
goto err;
}
@@ -2915,7 +2930,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
case RADEON_CMD_SCALARS:
DRM_DEBUG("RADEON_CMD_SCALARS\n");
- if (radeon_emit_scalars(dev_priv, header, cmdbuf)) {
+ if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) {
DRM_ERROR("radeon_emit_scalars failed\n");
goto err;
}
@@ -2923,7 +2938,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
case RADEON_CMD_VECTORS:
DRM_DEBUG("RADEON_CMD_VECTORS\n");
- if (radeon_emit_vectors(dev_priv, header, cmdbuf)) {
+ if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) {
DRM_ERROR("radeon_emit_vectors failed\n");
goto err;
}
@@ -2931,7 +2946,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
case RADEON_CMD_DMA_DISCARD:
DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
- idx = header.dma.buf_idx;
+ idx = header->dma.buf_idx;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
@@ -2968,7 +2983,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
case RADEON_CMD_SCALARS2:
DRM_DEBUG("RADEON_CMD_SCALARS2\n");
- if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) {
+ if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) {
DRM_ERROR("radeon_emit_scalars2 failed\n");
goto err;
}
@@ -2976,37 +2991,37 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
case RADEON_CMD_WAIT:
DRM_DEBUG("RADEON_CMD_WAIT\n");
- if (radeon_emit_wait(dev, header.wait.flags)) {
+ if (radeon_emit_wait(dev, header->wait.flags)) {
DRM_ERROR("radeon_emit_wait failed\n");
goto err;
}
break;
case RADEON_CMD_VECLINEAR:
DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
- if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) {
+ if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) {
DRM_ERROR("radeon_emit_veclinear failed\n");
goto err;
}
break;
default:
- DRM_ERROR("bad cmd_type %d at %p\n",
- header.header.cmd_type,
- cmdbuf->buf - sizeof(header));
+ DRM_ERROR("bad cmd_type %d at byte %d\n",
+ header->header.cmd_type,
+ cmdbuf->buffer->iterator);
goto err;
}
}
- if (orig_bufsz != 0)
- kfree(kbuf);
+ if (cmdbuf->bufsz != 0)
+ drm_buffer_free(cmdbuf->buffer);
DRM_DEBUG("DONE\n");
COMMIT_RING();
return 0;
err:
- if (orig_bufsz != 0)
- kfree(kbuf);
+ if (cmdbuf->bufsz != 0)
+ drm_buffer_free(cmdbuf->buffer);
return -EINVAL;
}
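The command-stream verifier now walks a drm_buffer instead of a raw kernel copy: data is fetched with drm_buffer_pointer_to_dword()/drm_buffer_read_object(), emitted with OUT_RING_DRM_BUFFER() (which advances the iterator), and bounds are checked against drm_buffer_unprocessed(). A minimal consumption loop in the same style, assuming a caller that has already done drm_buffer_alloc() and drm_buffer_copy_from_user() as in the ioctl above:

/* Sketch: iterate a drm_buffer one dword at a time. Only drm_buffer
 * calls already used by the patch are assumed; the validation check is
 * a toy example. */
static int example_walk(struct drm_buffer *buf)
{
	while (drm_buffer_unprocessed(buf) >= sizeof(u32)) {
		u32 *dw = drm_buffer_pointer_to_dword(buf, 0);

		if ((*dw & 0xc0000000) != RADEON_CP_PACKET3)
			return -EINVAL;
		/* consume this dword before looking at the next one */
		drm_buffer_advance(buf, sizeof(u32));
	}
	return 0;
}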
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 9f5e2f9..313c96b 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -186,7 +186,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(gtt_obj[i]);
DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
- gtt_addr - rdev->mc.gtt_location);
+ gtt_addr - rdev->mc.gtt_start);
}
out_cleanup:
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 58b5adf..43c5ab3 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
- man->gpu_offset = rdev->mc.gtt_location;
+ man->gpu_offset = rdev->mc.gtt_start;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
- man->gpu_offset = rdev->mc.vram_location;
+ man->gpu_offset = rdev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -262,10 +262,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
- old_start += rdev->mc.vram_location;
+ old_start += rdev->mc.vram_start;
break;
case TTM_PL_TT:
- old_start += rdev->mc.gtt_location;
+ old_start += rdev->mc.gtt_start;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
@@ -273,10 +273,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
}
switch (new_mem->mem_type) {
case TTM_PL_VRAM:
- new_start += rdev->mc.vram_location;
+ new_start += rdev->mc.vram_start;
break;
case TTM_PL_TT:
- new_start += rdev->mc.gtt_location;
+ new_start += rdev->mc.gtt_start;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
new file mode 100644
index 0000000..8f414a5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -0,0 +1,837 @@
+r600 0x9400
+0x000287A0 R7xx_CB_SHADER_CONTROL
+0x00028230 R7xx_PA_SC_EDGERULE
+0x000286C8 R7xx_SPI_THREAD_GROUPING
+0x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x000088C4 VGT_CACHE_INVALIDATION
+0x00028A50 VGT_ENHANCE
+0x000088CC VGT_ES_PER_GS
+0x00028A2C VGT_GROUP_DECR
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x000088C8 VGT_GS_PER_ES
+0x000088E8 VGT_GS_PER_VS
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x0000895C VGT_INDEX_TYPE
+0x00028408 VGT_INDX_OFFSET
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x000088C0 VGT_LAST_COPY_STATE
+0x00028400 VGT_MAX_VTX_INDX
+0x000088D8 VGT_MC_LAT_CNTL
+0x00028404 VGT_MIN_VTX_INDX
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028C5C VGT_OUT_DEALLOC_CNTL
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00008958 VGT_PRIMITIVE_TYPE
+0x00028AB4 VGT_REUSE_OFF
+0x00028C58 VGT_VERTEX_REUSE_BLOCK_CNTL
+0x00028AB8 VGT_VTX_CNT_EN
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x00028810 PA_CL_CLIP_CNTL
+0x00008A14 PA_CL_ENHANCE
+0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
+0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
+0x00028C10 PA_CL_GB_VERT_DISC_ADJ
+0x00028820 PA_CL_NANINF_CNTL
+0x00028E1C PA_CL_POINT_CULL_RAD
+0x00028E18 PA_CL_POINT_SIZE
+0x00028E10 PA_CL_POINT_X_RAD
+0x00028E14 PA_CL_POINT_Y_RAD
+0x00028E2C PA_CL_UCP_0_W
+0x00028E3C PA_CL_UCP_1_W
+0x00028E4C PA_CL_UCP_2_W
+0x00028E5C PA_CL_UCP_3_W
+0x00028E6C PA_CL_UCP_4_W
+0x00028E7C PA_CL_UCP_5_W
+0x00028E20 PA_CL_UCP_0_X
+0x00028E30 PA_CL_UCP_1_X
+0x00028E40 PA_CL_UCP_2_X
+0x00028E50 PA_CL_UCP_3_X
+0x00028E60 PA_CL_UCP_4_X
+0x00028E70 PA_CL_UCP_5_X
+0x00028E24 PA_CL_UCP_0_Y
+0x00028E34 PA_CL_UCP_1_Y
+0x00028E44 PA_CL_UCP_2_Y
+0x00028E54 PA_CL_UCP_3_Y
+0x00028E64 PA_CL_UCP_4_Y
+0x00028E74 PA_CL_UCP_5_Y
+0x00028E28 PA_CL_UCP_0_Z
+0x00028E38 PA_CL_UCP_1_Z
+0x00028E48 PA_CL_UCP_2_Z
+0x00028E58 PA_CL_UCP_3_Z
+0x00028E68 PA_CL_UCP_4_Z
+0x00028E78 PA_CL_UCP_5_Z
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x00028C48 PA_SC_AA_MASK
+0x00008B40 PA_SC_AA_SAMPLE_LOCS_2S
+0x00008B44 PA_SC_AA_SAMPLE_LOCS_4S
+0x00008B48 PA_SC_AA_SAMPLE_LOCS_8S_WD0
+0x00008B4C PA_SC_AA_SAMPLE_LOCS_8S_WD1
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_MCTX
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00008BF0 PA_SC_ENHANCE
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028C00 PA_SC_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00028A4C PA_SC_MODE_CNTL
+0x00028A48 PA_SC_MPASS_PS_CNTL
+0x00008B20 PA_SC_MULTI_CHIP_CNTL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A00 PA_SU_POINT_SIZE
+0x00028E0C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028E08 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028DFC PA_SU_POLY_OFFSET_CLAMP
+0x00028DF8 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028E04 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028C08 PA_SU_VTX_CNTL
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C08 SQ_GPR_RESOURCE_MGMT_2
+0x00008C10 SQ_STACK_RESOURCE_MGMT_1
+0x00008C14 SQ_STACK_RESOURCE_MGMT_2
+0x00008C0C SQ_THREAD_RESOURCE_MGMT
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x000288E0 SQ_VTX_SEMANTIC_CLEAR
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003C000 SQ_TEX_SAMPLER_WORD0_0
+0x0003C004 SQ_TEX_SAMPLER_WORD1_0
+0x0003C008 SQ_TEX_SAMPLER_WORD2_0
+0x00030000 SQ_ALU_CONSTANT0_0
+0x00030004 SQ_ALU_CONSTANT1_0
+0x00030008 SQ_ALU_CONSTANT2_0
+0x0003000C SQ_ALU_CONSTANT3_0
+0x0003E380 SQ_BOOL_CONST_0
+0x0003E384 SQ_BOOL_CONST_1
+0x0003E388 SQ_BOOL_CONST_2
+0x0003E200 SQ_LOOP_CONST_0
+0x0003E200 SQ_LOOP_CONST_DX10_0
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000289C0 SQ_ALU_CONST_CACHE_GS_0
+0x000289C4 SQ_ALU_CONST_CACHE_GS_1
+0x000289C8 SQ_ALU_CONST_CACHE_GS_2
+0x000289CC SQ_ALU_CONST_CACHE_GS_3
+0x000289D0 SQ_ALU_CONST_CACHE_GS_4
+0x000289D4 SQ_ALU_CONST_CACHE_GS_5
+0x000289D8 SQ_ALU_CONST_CACHE_GS_6
+0x000289DC SQ_ALU_CONST_CACHE_GS_7
+0x000289E0 SQ_ALU_CONST_CACHE_GS_8
+0x000289E4 SQ_ALU_CONST_CACHE_GS_9
+0x000289E8 SQ_ALU_CONST_CACHE_GS_10
+0x000289EC SQ_ALU_CONST_CACHE_GS_11
+0x000289F0 SQ_ALU_CONST_CACHE_GS_12
+0x000289F4 SQ_ALU_CONST_CACHE_GS_13
+0x000289F8 SQ_ALU_CONST_CACHE_GS_14
+0x000289FC SQ_ALU_CONST_CACHE_GS_15
+0x00028940 SQ_ALU_CONST_CACHE_PS_0
+0x00028944 SQ_ALU_CONST_CACHE_PS_1
+0x00028948 SQ_ALU_CONST_CACHE_PS_2
+0x0002894C SQ_ALU_CONST_CACHE_PS_3
+0x00028950 SQ_ALU_CONST_CACHE_PS_4
+0x00028954 SQ_ALU_CONST_CACHE_PS_5
+0x00028958 SQ_ALU_CONST_CACHE_PS_6
+0x0002895C SQ_ALU_CONST_CACHE_PS_7
+0x00028960 SQ_ALU_CONST_CACHE_PS_8
+0x00028964 SQ_ALU_CONST_CACHE_PS_9
+0x00028968 SQ_ALU_CONST_CACHE_PS_10
+0x0002896C SQ_ALU_CONST_CACHE_PS_11
+0x00028970 SQ_ALU_CONST_CACHE_PS_12
+0x00028974 SQ_ALU_CONST_CACHE_PS_13
+0x00028978 SQ_ALU_CONST_CACHE_PS_14
+0x0002897C SQ_ALU_CONST_CACHE_PS_15
+0x00028980 SQ_ALU_CONST_CACHE_VS_0
+0x00028984 SQ_ALU_CONST_CACHE_VS_1
+0x00028988 SQ_ALU_CONST_CACHE_VS_2
+0x0002898C SQ_ALU_CONST_CACHE_VS_3
+0x00028990 SQ_ALU_CONST_CACHE_VS_4
+0x00028994 SQ_ALU_CONST_CACHE_VS_5
+0x00028998 SQ_ALU_CONST_CACHE_VS_6
+0x0002899C SQ_ALU_CONST_CACHE_VS_7
+0x000289A0 SQ_ALU_CONST_CACHE_VS_8
+0x000289A4 SQ_ALU_CONST_CACHE_VS_9
+0x000289A8 SQ_ALU_CONST_CACHE_VS_10
+0x000289AC SQ_ALU_CONST_CACHE_VS_11
+0x000289B0 SQ_ALU_CONST_CACHE_VS_12
+0x000289B4 SQ_ALU_CONST_CACHE_VS_13
+0x000289B8 SQ_ALU_CONST_CACHE_VS_14
+0x000289BC SQ_ALU_CONST_CACHE_VS_15
+0x000288D8 SQ_PGM_CF_OFFSET_ES
+0x000288DC SQ_PGM_CF_OFFSET_FS
+0x000288D4 SQ_PGM_CF_OFFSET_GS
+0x000288CC SQ_PGM_CF_OFFSET_PS
+0x000288D0 SQ_PGM_CF_OFFSET_VS
+0x00028854 SQ_PGM_EXPORTS_PS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x000288A4 SQ_PGM_RESOURCES_FS
+0x0002887C SQ_PGM_RESOURCES_GS
+0x00028850 SQ_PGM_RESOURCES_PS
+0x00028868 SQ_PGM_RESOURCES_VS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x000286DC SPI_FOG_CNTL
+0x000286E4 SPI_FOG_FUNC_BIAS
+0x000286E0 SPI_FOG_FUNC_SCALE
+0x000286D8 SPI_INPUT_Z
+0x000286D4 SPI_INTERP_CONTROL_0
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286C4 SPI_VS_OUT_CONFIG
+0x00028614 SPI_VS_OUT_ID_0
+0x00028618 SPI_VS_OUT_ID_1
+0x0002861C SPI_VS_OUT_ID_2
+0x00028620 SPI_VS_OUT_ID_3
+0x00028624 SPI_VS_OUT_ID_4
+0x00028628 SPI_VS_OUT_ID_5
+0x0002862C SPI_VS_OUT_ID_6
+0x00028630 SPI_VS_OUT_ID_7
+0x00028634 SPI_VS_OUT_ID_8
+0x00028638 SPI_VS_OUT_ID_9
+0x00028438 SX_ALPHA_REF
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028350 SX_MISC
+0x0000A020 SMX_DC_CTL0
+0x0000A024 SMX_DC_CTL1
+0x0000A028 SMX_DC_CTL2
+0x00009608 TC_CNTL
+0x00009604 TC_INVALIDATE
+0x00009490 TD_CNTL
+0x00009400 TD_FILTER4
+0x00009404 TD_FILTER4_1
+0x00009408 TD_FILTER4_2
+0x0000940C TD_FILTER4_3
+0x00009410 TD_FILTER4_4
+0x00009414 TD_FILTER4_5
+0x00009418 TD_FILTER4_6
+0x0000941C TD_FILTER4_7
+0x00009420 TD_FILTER4_8
+0x00009424 TD_FILTER4_9
+0x00009428 TD_FILTER4_10
+0x0000942C TD_FILTER4_11
+0x00009430 TD_FILTER4_12
+0x00009434 TD_FILTER4_13
+0x00009438 TD_FILTER4_14
+0x0000943C TD_FILTER4_15
+0x00009440 TD_FILTER4_16
+0x00009444 TD_FILTER4_17
+0x00009448 TD_FILTER4_18
+0x0000944C TD_FILTER4_19
+0x00009450 TD_FILTER4_20
+0x00009454 TD_FILTER4_21
+0x00009458 TD_FILTER4_22
+0x0000945C TD_FILTER4_23
+0x00009460 TD_FILTER4_24
+0x00009464 TD_FILTER4_25
+0x00009468 TD_FILTER4_26
+0x0000946C TD_FILTER4_27
+0x00009470 TD_FILTER4_28
+0x00009474 TD_FILTER4_29
+0x00009478 TD_FILTER4_30
+0x0000947C TD_FILTER4_31
+0x00009480 TD_FILTER4_32
+0x00009484 TD_FILTER4_33
+0x00009488 TD_FILTER4_34
+0x0000948C TD_FILTER4_35
+0x0000A80C TD_GS_SAMPLER0_BORDER_ALPHA
+0x0000A81C TD_GS_SAMPLER1_BORDER_ALPHA
+0x0000A82C TD_GS_SAMPLER2_BORDER_ALPHA
+0x0000A83C TD_GS_SAMPLER3_BORDER_ALPHA
+0x0000A84C TD_GS_SAMPLER4_BORDER_ALPHA
+0x0000A85C TD_GS_SAMPLER5_BORDER_ALPHA
+0x0000A86C TD_GS_SAMPLER6_BORDER_ALPHA
+0x0000A87C TD_GS_SAMPLER7_BORDER_ALPHA
+0x0000A88C TD_GS_SAMPLER8_BORDER_ALPHA
+0x0000A89C TD_GS_SAMPLER9_BORDER_ALPHA
+0x0000A8AC TD_GS_SAMPLER10_BORDER_ALPHA
+0x0000A8BC TD_GS_SAMPLER11_BORDER_ALPHA
+0x0000A8CC TD_GS_SAMPLER12_BORDER_ALPHA
+0x0000A8DC TD_GS_SAMPLER13_BORDER_ALPHA
+0x0000A8EC TD_GS_SAMPLER14_BORDER_ALPHA
+0x0000A8FC TD_GS_SAMPLER15_BORDER_ALPHA
+0x0000A90C TD_GS_SAMPLER16_BORDER_ALPHA
+0x0000A91C TD_GS_SAMPLER17_BORDER_ALPHA
+0x0000A808 TD_GS_SAMPLER0_BORDER_BLUE
+0x0000A818 TD_GS_SAMPLER1_BORDER_BLUE
+0x0000A828 TD_GS_SAMPLER2_BORDER_BLUE
+0x0000A838 TD_GS_SAMPLER3_BORDER_BLUE
+0x0000A848 TD_GS_SAMPLER4_BORDER_BLUE
+0x0000A858 TD_GS_SAMPLER5_BORDER_BLUE
+0x0000A868 TD_GS_SAMPLER6_BORDER_BLUE
+0x0000A878 TD_GS_SAMPLER7_BORDER_BLUE
+0x0000A888 TD_GS_SAMPLER8_BORDER_BLUE
+0x0000A898 TD_GS_SAMPLER9_BORDER_BLUE
+0x0000A8A8 TD_GS_SAMPLER10_BORDER_BLUE
+0x0000A8B8 TD_GS_SAMPLER11_BORDER_BLUE
+0x0000A8C8 TD_GS_SAMPLER12_BORDER_BLUE
+0x0000A8D8 TD_GS_SAMPLER13_BORDER_BLUE
+0x0000A8E8 TD_GS_SAMPLER14_BORDER_BLUE
+0x0000A8F8 TD_GS_SAMPLER15_BORDER_BLUE
+0x0000A908 TD_GS_SAMPLER16_BORDER_BLUE
+0x0000A918 TD_GS_SAMPLER17_BORDER_BLUE
+0x0000A804 TD_GS_SAMPLER0_BORDER_GREEN
+0x0000A814 TD_GS_SAMPLER1_BORDER_GREEN
+0x0000A824 TD_GS_SAMPLER2_BORDER_GREEN
+0x0000A834 TD_GS_SAMPLER3_BORDER_GREEN
+0x0000A844 TD_GS_SAMPLER4_BORDER_GREEN
+0x0000A854 TD_GS_SAMPLER5_BORDER_GREEN
+0x0000A864 TD_GS_SAMPLER6_BORDER_GREEN
+0x0000A874 TD_GS_SAMPLER7_BORDER_GREEN
+0x0000A884 TD_GS_SAMPLER8_BORDER_GREEN
+0x0000A894 TD_GS_SAMPLER9_BORDER_GREEN
+0x0000A8A4 TD_GS_SAMPLER10_BORDER_GREEN
+0x0000A8B4 TD_GS_SAMPLER11_BORDER_GREEN
+0x0000A8C4 TD_GS_SAMPLER12_BORDER_GREEN
+0x0000A8D4 TD_GS_SAMPLER13_BORDER_GREEN
+0x0000A8E4 TD_GS_SAMPLER14_BORDER_GREEN
+0x0000A8F4 TD_GS_SAMPLER15_BORDER_GREEN
+0x0000A904 TD_GS_SAMPLER16_BORDER_GREEN
+0x0000A914 TD_GS_SAMPLER17_BORDER_GREEN
+0x0000A800 TD_GS_SAMPLER0_BORDER_RED
+0x0000A810 TD_GS_SAMPLER1_BORDER_RED
+0x0000A820 TD_GS_SAMPLER2_BORDER_RED
+0x0000A830 TD_GS_SAMPLER3_BORDER_RED
+0x0000A840 TD_GS_SAMPLER4_BORDER_RED
+0x0000A850 TD_GS_SAMPLER5_BORDER_RED
+0x0000A860 TD_GS_SAMPLER6_BORDER_RED
+0x0000A870 TD_GS_SAMPLER7_BORDER_RED
+0x0000A880 TD_GS_SAMPLER8_BORDER_RED
+0x0000A890 TD_GS_SAMPLER9_BORDER_RED
+0x0000A8A0 TD_GS_SAMPLER10_BORDER_RED
+0x0000A8B0 TD_GS_SAMPLER11_BORDER_RED
+0x0000A8C0 TD_GS_SAMPLER12_BORDER_RED
+0x0000A8D0 TD_GS_SAMPLER13_BORDER_RED
+0x0000A8E0 TD_GS_SAMPLER14_BORDER_RED
+0x0000A8F0 TD_GS_SAMPLER15_BORDER_RED
+0x0000A900 TD_GS_SAMPLER16_BORDER_RED
+0x0000A910 TD_GS_SAMPLER17_BORDER_RED
+0x0000A40C TD_PS_SAMPLER0_BORDER_ALPHA
+0x0000A41C TD_PS_SAMPLER1_BORDER_ALPHA
+0x0000A42C TD_PS_SAMPLER2_BORDER_ALPHA
+0x0000A43C TD_PS_SAMPLER3_BORDER_ALPHA
+0x0000A44C TD_PS_SAMPLER4_BORDER_ALPHA
+0x0000A45C TD_PS_SAMPLER5_BORDER_ALPHA
+0x0000A46C TD_PS_SAMPLER6_BORDER_ALPHA
+0x0000A47C TD_PS_SAMPLER7_BORDER_ALPHA
+0x0000A48C TD_PS_SAMPLER8_BORDER_ALPHA
+0x0000A49C TD_PS_SAMPLER9_BORDER_ALPHA
+0x0000A4AC TD_PS_SAMPLER10_BORDER_ALPHA
+0x0000A4BC TD_PS_SAMPLER11_BORDER_ALPHA
+0x0000A4CC TD_PS_SAMPLER12_BORDER_ALPHA
+0x0000A4DC TD_PS_SAMPLER13_BORDER_ALPHA
+0x0000A4EC TD_PS_SAMPLER14_BORDER_ALPHA
+0x0000A4FC TD_PS_SAMPLER15_BORDER_ALPHA
+0x0000A50C TD_PS_SAMPLER16_BORDER_ALPHA
+0x0000A51C TD_PS_SAMPLER17_BORDER_ALPHA
+0x0000A408 TD_PS_SAMPLER0_BORDER_BLUE
+0x0000A418 TD_PS_SAMPLER1_BORDER_BLUE
+0x0000A428 TD_PS_SAMPLER2_BORDER_BLUE
+0x0000A438 TD_PS_SAMPLER3_BORDER_BLUE
+0x0000A448 TD_PS_SAMPLER4_BORDER_BLUE
+0x0000A458 TD_PS_SAMPLER5_BORDER_BLUE
+0x0000A468 TD_PS_SAMPLER6_BORDER_BLUE
+0x0000A478 TD_PS_SAMPLER7_BORDER_BLUE
+0x0000A488 TD_PS_SAMPLER8_BORDER_BLUE
+0x0000A498 TD_PS_SAMPLER9_BORDER_BLUE
+0x0000A4A8 TD_PS_SAMPLER10_BORDER_BLUE
+0x0000A4B8 TD_PS_SAMPLER11_BORDER_BLUE
+0x0000A4C8 TD_PS_SAMPLER12_BORDER_BLUE
+0x0000A4D8 TD_PS_SAMPLER13_BORDER_BLUE
+0x0000A4E8 TD_PS_SAMPLER14_BORDER_BLUE
+0x0000A4F8 TD_PS_SAMPLER15_BORDER_BLUE
+0x0000A508 TD_PS_SAMPLER16_BORDER_BLUE
+0x0000A518 TD_PS_SAMPLER17_BORDER_BLUE
+0x0000A404 TD_PS_SAMPLER0_BORDER_GREEN
+0x0000A414 TD_PS_SAMPLER1_BORDER_GREEN
+0x0000A424 TD_PS_SAMPLER2_BORDER_GREEN
+0x0000A434 TD_PS_SAMPLER3_BORDER_GREEN
+0x0000A444 TD_PS_SAMPLER4_BORDER_GREEN
+0x0000A454 TD_PS_SAMPLER5_BORDER_GREEN
+0x0000A464 TD_PS_SAMPLER6_BORDER_GREEN
+0x0000A474 TD_PS_SAMPLER7_BORDER_GREEN
+0x0000A484 TD_PS_SAMPLER8_BORDER_GREEN
+0x0000A494 TD_PS_SAMPLER9_BORDER_GREEN
+0x0000A4A4 TD_PS_SAMPLER10_BORDER_GREEN
+0x0000A4B4 TD_PS_SAMPLER11_BORDER_GREEN
+0x0000A4C4 TD_PS_SAMPLER12_BORDER_GREEN
+0x0000A4D4 TD_PS_SAMPLER13_BORDER_GREEN
+0x0000A4E4 TD_PS_SAMPLER14_BORDER_GREEN
+0x0000A4F4 TD_PS_SAMPLER15_BORDER_GREEN
+0x0000A504 TD_PS_SAMPLER16_BORDER_GREEN
+0x0000A514 TD_PS_SAMPLER17_BORDER_GREEN
+0x0000A400 TD_PS_SAMPLER0_BORDER_RED
+0x0000A410 TD_PS_SAMPLER1_BORDER_RED
+0x0000A420 TD_PS_SAMPLER2_BORDER_RED
+0x0000A430 TD_PS_SAMPLER3_BORDER_RED
+0x0000A440 TD_PS_SAMPLER4_BORDER_RED
+0x0000A450 TD_PS_SAMPLER5_BORDER_RED
+0x0000A460 TD_PS_SAMPLER6_BORDER_RED
+0x0000A470 TD_PS_SAMPLER7_BORDER_RED
+0x0000A480 TD_PS_SAMPLER8_BORDER_RED
+0x0000A490 TD_PS_SAMPLER9_BORDER_RED
+0x0000A4A0 TD_PS_SAMPLER10_BORDER_RED
+0x0000A4B0 TD_PS_SAMPLER11_BORDER_RED
+0x0000A4C0 TD_PS_SAMPLER12_BORDER_RED
+0x0000A4D0 TD_PS_SAMPLER13_BORDER_RED
+0x0000A4E0 TD_PS_SAMPLER14_BORDER_RED
+0x0000A4F0 TD_PS_SAMPLER15_BORDER_RED
+0x0000A500 TD_PS_SAMPLER16_BORDER_RED
+0x0000A510 TD_PS_SAMPLER17_BORDER_RED
+0x0000AA00 TD_PS_SAMPLER0_CLEARTYPE_KERNEL
+0x0000AA04 TD_PS_SAMPLER1_CLEARTYPE_KERNEL
+0x0000AA08 TD_PS_SAMPLER2_CLEARTYPE_KERNEL
+0x0000AA0C TD_PS_SAMPLER3_CLEARTYPE_KERNEL
+0x0000AA10 TD_PS_SAMPLER4_CLEARTYPE_KERNEL
+0x0000AA14 TD_PS_SAMPLER5_CLEARTYPE_KERNEL
+0x0000AA18 TD_PS_SAMPLER6_CLEARTYPE_KERNEL
+0x0000AA1C TD_PS_SAMPLER7_CLEARTYPE_KERNEL
+0x0000AA20 TD_PS_SAMPLER8_CLEARTYPE_KERNEL
+0x0000AA24 TD_PS_SAMPLER9_CLEARTYPE_KERNEL
+0x0000AA28 TD_PS_SAMPLER10_CLEARTYPE_KERNEL
+0x0000AA2C TD_PS_SAMPLER11_CLEARTYPE_KERNEL
+0x0000AA30 TD_PS_SAMPLER12_CLEARTYPE_KERNEL
+0x0000AA34 TD_PS_SAMPLER13_CLEARTYPE_KERNEL
+0x0000AA38 TD_PS_SAMPLER14_CLEARTYPE_KERNEL
+0x0000AA3C TD_PS_SAMPLER15_CLEARTYPE_KERNEL
+0x0000AA40 TD_PS_SAMPLER16_CLEARTYPE_KERNEL
+0x0000AA44 TD_PS_SAMPLER17_CLEARTYPE_KERNEL
+0x0000A60C TD_VS_SAMPLER0_BORDER_ALPHA
+0x0000A61C TD_VS_SAMPLER1_BORDER_ALPHA
+0x0000A62C TD_VS_SAMPLER2_BORDER_ALPHA
+0x0000A63C TD_VS_SAMPLER3_BORDER_ALPHA
+0x0000A64C TD_VS_SAMPLER4_BORDER_ALPHA
+0x0000A65C TD_VS_SAMPLER5_BORDER_ALPHA
+0x0000A66C TD_VS_SAMPLER6_BORDER_ALPHA
+0x0000A67C TD_VS_SAMPLER7_BORDER_ALPHA
+0x0000A68C TD_VS_SAMPLER8_BORDER_ALPHA
+0x0000A69C TD_VS_SAMPLER9_BORDER_ALPHA
+0x0000A6AC TD_VS_SAMPLER10_BORDER_ALPHA
+0x0000A6BC TD_VS_SAMPLER11_BORDER_ALPHA
+0x0000A6CC TD_VS_SAMPLER12_BORDER_ALPHA
+0x0000A6DC TD_VS_SAMPLER13_BORDER_ALPHA
+0x0000A6EC TD_VS_SAMPLER14_BORDER_ALPHA
+0x0000A6FC TD_VS_SAMPLER15_BORDER_ALPHA
+0x0000A70C TD_VS_SAMPLER16_BORDER_ALPHA
+0x0000A71C TD_VS_SAMPLER17_BORDER_ALPHA
+0x0000A608 TD_VS_SAMPLER0_BORDER_BLUE
+0x0000A618 TD_VS_SAMPLER1_BORDER_BLUE
+0x0000A628 TD_VS_SAMPLER2_BORDER_BLUE
+0x0000A638 TD_VS_SAMPLER3_BORDER_BLUE
+0x0000A648 TD_VS_SAMPLER4_BORDER_BLUE
+0x0000A658 TD_VS_SAMPLER5_BORDER_BLUE
+0x0000A668 TD_VS_SAMPLER6_BORDER_BLUE
+0x0000A678 TD_VS_SAMPLER7_BORDER_BLUE
+0x0000A688 TD_VS_SAMPLER8_BORDER_BLUE
+0x0000A698 TD_VS_SAMPLER9_BORDER_BLUE
+0x0000A6A8 TD_VS_SAMPLER10_BORDER_BLUE
+0x0000A6B8 TD_VS_SAMPLER11_BORDER_BLUE
+0x0000A6C8 TD_VS_SAMPLER12_BORDER_BLUE
+0x0000A6D8 TD_VS_SAMPLER13_BORDER_BLUE
+0x0000A6E8 TD_VS_SAMPLER14_BORDER_BLUE
+0x0000A6F8 TD_VS_SAMPLER15_BORDER_BLUE
+0x0000A708 TD_VS_SAMPLER16_BORDER_BLUE
+0x0000A718 TD_VS_SAMPLER17_BORDER_BLUE
+0x0000A604 TD_VS_SAMPLER0_BORDER_GREEN
+0x0000A614 TD_VS_SAMPLER1_BORDER_GREEN
+0x0000A624 TD_VS_SAMPLER2_BORDER_GREEN
+0x0000A634 TD_VS_SAMPLER3_BORDER_GREEN
+0x0000A644 TD_VS_SAMPLER4_BORDER_GREEN
+0x0000A654 TD_VS_SAMPLER5_BORDER_GREEN
+0x0000A664 TD_VS_SAMPLER6_BORDER_GREEN
+0x0000A674 TD_VS_SAMPLER7_BORDER_GREEN
+0x0000A684 TD_VS_SAMPLER8_BORDER_GREEN
+0x0000A694 TD_VS_SAMPLER9_BORDER_GREEN
+0x0000A6A4 TD_VS_SAMPLER10_BORDER_GREEN
+0x0000A6B4 TD_VS_SAMPLER11_BORDER_GREEN
+0x0000A6C4 TD_VS_SAMPLER12_BORDER_GREEN
+0x0000A6D4 TD_VS_SAMPLER13_BORDER_GREEN
+0x0000A6E4 TD_VS_SAMPLER14_BORDER_GREEN
+0x0000A6F4 TD_VS_SAMPLER15_BORDER_GREEN
+0x0000A704 TD_VS_SAMPLER16_BORDER_GREEN
+0x0000A714 TD_VS_SAMPLER17_BORDER_GREEN
+0x0000A600 TD_VS_SAMPLER0_BORDER_RED
+0x0000A610 TD_VS_SAMPLER1_BORDER_RED
+0x0000A620 TD_VS_SAMPLER2_BORDER_RED
+0x0000A630 TD_VS_SAMPLER3_BORDER_RED
+0x0000A640 TD_VS_SAMPLER4_BORDER_RED
+0x0000A650 TD_VS_SAMPLER5_BORDER_RED
+0x0000A660 TD_VS_SAMPLER6_BORDER_RED
+0x0000A670 TD_VS_SAMPLER7_BORDER_RED
+0x0000A680 TD_VS_SAMPLER8_BORDER_RED
+0x0000A690 TD_VS_SAMPLER9_BORDER_RED
+0x0000A6A0 TD_VS_SAMPLER10_BORDER_RED
+0x0000A6B0 TD_VS_SAMPLER11_BORDER_RED
+0x0000A6C0 TD_VS_SAMPLER12_BORDER_RED
+0x0000A6D0 TD_VS_SAMPLER13_BORDER_RED
+0x0000A6E0 TD_VS_SAMPLER14_BORDER_RED
+0x0000A6F0 TD_VS_SAMPLER15_BORDER_RED
+0x0000A700 TD_VS_SAMPLER16_BORDER_RED
+0x0000A710 TD_VS_SAMPLER17_BORDER_RED
+0x00009508 TA_CNTL_AUX
+0x0002802C DB_DEPTH_CLEAR
+0x00028D24 DB_HTILE_SURFACE
+0x00028D34 DB_PREFETCH_LIMIT
+0x00028D30 DB_PRELOAD_CONTROL
+0x00028D0C DB_RENDER_CONTROL
+0x00028D10 DB_RENDER_OVERRIDE
+0x0002880C DB_SHADER_CONTROL
+0x00028D2C DB_SRESULTS_COMPARE_STATE1
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028028 DB_STENCIL_CLEAR
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x00028804 CB_BLEND_CONTROL
+0x00028420 CB_BLEND_ALPHA
+0x0002841C CB_BLEND_BLUE
+0x00028418 CB_BLEND_GREEN
+0x00028414 CB_BLEND_RED
+0x0002812C CB_CLEAR_ALPHA
+0x00028128 CB_CLEAR_BLUE
+0x00028124 CB_CLEAR_GREEN
+0x00028120 CB_CLEAR_RED
+0x00028C30 CB_CLRCMP_CONTROL
+0x00028C38 CB_CLRCMP_DST
+0x00028C3C CB_CLRCMP_MSK
+0x00028C34 CB_CLRCMP_SRC
+0x00028100 CB_COLOR0_MASK
+0x00028104 CB_COLOR1_MASK
+0x00028108 CB_COLOR2_MASK
+0x0002810C CB_COLOR3_MASK
+0x00028110 CB_COLOR4_MASK
+0x00028114 CB_COLOR5_MASK
+0x00028118 CB_COLOR6_MASK
+0x0002811C CB_COLOR7_MASK
+0x00028080 CB_COLOR0_VIEW
+0x00028084 CB_COLOR1_VIEW
+0x00028088 CB_COLOR2_VIEW
+0x0002808C CB_COLOR3_VIEW
+0x00028090 CB_COLOR4_VIEW
+0x00028094 CB_COLOR5_VIEW
+0x00028098 CB_COLOR6_VIEW
+0x0002809C CB_COLOR7_VIEW
+0x00028808 CB_COLOR_CONTROL
+0x0002842C CB_FOG_BLUE
+0x00028428 CB_FOG_GREEN
+0x00028424 CB_FOG_RED
+0x00008040 WAIT_UNTIL
+0x00008950 CC_GC_SHADER_PIPE_CONFIG
+0x00008954 GC_USER_SHADER_PIPE_CONFIG
+0x00009714 VC_ENHANCE
+0x00009830 DB_DEBUG
+0x00009838 DB_WATERMARKS
+0x00028D28 DB_SRESULTS_COMPARE_STATE0
+0x00028D44 DB_ALPHA_TO_MASK
+0x00009504 TA_CNTL
+0x00009700 VC_CNTL
+0x00009718 VC_CONFIG
+0x0000A02C SMX_DC_MC_INTF_CTL
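
The offsets above complete the additions to the r600 safe-register list (reg_srcs/r600), which the build folds into a lookup table the command-stream checker consults before letting userspace touch a register. A minimal sketch of that kind of check; the bitmap name, layout, and bit polarity here are illustrative, not the generated table's actual form:

#include <linux/types.h>

/* sketch: one bit per 32-bit register word; set bit = safe in this sketch */
static bool reg_is_safe_sketch(const u32 *safe_bm, unsigned int bm_words, u32 reg)
{
	u32 word = reg >> 2;	/* byte offset -> register word index */
	u32 idx = word >> 5;	/* 32 register words per bitmap entry */

	if (idx >= bm_words)
		return false;
	return safe_bm[idx] & (1u << (word & 31));
}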
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 287fceb..626d518 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -113,6 +113,7 @@ int rs400_gart_enable(struct radeon_device *rdev)
uint32_t size_reg;
uint32_t tmp;
+ radeon_gart_restore(rdev);
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
@@ -150,9 +151,8 @@ int rs400_gart_enable(struct radeon_device *rdev)
WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
WREG32(RS480_AGP_BASE_2, 0);
}
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
- tmp = REG_SET(RS690_MC_AGP_TOP, tmp >> 16);
- tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_location >> 16);
+ tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
+ tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
@@ -251,14 +251,19 @@ void rs400_gpu_init(struct radeon_device *rdev)
}
}
-void rs400_vram_info(struct radeon_device *rdev)
+void rs400_mc_init(struct radeon_device *rdev)
{
+ u64 base;
+
rs400_gart_adjust_size(rdev);
+ rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
/* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
-
r100_vram_init_sizes(rdev);
+ base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ radeon_gtt_location(rdev, &rdev->mc);
}
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -362,22 +367,6 @@ static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
#endif
}
-static int rs400_mc_init(struct radeon_device *rdev)
-{
- int r;
- u32 tmp;
-
- /* Setup GPU memory space */
- tmp = RREG32(R_00015C_NB_TOM);
- rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
- rdev->mc.gtt_location = 0xFFFFFFFFUL;
- r = radeon_mc_setup(rdev);
- rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
- if (r)
- return r;
- return 0;
-}
-
void rs400_mc_program(struct radeon_device *rdev)
{
struct r100_mc_save save;
@@ -516,12 +505,8 @@ int rs400_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- rs400_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = rs400_mc_init(rdev);
- if (r)
- return r;
+ /* initialize memory controller */
+ rs400_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
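
rs400 now derives the VRAM base from RADEON_NB_TOM and defers placement to the shared helpers instead of open-coding vram_location/gtt_location (the rs600 and rs690 hunks below get the same treatment). A minimal sketch of what such a placement helper computes, using the mc fields visible in these hunks; the real radeon_vram_location()/radeon_gtt_location() also handle clamping and the AGP case:

/* sketch: VRAM at a fixed base, GTT directly above it
 * (gtt_size assumed to be a power of two for ALIGN) */
static void place_vram_gtt_sketch(struct radeon_mc *mc, u64 vram_base)
{
	mc->vram_start = vram_base;
	mc->vram_end = vram_base + mc->mc_vram_size - 1;
	mc->gtt_start = ALIGN(mc->vram_end + 1, mc->gtt_size);
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
}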
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index c381856..47f046b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -45,23 +45,6 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
-int rs600_mc_init(struct radeon_device *rdev)
-{
- /* read back the MC value from the hw */
- int r;
- u32 tmp;
-
- /* Setup GPU memory space */
- tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
- rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
- rdev->mc.gtt_location = 0xffffffffUL;
- r = radeon_mc_setup(rdev);
- rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
- if (r)
- return r;
- return 0;
-}
-
/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
@@ -213,6 +196,7 @@ int rs600_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* Enable bus master */
tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
WREG32(R_00004C_BUS_CNTL, tmp);
@@ -406,10 +390,14 @@ int rs600_irq_process(struct radeon_device *rdev)
if (G_000044_SW_INT(status))
radeon_fence_process(rdev);
/* Vertical blank interrupts */
- if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
+ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
drm_handle_vblank(rdev->ddev, 0);
- if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
drm_handle_vblank(rdev->ddev, 1);
+ wake_up(&rdev->irq.vblank_queue);
+ }
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
queue_hotplug = true;
DRM_DEBUG("HPD1\n");
@@ -470,22 +458,22 @@ void rs600_gpu_init(struct radeon_device *rdev)
dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}
-void rs600_vram_info(struct radeon_device *rdev)
+void rs600_mc_init(struct radeon_device *rdev)
{
+ u64 base;
+
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
-
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
-
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
- rdev->mc.real_vram_size = rdev->mc.aper_size;
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+ base = RREG32_MC(R_000004_MC_FB_LOCATION);
+ base = G_000004_MC_FB_START(base) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ radeon_gtt_location(rdev, &rdev->mc);
}
void rs600_bandwidth_update(struct radeon_device *rdev)
@@ -661,12 +649,8 @@ int rs600_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- rs600_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = rs600_mc_init(rdev);
- if (r)
- return r;
+ /* initialize memory controller */
+ rs600_mc_init(rdev);
rs600_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
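
rs600_irq_process() now wakes rdev->irq.vblank_queue on either CRTC's vblank in addition to calling drm_handle_vblank(), so callers can sleep for the next vblank instead of polling. A sketch of the waiting side this enables; mydrv_vblank_seen() is a hypothetical predicate over a per-CRTC counter:

#include <linux/wait.h>
#include <linux/jiffies.h>

/* sketch: pairs with wake_up(&rdev->irq.vblank_queue) in the IRQ handler */
static int wait_vblank_sketch(struct radeon_device *rdev, int crtc)
{
	long rem;

	rem = wait_event_timeout(rdev->irq.vblank_queue,
				 mydrv_vblank_seen(rdev, crtc),
				 msecs_to_jiffies(100));
	return rem ? 0 : -ETIMEDOUT;
}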
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 06e2771..83b9174 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -129,27 +129,21 @@ void rs690_pm_info(struct radeon_device *rdev)
rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
}
-void rs690_vram_info(struct radeon_device *rdev)
+void rs690_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
+ u64 base;
rs400_gart_adjust_size(rdev);
-
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
-
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
-
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
- rdev->mc.real_vram_size = rdev->mc.aper_size;
-
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+ base = G_000100_MC_FB_START(base) << 16;
rs690_pm_info(rdev);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
@@ -160,22 +154,9 @@ void rs690_vram_info(struct radeon_device *rdev)
a.full = rfixed_const(16);
/* core_bandwidth = sclk(Mhz) * 16 */
rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
-}
-
-static int rs690_mc_init(struct radeon_device *rdev)
-{
- int r;
- u32 tmp;
-
- /* Setup GPU memory space */
- tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
- rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
- rdev->mc.gtt_location = 0xFFFFFFFFUL;
- r = radeon_mc_setup(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
- if (r)
- return r;
- return 0;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ radeon_gtt_location(rdev, &rdev->mc);
}
void rs690_line_buffer_adjust(struct radeon_device *rdev,
@@ -728,12 +709,8 @@ int rs690_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- rs690_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = rs690_mc_init(rdev);
- if (r)
- return r;
+ /* initialize memory controller */
+ rs690_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 0e1e6b8..bea747d 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -277,13 +277,15 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
}
}
-void rv515_vram_info(struct radeon_device *rdev)
+void rv515_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
rv515_vram_get_type(rdev);
-
r100_vram_init_sizes(rdev);
+ radeon_vram_location(rdev, &rdev->mc, 0);
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
@@ -587,12 +589,15 @@ int rv515_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- rv515_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
- if (r)
- return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize memory controller */
+ rv515_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 0302167..37887de 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -56,6 +56,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
@@ -273,9 +274,10 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
/*
* Core functions
*/
-static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
- u32 num_backends,
- u32 backend_disable_mask)
+static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ u32 num_tile_pipes,
+ u32 num_backends,
+ u32 backend_disable_mask)
{
u32 backend_map = 0;
u32 enabled_backends_mask;
@@ -284,6 +286,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
u32 swizzle_pipe[R7XX_MAX_PIPES];
u32 cur_backend;
u32 i;
+ bool force_no_swizzle;
if (num_tile_pipes > R7XX_MAX_PIPES)
num_tile_pipes = R7XX_MAX_PIPES;
@@ -313,6 +316,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
+ switch (rdev->family) {
+ case CHIP_RV770:
+ case CHIP_RV730:
+ force_no_swizzle = false;
+ break;
+ case CHIP_RV710:
+ case CHIP_RV740:
+ default:
+ force_no_swizzle = true;
+ break;
+ }
+
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
@@ -323,49 +338,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
swizzle_pipe[1] = 1;
break;
case 3:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 1;
+ }
break;
case 4:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 3;
- swizzle_pipe[3] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 3;
+ swizzle_pipe[3] = 1;
+ }
break;
case 5:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 1;
+ swizzle_pipe[4] = 3;
+ }
break;
case 6:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 5;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 5;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ }
break;
case 7:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 5;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ swizzle_pipe[6] = 5;
+ }
break;
case 8:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 7;
- swizzle_pipe[7] = 5;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ swizzle_pipe[7] = 7;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ swizzle_pipe[6] = 7;
+ swizzle_pipe[7] = 5;
+ }
break;
}
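
Each force_no_swizzle arm above is an identity map; collapsed, it would be the loop below (a sketch only - the driver keeps the explicit tables, since the swizzled arms follow no single closed form):

/* sketch: the identity fill every force_no_swizzle arm spells out */
static void fill_identity_swizzle(u32 *swizzle_pipe, u32 num_tile_pipes)
{
	u32 i;

	for (i = 0; i < num_tile_pipes; i++)
		swizzle_pipe[i] = i;
}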
@@ -385,8 +451,10 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
+ u32 ta_aux_cntl;
u32 sx_debug_1;
u32 smx_dc_ctl0;
+ u32 db_debug3;
u32 num_gs_verts_per_thread;
u32 vgt_gs_per_es;
u32 gs_prim_buffer_depth = 0;
@@ -515,6 +583,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
switch (rdev->config.rv770.max_tile_pipes) {
case 1:
+ default:
gb_tiling_config |= PIPE_TILING(0);
break;
case 2:
@@ -526,16 +595,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
case 8:
gb_tiling_config |= PIPE_TILING(3);
break;
- default:
- break;
}
+ rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
if (rdev->family == CHIP_RV770)
gb_tiling_config |= BANK_TILING(1);
else
gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+ rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
gb_tiling_config |= GROUP_SIZE(0);
+ rdev->config.rv770.tiling_group_size = 256;
if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
@@ -549,21 +619,27 @@ static void rv770_gpu_init(struct radeon_device *rdev)
gb_tiling_config |= BANK_SWAPS(1);
- if (rdev->family == CHIP_RV740)
- backend_map = 0x28;
- else
- backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
- rdev->config.rv770.max_backends,
- (0xff << rdev->config.rv770.max_backends) & 0xff);
- gb_tiling_config |= BACKEND_MAP(backend_map);
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ cc_rb_backend_disable |=
+ BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
- cc_gc_shader_pipe_config =
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
- cc_rb_backend_disable =
- BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
+ if (rdev->family == CHIP_RV740)
+ backend_map = 0x28;
+ else
+ backend_map = r700_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.rv770.max_tile_pipes,
+ (R7XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable &
+ R7XX_MAX_BACKENDS_MASK) >> 16)),
+ (cc_rb_backend_disable >> 16));
+ gb_tiling_config |= BACKEND_MAP(backend_map);
+
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
@@ -571,16 +647,13 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+ WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
- WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
- WREG32(CGTS_USER_TCC_DISABLE, 0);
num_qd_pipes =
- R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK);
+ R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
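
num_qd_pipes is now computed from the inactive-pipe field of cc_gc_shader_pipe_config, shifted into place before counting, rather than from the backend mask. Assuming r600_count_pipe_bits() is hweight-like, the derivation reduces to:

#include <linux/bitops.h>

/* sketch: active units = maximum minus popcount of the inactive field */
static u32 active_units_sketch(u32 max_units, u32 reg, u32 mask, int shift)
{
	return max_units - hweight32((reg & mask) >> shift);
}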
@@ -590,10 +663,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
- WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
- SYNC_GRADIENT |
- SYNC_WALKER |
- SYNC_ALIGNER));
+ ta_aux_cntl = RREG32(TA_CNTL_AUX);
+ WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);
sx_debug_1 = RREG32(SX_DEBUG_1);
sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
@@ -604,14 +675,28 @@ static void rv770_gpu_init(struct radeon_device *rdev)
smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
WREG32(SMX_DC_CTL0, smx_dc_ctl0);
- WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
- GS_FLUSH_CTL(4) |
- ACK_FLUSH_CTL(3) |
- SYNC_FLUSH_CTL));
+ if (rdev->family != CHIP_RV740)
+ WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
+ GS_FLUSH_CTL(4) |
+ ACK_FLUSH_CTL(3) |
+ SYNC_FLUSH_CTL));
- if (rdev->family == CHIP_RV770)
- WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f));
- else {
+ db_debug3 = RREG32(DB_DEBUG3);
+ db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
+ switch (rdev->family) {
+ case CHIP_RV770:
+ case CHIP_RV740:
+ db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
+ break;
+ case CHIP_RV710:
+ case CHIP_RV730:
+ default:
+ db_debug3 |= DB_CLK_OFF_DELAY(2);
+ break;
+ }
+ WREG32(DB_DEBUG3, db_debug3);
+
+ if (rdev->family != CHIP_RV770) {
db_debug4 = RREG32(DB_DEBUG4);
db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
WREG32(DB_DEBUG4, db_debug4);
@@ -640,10 +725,10 @@ static void rv770_gpu_init(struct radeon_device *rdev)
ALU_UPDATE_FIFO_HIWATER(0x8));
switch (rdev->family) {
case CHIP_RV770:
- sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
- break;
case CHIP_RV730:
case CHIP_RV710:
+ sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
+ break;
case CHIP_RV740:
default:
sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
@@ -816,45 +901,13 @@ int rv770_mc_init(struct radeon_device *rdev)
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ /* FIXME remove this once we support unmappable VRAM */
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;
-
- if (rdev->flags & RADEON_IS_AGP) {
- /* gtt_size is setup by radeon_agp_init */
- rdev->mc.gtt_location = rdev->mc.agp_base;
- tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
- /* Try to put vram before or after AGP because we
- * we want SYSTEM_APERTURE to cover both VRAM and
- * AGP so that GPU can catch out of VRAM/AGP access
- */
- if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
- /* Enough place before */
- rdev->mc.vram_location = rdev->mc.gtt_location -
- rdev->mc.mc_vram_size;
- } else if (tmp > rdev->mc.mc_vram_size) {
- /* Enough place after */
- rdev->mc.vram_location = rdev->mc.gtt_location +
- rdev->mc.gtt_size;
- } else {
- /* Try to setup VRAM then AGP might not
- * not work on some card
- */
- rdev->mc.vram_location = 0x00000000UL;
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- }
- } else {
- rdev->mc.vram_location = 0x00000000UL;
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
- rdev->mc.vram_start = rdev->mc.vram_location;
- rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
- rdev->mc.gtt_start = rdev->mc.gtt_location;
- rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ r600_vram_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
@@ -863,6 +916,7 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
return 0;
}
+
int rv770_gpu_reset(struct radeon_device *rdev)
{
/* FIXME: implement any rv770 specific bits */
@@ -1038,6 +1092,7 @@ int rv770_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
+ /* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index a1367ab..9506f8c 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -343,4 +343,6 @@
#define WAIT_UNTIL 0x8040
+#define SRBM_STATUS 0x0E50
+
#endif
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 3d47a2c..a759170 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -480,7 +480,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
void *from_virtual;
void *to_virtual;
int i;
- int ret;
+ int ret = -ENOMEM;
if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
@@ -499,8 +499,10 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; ++i) {
from_page = read_mapping_page(swap_space, i, NULL);
- if (IS_ERR(from_page))
+ if (IS_ERR(from_page)) {
+ ret = PTR_ERR(from_page);
goto out_err;
+ }
to_page = __ttm_tt_get_page(ttm, i);
if (unlikely(to_page == NULL))
goto out_err;
@@ -523,7 +525,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
return 0;
out_err:
ttm_tt_free_alloced_pages(ttm);
- return -ENOMEM;
+ return ret;
}
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
@@ -535,6 +537,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
void *from_virtual;
void *to_virtual;
int i;
+ int ret = -ENOMEM;
BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
BUG_ON(ttm->caching_state != tt_cached);
@@ -557,7 +560,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
0);
if (unlikely(IS_ERR(swap_storage))) {
printk(KERN_ERR "Failed allocating swap storage.\n");
- return -ENOMEM;
+ return PTR_ERR(swap_storage);
}
} else
swap_storage = persistant_swap_storage;
@@ -569,9 +572,10 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
if (unlikely(from_page == NULL))
continue;
to_page = read_mapping_page(swap_space, i, NULL);
- if (unlikely(to_page == NULL))
+ if (unlikely(IS_ERR(to_page))) {
+ ret = PTR_ERR(to_page);
goto out_err;
-
+ }
preempt_disable();
from_virtual = kmap_atomic(from_page, KM_USER0);
to_virtual = kmap_atomic(to_page, KM_USER1);
@@ -595,5 +599,5 @@ out_err:
if (!persistant_swap_storage)
fput(swap_storage);
- return -ENOMEM;
+ return ret;
}
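
The ttm_tt changes stop flattening every failure to -ENOMEM and instead propagate the errno encoded in the page pointer. A minimal sketch of the IS_ERR()/PTR_ERR() idiom these hunks adopt:

#include <linux/err.h>
#include <linux/pagemap.h>

/* sketch: carry the real errno out of the pointer instead of guessing */
static int consume_page_sketch(struct address_space *mapping, pgoff_t idx)
{
	struct page *page = read_mapping_page(mapping, idx, NULL);

	if (IS_ERR(page))
		return PTR_ERR(page);	/* e.g. -EIO from the backing store */
	/* ... use the page ... */
	put_page(page);
	return 0;
}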
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 0920492..61ab4da 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -16,3 +16,14 @@ config VGA_ARB_MAX_GPUS
help
Reserves space in the kernel to maintain resource locking for
multiple GPUS. The overhead for each GPU is very small.
+
+config VGA_SWITCHEROO
+ bool "Laptop Hybrid Grapics - GPU switching support"
+ depends on X86
+ depends on ACPI
+ help
+ Many laptops released in 2008/9/10 have two GPUs with a multiplexer
+ to switch between them. This adds support for dynamic switching when
+ X isn't running and delayed switching until the next logoff. This
+ feature is called hybrid graphics, ATI PowerXpress, and Nvidia
+ HybridPower.
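
The switch file this option creates accepts the commands parsed in vga_switcheroo_debugfs_write() below (ON, OFF, IGD, DIS, and the delayed DIGD/DDIS forms). A minimal userspace sketch requesting a delayed switch to the discrete GPU:

/* sketch: ask vga_switcheroo for a delayed switch to the discrete GPU */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/vgaswitcheroo/switch", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "DDIS", 4) != 4)	/* takes effect at the next switch point */
		perror("write");
	close(fd);
	return 0;
}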
diff --git a/drivers/gpu/vga/Makefile b/drivers/gpu/vga/Makefile
index 7cc8c1e..14ca30b 100644
--- a/drivers/gpu/vga/Makefile
+++ b/drivers/gpu/vga/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_VGA_ARB) += vgaarb.o
+obj-$(CONFIG_VGA_SWITCHEROO) += vga_switcheroo.o
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
new file mode 100644
index 0000000..d6d1149
--- /dev/null
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2010 Red Hat Inc.
+ * Author : Dave Airlie <airlied@redhat.com>
+ *
+ *
+ * Licensed under GPLv2
+ *
+ * vga_switcheroo.c - Support for laptops with dual GPUs using one set of outputs
+
+ Switcher interface - methods required for ATPX and DCM
+ - switchto - this throws the output MUX switch
+ - discrete_set_power - sets the power state for the discrete card
+
+ GPU driver interface
+ - set_gpu_state - this should do the equivalent of suspend/resume for the card
+ - this should *not* set the discrete power state
+ - switch_check - check if the device is in a position to switch now
+ */
+
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/fb.h>
+
+#include <linux/pci.h>
+#include <linux/vga_switcheroo.h>
+
+struct vga_switcheroo_client {
+ struct pci_dev *pdev;
+ struct fb_info *fb_info;
+ int pwr_state;
+ void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state);
+ bool (*can_switch)(struct pci_dev *pdev);
+ int id;
+ bool active;
+};
+
+static DEFINE_MUTEX(vgasr_mutex);
+
+struct vgasr_priv {
+
+ bool active;
+ bool delayed_switch_active;
+ enum vga_switcheroo_client_id delayed_client_id;
+
+ struct dentry *debugfs_root;
+ struct dentry *switch_file;
+
+ int registered_clients;
+ struct vga_switcheroo_client clients[VGA_SWITCHEROO_MAX_CLIENTS];
+
+ struct vga_switcheroo_handler *handler;
+};
+
+static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
+static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv);
+
+/* only one switcheroo per system */
+static struct vgasr_priv vgasr_priv;
+
+int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
+{
+ mutex_lock(&vgasr_mutex);
+ if (vgasr_priv.handler) {
+ mutex_unlock(&vgasr_mutex);
+ return -EINVAL;
+ }
+
+ vgasr_priv.handler = handler;
+ mutex_unlock(&vgasr_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(vga_switcheroo_register_handler);
+
+void vga_switcheroo_unregister_handler(void)
+{
+ mutex_lock(&vgasr_mutex);
+ vgasr_priv.handler = NULL;
+ mutex_unlock(&vgasr_mutex);
+}
+EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
+
+static void vga_switcheroo_enable(void)
+{
+ int i;
+ int ret;
+ /* call the handler to init */
+ vgasr_priv.handler->init();
+
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ ret = vgasr_priv.handler->get_client_id(vgasr_priv.clients[i].pdev);
+ if (ret < 0)
+ return;
+
+ vgasr_priv.clients[i].id = ret;
+ }
+ vga_switcheroo_debugfs_init(&vgasr_priv);
+ vgasr_priv.active = true;
+}
+
+int vga_switcheroo_register_client(struct pci_dev *pdev,
+ void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state),
+ bool (*can_switch)(struct pci_dev *pdev))
+{
+ int index;
+
+ mutex_lock(&vgasr_mutex);
+ /* don't do IGD vs DIS here */
+ if (vgasr_priv.registered_clients & 1)
+ index = 1;
+ else
+ index = 0;
+
+ vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON;
+ vgasr_priv.clients[index].pdev = pdev;
+ vgasr_priv.clients[index].set_gpu_state = set_gpu_state;
+ vgasr_priv.clients[index].can_switch = can_switch;
+ vgasr_priv.clients[index].id = -1;
+ if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
+ vgasr_priv.clients[index].active = true;
+
+ vgasr_priv.registered_clients |= (1 << index);
+
+ /* if we get two clients + handler */
+ if (vgasr_priv.registered_clients == 0x3 && vgasr_priv.handler) {
+ printk(KERN_INFO "vga_switcheroo: enabled\n");
+ vga_switcheroo_enable();
+ }
+ mutex_unlock(&vgasr_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(vga_switcheroo_register_client);
+
+void vga_switcheroo_unregister_client(struct pci_dev *pdev)
+{
+ int i;
+
+ mutex_lock(&vgasr_mutex);
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].pdev == pdev) {
+ vgasr_priv.registered_clients &= ~(1 << i);
+ break;
+ }
+ }
+
+ printk(KERN_INFO "vga_switcheroo: disabled\n");
+ vga_switcheroo_debugfs_fini(&vgasr_priv);
+ vgasr_priv.active = false;
+ mutex_unlock(&vgasr_mutex);
+}
+EXPORT_SYMBOL(vga_switcheroo_unregister_client);
+
+void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
+ struct fb_info *info)
+{
+ int i;
+
+ mutex_lock(&vgasr_mutex);
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].pdev == pdev) {
+ vgasr_priv.clients[i].fb_info = info;
+ break;
+ }
+ }
+ mutex_unlock(&vgasr_mutex);
+}
+EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
+
+static int vga_switcheroo_show(struct seq_file *m, void *v)
+{
+ int i;
+ mutex_lock(&vgasr_mutex);
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ seq_printf(m, "%d:%c:%s:%s\n", i,
+ vgasr_priv.clients[i].active ? '+' : ' ',
+ vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off",
+ pci_name(vgasr_priv.clients[i].pdev));
+ }
+ mutex_unlock(&vgasr_mutex);
+ return 0;
+}
+
+static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vga_switcheroo_show, NULL);
+}
+
+static int vga_switchon(struct vga_switcheroo_client *client)
+{
+ int ret;
+
+ ret = vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
+ /* call the driver callback to turn on device */
+ client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
+ client->pwr_state = VGA_SWITCHEROO_ON;
+ return 0;
+}
+
+static int vga_switchoff(struct vga_switcheroo_client *client)
+{
+ /* call the driver callback to turn off device */
+ client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
+ vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
+ client->pwr_state = VGA_SWITCHEROO_OFF;
+ return 0;
+}
+
+static int vga_switchto(struct vga_switcheroo_client *new_client)
+{
+ int ret;
+ int i;
+ struct vga_switcheroo_client *active = NULL;
+
+ if (new_client->active == true)
+ return 0;
+
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].active == true) {
+ active = &vgasr_priv.clients[i];
+ break;
+ }
+ }
+ if (!active)
+ return 0;
+
+ /* power up the device we are switching to */
+ ret = pci_enable_device(new_client->pdev);
+ if (ret)
+ return ret;
+
+ if (new_client->pwr_state == VGA_SWITCHEROO_OFF)
+ vga_switchon(new_client);
+
+ /* swap shadow resource to denote boot VGA device has changed so X starts on new device */
+ active->active = false;
+
+ active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW;
+ new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
+
+ if (new_client->fb_info) {
+ struct fb_event event;
+ event.info = new_client->fb_info;
+ fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
+ }
+
+ ret = vgasr_priv.handler->switchto(new_client->id);
+ if (ret)
+ return ret;
+
+ if (active->pwr_state == VGA_SWITCHEROO_ON)
+ vga_switchoff(active);
+
+ new_client->active = true;
+ return 0;
+}
+
+static ssize_t
+vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char usercmd[64];
+ const char *pdev_name;
+ int i, ret;
+ bool delay = false, can_switch;
+ int client_id = -1;
+ struct vga_switcheroo_client *client = NULL;
+
+ if (cnt > 63)
+ cnt = 63;
+
+ if (copy_from_user(usercmd, ubuf, cnt))
+ return -EFAULT;
+ usercmd[cnt] = '\0'; /* terminate before the strncmp() parsing below */
+
+ mutex_lock(&vgasr_mutex);
+
+ if (!vgasr_priv.active) {
+ mutex_unlock(&vgasr_mutex);
+ return -EINVAL;
+ }
+
+ /* pwr off the device not in use */
+ if (strncmp(usercmd, "OFF", 3) == 0) {
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].active)
+ continue;
+ if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_ON)
+ vga_switchoff(&vgasr_priv.clients[i]);
+ }
+ goto out;
+ }
+ /* pwr on the device not in use */
+ if (strncmp(usercmd, "ON", 2) == 0) {
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].active)
+ continue;
+ if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_OFF)
+ vga_switchon(&vgasr_priv.clients[i]);
+ }
+ goto out;
+ }
+
+ /* request a delayed switch - test can we switch now */
+ if (strncmp(usercmd, "DIGD", 4) == 0) {
+ client_id = VGA_SWITCHEROO_IGD;
+ delay = true;
+ }
+
+ if (strncmp(usercmd, "DDIS", 4) == 0) {
+ client_id = VGA_SWITCHEROO_DIS;
+ delay = true;
+ }
+
+ if (strncmp(usercmd, "IGD", 3) == 0)
+ client_id = VGA_SWITCHEROO_IGD;
+
+ if (strncmp(usercmd, "DIS", 3) == 0)
+ client_id = VGA_SWITCHEROO_DIS;
+
+ if (client_id == -1)
+ goto out;
+
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].id == client_id) {
+ client = &vgasr_priv.clients[i];
+ break;
+ }
+ }
+
+ vgasr_priv.delayed_switch_active = false;
+ /* okay we want a switch - test if devices are willing to switch */
+ can_switch = true;
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
+ if (can_switch == false) {
+ printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
+ break;
+ }
+ }
+
+ if (can_switch == false && delay == false)
+ goto out;
+
+ if (can_switch == true) {
+ pdev_name = pci_name(client->pdev);
+ ret = vga_switchto(client);
+ if (ret)
+ printk(KERN_ERR "vga_switcheroo: switching failed %d\n", ret);
+ } else {
+ printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
+ vgasr_priv.delayed_switch_active = true;
+ vgasr_priv.delayed_client_id = client_id;
+
+ /* we should at least power up the card to
+ make the switch faster */
+ if (client->pwr_state == VGA_SWITCHEROO_OFF)
+ vga_switchon(client);
+ }
+
+out:
+ mutex_unlock(&vgasr_mutex);
+ return cnt;
+}
+
+static const struct file_operations vga_switcheroo_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = vga_switcheroo_debugfs_open,
+ .write = vga_switcheroo_debugfs_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv)
+{
+ if (priv->switch_file) {
+ debugfs_remove(priv->switch_file);
+ priv->switch_file = NULL;
+ }
+ if (priv->debugfs_root) {
+ debugfs_remove(priv->debugfs_root);
+ priv->debugfs_root = NULL;
+ }
+}
+
+static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
+{
+ /* already initialised */
+ if (priv->debugfs_root)
+ return 0;
+ priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL);
+
+ if (!priv->debugfs_root) {
+ printk(KERN_ERR "vga_switcheroo: Cannot create /sys/kernel/debug/vgaswitcheroo\n");
+ goto fail;
+ }
+
+ priv->switch_file = debugfs_create_file("switch", 0644,
+ priv->debugfs_root, NULL, &vga_switcheroo_debugfs_fops);
+ if (!priv->switch_file) {
+ printk(KERN_ERR "vga_switcheroo: cannot create /sys/kernel/debug/vgaswitcheroo/switch\n");
+ goto fail;
+ }
+ return 0;
+fail:
+ vga_switcheroo_debugfs_fini(priv);
+ return -1;
+}
+
+int vga_switcheroo_process_delayed_switch(void)
+{
+ struct vga_switcheroo_client *client = NULL;
+ const char *pdev_name;
+ bool can_switch = true;
+ int i;
+ int ret;
+ int err = -EINVAL;
+
+ mutex_lock(&vgasr_mutex);
+ if (!vgasr_priv.delayed_switch_active)
+ goto err;
+
+ printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
+
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].id == vgasr_priv.delayed_client_id)
+ client = &vgasr_priv.clients[i];
+ can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
+ if (can_switch == false) {
+ printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
+ break;
+ }
+ }
+
+ if (can_switch == false || client == NULL)
+ goto err;
+
+ pdev_name = pci_name(client->pdev);
+ ret = vga_switchto(client);
+ if (ret)
+ printk(KERN_ERR "vga_switcheroo: delayed switching failed %d\n", ret);
+
+ vgasr_priv.delayed_switch_active = false;
+ err = 0;
+err:
+ mutex_unlock(&vgasr_mutex);
+ return err;
+}
+EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
+
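
For reference, how a GPU driver hooks into the client interface above; a minimal sketch with hypothetical mydrv_* callbacks, matching the vga_switcheroo_register_client() signature:

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

/* sketch: suspend/resume-level power control only; the handler owns the
 * discrete power rail (mydrv_suspend/resume/busy are hypothetical) */
static void mydrv_set_gpu_state(struct pci_dev *pdev,
				enum vga_switcheroo_state state)
{
	if (state == VGA_SWITCHEROO_ON)
		mydrv_resume(pdev);
	else
		mydrv_suspend(pdev);
}

static bool mydrv_can_switch(struct pci_dev *pdev)
{
	/* only allow a switch when nothing is pinned or in flight */
	return !mydrv_busy(pdev);
}

static int mydrv_probe_tail(struct pci_dev *pdev)
{
	return vga_switcheroo_register_client(pdev, mydrv_set_gpu_state,
					      mydrv_can_switch);
}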
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 867e084..433602a 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -265,9 +265,10 @@ static int hiddev_release(struct inode * inode, struct file * file)
static int hiddev_open(struct inode *inode, struct file *file)
{
struct hiddev_list *list;
- int res;
+ int res, i;
- int i = iminor(inode) - HIDDEV_MINOR_BASE;
+ lock_kernel();
+ i = iminor(inode) - HIDDEV_MINOR_BASE;
if (i >= HIDDEV_MINORS || i < 0 || !hiddev_table[i])
return -ENODEV;
@@ -313,10 +314,12 @@ static int hiddev_open(struct inode *inode, struct file *file)
usbhid_open(hid);
}
+ unlock_kernel();
return 0;
bail:
file->private_data = NULL;
kfree(list);
+ unlock_kernel();
return res;
}
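
The hiddev hunk pushes the BKL from the VFS down into the driver's open(). A sketch of the pattern with hypothetical mydev_* names; note that every exit path has to drop the lock:

#include <linux/fs.h>
#include <linux/smp_lock.h>

/* sketch: serialize the minor lookup and per-open setup under the BKL */
static int mydev_open_sketch(struct inode *inode, struct file *file)
{
	int minor, ret;

	lock_kernel();
	minor = iminor(inode) - MYDEV_MINOR_BASE;
	if (minor < 0 || minor >= MYDEV_MINORS || !mydev_table[minor])
		ret = -ENODEV;
	else
		ret = mydev_do_open(file, minor);
	unlock_kernel();
	return ret;
}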
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 8d8a00e..02ce9cf 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -61,6 +61,16 @@ config I2C_HELPER_AUTO
In doubt, say Y.
+config I2C_SMBUS
+ tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO
+ help
+ Say Y here if you want support for SMBus extensions to the I2C
+ specification. At the moment, the only supported extension is
+ the SMBus alert protocol.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-smbus.
+
source drivers/i2c/algos/Kconfig
source drivers/i2c/busses/Kconfig
source drivers/i2c/chips/Kconfig
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index ba26e6c..acd0250 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o
obj-$(CONFIG_I2C) += i2c-core.o
+obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o
obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
obj-y += busses/ chips/ algos/
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 78d42aa..dcdaf8e 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -453,8 +453,6 @@ static int pca_init(struct i2c_adapter *adap)
*/
int raise_fall_time;
- struct i2c_algo_pca_data *pca_data = adap->algo_data;
-
/* Ignore the reset function from the module,
* we can use the parallel bus reset
*/
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 737f052..4cc3807 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -77,7 +77,7 @@ config I2C_AMD8111
will be called i2c-amd8111.
config I2C_I801
- tristate "Intel 82801 (ICH)"
+ tristate "Intel 82801 (ICH/PCH)"
depends on PCI
help
If you say yes to this option, support will be included for the Intel
@@ -97,7 +97,8 @@ config I2C_I801
ICH9
Tolapai
ICH10
- PCH
+ 3400/5 Series (PCH)
+ Cougar Point (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -580,6 +581,7 @@ config I2C_PARPORT
tristate "Parallel port adapter"
depends on PARPORT
select I2C_ALGOBIT
+ select I2C_SMBUS
help
This supports parallel port I2C adapters such as the ones made by
Philips or Velleman, Analog Devices evaluation boards, and more.
@@ -603,6 +605,7 @@ config I2C_PARPORT
config I2C_PARPORT_LIGHT
tristate "Parallel port adapter (light)"
select I2C_ALGOBIT
+ select I2C_SMBUS
help
This supports parallel port I2C adapters such as the ones made by
Philips or Velleman, Analog Devices evaluation boards, and more.
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 8de7d7b..bd8f1e4 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -480,7 +480,7 @@ static struct i2c_adapter ali1535_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id ali1535_ids[] = {
+static const struct pci_device_id ali1535_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ },
};
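
This and the following i2c hunks all constify their PCI ID tables so they can live in read-only data. The resulting pattern, sketched with the same ALi ID for illustration:

#include <linux/module.h>
#include <linux/pci.h>

/* sketch: a const ID table lands in .rodata; the alias export lets
 * udev load the module on a device match */
static const struct pci_device_id mydrv_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, mydrv_ids);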
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index 4687af4..a409cfc 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -417,7 +417,7 @@ static void __devexit ali1563_remove(struct pci_dev * dev)
ali1563_shutdown(dev);
}
-static struct pci_device_id __devinitdata ali1563_id_table[] = {
+static const struct pci_device_id ali1563_id_table[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1563) },
{},
};
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index e7e3205..659f63f 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -477,7 +477,7 @@ static struct i2c_adapter ali15x3_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id ali15x3_ids[] = {
+static const struct pci_device_id ali15x3_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 8f0b90e..c5a9fa4 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -308,7 +308,7 @@ static const char* chipname[] = {
"nVidia nForce", "AMD8111",
};
-static struct pci_device_id amd756_ids[] = {
+static const struct pci_device_id amd756_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B),
.driver_data = AMD756 },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7413),
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 5b4ad86..d0dc970 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -351,7 +351,7 @@ static const struct i2c_algorithm smbus_algorithm = {
};
-static struct pci_device_id amd8111_ids[] = {
+static const struct pci_device_id amd8111_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS2) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index bec9b84..c767295 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -105,7 +105,7 @@ static struct i2c_adapter hydra_adap = {
.algo_data = &hydra_bit_data,
};
-static struct pci_device_id hydra_ids[] = {
+static const struct pci_device_id hydra_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_HYDRA) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index df6ab553..9da5b05 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -41,7 +41,8 @@
Tolapai 0x5032 32 hard yes yes yes
ICH10 0x3a30 32 hard yes yes yes
ICH10 0x3a60 32 hard yes yes yes
- PCH 0x3b30 32 hard yes yes yes
+ 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes
+ Cougar Point (PCH) 0x1c22 32 hard yes yes yes
Features supported by this driver:
Software PEC no
@@ -561,7 +562,7 @@ static struct i2c_adapter i801_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id i801_ids[] = {
+static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_2) },
@@ -578,6 +579,7 @@ static struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
{ 0, }
};
@@ -707,6 +709,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
case PCI_DEVICE_ID_INTEL_ICH10_4:
case PCI_DEVICE_ID_INTEL_ICH10_5:
case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
+ case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
i801_features |= FEATURE_I2C_BLOCK_READ;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801DB_3:
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index dba6eb0..69c22f7 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -256,7 +256,7 @@ static struct i2c_adapter sch_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id sch_ids[] = {
+static const struct pci_device_id sch_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index ec11d1c..4a70058 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -308,7 +308,7 @@ static struct i2c_algorithm smbus_algorithm = {
};
-static struct pci_device_id nforce2_ids[] = {
+static const struct pci_device_id nforce2_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) },
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index 322c569..5f41ec0 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -1,7 +1,7 @@
/* ------------------------------------------------------------------------ *
* i2c-parport-light.c I2C bus over parallel port *
* ------------------------------------------------------------------------ *
- Copyright (C) 2003-2007 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
Based on older i2c-velleman.c driver
Copyright (C) 1995-2000 Simon G. Vogl
@@ -27,10 +27,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/i2c-smbus.h>
#include <asm/io.h>
#include "i2c-parport.h"
@@ -43,6 +45,10 @@ static u16 base;
module_param(base, ushort, 0);
MODULE_PARM_DESC(base, "Base I/O address");
+static int irq;
+module_param(irq, int, 0);
+MODULE_PARM_DESC(irq, "IRQ (optional)");
+
/* ----- Low-level parallel port access ----------------------------------- */
static inline void port_write(unsigned char p, unsigned char d)
@@ -119,6 +125,16 @@ static struct i2c_adapter parport_adapter = {
.name = "Parallel port adapter (light)",
};
+/* SMBus alert support */
+static struct i2c_smbus_alert_setup alert_data = {
+ .alert_edge_triggered = 1,
+};
+static struct i2c_client *ara;
+static struct lineop parport_ctrl_irq = {
+ .val = (1 << 4),
+ .port = CTRL,
+};
+
static int __devinit i2c_parport_probe(struct platform_device *pdev)
{
int err;
@@ -127,18 +143,39 @@ static int __devinit i2c_parport_probe(struct platform_device *pdev)
parport_setsda(NULL, 1);
parport_setscl(NULL, 1);
/* Other init if needed (power on...) */
- if (adapter_parm[type].init.val)
+ if (adapter_parm[type].init.val) {
line_set(1, &adapter_parm[type].init);
+ /* Give powered devices some time to settle */
+ msleep(100);
+ }
parport_adapter.dev.parent = &pdev->dev;
err = i2c_bit_add_bus(&parport_adapter);
- if (err)
+ if (err) {
dev_err(&pdev->dev, "Unable to register with I2C\n");
- return err;
+ return err;
+ }
+
+ /* Set up SMBus alert if supported */
+ if (adapter_parm[type].smbus_alert && irq) {
+ alert_data.irq = irq;
+ ara = i2c_setup_smbus_alert(&parport_adapter, &alert_data);
+ if (ara)
+ line_set(1, &parport_ctrl_irq);
+ else
+ dev_warn(&pdev->dev, "Failed to register ARA client\n");
+ }
+
+ return 0;
}
static int __devexit i2c_parport_remove(struct platform_device *pdev)
{
+ if (ara) {
+ line_set(0, &parport_ctrl_irq);
+ i2c_unregister_device(ara);
+ ara = NULL;
+ }
i2c_del_adapter(&parport_adapter);
/* Un-init if needed (power off...) */
@@ -205,6 +242,9 @@ static int __init i2c_parport_init(void)
if (!request_region(base, 3, DRVNAME))
return -EBUSY;
+ if (irq != 0)
+ pr_info(DRVNAME ": using irq %d\n", irq);
+
if (!adapter_parm[type].getscl.val)
parport_algo_data.getscl = NULL;
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 0d89986..220fca7 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -1,7 +1,7 @@
/* ------------------------------------------------------------------------ *
* i2c-parport.c I2C bus over parallel port *
* ------------------------------------------------------------------------ *
- Copyright (C) 2003-2007 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
Based on older i2c-philips-par.c driver
Copyright (C) 1995-2000 Simon G. Vogl
@@ -27,9 +27,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/parport.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/i2c-smbus.h>
#include "i2c-parport.h"
/* ----- Device list ------------------------------------------------------ */
@@ -38,6 +40,8 @@ struct i2c_par {
struct pardevice *pdev;
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo_data;
+ struct i2c_smbus_alert_setup alert_data;
+ struct i2c_client *ara;
struct i2c_par *next;
};
@@ -143,6 +147,19 @@ static struct i2c_algo_bit_data parport_algo_data = {
/* ----- I2c and parallel port call-back functions and structures --------- */
+void i2c_parport_irq(void *data)
+{
+ struct i2c_par *adapter = data;
+ struct i2c_client *ara = adapter->ara;
+
+ if (ara) {
+ dev_dbg(&ara->dev, "SMBus alert received\n");
+ i2c_handle_smbus_alert(ara);
+ } else
+ dev_dbg(&adapter->adapter.dev,
+ "SMBus alert received but no ARA client!\n");
+}
+
static void i2c_parport_attach (struct parport *port)
{
struct i2c_par *adapter;
@@ -154,8 +171,9 @@ static void i2c_parport_attach (struct parport *port)
}
pr_debug("i2c-parport: attaching to %s\n", port->name);
+ parport_disable_irq(port);
adapter->pdev = parport_register_device(port, "i2c-parport",
- NULL, NULL, NULL, PARPORT_FLAG_EXCL, NULL);
+ NULL, NULL, i2c_parport_irq, PARPORT_FLAG_EXCL, adapter);
if (!adapter->pdev) {
printk(KERN_ERR "i2c-parport: Unable to register with parport\n");
goto ERROR0;
@@ -185,14 +203,29 @@ static void i2c_parport_attach (struct parport *port)
parport_setsda(port, 1);
parport_setscl(port, 1);
/* Other init if needed (power on...) */
- if (adapter_parm[type].init.val)
+ if (adapter_parm[type].init.val) {
line_set(port, 1, &adapter_parm[type].init);
+ /* Give powered devices some time to settle */
+ msleep(100);
+ }
if (i2c_bit_add_bus(&adapter->adapter) < 0) {
printk(KERN_ERR "i2c-parport: Unable to register with I2C\n");
goto ERROR1;
}
+ /* Set up SMBus alert if supported */
+ if (adapter_parm[type].smbus_alert) {
+ adapter->alert_data.alert_edge_triggered = 1;
+ adapter->ara = i2c_setup_smbus_alert(&adapter->adapter,
+ &adapter->alert_data);
+ if (adapter->ara)
+ parport_enable_irq(port);
+ else
+ printk(KERN_WARNING "i2c-parport: Failed to register "
+ "ARA client\n");
+ }
+
/* Add the new adapter to the list */
adapter->next = adapter_list;
adapter_list = adapter;
@@ -213,6 +246,10 @@ static void i2c_parport_detach (struct parport *port)
for (prev = NULL, adapter = adapter_list; adapter;
prev = adapter, adapter = adapter->next) {
if (adapter->pdev->port == port) {
+ if (adapter->ara) {
+ parport_disable_irq(port);
+ i2c_unregister_device(adapter->ara);
+ }
i2c_del_adapter(&adapter->adapter);
/* Un-init if needed (power off...) */
diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h
index ed69d84..a9f6681 100644
--- a/drivers/i2c/busses/i2c-parport.h
+++ b/drivers/i2c/busses/i2c-parport.h
@@ -1,7 +1,7 @@
/* ------------------------------------------------------------------------ *
* i2c-parport.h I2C bus over parallel port *
* ------------------------------------------------------------------------ *
- Copyright (C) 2003-2004 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -38,6 +38,7 @@ struct adapter_parm {
struct lineop getsda;
struct lineop getscl;
struct lineop init;
+ unsigned int smbus_alert:1;
};
static struct adapter_parm adapter_parm[] = {
@@ -73,6 +74,7 @@ static struct adapter_parm adapter_parm[] = {
.setscl = { 0x01, DATA, 1 },
.getsda = { 0x10, STAT, 1 },
.init = { 0xf0, DATA, 0 },
+ .smbus_alert = 1,
},
/* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */
{
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index adf0fbb..0d20ff4 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -400,7 +400,7 @@ static void __devexit pasemi_smb_remove(struct pci_dev *dev)
kfree(smbus);
}
-static struct pci_device_id pasemi_smb_ids[] = {
+static const struct pci_device_id pasemi_smb_ids[] = {
{ PCI_DEVICE(0x1959, 0xa003) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index e56e4b6..ee9da6f 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -472,7 +472,7 @@ static struct i2c_adapter piix4_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id piix4_ids[] = {
+static const struct pci_device_id piix4_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) },
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 844569f..55a7137 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -369,7 +369,7 @@ static struct i2c_adapter sis5595_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id sis5595_ids[] __devinitdata = {
+static const struct pci_device_id sis5595_ids[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 68cff7a..2309c7f 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -468,7 +468,7 @@ static struct i2c_adapter sis630_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id sis630_ids[] __devinitdata = {
+static const struct pci_device_id sis630_ids[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
{ 0, }
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 1649963..d43d8f8 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -245,7 +245,7 @@ static struct i2c_adapter sis96x_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id sis96x_ids[] = {
+static const struct pci_device_id sis96x_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index e29b6d5..b5b1bbf 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -31,11 +31,13 @@
#define CMD_I2C_IO_BEGIN (1<<0)
#define CMD_I2C_IO_END (1<<1)
-/* i2c bit delay, default is 10us -> 100kHz */
+/* i2c bit delay, default is 10us -> 100kHz max
+ (in practice, due to additional delays in the i2c bitbanging
+ code, this results in an i2c clock of about 50kHz) */
static unsigned short delay = 10;
module_param(delay, ushort, 0);
-MODULE_PARM_DESC(delay, "bit delay in microseconds, "
- "e.g. 10 for 100kHz (default is 100kHz)");
+MODULE_PARM_DESC(delay, "bit delay in microseconds "
+ "(default is 10us for 100kHz max)");
static int usb_read(struct i2c_adapter *adapter, int cmd,
int value, int index, void *data, int len);
@@ -137,7 +139,7 @@ static const struct i2c_algorithm usb_algorithm = {
* Future Technology Devices International Ltd., later a pair was
* bought from EZPrototypes
*/
-static struct usb_device_id i2c_tiny_usb_table [] = {
+static const struct usb_device_id i2c_tiny_usb_table[] = {
{ USB_DEVICE(0x0403, 0xc631) }, /* FTDI */
{ USB_DEVICE(0x1c40, 0x0534) }, /* EZPrototypes */
{ } /* Terminating entry */
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 8b24f19..de78283 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -89,7 +89,7 @@ static struct i2c_adapter vt586b_adapter = {
};
-static struct pci_device_id vt586b_ids[] __devinitdata = {
+static const struct pci_device_id vt586b_ids[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index a84a909..d57292e 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -444,7 +444,7 @@ release_region:
return error;
}
-static struct pci_device_id vt596_ids[] = {
+static const struct pci_device_id vt596_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596_3),
.driver_data = SMBBA1 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596B_3),
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 10be7b5..3202a86 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -34,6 +34,7 @@
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/rwsem.h>
+#include <linux/pm_runtime.h>
#include <asm/uaccess.h>
#include "i2c-core.h"
@@ -184,6 +185,52 @@ static int i2c_device_pm_resume(struct device *dev)
#define i2c_device_pm_resume NULL
#endif
+#ifdef CONFIG_PM_RUNTIME
+static int i2c_device_runtime_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm;
+
+ if (!dev->driver)
+ return 0;
+ pm = dev->driver->pm;
+ if (!pm || !pm->runtime_suspend)
+ return 0;
+ return pm->runtime_suspend(dev);
+}
+
+static int i2c_device_runtime_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm;
+
+ if (!dev->driver)
+ return 0;
+ pm = dev->driver->pm;
+ if (!pm || !pm->runtime_resume)
+ return 0;
+ return pm->runtime_resume(dev);
+}
+
+static int i2c_device_runtime_idle(struct device *dev)
+{
+ const struct dev_pm_ops *pm = NULL;
+ int ret;
+
+ if (dev->driver)
+ pm = dev->driver->pm;
+ if (pm && pm->runtime_idle) {
+ ret = pm->runtime_idle(dev);
+ if (ret)
+ return ret;
+ }
+
+ return pm_runtime_suspend(dev);
+}
+#else
+#define i2c_device_runtime_suspend NULL
+#define i2c_device_runtime_resume NULL
+#define i2c_device_runtime_idle NULL
+#endif
+
static int i2c_device_suspend(struct device *dev, pm_message_t mesg)
{
struct i2c_client *client = i2c_verify_client(dev);
@@ -251,6 +298,9 @@ static const struct attribute_group *i2c_dev_attr_groups[] = {
static const struct dev_pm_ops i2c_device_pm_ops = {
.suspend = i2c_device_pm_suspend,
.resume = i2c_device_pm_resume,
+ .runtime_suspend = i2c_device_runtime_suspend,
+ .runtime_resume = i2c_device_runtime_resume,
+ .runtime_idle = i2c_device_runtime_idle,
};
struct bus_type i2c_bus_type = {
@@ -1133,7 +1183,7 @@ EXPORT_SYMBOL(i2c_transfer);
* i2c_master_send - issue a single I2C message in master transmit mode
* @client: Handle to slave device
* @buf: Data that will be written to the slave
- * @count: How many bytes to write
+ * @count: How many bytes to write, must be less than 64k since msg.len is u16
*
* Returns negative errno, or else the number of bytes written.
*/
@@ -1160,7 +1210,7 @@ EXPORT_SYMBOL(i2c_master_send);
* i2c_master_recv - issue a single I2C message in master receive mode
* @client: Handle to slave device
* @buf: Where to store data read from slave
- * @count: How many bytes to read
+ * @count: How many bytes to read, must be less than 64k since msg.len is u16
*
* Returns negative errno, or else the number of bytes read.
*/
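
The runtime PM glue added to i2c-core.c above simply forwards the bus-level runtime callbacks to the client driver's dev_pm_ops, falling back to a plain pm_runtime_suspend() when the driver supplies no idle handler. A client driver therefore only needs to populate those callbacks; a minimal, hypothetical sketch (all names invented):

	#include <linux/i2c.h>
	#include <linux/pm_runtime.h>

	static int example_runtime_suspend(struct device *dev)
	{
		/* put the chip into its low-power state here */
		return 0;
	}

	static int example_runtime_resume(struct device *dev)
	{
		/* restore the chip to full power here */
		return 0;
	}

	static const struct dev_pm_ops example_pm_ops = {
		.runtime_suspend = example_runtime_suspend,
		.runtime_resume	 = example_runtime_resume,
	};

	static struct i2c_driver example_driver = {
		.driver = {
			.name	= "example",
			.pm	= &example_pm_ops,
		},
		/* .probe, .remove and .id_table omitted from this sketch */
	};
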
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
new file mode 100644
index 0000000..4212782
--- /dev/null
+++ b/drivers/i2c/i2c-smbus.c
@@ -0,0 +1,263 @@
+/*
+ * i2c-smbus.c - SMBus extensions to the I2C protocol
+ *
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/semaphore.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
+
+struct i2c_smbus_alert {
+ unsigned int alert_edge_triggered:1;
+ int irq;
+ struct work_struct alert;
+ struct i2c_client *ara; /* Alert response address */
+};
+
+struct alert_data {
+ unsigned short addr;
+ u8 flag:1;
+};
+
+/* If this is the alerting device, notify its driver */
+static int smbus_do_alert(struct device *dev, void *addrp)
+{
+ struct i2c_client *client = i2c_verify_client(dev);
+ struct alert_data *data = addrp;
+
+ if (!client || client->addr != data->addr)
+ return 0;
+ if (client->flags & I2C_CLIENT_TEN)
+ return 0;
+
+ /*
+ * Drivers should either disable alerts, or provide at least
+ * a minimal handler. Lock so client->driver won't change.
+ */
+ down(&dev->sem);
+ if (client->driver) {
+ if (client->driver->alert)
+ client->driver->alert(client, data->flag);
+ else
+ dev_warn(&client->dev, "no driver alert()!\n");
+ } else
+ dev_dbg(&client->dev, "alert with no driver\n");
+ up(&dev->sem);
+
+ /* Stop iterating after we find the device */
+ return -EBUSY;
+}
+
+/*
+ * The alert IRQ handler needs to hand work off to a task which can issue
+ * SMBus calls, because those sleeping calls can't be made in IRQ context.
+ */
+static void smbus_alert(struct work_struct *work)
+{
+ struct i2c_smbus_alert *alert;
+ struct i2c_client *ara;
+ unsigned short prev_addr = 0; /* Not a valid address */
+
+ alert = container_of(work, struct i2c_smbus_alert, alert);
+ ara = alert->ara;
+
+ for (;;) {
+ s32 status;
+ struct alert_data data;
+
+ /*
+ * Devices with pending alerts reply in address order, low
+ * to high, because of slave transmit arbitration. After
+ * responding, an SMBus device stops asserting SMBALERT#.
+ *
+ * Note that SMBus 2.0 reserves 10-bit addresses for future
+ * use. We neither handle them, nor try to use PEC here.
+ */
+ status = i2c_smbus_read_byte(ara);
+ if (status < 0)
+ break;
+
+ data.flag = status & 1;
+ data.addr = status >> 1;
+
+ if (data.addr == prev_addr) {
+ dev_warn(&ara->dev, "Duplicate SMBALERT# from dev "
+ "0x%02x, skipping\n", data.addr);
+ break;
+ }
+ dev_dbg(&ara->dev, "SMBALERT# from dev 0x%02x, flag %d\n",
+ data.addr, data.flag);
+
+ /* Notify driver for the device which issued the alert */
+ device_for_each_child(&ara->adapter->dev, &data,
+ smbus_do_alert);
+ prev_addr = data.addr;
+ }
+
+ /* We handled all alerts; re-enable level-triggered IRQs */
+ if (!alert->alert_edge_triggered)
+ enable_irq(alert->irq);
+}
+
+static irqreturn_t smbalert_irq(int irq, void *d)
+{
+ struct i2c_smbus_alert *alert = d;
+
+ /* Disable level-triggered IRQs until we handle them */
+ if (!alert->alert_edge_triggered)
+ disable_irq_nosync(irq);
+
+ schedule_work(&alert->alert);
+ return IRQ_HANDLED;
+}
+
+/* Set up SMBALERT# infrastructure */
+static int smbalert_probe(struct i2c_client *ara,
+ const struct i2c_device_id *id)
+{
+ struct i2c_smbus_alert_setup *setup = ara->dev.platform_data;
+ struct i2c_smbus_alert *alert;
+ struct i2c_adapter *adapter = ara->adapter;
+ int res;
+
+ alert = kzalloc(sizeof(struct i2c_smbus_alert), GFP_KERNEL);
+ if (!alert)
+ return -ENOMEM;
+
+ alert->alert_edge_triggered = setup->alert_edge_triggered;
+ alert->irq = setup->irq;
+ INIT_WORK(&alert->alert, smbus_alert);
+ alert->ara = ara;
+
+ if (setup->irq > 0) {
+ res = devm_request_irq(&ara->dev, setup->irq, smbalert_irq,
+ 0, "smbus_alert", alert);
+ if (res) {
+ kfree(alert);
+ return res;
+ }
+ }
+
+ i2c_set_clientdata(ara, alert);
+ dev_info(&adapter->dev, "supports SMBALERT#, %s trigger\n",
+ setup->alert_edge_triggered ? "edge" : "level");
+
+ return 0;
+}
+
+/* IRQ resource is managed so it is freed automatically */
+static int smbalert_remove(struct i2c_client *ara)
+{
+ struct i2c_smbus_alert *alert = i2c_get_clientdata(ara);
+
+ cancel_work_sync(&alert->alert);
+
+ i2c_set_clientdata(ara, NULL);
+ kfree(alert);
+ return 0;
+}
+
+static const struct i2c_device_id smbalert_ids[] = {
+ { "smbus_alert", 0 },
+ { /* LIST END */ }
+};
+MODULE_DEVICE_TABLE(i2c, smbalert_ids);
+
+static struct i2c_driver smbalert_driver = {
+ .driver = {
+ .name = "smbus_alert",
+ },
+ .probe = smbalert_probe,
+ .remove = smbalert_remove,
+ .id_table = smbalert_ids,
+};
+
+/**
+ * i2c_setup_smbus_alert - Set up SMBus alert support
+ * @adapter: the target adapter
+ * @setup: setup data for the SMBus alert handler
+ * Context: can sleep
+ *
+ * Set up handling of the SMBus alert protocol on a given I2C bus segment.
+ *
+ * Handling can be done either through our IRQ handler, or by the
+ * adapter (from its handler, periodic polling, or whatever).
+ *
+ * NOTE that if we manage the IRQ, we *MUST* know if it's level or
+ * edge triggered in order to hand it to the workqueue correctly.
+ * If triggering the alert seems to wedge the system, you probably
+ * should have said it's level triggered.
+ *
+ * This returns the ara client, which should be saved for later use with
+ * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or NULL
+ * to indicate an error.
+ */
+struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
+ struct i2c_smbus_alert_setup *setup)
+{
+ struct i2c_board_info ara_board_info = {
+ I2C_BOARD_INFO("smbus_alert", 0x0c),
+ .platform_data = setup,
+ };
+
+ return i2c_new_device(adapter, &ara_board_info);
+}
+EXPORT_SYMBOL_GPL(i2c_setup_smbus_alert);
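
Per the kernel-doc above, an adapter (or platform setup code) calls i2c_setup_smbus_alert() once per bus segment and keeps the returned ARA client for later i2c_handle_smbus_alert() and eventual i2c_unregister_device() calls. A hedged sketch of that calling convention, assuming the caller already knows its IRQ (function and variable names are invented):

	#include <linux/i2c.h>
	#include <linux/i2c-smbus.h>

	static struct i2c_client *ara;	/* kept for i2c_handle_smbus_alert() */

	static int example_enable_alert(struct i2c_adapter *adap, int irq)
	{
		/* Handed over as platform_data, so it must outlive the call */
		static struct i2c_smbus_alert_setup setup;

		setup.irq = irq;
		setup.alert_edge_triggered = 0;	/* "level" is the safe answer */

		ara = i2c_setup_smbus_alert(adap, &setup);
		return ara ? 0 : -ENODEV;
	}
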
+
+/**
+ * i2c_handle_smbus_alert - Handle an SMBus alert
+ * @ara: the ARA client on the relevant adapter
+ * Context: can't sleep
+ *
+ * Helper function to be called from an I2C bus driver's interrupt
+ * handler. It will schedule the alert work, in turn calling the
+ * corresponding I2C device driver's alert function.
+ *
+ * It is assumed that ara is a valid i2c client previously returned by
+ * i2c_setup_smbus_alert().
+ */
+int i2c_handle_smbus_alert(struct i2c_client *ara)
+{
+ struct i2c_smbus_alert *alert = i2c_get_clientdata(ara);
+
+ return schedule_work(&alert->alert);
+}
+EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert);
+
+static int __init i2c_smbus_init(void)
+{
+ return i2c_add_driver(&smbalert_driver);
+}
+
+static void __exit i2c_smbus_exit(void)
+{
+ i2c_del_driver(&smbalert_driver);
+}
+
+module_init(i2c_smbus_init);
+module_exit(i2c_smbus_exit);
+
+MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_DESCRIPTION("SMBus protocol extensions support");
+MODULE_LICENSE("GPL");
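
On the receiving end, smbus_do_alert() above dispatches to the client driver's alert() method, so a driver interested in SMBALERT# notifications implements that callback in its struct i2c_driver. A minimal hypothetical sketch:

	#include <linux/i2c.h>

	/* Called from the workqueue; "data" is the low status bit from the ARA */
	static void example_alert(struct i2c_client *client, unsigned int data)
	{
		dev_info(&client->dev, "SMBALERT# received, flag=%u\n", data);
		/* read the chip's status registers and clear the alert here */
	}

	static struct i2c_driver example_driver = {
		.driver	= { .name = "example" },
		.alert	= example_alert,
		/* .probe, .remove and .id_table omitted from this sketch */
	};
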
diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
index 878f8ec..57d00ca 100644
--- a/drivers/ide/aec62xx.c
+++ b/drivers/ide/aec62xx.c
@@ -81,15 +81,15 @@ static u8 pci_bus_clock_list_ultra (u8 speed, struct chipset_bus_clock_list_entr
return chipset_table->ultra_settings;
}
-static void aec6210_set_mode(ide_drive_t *drive, const u8 speed)
+static void aec6210_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
struct chipset_bus_clock_list_entry *bus_clock = host->host_priv;
u16 d_conf = 0;
u8 ultra = 0, ultra_conf = 0;
u8 tmp0 = 0, tmp1 = 0, tmp2 = 0;
+ const u8 speed = drive->dma_mode;
unsigned long flags;
local_irq_save(flags);
@@ -109,15 +109,15 @@ static void aec6210_set_mode(ide_drive_t *drive, const u8 speed)
local_irq_restore(flags);
}
-static void aec6260_set_mode(ide_drive_t *drive, const u8 speed)
+static void aec6260_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
struct chipset_bus_clock_list_entry *bus_clock = host->host_priv;
u8 unit = drive->dn & 1;
u8 tmp1 = 0, tmp2 = 0;
u8 ultra = 0, drive_conf = 0, ultra_conf = 0;
+ const u8 speed = drive->dma_mode;
unsigned long flags;
local_irq_save(flags);
@@ -134,9 +134,10 @@ static void aec6260_set_mode(ide_drive_t *drive, const u8 speed)
local_irq_restore(flags);
}
-static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void aec_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- drive->hwif->port_ops->set_dma_mode(drive, pio + XFER_PIO_0);
+ drive->dma_mode = drive->pio_mode;
+ hwif->port_ops->set_dma_mode(hwif, drive);
}
static int init_chipset_aec62xx(struct pci_dev *dev)
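
The mechanical change repeated across the IDE hunks that follow is visible here in full: set_pio_mode()/set_dma_mode() now receive the port, and the requested mode travels in drive->pio_mode / drive->dma_mode instead of a parameter. Sketched in isolation (hypothetical host driver):

	#include <linux/ide.h>

	/* New-style port_ops callback: the mode now lives in the drive */
	static void example_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
	{
		const u8 pio = drive->pio_mode - XFER_PIO_0;	/* 0..4 */

		/* program hwif's timing registers for "pio" here */
		(void)pio;
	}

	/* PIO-only tuning may reuse the DMA path, as aec62xx does above */
	static void example_set_pio_via_dma(ide_hwif_t *hwif, ide_drive_t *drive)
	{
		drive->dma_mode = drive->pio_mode;
		hwif->port_ops->set_dma_mode(hwif, drive);
	}
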
diff --git a/drivers/ide/ali14xx.c b/drivers/ide/ali14xx.c
index 90da1f9..25b9fe3 100644
--- a/drivers/ide/ali14xx.c
+++ b/drivers/ide/ali14xx.c
@@ -109,13 +109,14 @@ static DEFINE_SPINLOCK(ali14xx_lock);
* This function computes timing parameters
* and sets controller registers accordingly.
*/
-static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void ali14xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
int driveNum;
int time1, time2;
u8 param1, param2, param3, param4;
unsigned long flags;
int bus_speed = ide_vlb_clk ? ide_vlb_clk : 50;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
/* calculate timing, according to PIO mode */
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
index 0abc43f..2c8016a 100644
--- a/drivers/ide/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -8,7 +8,7 @@
* Copyright (C) 2002 Alan Cox
* ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
* Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
+ * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
*
* (U)DMA capable version of ali 1533/1543(C), 1535(D)
*
@@ -48,61 +48,84 @@ static u8 m5229_revision;
static u8 chip_is_1543c_e;
static struct pci_dev *isa_dev;
+static void ali_fifo_control(ide_hwif_t *hwif, ide_drive_t *drive, int on)
+{
+ struct pci_dev *pdev = to_pci_dev(hwif->dev);
+ int pio_fifo = 0x54 + hwif->channel;
+ u8 fifo;
+ int shift = 4 * (drive->dn & 1);
+
+ pci_read_config_byte(pdev, pio_fifo, &fifo);
+ fifo &= ~(0x0F << shift);
+ fifo |= (on << shift);
+ pci_write_config_byte(pdev, pio_fifo, fifo);
+}
+
+static void ali_program_timings(ide_hwif_t *hwif, ide_drive_t *drive,
+ struct ide_timing *t, u8 ultra)
+{
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
+ int port = hwif->channel ? 0x5c : 0x58;
+ int udmat = 0x56 + hwif->channel;
+ u8 unit = drive->dn & 1, udma;
+ int shift = 4 * unit;
+
+ /* Set up the UDMA */
+ pci_read_config_byte(dev, udmat, &udma);
+ udma &= ~(0x0F << shift);
+ udma |= ultra << shift;
+ pci_write_config_byte(dev, udmat, udma);
+
+ if (t == NULL)
+ return;
+
+ t->setup = clamp_val(t->setup, 1, 8) & 7;
+ t->act8b = clamp_val(t->act8b, 1, 8) & 7;
+ t->rec8b = clamp_val(t->rec8b, 1, 16) & 15;
+ t->active = clamp_val(t->active, 1, 8) & 7;
+ t->recover = clamp_val(t->recover, 1, 16) & 15;
+
+ pci_write_config_byte(dev, port, t->setup);
+ pci_write_config_byte(dev, port + 1, (t->act8b << 4) | t->rec8b);
+ pci_write_config_byte(dev, port + unit + 2,
+ (t->active << 4) | t->recover);
+}
+
/**
* ali_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* Program the controller for the given PIO mode.
*/
-static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void ali_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
- int s_time = t->setup, a_time = t->active, c_time = t->cycle;
- u8 s_clc, a_clc, r_clc;
- unsigned long flags;
+ ide_drive_t *pair = ide_get_pair_dev(drive);
int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
- int port = hwif->channel ? 0x5c : 0x58;
- int portFIFO = hwif->channel ? 0x55 : 0x54;
- u8 cd_dma_fifo = 0, unit = drive->dn & 1;
-
- if ((s_clc = (s_time * bus_speed + 999) / 1000) >= 8)
- s_clc = 0;
- if ((a_clc = (a_time * bus_speed + 999) / 1000) >= 8)
- a_clc = 0;
-
- if (!(r_clc = (c_time * bus_speed + 999) / 1000 - a_clc - s_clc)) {
- r_clc = 1;
- } else {
- if (r_clc >= 16)
- r_clc = 0;
+ unsigned long T = 1000000 / bus_speed; /* PCI clock based */
+ struct ide_timing t;
+
+ ide_timing_compute(drive, drive->pio_mode, &t, T, 1);
+ if (pair) {
+ struct ide_timing p;
+
+ ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
+ ide_timing_merge(&p, &t, &t,
+ IDE_TIMING_SETUP | IDE_TIMING_8BIT);
+ if (pair->dma_mode) {
+ ide_timing_compute(pair, pair->dma_mode, &p, T, 1);
+ ide_timing_merge(&p, &t, &t,
+ IDE_TIMING_SETUP | IDE_TIMING_8BIT);
+ }
}
- local_irq_save(flags);
-
+
/*
* PIO mode => ATA FIFO on, ATAPI FIFO off
*/
- pci_read_config_byte(dev, portFIFO, &cd_dma_fifo);
- if (drive->media==ide_disk) {
- if (unit) {
- pci_write_config_byte(dev, portFIFO, (cd_dma_fifo & 0x0F) | 0x50);
- } else {
- pci_write_config_byte(dev, portFIFO, (cd_dma_fifo & 0xF0) | 0x05);
- }
- } else {
- if (unit) {
- pci_write_config_byte(dev, portFIFO, cd_dma_fifo & 0x0F);
- } else {
- pci_write_config_byte(dev, portFIFO, cd_dma_fifo & 0xF0);
- }
- }
-
- pci_write_config_byte(dev, port, s_clc);
- pci_write_config_byte(dev, port + unit + 2, (a_clc << 4) | r_clc);
- local_irq_restore(flags);
+ ali_fifo_control(hwif, drive, (drive->media == ide_disk) ? 0x05 : 0x00);
+
+ ali_program_timings(hwif, drive, &t, 0);
}
/**
@@ -132,44 +155,42 @@ static u8 ali_udma_filter(ide_drive_t *drive)
/**
* ali_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Configure the hardware for the desired IDE transfer mode.
*/
-static void ali_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void ali_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
+ static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };
struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 speed1 = speed;
- u8 unit = drive->dn & 1;
+ ide_drive_t *pair = ide_get_pair_dev(drive);
+ int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
+ unsigned long T = 1000000 / bus_speed; /* PCI clock based */
+ const u8 speed = drive->dma_mode;
u8 tmpbyte = 0x00;
- int m5229_udma = (hwif->channel) ? 0x57 : 0x56;
-
- if (speed == XFER_UDMA_6)
- speed1 = 0x47;
+ struct ide_timing t;
if (speed < XFER_UDMA_0) {
- u8 ultra_enable = (unit) ? 0x7f : 0xf7;
- /*
- * clear "ultra enable" bit
- */
- pci_read_config_byte(dev, m5229_udma, &tmpbyte);
- tmpbyte &= ultra_enable;
- pci_write_config_byte(dev, m5229_udma, tmpbyte);
-
- /*
- * FIXME: Oh, my... DMA timings are never set.
- */
+ ide_timing_compute(drive, drive->dma_mode, &t, T, 1);
+ if (pair) {
+ struct ide_timing p;
+
+ ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
+ ide_timing_merge(&p, &t, &t,
+ IDE_TIMING_SETUP | IDE_TIMING_8BIT);
+ if (pair->dma_mode) {
+ ide_timing_compute(pair, pair->dma_mode,
+ &p, T, 1);
+ ide_timing_merge(&p, &t, &t,
+ IDE_TIMING_SETUP | IDE_TIMING_8BIT);
+ }
+ }
+ ali_program_timings(hwif, drive, &t, 0);
} else {
- pci_read_config_byte(dev, m5229_udma, &tmpbyte);
- tmpbyte &= (0x0f << ((1-unit) << 2));
- /*
- * enable ultra dma and set timing
- */
- tmpbyte |= ((0x08 | ((4-speed1)&0x07)) << (unit << 2));
- pci_write_config_byte(dev, m5229_udma, tmpbyte);
+ ali_program_timings(hwif, drive, NULL,
+ udma_timing[speed - XFER_UDMA_0]);
if (speed >= XFER_UDMA_3) {
pci_read_config_byte(dev, 0x4b, &tmpbyte);
tmpbyte |= 1;
@@ -355,19 +376,13 @@ static int ali_cable_override(struct pci_dev *pdev)
*
* This checks if the controller and the cable are capable
* of UDMA66 transfers. It doesn't check the drives.
- * But see note 2 below!
- *
- * FIXME: frobs bits that are not defined on newer ALi devicea
*/
static u8 ali_cable_detect(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long flags;
u8 cbl = ATA_CBL_PATA40, tmpbyte;
- local_irq_save(flags);
-
if (m5229_revision >= 0xC2) {
/*
* m5229 80-pin cable detection (from Host View)
@@ -387,8 +402,6 @@ static u8 ali_cable_detect(ide_hwif_t *hwif)
}
}
- local_irq_restore(flags);
-
return cbl;
}
@@ -584,6 +597,6 @@ static void __exit ali15x3_ide_exit(void)
module_init(ali15x3_ide_init);
module_exit(ali15x3_ide_exit);
-MODULE_AUTHOR("Michael Aubry, Andrzej Krzysztofowicz, CJ, Andre Hedrick, Alan Cox");
+MODULE_AUTHOR("Michael Aubry, Andrzej Krzysztofowicz, CJ, Andre Hedrick, Alan Cox, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for ALi 15x3 IDE");
MODULE_LICENSE("GPL");
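
The pair-merge idiom introduced for alim15x3 above (and reused by cmd64x further down) always has the same shape: compute this drive's timing, then merge in the pair device's PIO and, if set, DMA timings so that shared address-setup and 8-bit command timings satisfy both devices. Distilled into a sketch (helper name invented):

	#include <linux/ide.h>

	static void example_compute_timings(ide_drive_t *drive, unsigned long T,
					    struct ide_timing *t)
	{
		ide_drive_t *pair = ide_get_pair_dev(drive);
		struct ide_timing p;

		ide_timing_compute(drive, drive->pio_mode, t, T, 1);
		if (pair) {
			ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
			ide_timing_merge(&p, t, t,
					 IDE_TIMING_SETUP | IDE_TIMING_8BIT);
			if (pair->dma_mode) {
				ide_timing_compute(pair, pair->dma_mode,
						   &p, T, 1);
				ide_timing_merge(&p, t, t,
						 IDE_TIMING_SETUP |
						 IDE_TIMING_8BIT);
			}
		}
	}
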
diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
index 628cd2e..3747b25 100644
--- a/drivers/ide/amd74xx.c
+++ b/drivers/ide/amd74xx.c
@@ -3,7 +3,7 @@
* IDE driver for Linux.
*
* Copyright (c) 2000-2002 Vojtech Pavlik
- * Copyright (c) 2007-2008 Bartlomiej Zolnierkiewicz
+ * Copyright (c) 2007-2010 Bartlomiej Zolnierkiewicz
*
* Based on the work of:
* Andre Hedrick
@@ -70,7 +70,8 @@ static void amd_set_speed(struct pci_dev *dev, u8 dn, u8 udma_mask,
default: return;
}
- pci_write_config_byte(dev, AMD_UDMA_TIMING + offset + (3 - dn), t);
+ if (timing->udma)
+ pci_write_config_byte(dev, AMD_UDMA_TIMING + offset + 3 - dn, t);
}
/*
@@ -78,14 +79,14 @@ static void amd_set_speed(struct pci_dev *dev, u8 dn, u8 udma_mask,
* to a desired transfer mode. It also can be called by upper layers.
*/
-static void amd_set_drive(ide_drive_t *drive, const u8 speed)
+static void amd_set_drive(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
ide_drive_t *peer = ide_get_pair_dev(drive);
struct ide_timing t, p;
int T, UT;
u8 udma_mask = hwif->ultra_mask;
+ const u8 speed = drive->dma_mode;
T = 1000000000 / amd_clock;
UT = (udma_mask == ATA_UDMA2) ? T : (T / 2);
@@ -93,7 +94,7 @@ static void amd_set_drive(ide_drive_t *drive, const u8 speed)
ide_timing_compute(drive, speed, &t, T, UT);
if (peer) {
- ide_timing_compute(peer, peer->current_speed, &p, T, UT);
+ ide_timing_compute(peer, peer->pio_mode, &p, T, UT);
ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT);
}
@@ -107,9 +108,10 @@ static void amd_set_drive(ide_drive_t *drive, const u8 speed)
* amd_set_pio_mode() is a callback from upper layers for PIO-only tuning.
*/
-static void amd_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void amd_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- amd_set_drive(drive, XFER_PIO_0 + pio);
+ drive->dma_mode = drive->pio_mode;
+ amd_set_drive(hwif, drive);
}
static void amd7409_cable_detect(struct pci_dev *dev)
@@ -340,6 +342,6 @@ static void __exit amd74xx_ide_exit(void)
module_init(amd74xx_ide_init);
module_exit(amd74xx_ide_exit);
-MODULE_AUTHOR("Vojtech Pavlik");
+MODULE_AUTHOR("Vojtech Pavlik, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("AMD PCI IDE driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c
index 248219a..000a78e 100644
--- a/drivers/ide/at91_ide.c
+++ b/drivers/ide/at91_ide.c
@@ -172,11 +172,12 @@ static void at91_ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
leave_16bit(chipselect, mode);
}
-static void at91_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void at91_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
struct ide_timing *timing;
- u8 chipselect = drive->hwif->select_data;
+ u8 chipselect = hwif->select_data;
int use_iordy = 0;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
pdbg("chipselect %u pio %u\n", chipselect, pio);
diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
index 837322b..15f0ead 100644
--- a/drivers/ide/atiixp.c
+++ b/drivers/ide/atiixp.c
@@ -42,19 +42,20 @@ static DEFINE_SPINLOCK(atiixp_lock);
/**
* atiixp_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* Set the interface PIO mode.
*/
-static void atiixp_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void atiixp_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long flags;
int timing_shift = (drive->dn ^ 1) * 8;
u32 pio_timing_data;
u16 pio_mode_data;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
spin_lock_irqsave(&atiixp_lock, flags);
@@ -74,21 +75,22 @@ static void atiixp_set_pio_mode(ide_drive_t *drive, const u8 pio)
/**
* atiixp_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Set a ATIIXP host controller to the desired DMA mode. This involves
* programming the right timing data into the PCI configuration space.
*/
-static void atiixp_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void atiixp_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long flags;
int timing_shift = (drive->dn ^ 1) * 8;
u32 tmp32;
u16 tmp16;
u16 udma_ctl = 0;
+ const u8 speed = drive->dma_mode;
spin_lock_irqsave(&atiixp_lock, flags);
diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
index 349a67b..b26c234 100644
--- a/drivers/ide/au1xxx-ide.c
+++ b/drivers/ide/au1xxx-ide.c
@@ -99,12 +99,11 @@ static void au1xxx_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
}
#endif
-static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void au1xxx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
- /* set pio mode! */
- switch(pio) {
+ switch (drive->pio_mode - XFER_PIO_0) {
case 0:
mem_sttime = SBC_IDE_TIMING(PIO0);
@@ -161,11 +160,11 @@ static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
au_writel(mem_stcfg,MEM_STCFG2);
}
-static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void auide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
- switch(speed) {
+ switch (drive->dma_mode) {
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
case XFER_MW_DMA_2:
mem_sttime = SBC_IDE_TIMING(MDMA2);
@@ -297,8 +296,8 @@ static int auide_dma_test_irq(ide_drive_t *drive)
*/
drive->waiting_for_dma++;
if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
- printk(KERN_WARNING "%s: timeout waiting for ddma to \
- complete\n", drive->name);
+ printk(KERN_WARNING "%s: timeout waiting for ddma to complete\n",
+ drive->name);
return 1;
}
udelay(10);
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
index 1a32d62..d2b8b27 100644
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -572,9 +572,10 @@ static void cmd640_set_mode(ide_drive_t *drive, unsigned int index,
program_drive_counts(drive, index);
}
-static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void cmd640_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned int index = 0, cycle_time;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
u8 b;
switch (pio) {
@@ -605,7 +606,7 @@ static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
}
#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-static void cmd640_init_dev(ide_drive_t *drive)
+static void __init cmd640_init_dev(ide_drive_t *drive)
{
unsigned int i = drive->hwif->channel * 2 + (drive->dn & 1);
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index f2500c8..5f80312 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -7,6 +7,7 @@
* Copyright (C) 1998 David S. Miller (davem@redhat.com)
*
* Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
* Copyright (C) 2007,2009 MontaVista Software, Inc. <source@mvista.com>
*/
@@ -50,72 +51,42 @@
#define UDIDETCR1 0x7B
#define DTPR1 0x7C
-static u8 quantize_timing(int timing, int quant)
-{
- return (timing + quant - 1) / quant;
-}
-
-/*
- * This routine calculates active/recovery counts and then writes them into
- * the chipset registers.
- */
-static void program_cycle_times (ide_drive_t *drive, int cycle_time, int active_time)
+static void cmd64x_program_timings(ide_drive_t *drive, u8 mode)
{
+ ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- int clock_time = 1000 / (ide_pci_clk ? ide_pci_clk : 33);
- u8 cycle_count, active_count, recovery_count, drwtim;
+ int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
+ const unsigned long T = 1000000 / bus_speed;
static const u8 recovery_values[] =
{15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0};
+ static const u8 setup_values[] = {0x40, 0x40, 0x40, 0x80, 0, 0xc0};
+ static const u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
static const u8 drwtim_regs[4] = {DRWTIM0, DRWTIM1, DRWTIM2, DRWTIM3};
+ struct ide_timing t;
+ u8 arttim = 0;
- cycle_count = quantize_timing( cycle_time, clock_time);
- active_count = quantize_timing(active_time, clock_time);
- recovery_count = cycle_count - active_count;
+ ide_timing_compute(drive, mode, &t, T, 0);
/*
* In case we've got too long recovery phase, try to lengthen
* the active phase
*/
- if (recovery_count > 16) {
- active_count += recovery_count - 16;
- recovery_count = 16;
+ if (t.recover > 16) {
+ t.active += t.recover - 16;
+ t.recover = 16;
}
- if (active_count > 16) /* shouldn't actually happen... */
- active_count = 16;
+ if (t.active > 16) /* shouldn't actually happen... */
+ t.active = 16;
/*
* Convert values to internal chipset representation
*/
- recovery_count = recovery_values[recovery_count];
- active_count &= 0x0f;
+ t.recover = recovery_values[t.recover];
+ t.active &= 0x0f;
/* Program the active/recovery counts into the DRWTIM register */
- drwtim = (active_count << 4) | recovery_count;
- (void) pci_write_config_byte(dev, drwtim_regs[drive->dn], drwtim);
-}
-
-/*
- * This routine writes into the chipset registers
- * PIO setup/active/recovery timings.
- */
-static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
- unsigned long setup_count;
- unsigned int cycle_time;
- u8 arttim = 0;
-
- static const u8 setup_values[] = {0x40, 0x40, 0x40, 0x80, 0, 0xc0};
- static const u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
-
- cycle_time = ide_pio_cycle_time(drive, pio);
-
- program_cycle_times(drive, cycle_time, t->active);
-
- setup_count = quantize_timing(t->setup,
- 1000 / (ide_pci_clk ? ide_pci_clk : 33));
+ pci_write_config_byte(dev, drwtim_regs[drive->dn],
+ (t.active << 4) | t.recover);
/*
* The primary channel has individual address setup timing registers
@@ -126,15 +97,21 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
if (hwif->channel) {
ide_drive_t *pair = ide_get_pair_dev(drive);
- ide_set_drivedata(drive, (void *)setup_count);
+ if (pair) {
+ struct ide_timing tp;
- if (pair)
- setup_count = max_t(u8, setup_count,
- (unsigned long)ide_get_drivedata(pair));
+ ide_timing_compute(pair, pair->pio_mode, &tp, T, 0);
+ ide_timing_merge(&t, &tp, &t, IDE_TIMING_SETUP);
+ if (pair->dma_mode) {
+ ide_timing_compute(pair, pair->dma_mode,
+ &tp, T, 0);
+ ide_timing_merge(&tp, &t, &t, IDE_TIMING_SETUP);
+ }
+ }
}
- if (setup_count > 5) /* shouldn't actually happen... */
- setup_count = 5;
+ if (t.setup > 5) /* shouldn't actually happen... */
+ t.setup = 5;
/*
* Program the address setup clocks into the ARTTIM registers.
@@ -144,7 +121,7 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
if (hwif->channel)
arttim &= ~ARTTIM23_INTR_CH1;
arttim &= ~0xc0;
- arttim |= setup_values[setup_count];
+ arttim |= setup_values[t.setup];
(void) pci_write_config_byte(dev, arttim_regs[drive->dn], arttim);
}
@@ -153,8 +130,10 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
* Special cases are 8: prefetch off, 9: prefetch on (both never worked)
*/
-static void cmd64x_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void cmd64x_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
+
/*
* Filter out the prefetch control values
* to prevent PIO5 from being programmed
@@ -162,20 +141,18 @@ static void cmd64x_set_pio_mode(ide_drive_t *drive, const u8 pio)
if (pio == 8 || pio == 9)
return;
- cmd64x_tune_pio(drive, pio);
+ cmd64x_program_timings(drive, XFER_PIO_0 + pio);
}
-static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void cmd64x_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 unit = drive->dn & 0x01;
u8 regU = 0, pciU = hwif->channel ? UDIDETCR1 : UDIDETCR0;
+ const u8 speed = drive->dma_mode;
- if (speed >= XFER_SW_DMA_0) {
- (void) pci_read_config_byte(dev, pciU, &regU);
- regU &= ~(unit ? 0xCA : 0x35);
- }
+ pci_read_config_byte(dev, pciU, &regU);
+ regU &= ~(unit ? 0xCA : 0x35);
switch(speed) {
case XFER_UDMA_5:
@@ -197,18 +174,13 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
regU |= unit ? 0xC2 : 0x31;
break;
case XFER_MW_DMA_2:
- program_cycle_times(drive, 120, 70);
- break;
case XFER_MW_DMA_1:
- program_cycle_times(drive, 150, 80);
- break;
case XFER_MW_DMA_0:
- program_cycle_times(drive, 480, 215);
+ cmd64x_program_timings(drive, speed);
break;
}
- if (speed >= XFER_SW_DMA_0)
- (void) pci_write_config_byte(dev, pciU, regU);
+ pci_write_config_byte(dev, pciU, regU);
}
static void cmd648_clear_irq(ide_drive_t *drive)
@@ -471,6 +443,6 @@ static void __exit cmd64x_ide_exit(void)
module_init(cmd64x_ide_init);
module_exit(cmd64x_ide_exit);
-MODULE_AUTHOR("Eddie Dost, David Miller, Andre Hedrick");
+MODULE_AUTHOR("Eddie Dost, David Miller, Andre Hedrick, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for CMD64x IDE");
MODULE_LICENSE("GPL");
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
index 09f98ed..2c1e5f7 100644
--- a/drivers/ide/cs5520.c
+++ b/drivers/ide/cs5520.c
@@ -57,11 +57,11 @@ static struct pio_clocks cs5520_pio_clocks[]={
{1, 2, 1}
};
-static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void cs5520_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *pdev = to_pci_dev(hwif->dev);
int controller = drive->dn > 1 ? 1 : 0;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
/* 8bit CAT/CRT - 8bit command timing for channel */
pci_write_config_byte(pdev, 0x62 + controller,
@@ -81,11 +81,12 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
(cs5520_pio_clocks[pio].assert));
}
-static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void cs5520_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
printk(KERN_ERR "cs55x0: bad ide timing.\n");
- cs5520_set_pio_mode(drive, 0);
+ drive->pio_mode = XFER_PIO_0 + 0;
+ cs5520_set_pio_mode(hwif, drive);
}
static const struct ide_port_ops cs5520_port_ops = {
diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
index 40bf05e..4dc4eb9 100644
--- a/drivers/ide/cs5530.c
+++ b/drivers/ide/cs5530.c
@@ -41,8 +41,8 @@ static unsigned int cs5530_pio_timings[2][5] = {
/**
* cs5530_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* Handles setting of PIO mode for the chipset.
*
@@ -50,10 +50,11 @@ static unsigned int cs5530_pio_timings[2][5] = {
* will have valid default PIO timings set up before we get here.
*/
-static void cs5530_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void cs5530_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- unsigned long basereg = CS5530_BASEREG(drive->hwif);
+ unsigned long basereg = CS5530_BASEREG(hwif);
unsigned int format = (inl(basereg + 4) >> 31) & 1;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
outl(cs5530_pio_timings[format][pio], basereg + ((drive->dn & 1)<<3));
}
@@ -99,12 +100,12 @@ out:
return mask;
}
-static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode)
+static void cs5530_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned long basereg;
unsigned int reg, timings = 0;
- switch (mode) {
+ switch (drive->dma_mode) {
case XFER_UDMA_0: timings = 0x00921250; break;
case XFER_UDMA_1: timings = 0x00911140; break;
case XFER_UDMA_2: timings = 0x00911030; break;
@@ -112,7 +113,7 @@ static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode)
case XFER_MW_DMA_1: timings = 0x00012121; break;
case XFER_MW_DMA_2: timings = 0x00002020; break;
}
- basereg = CS5530_BASEREG(drive->hwif);
+ basereg = CS5530_BASEREG(hwif);
reg = inl(basereg + 4); /* get drive0 config register */
timings |= reg & 0x80000000; /* preserve PIO format bit */
if ((drive-> dn & 1) == 0) { /* are we configuring drive0? */
diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
index b883838..5059faf 100644
--- a/drivers/ide/cs5535.c
+++ b/drivers/ide/cs5535.c
@@ -86,7 +86,7 @@ static void cs5535_set_speed(ide_drive_t *drive, const u8 speed)
cmd = pioa = speed - XFER_PIO_0;
if (pair) {
- u8 piob = ide_get_best_pio_mode(pair, 255, 4);
+ u8 piob = pair->pio_mode - XFER_PIO_0;
if (piob < cmd)
cmd = piob;
@@ -129,28 +129,28 @@ static void cs5535_set_speed(ide_drive_t *drive, const u8 speed)
/**
* cs5535_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Programs the chipset for DMA mode.
*/
-static void cs5535_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void cs5535_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- cs5535_set_speed(drive, speed);
+ cs5535_set_speed(drive, drive->dma_mode);
}
/**
* cs5535_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* A callback from the upper layers for PIO-only tuning.
*/
-static void cs5535_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void cs5535_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- cs5535_set_speed(drive, XFER_PIO_0 + pio);
+ cs5535_set_speed(drive, drive->pio_mode);
}
static u8 cs5535_cable_detect(ide_hwif_t *hwif)
diff --git a/drivers/ide/cs5536.c b/drivers/ide/cs5536.c
index 9623b85..24214ab 100644
--- a/drivers/ide/cs5536.c
+++ b/drivers/ide/cs5536.c
@@ -125,11 +125,11 @@ static u8 cs5536_cable_detect(ide_hwif_t *hwif)
/**
* cs5536_set_pio_mode - PIO timing setup
+ * @hwif: ATA port
* @drive: ATA device
- * @pio: PIO mode number
*/
-static void cs5536_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void cs5536_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
static const u8 drv_timings[5] = {
0x98, 0x55, 0x32, 0x21, 0x20,
@@ -143,15 +143,16 @@ static void cs5536_set_pio_mode(ide_drive_t *drive, const u8 pio)
0x99, 0x92, 0x90, 0x22, 0x20,
};
- struct pci_dev *pdev = to_pci_dev(drive->hwif->dev);
+ struct pci_dev *pdev = to_pci_dev(hwif->dev);
ide_drive_t *pair = ide_get_pair_dev(drive);
int cshift = (drive->dn & 1) ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
unsigned long timings = (unsigned long)ide_get_drivedata(drive);
u32 cast;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
u8 cmd_pio = pio;
if (pair)
- cmd_pio = min(pio, ide_get_best_pio_mode(pair, 255, 4));
+ cmd_pio = min_t(u8, pio, pair->pio_mode - XFER_PIO_0);
timings &= (IDE_DRV_MASK << 8);
timings |= drv_timings[pio];
@@ -172,11 +173,11 @@ static void cs5536_set_pio_mode(ide_drive_t *drive, const u8 pio)
/**
* cs5536_set_dma_mode - DMA timing setup
+ * @hwif: ATA port
* @drive: ATA device
- * @mode: DMA mode
*/
-static void cs5536_set_dma_mode(ide_drive_t *drive, const u8 mode)
+static void cs5536_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
static const u8 udma_timings[6] = {
0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6,
@@ -186,10 +187,11 @@ static void cs5536_set_dma_mode(ide_drive_t *drive, const u8 mode)
0x67, 0x21, 0x20,
};
- struct pci_dev *pdev = to_pci_dev(drive->hwif->dev);
+ struct pci_dev *pdev = to_pci_dev(hwif->dev);
int dshift = (drive->dn & 1) ? IDE_D1_SHIFT : IDE_D0_SHIFT;
unsigned long timings = (unsigned long)ide_get_drivedata(drive);
u32 etc;
+ const u8 mode = drive->dma_mode;
cs5536_read(pdev, ETC, &etc);
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
index d6e2cbb..9383f67 100644
--- a/drivers/ide/cy82c693.c
+++ b/drivers/ide/cy82c693.c
@@ -1,43 +1,11 @@
/*
* Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer
* Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator
+ * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
*
* CYPRESS CY82C693 chipset IDE controller
*
* The CY82C693 chipset is used on Digital's PC-Alpha 164SX boards.
- * Writing the driver was quite simple, since most of the job is
- * done by the generic pci-ide support.
- * The hard part was finding the CY82C693's datasheet on Cypress's
- * web page :-(. But Altavista solved this problem :-).
- *
- *
- * Notes:
- * - I recently got a 16.8G IBM DTTA, so I was able to test it with
- * a large and fast disk - the results look great, so I'd say the
- * driver is working fine :-)
- * hdparm -t reports 8.17 MB/sec at about 6% CPU usage for the DTTA
- * - this is my first linux driver, so there's probably a lot of room
- * for optimizations and bug fixing, so feel free to do it.
- * - if using PIO mode it's a good idea to set the PIO mode and
- * 32-bit I/O support (if possible), e.g. hdparm -p2 -c1 /dev/hda
- * - I had some problems with my IBM DHEA with PIO modes < 2
- * (lost interrupts) ?????
- * - first tests with DMA look okay, they seem to work, but there is a
- * problem with sound - the BusMaster IDE TimeOut should fixed this
- *
- * Ancient History:
- * AMH@1999-08-24: v0.34 init_cy82c693_chip moved to pci_init_cy82c693
- * ASK@1999-01-23: v0.33 made a few minor code clean ups
- * removed DMA clock speed setting by default
- * added boot message
- * ASK@1998-11-01: v0.32 added support to set BusMaster IDE TimeOut
- * added support to set DMA Controller Clock Speed
- * ASK@1998-10-31: v0.31 fixed problem with setting to high DMA modes
- * on some drives.
- * ASK@1998-10-29: v0.3 added support to set DMA modes
- * ASK@1998-10-28: v0.2 added support to set PIO modes
- * ASK@1998-10-27: v0.1 first version - chipset detection
- *
*/
#include <linux/module.h>
@@ -81,87 +49,13 @@
#define CY82_INDEX_CHANNEL1 0x31
#define CY82_INDEX_TIMEOUT 0x32
-/* the min and max PCI bus speed in MHz - from datasheet */
-#define CY82C963_MIN_BUS_SPEED 25
-#define CY82C963_MAX_BUS_SPEED 33
-
-/* the struct for the PIO mode timings */
-typedef struct pio_clocks_s {
- u8 address_time; /* Address setup (clocks) */
- u8 time_16r; /* clocks for 16bit IOR (0xF0=Active/data, 0x0F=Recovery) */
- u8 time_16w; /* clocks for 16bit IOW (0xF0=Active/data, 0x0F=Recovery) */
- u8 time_8; /* clocks for 8bit (0xF0=Active/data, 0x0F=Recovery) */
-} pio_clocks_t;
-
-/*
- * calc clocks using bus_speed
- * returns (rounded up) time in bus clocks for time in ns
- */
-static int calc_clk(int time, int bus_speed)
-{
- int clocks;
-
- clocks = (time*bus_speed+999)/1000 - 1;
-
- if (clocks < 0)
- clocks = 0;
-
- if (clocks > 0x0F)
- clocks = 0x0F;
-
- return clocks;
-}
-
-/*
- * compute the values for the clock registers for PIO
- * mode and pci_clk [MHz] speed
- *
- * NOTE: for mode 0,1 and 2 drives 8-bit IDE command control registers are used
- * for mode 3 and 4 drives 8 and 16-bit timings are the same
- *
- */
-static void compute_clocks(u8 pio, pio_clocks_t *p_pclk)
-{
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
- int clk1, clk2;
- int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
-
- /* we don't check against CY82C693's min and max speed,
- * so you can play with the idebus=xx parameter
- */
-
- /* let's calc the address setup time clocks */
- p_pclk->address_time = (u8)calc_clk(t->setup, bus_speed);
-
- /* let's calc the active and recovery time clocks */
- clk1 = calc_clk(t->active, bus_speed);
-
- /* calc recovery timing */
- clk2 = t->cycle - t->active - t->setup;
-
- clk2 = calc_clk(clk2, bus_speed);
-
- clk1 = (clk1<<4)|clk2; /* combine active and recovery clocks */
-
- /* note: we use the same values for 16bit IOR and IOW
- * those are all the same, since I don't have other
- * timings than those from ide-lib.c
- */
-
- p_pclk->time_16r = (u8)clk1;
- p_pclk->time_16w = (u8)clk1;
-
- /* what are good values for 8bit ?? */
- p_pclk->time_8 = (u8)clk1;
-}
-
/*
* set DMA mode a specific channel for CY82C693
*/
-static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
+static void cy82c693_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
+ const u8 mode = drive->dma_mode;
u8 single = (mode & 0x10) >> 4, index = 0, data = 0;
index = hwif->channel ? CY82_INDEX_CHANNEL1 : CY82_INDEX_CHANNEL0;
@@ -186,12 +80,14 @@ static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
outb(data, CY82_DATA_PORT);
}
-static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
- pio_clocks_t pclk;
+ int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
+ const unsigned long T = 1000000 / bus_speed;
unsigned int addrCtrl;
+ struct ide_timing t;
+ u8 time_16, time_8;
/* select primary or secondary channel */
if (hwif->index > 0) { /* drive is on the secondary channel */
@@ -204,8 +100,12 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
}
}
- /* let's calc the values for this PIO mode */
- compute_clocks(pio, &pclk);
+ ide_timing_compute(drive, drive->pio_mode, &t, T, 1);
+
+ time_16 = clamp_val(t.recover - 1, 0, 15) |
+ (clamp_val(t.active - 1, 0, 15) << 4);
+ time_8 = clamp_val(t.act8b - 1, 0, 15) |
+ (clamp_val(t.rec8b - 1, 0, 15) << 4);
/* now let's write the clocks registers */
if ((drive->dn & 1) == 0) {
@@ -217,13 +117,13 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
addrCtrl &= (~0xF);
- addrCtrl |= (unsigned int)pclk.address_time;
+ addrCtrl |= clamp_val(t.setup - 1, 0, 15);
pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl);
/* now let's set the remaining registers */
- pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, pclk.time_16r);
- pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, pclk.time_16w);
- pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, pclk.time_8);
+ pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, time_16);
+ pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, time_16);
+ pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, time_8);
} else {
/*
* set slave drive
@@ -233,13 +133,13 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
addrCtrl &= (~0xF0);
- addrCtrl |= ((unsigned int)pclk.address_time<<4);
+ addrCtrl |= (clamp_val(t.setup - 1, 0, 15) << 4);
pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl);
/* now let's set the remaining registers */
- pci_write_config_byte(dev, CY82_IDE_SLAVE_IOR, pclk.time_16r);
- pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, pclk.time_16w);
- pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, pclk.time_8);
+ pci_write_config_byte(dev, CY82_IDE_SLAVE_IOR, time_16);
+ pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16);
+ pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8);
}
}
@@ -325,6 +225,6 @@ static void __exit cy82c693_ide_exit(void)
module_init(cy82c693_ide_init);
module_exit(cy82c693_ide_exit);
-MODULE_AUTHOR("Andreas Krebs, Andre Hedrick");
+MODULE_AUTHOR("Andreas Krebs, Andre Hedrick, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for the Cypress CY82C693 IDE");
MODULE_LICENSE("GPL");
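The cy82c693 hunks above replace the driver's private compute_clocks() with ide_timing_compute() plus clamp_val() nibble packing: recovery clocks go in the low nibble and active clocks in the high nibble, four bits each. A standalone sketch of just that packing step, assuming the clock counts have already been derived from the bus clock (illustrative values, not real timings):

#include <stdio.h>

static unsigned clamp_val(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

struct timing { int setup, active, recover, act8b, rec8b; };

int main(void)
{
	struct timing t = { .setup = 3, .active = 6, .recover = 9,
			    .act8b = 7, .rec8b = 10 };

	/* 16-bit transfers: recovery in bits 0-3, active in bits 4-7 */
	unsigned char time_16 = clamp_val(t.recover - 1, 0, 15) |
				(clamp_val(t.active - 1, 0, 15) << 4);
	/* 8-bit transfers use the act8b/rec8b pair, mirroring the hunk */
	unsigned char time_8 = clamp_val(t.act8b - 1, 0, 15) |
			       (clamp_val(t.rec8b - 1, 0, 15) << 4);

	printf("time_16=0x%02x time_8=0x%02x setup=0x%x\n",
	       time_16, time_8, (unsigned)clamp_val(t.setup - 1, 0, 15));
	return 0;
}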
diff --git a/drivers/ide/dtc2278.c b/drivers/ide/dtc2278.c
index c6b1381..6929f7f 100644
--- a/drivers/ide/dtc2278.c
+++ b/drivers/ide/dtc2278.c
@@ -68,11 +68,11 @@ static void sub22 (char b, char c)
static DEFINE_SPINLOCK(dtc2278_lock);
-static void dtc2278_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void dtc2278_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned long flags;
- if (pio >= 3) {
+ if (drive->pio_mode >= XFER_PIO_3) {
spin_lock_irqsave(&dtc2278_lock, flags);
/*
* This enables PIO mode4 (3?) on the first interface
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 4d90ac2..b885c1d 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -627,14 +627,14 @@ static u32 get_speed_setting(u8 speed, struct hpt_info *info)
return info->timings->clock_table[info->clock][i];
}
-static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed)
+static void hpt3xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct hpt_info *info = hpt3xx_get_info(hwif->dev);
struct hpt_timings *t = info->timings;
u8 itr_addr = 0x40 + (drive->dn * 4);
u32 old_itr = 0;
+ const u8 speed = drive->dma_mode;
u32 new_itr = get_speed_setting(speed, info);
u32 itr_mask = speed < XFER_MW_DMA_0 ? t->pio_mask :
(speed < XFER_UDMA_0 ? t->dma_mask :
@@ -651,9 +651,10 @@ static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_dword(dev, itr_addr, new_itr);
}
-static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void hpt3xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- hpt3xx_set_mode(drive, XFER_PIO_0 + pio);
+ drive->dma_mode = drive->pio_mode;
+ hpt3xx_set_mode(hwif, drive);
}
static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
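hpt366 (like pdc202xx_old, tc86c001 and triflex later in this patch) has a single set_mode() routine that can program any transfer mode, so its PIO hook becomes a trampoline that copies pio_mode into dma_mode and reuses the common path. A small standalone model of the pattern (stand-in types):

#include <stdio.h>

struct hwif { int index; };
struct drive { unsigned char pio_mode, dma_mode; };

static void set_mode(struct hwif *hwif, struct drive *drive)
{
	printf("port %d: programming mode byte 0x%02x\n",
	       hwif->index, drive->dma_mode);
}

static void set_pio_mode(struct hwif *hwif, struct drive *drive)
{
	drive->dma_mode = drive->pio_mode;	/* feed PIO mode to set_mode() */
	set_mode(hwif, drive);
}

int main(void)
{
	struct hwif hwif = { 0 };
	struct drive drive = { .pio_mode = 0x0c };	/* XFER_PIO_4 */

	set_pio_mode(&hwif, &drive);
	return 0;
}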
diff --git a/drivers/ide/ht6560b.c b/drivers/ide/ht6560b.c
index aafed80..d81e496 100644
--- a/drivers/ide/ht6560b.c
+++ b/drivers/ide/ht6560b.c
@@ -279,9 +279,10 @@ static void ht_set_prefetch(ide_drive_t *drive, u8 state)
#endif
}
-static void ht6560b_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void ht6560b_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned long flags, config;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
u8 timing;
switch (pio) {
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 0f67f1a..4a697a2 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -65,6 +65,8 @@ static struct cardinfo icside_cardinfo_v6_2 = {
};
struct icside_state {
+ unsigned int channel;
+ unsigned int enabled;
void __iomem *irq_port;
void __iomem *ioc_base;
unsigned int sel;
@@ -114,11 +116,18 @@ static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
struct icside_state *state = ec->irq_data;
void __iomem *base = state->irq_port;
- writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
- readb(base + ICS_ARCIN_V6_INTROFFSET_2);
+ state->enabled = 1;
- writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
- readb(base + ICS_ARCIN_V6_INTROFFSET_1);
+ switch (state->channel) {
+ case 0:
+ writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
+ readb(base + ICS_ARCIN_V6_INTROFFSET_2);
+ break;
+ case 1:
+ writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
+ readb(base + ICS_ARCIN_V6_INTROFFSET_1);
+ break;
+ }
}
/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
@@ -128,6 +137,8 @@ static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
struct icside_state *state = ec->irq_data;
+ state->enabled = 0;
+
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
}
@@ -149,6 +160,44 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = {
.irqpending = icside_irqpending_arcin_v6,
};
+/*
+ * Handle routing of interrupts. This is called before
+ * we write the command to the drive.
+ */
+static void icside_maskproc(ide_drive_t *drive, int mask)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct expansion_card *ec = ECARD_DEV(hwif->dev);
+ struct icside_state *state = ecard_get_drvdata(ec);
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ state->channel = hwif->channel;
+
+ if (state->enabled && !mask) {
+ switch (hwif->channel) {
+ case 0:
+ writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
+ readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
+ break;
+ case 1:
+ writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
+ readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
+ break;
+ }
+ } else {
+ readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
+ readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
+ }
+
+ local_irq_restore(flags);
+}
+
+static const struct ide_port_ops icside_v6_no_dma_port_ops = {
+ .maskproc = icside_maskproc,
+};
+
#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
/*
* SG-DMA support.
@@ -185,10 +234,11 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = {
* MW1 80 50 50 150 C
* MW2 70 25 25 120 C
*/
-static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
+static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned long cycle_time;
int use_dma_info = 0;
+ const u8 xfer_mode = drive->dma_mode;
switch (xfer_mode) {
case XFER_MW_DMA_2:
@@ -228,6 +278,7 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
static const struct ide_port_ops icside_v6_port_ops = {
.set_dma_mode = icside_set_dma_mode,
+ .maskproc = icside_maskproc,
};
static void icside_dma_host_set(ide_drive_t *drive, int on)
@@ -272,6 +323,11 @@ static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
BUG_ON(dma_channel_active(ec->dma));
/*
+ * Ensure that we have the right interrupt routed.
+ */
+ icside_maskproc(drive, 0);
+
+ /*
* Route the DMA signals to the correct interface.
*/
writeb(state->sel | hwif->channel, state->ioc_base);
@@ -399,6 +455,7 @@ err_free:
static const struct ide_port_info icside_v6_port_info __initdata = {
.init_dma = icside_dma_off_init,
+ .port_ops = &icside_v6_no_dma_port_ops,
.dma_ops = &icside_v6_dma_ops,
.host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
.mwdma_mask = ATA_MWDMA2,
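The icside changes above track which channel currently owns the shared interrupt line (state->channel/state->enabled) and re-route it in maskproc before each command. A userspace model of that steering logic, with the register reads/writes replaced by printfs:

#include <stdio.h>

struct state { unsigned channel, enabled; };

static void route_irq(struct state *s, unsigned channel, int mask)
{
	s->channel = channel;

	if (s->enabled && !mask)
		printf("enable IRQ on channel %u, mask the other one\n",
		       channel);
	else
		printf("mask both channels\n");
}

int main(void)
{
	struct state s = { .channel = 0, .enabled = 1 };

	route_irq(&s, 1, 0);	/* steer interrupts to channel 1 */
	route_irq(&s, 1, 1);	/* mask while a command is being issued */
	return 0;
}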
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index dd63963..ab87e4f 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -121,19 +121,11 @@ static int ide_probe(struct pcmcia_device *link)
static void ide_detach(struct pcmcia_device *link)
{
ide_info_t *info = link->priv;
- ide_hwif_t *hwif = info->host->ports[0];
- unsigned long data_addr, ctl_addr;
dev_dbg(&link->dev, "ide_detach(0x%p)\n", link);
- data_addr = hwif->io_ports.data_addr;
- ctl_addr = hwif->io_ports.ctl_addr;
-
ide_release(link);
- release_region(ctl_addr, 1);
- release_region(data_addr, 8);
-
kfree(info);
} /* ide_detach */
@@ -354,12 +346,19 @@ static void ide_release(struct pcmcia_device *link)
dev_dbg(&link->dev, "ide_release(0x%p)\n", link);
- if (info->ndev)
- /* FIXME: if this fails we need to queue the cleanup somehow
- -- need to investigate the required PCMCIA magic */
+ if (info->ndev) {
+ ide_hwif_t *hwif = host->ports[0];
+ unsigned long data_addr, ctl_addr;
+
+ data_addr = hwif->io_ports.data_addr;
+ ctl_addr = hwif->io_ports.ctl_addr;
+
ide_host_remove(host);
+ info->ndev = 0;
- info->ndev = 0;
+ release_region(ctl_addr, 1);
+ release_region(data_addr, 8);
+ }
pcmcia_disable_device(link);
} /* ide_release */
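The ide-cs reordering above exists because the I/O addresses live inside the hwif, which ide_host_remove() tears down; they must be copied out first, then the regions released. The same save-before-free discipline, modeled with plain malloc/free:

#include <stdio.h>
#include <stdlib.h>

struct hwif { unsigned long data_addr, ctl_addr; };

int main(void)
{
	struct hwif *hwif = malloc(sizeof(*hwif));
	unsigned long data_addr, ctl_addr;

	hwif->data_addr = 0x1f0;
	hwif->ctl_addr = 0x3f6;

	/* save what we still need while the object is alive */
	data_addr = hwif->data_addr;
	ctl_addr = hwif->ctl_addr;

	free(hwif);		/* stands in for ide_host_remove() */

	printf("release %#lx and %#lx\n", ctl_addr, data_addr);
	return 0;
}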
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index 1099bf7..c6935c7 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -105,15 +105,17 @@ static int set_pio_mode(ide_drive_t *drive, int arg)
return -ENOSYS;
if (set_pio_mode_abuse(drive->hwif, arg)) {
+ drive->pio_mode = arg + XFER_PIO_0;
+
if (arg == 8 || arg == 9) {
unsigned long flags;
/* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */
spin_lock_irqsave(&hwif->lock, flags);
- port_ops->set_pio_mode(drive, arg);
+ port_ops->set_pio_mode(hwif, drive);
spin_unlock_irqrestore(&hwif->lock, flags);
} else
- port_ops->set_pio_mode(drive, arg);
+ port_ops->set_pio_mode(hwif, drive);
} else {
int keep_dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 222c1ef..376f2dc 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -231,7 +231,7 @@ u8 eighty_ninty_three(ide_drive_t *drive)
u16 *id = drive->id;
int ivb = ide_in_drive_list(id, ivb_list);
- if (hwif->cbl == ATA_CBL_PATA40_SHORT)
+ if (hwif->cbl == ATA_CBL_SATA || hwif->cbl == ATA_CBL_PATA40_SHORT)
return 1;
if (ivb)
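The eighty_ninty_three() change above makes a SATA "cable" short-circuit the check the same way a short 40-pin cable does: both are known safe for modes above UDMA2, so no 80-wire detection is needed. A much-simplified sketch of just that branch (the real function also consults the IVB list and identify data):

#include <stdio.h>

enum cbl { CBL_PATA40, CBL_PATA40_SHORT, CBL_PATA80, CBL_SATA };

static int cable_ok_for_udma66(enum cbl cbl)
{
	if (cbl == CBL_SATA || cbl == CBL_PATA40_SHORT)
		return 1;
	return cbl == CBL_PATA80;	/* otherwise need an 80-wire cable */
}

int main(void)
{
	printf("sata: %d, plain pata40: %d\n",
	       cable_ok_for_udma66(CBL_SATA),
	       cable_ok_for_udma66(CBL_PATA40));
	return 0;
}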
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index f8c1ae6..fbedd35 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1042,6 +1042,8 @@ static void ide_port_init_devices(ide_hwif_t *hwif)
if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
drive->dev_flags |= IDE_DFLAG_NO_UNMASK;
+ drive->pio_mode = XFER_PIO_0;
+
if (port_ops && port_ops->init_dev)
port_ops->init_dev(drive);
}
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 6a0e625..b072328 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1365,7 +1365,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
* supported here, and not in the corresponding block interface. Our own
* ide-tape ioctls are supported on both interfaces.
*/
-static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
+static long do_idetape_chrdev_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct ide_tape_obj *tape = file->private_data;
@@ -1420,6 +1420,16 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
}
}
+static long idetape_chrdev_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+ lock_kernel();
+ ret = do_idetape_chrdev_ioctl(file, cmd, arg);
+ unlock_kernel();
+ return ret;
+}
+
/*
* Do a mode sense page 0 with block descriptor and if it succeeds set the tape
* block size with the reported value.
@@ -1888,7 +1898,7 @@ static const struct file_operations idetape_fops = {
.owner = THIS_MODULE,
.read = idetape_chrdev_read,
.write = idetape_chrdev_write,
- .ioctl = idetape_chrdev_ioctl,
+ .unlocked_ioctl = idetape_chrdev_ioctl,
.open = idetape_chrdev_open,
.release = idetape_chrdev_release,
};
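The ide-tape hunks convert .ioctl to .unlocked_ioctl: the old entry point ran under the Big Kernel Lock implicitly, so the new one must take it explicitly in a thin wrapper. A userspace model of the wrapper pattern, with a pthread mutex standing in for lock_kernel()/unlock_kernel():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bkl = PTHREAD_MUTEX_INITIALIZER;

static long do_ioctl(unsigned int cmd, unsigned long arg)
{
	printf("cmd=%u arg=%lu\n", cmd, arg);
	return 0;
}

static long locked_ioctl(unsigned int cmd, unsigned long arg)
{
	long ret;

	pthread_mutex_lock(&bkl);	/* stands in for lock_kernel() */
	ret = do_ioctl(cmd, arg);
	pthread_mutex_unlock(&bkl);	/* unlock_kernel() */
	return ret;
}

int main(void)
{
	return (int)locked_ioctl(1, 2);
}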
diff --git a/drivers/ide/ide-timings.c b/drivers/ide/ide-timings.c
index 001a563..0e05f75 100644
--- a/drivers/ide/ide-timings.c
+++ b/drivers/ide/ide-timings.c
@@ -166,12 +166,13 @@ int ide_timing_compute(ide_drive_t *drive, u8 speed,
if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
memset(&p, 0, sizeof(p));
- if (speed <= XFER_PIO_2)
- p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
- else if ((speed <= XFER_PIO_4) ||
- (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
- p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
- else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
+ if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
+ if (speed <= XFER_PIO_2)
+ p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
+ else if ((speed <= XFER_PIO_4) ||
+ (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
+ p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
+ } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
p.cycle = id[ATA_ID_EIDE_DMA_MIN];
ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B);
@@ -185,11 +186,10 @@ int ide_timing_compute(ide_drive_t *drive, u8 speed,
/*
* Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
* S.M.A.R.T and some other commands. We have to ensure that the
- * DMA cycle timing is slower/equal than the fastest PIO timing.
+ * DMA cycle timing is slower/equal than the current PIO timing.
*/
if (speed >= XFER_SW_DMA_0) {
- u8 pio = ide_get_best_pio_mode(drive, 255, 5);
- ide_timing_compute(drive, XFER_PIO_0 + pio, &p, T, UT);
+ ide_timing_compute(drive, drive->pio_mode, &p, T, UT);
ide_timing_merge(&p, t, t, IDE_TIMING_ALL);
}
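The ide-timings hunk above changes which PIO timing the DMA timing is widened against: the drive's current drive->pio_mode rather than a freshly recomputed "best" mode. The widening itself is done by ide_timing_merge(), which roughly takes field-wise maxima of the two timings. A standalone model of that merge step (illustrative values):

#include <stdio.h>

struct timing { int setup, active, recover, cycle; };

static int max_i(int a, int b) { return a > b ? a : b; }

static void timing_merge(const struct timing *a, struct timing *b)
{
	b->setup = max_i(a->setup, b->setup);
	b->active = max_i(a->active, b->active);
	b->recover = max_i(a->recover, b->recover);
	b->cycle = max_i(a->cycle, b->cycle);
}

int main(void)
{
	struct timing pio = { 3, 6, 9, 18 };	/* current PIO timing */
	struct timing dma = { 1, 4, 5, 10 };	/* requested DMA timing */

	/* DMA must never be faster than the PIO used for IDENTIFY etc. */
	timing_merge(&pio, &dma);
	printf("merged: cycle=%d active=%d\n", dma.cycle, dma.active);
	return 0;
}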
diff --git a/drivers/ide/ide-xfer-mode.c b/drivers/ide/ide-xfer-mode.c
index 46d203c..5fc8d5c 100644
--- a/drivers/ide/ide-xfer-mode.c
+++ b/drivers/ide/ide-xfer-mode.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL(ide_xfer_verbose);
* This is used by most chipset support modules when "auto-tuning".
*/
-u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
+static u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
{
u16 *id = drive->id;
int pio_mode = -1, overridden = 0;
@@ -105,7 +105,6 @@ u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
return pio_mode;
}
-EXPORT_SYMBOL_GPL(ide_get_best_pio_mode);
int ide_pio_need_iordy(ide_drive_t *drive, const u8 pio)
{
@@ -135,17 +134,20 @@ int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
* set transfer mode on the device in ->set_pio_mode method...
*/
if (port_ops->set_dma_mode == NULL) {
- port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
+ drive->pio_mode = mode;
+ port_ops->set_pio_mode(hwif, drive);
return 0;
}
if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
if (ide_config_drive_speed(drive, mode))
return -1;
- port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
+ drive->pio_mode = mode;
+ port_ops->set_pio_mode(hwif, drive);
return 0;
} else {
- port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
+ drive->pio_mode = mode;
+ port_ops->set_pio_mode(hwif, drive);
return ide_config_drive_speed(drive, mode);
}
}
@@ -164,10 +166,12 @@ int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
if (ide_config_drive_speed(drive, mode))
return -1;
- port_ops->set_dma_mode(drive, mode);
+ drive->dma_mode = mode;
+ port_ops->set_dma_mode(hwif, drive);
return 0;
} else {
- port_ops->set_dma_mode(drive, mode);
+ drive->dma_mode = mode;
+ port_ops->set_dma_mode(hwif, drive);
return ide_config_drive_speed(drive, mode);
}
}
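The ide-xfer-mode hunks show the call-order contract the core now enforces: drive->pio_mode (or dma_mode) is always stored before the hook runs, and IDE_HFLAG_POST_SET_MODE decides whether the device or the host timings are programmed first. A compact standalone model of that ordering:

#include <stdio.h>

#define POST_SET_MODE 1

struct hwif { unsigned host_flags; };
struct drive { unsigned char pio_mode; };

static int config_drive_speed(struct drive *d, unsigned char mode)
{
	printf("SETFEATURES_XFER to device: 0x%02x\n", mode);
	return 0;
}

static void set_pio_mode(struct hwif *h, struct drive *d)
{
	printf("program host timings for 0x%02x\n", d->pio_mode);
}

static int ide_set_pio_mode(struct hwif *h, struct drive *d,
			    unsigned char mode)
{
	if (h->host_flags & POST_SET_MODE) {
		if (config_drive_speed(d, mode))
			return -1;
		d->pio_mode = mode;	/* device first, then host */
		set_pio_mode(h, d);
		return 0;
	}
	d->pio_mode = mode;		/* host first, then device */
	set_pio_mode(h, d);
	return config_drive_speed(d, mode);
}

int main(void)
{
	struct hwif h = { .host_flags = POST_SET_MODE };
	struct drive d = { 0 };

	return ide_set_pio_mode(&h, &d, 0x0c);	/* XFER_PIO_4 */
}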
diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
index 0d266a5..560e66d 100644
--- a/drivers/ide/it8172.c
+++ b/drivers/ide/it8172.c
@@ -37,12 +37,12 @@
#define DRV_NAME "IT8172"
-static void it8172_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void it8172_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u16 drive_enables;
u32 drive_timing;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
/*
* The highest value of DIOR/DIOW pulse width and recovery time
@@ -77,14 +77,14 @@ static void it8172_set_pio_mode(ide_drive_t *drive, const u8 pio)
pci_write_config_dword(dev, 0x44, drive_timing);
}
-static void it8172_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void it8172_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
int a_speed = 3 << (drive->dn * 4);
int u_flag = 1 << drive->dn;
int u_speed = 0;
u8 reg48, reg4a;
+ const u8 speed = drive->dma_mode;
pci_read_config_byte(dev, 0x48, &reg48);
pci_read_config_byte(dev, 0x4a, &reg4a);
@@ -98,14 +98,14 @@ static void it8172_set_dma_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_byte(dev, 0x4a, reg4a | u_speed);
} else {
const u8 mwdma_to_pio[] = { 0, 3, 4 };
- u8 pio;
pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
pci_write_config_byte(dev, 0x4a, reg4a & ~a_speed);
- pio = mwdma_to_pio[speed - XFER_MW_DMA_0];
+ drive->pio_mode =
+ mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
- it8172_set_pio_mode(drive, pio);
+ it8172_set_pio_mode(hwif, drive);
}
}
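The mwdma_to_pio[] fallback seen in it8172 (and in it8213, piix and slc90e66 below) maps MWDMA0/1/2 to the PIO mode with compatible cycle timing, PIO0/3/4, now recorded as a full mode byte in drive->pio_mode. A minimal check of the arithmetic, using the standard XFER_* base values:

#include <stdio.h>

#define XFER_PIO_0	0x08
#define XFER_MW_DMA_0	0x20

int main(void)
{
	static const unsigned char mwdma_to_pio[] = { 0, 3, 4 };
	unsigned char speed = XFER_MW_DMA_0 + 2;	/* MWDMA2 */
	unsigned char pio_mode =
		mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;

	printf("MWDMA%d -> PIO mode byte 0x%02x\n",
	       speed - XFER_MW_DMA_0, pio_mode);
	return 0;
}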
diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
index 4797616..46816ba 100644
--- a/drivers/ide/it8213.c
+++ b/drivers/ide/it8213.c
@@ -17,15 +17,14 @@
/**
* it8213_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* Set the interface PIO mode.
*/
-static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void it8213_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
int is_slave = drive->dn & 1;
int master_port = 0x40;
@@ -35,6 +34,7 @@ static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio)
u8 slave_data;
static DEFINE_SPINLOCK(tune_lock);
int control = 0;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
static const u8 timings[][2] = {
{ 0, 0 },
@@ -74,15 +74,14 @@ static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio)
/**
* it8213_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Tune the ITE chipset for the DMA mode.
*/
-static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void it8213_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 maslave = 0x40;
int a_speed = 3 << (drive->dn * 4);
@@ -92,6 +91,7 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
int u_speed = 0;
u16 reg4042, reg4a;
u8 reg48, reg54, reg55;
+ const u8 speed = drive->dma_mode;
pci_read_config_word(dev, maslave, &reg4042);
pci_read_config_byte(dev, 0x48, &reg48);
@@ -120,7 +120,6 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
} else {
const u8 mwdma_to_pio[] = { 0, 3, 4 };
- u8 pio;
if (reg48 & u_flag)
pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
@@ -132,11 +131,12 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
if (speed >= XFER_MW_DMA_0)
- pio = mwdma_to_pio[speed - XFER_MW_DMA_0];
+ drive->pio_mode =
+ mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
else
- pio = 2; /* only SWDMA2 is allowed */
+ drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
- it8213_set_pio_mode(drive, pio);
+ it8213_set_pio_mode(hwif, drive);
}
}
diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
index 51aa745..b2709c7 100644
--- a/drivers/ide/it821x.c
+++ b/drivers/ide/it821x.c
@@ -228,18 +228,18 @@ static void it821x_clock_strategy(ide_drive_t *drive)
/**
* it821x_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* Tune the host to the desired PIO mode taking into the consideration
* the maximum PIO mode supported by the other device on the cable.
*/
-static void it821x_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void it821x_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct it821x_dev *itdev = ide_get_hwifdata(hwif);
ide_drive_t *pair = ide_get_pair_dev(drive);
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
u8 unit = drive->dn & 1, set_pio = pio;
/* Spec says 89 ref driver uses 88 */
@@ -252,7 +252,7 @@ static void it821x_set_pio_mode(ide_drive_t *drive, const u8 pio)
* on the cable.
*/
if (pair) {
- u8 pair_pio = ide_get_best_pio_mode(pair, 255, 4);
+ u8 pair_pio = pair->pio_mode - XFER_PIO_0;
/* trim PIO to the slowest of the master/slave */
if (pair_pio < set_pio)
set_pio = pair_pio;
@@ -393,14 +393,16 @@ static int it821x_dma_end(ide_drive_t *drive)
/**
* it821x_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Tune the ITE chipset for the desired DMA mode.
*/
-static void it821x_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void it821x_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
+ const u8 speed = drive->dma_mode;
+
/*
* MWDMA tuning is really hard because our MWDMA and PIO
* timings are kept in the same place. We can switch in the
diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
index bf2be64..74c2c4a 100644
--- a/drivers/ide/jmicron.c
+++ b/drivers/ide/jmicron.c
@@ -80,19 +80,19 @@ static u8 jmicron_cable_detect(ide_hwif_t *hwif)
return ATA_CBL_PATA80;
}
-static void jmicron_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void jmicron_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
}
/**
* jmicron_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @mode: DMA mode
*
* As the JMicron snoops for timings we don't need to do anything here.
*/
-static void jmicron_set_dma_mode(ide_drive_t *drive, const u8 mode)
+static void jmicron_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
}
diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
index f1d70d6..1a53a4c 100644
--- a/drivers/ide/opti621.c
+++ b/drivers/ide/opti621.c
@@ -8,77 +8,6 @@
* Jan Harkes <jaharkes@cwi.nl>,
* Mark Lord <mlord@pobox.com>
* Some parts of code are from ali14xx.c and from rz1000.c.
- *
- * OPTi is trademark of OPTi, Octek is trademark of Octek.
- *
- * I used docs from OPTi databook, from ftp.opti.com, file 9123-0002.ps
- * and disassembled/traced setupvic.exe (DOS program).
- * It increases kernel code about 2 kB.
- * I don't have this card no more, but I hope I can get some in case
- * of needed development.
- * My card is Octek PIDE 1.01 (on card) or OPTiViC (program).
- * It has a place for a secondary connector in circuit, but nothing
- * is there. Also BIOS says no address for
- * secondary controller (see bellow in ide_init_opti621).
- * I've only tested this on my system, which only has one disk.
- * It's Western Digital WDAC2850, with PIO mode 3. The PCI bus
- * is at 20 MHz (I have DX2/80, I tried PCI at 40, but I got random
- * lockups). I tried the OCTEK double speed CD-ROM and
- * it does not work! But I can't boot DOS also, so it's probably
- * hardware fault. I have connected Conner 80MB, the Seagate 850MB (no
- * problems) and Seagate 1GB (as slave, WD as master). My experiences
- * with the third, 1GB drive: I got 3MB/s (hdparm), but sometimes
- * it slows to about 100kB/s! I don't know why and I have
- * not this drive now, so I can't try it again.
- * I write this driver because I lost the paper ("manual") with
- * settings of jumpers on the card and I have to boot Linux with
- * Loadlin except LILO, cause I have to run the setupvic.exe program
- * already or I get disk errors (my test: rpm -Vf
- * /usr/X11R6/bin/XF86_SVGA - or any big file).
- * Some numbers from hdparm -t /dev/hda:
- * Timing buffer-cache reads: 32 MB in 3.02 seconds =10.60 MB/sec
- * Timing buffered disk reads: 16 MB in 5.52 seconds = 2.90 MB/sec
- * I have 4 Megs/s before, but I don't know why (maybe changes
- * in hdparm test).
- * After release of 0.1, I got some successful reports, so it might work.
- *
- * The main problem with OPTi is that some timings for master
- * and slave must be the same. For example, if you have master
- * PIO 3 and slave PIO 0, driver have to set some timings of
- * master for PIO 0. Second problem is that opti621_set_pio_mode
- * got only one drive to set, but have to set both drives.
- * This is solved in compute_pios. If you don't set
- * the second drive, compute_pios use ide_get_best_pio_mode
- * for autoselect mode (you can change it to PIO 0, if you want).
- * If you then set the second drive to another PIO, the old value
- * (automatically selected) will be overrided by yours.
- * There is a 25/33MHz switch in configuration
- * register, but driver is written for use at any frequency.
- *
- * Version 0.1, Nov 8, 1996
- * by Jaromir Koutek, for 2.1.8.
- * Initial version of driver.
- *
- * Version 0.2
- * Number 0.2 skipped.
- *
- * Version 0.3, Nov 29, 1997
- * by Mark Lord (probably), for 2.1.68
- * Updates for use with new IDE block driver.
- *
- * Version 0.4, Dec 14, 1997
- * by Jan Harkes
- * Fixed some errors and cleaned the code.
- *
- * Version 0.5, Jan 2, 1998
- * by Jaromir Koutek
- * Updates for use with (again) new IDE block driver.
- * Update of documentation.
- *
- * Version 0.6, Jan 2, 1999
- * by Jaromir Koutek
- * Reversed to version 0.3 of the driver, because
- * 0.5 doesn't work.
*/
#include <linux/types.h>
@@ -133,12 +62,12 @@ static u8 read_reg(int reg)
return ret;
}
-static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void opti621_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
ide_drive_t *pair = ide_get_pair_dev(drive);
unsigned long flags;
- unsigned long mode = XFER_PIO_0 + pio, pair_mode;
+ unsigned long mode = drive->pio_mode, pair_mode;
+ const u8 pio = mode - XFER_PIO_0;
u8 tim, misc, addr_pio = pio, clk;
/* DRDY is default 2 (by OPTi Databook) */
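Several drivers in this patch (opti621 here, and it821x, palm_bk3710, siimage, tx4938ide, tx4939ide elsewhere) share timing registers between master and slave, so they trim to the slower device of the pair; with the new interface the pair's mode is read straight from pair->pio_mode instead of being recomputed. A standalone sketch of the trimming:

#include <stdio.h>

#define XFER_PIO_0 0x08

struct drive { unsigned char pio_mode; };

static unsigned char effective_pio(const struct drive *drive,
				   const struct drive *pair)
{
	unsigned char pio = drive->pio_mode - XFER_PIO_0;

	if (pair) {
		unsigned char pair_pio = pair->pio_mode - XFER_PIO_0;

		if (pair_pio < pio)
			pio = pair_pio;	/* slowest of master/slave wins */
	}
	return pio;
}

int main(void)
{
	struct drive master = { XFER_PIO_0 + 4 };
	struct drive slave = { XFER_PIO_0 + 2 };

	printf("programmed PIO mode: %u\n", effective_pio(&master, &slave));
	return 0;
}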
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index f8eddf0..9e8f4e1 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -166,7 +166,7 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
writel(val32, base + BK3710_DATRCVR);
if (mate) {
- u8 mode2 = ide_get_best_pio_mode(mate, 255, 4);
+ u8 mode2 = mate->pio_mode - XFER_PIO_0;
if (mode2 < mode)
mode = mode2;
@@ -188,10 +188,11 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
writel(val32, base + BK3710_REGRCVR);
}
-static void palm_bk3710_set_dma_mode(ide_drive_t *drive, u8 xferspeed)
+static void palm_bk3710_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
int is_slave = drive->dn & 1;
- void __iomem *base = (void *)drive->hwif->dma_base;
+ void __iomem *base = (void *)hwif->dma_base;
+ const u8 xferspeed = drive->dma_mode;
if (xferspeed >= XFER_UDMA_0) {
palm_bk3710_setudmamode(base, is_slave,
@@ -203,12 +204,13 @@ static void palm_bk3710_set_dma_mode(ide_drive_t *drive, u8 xferspeed)
}
}
-static void palm_bk3710_set_pio_mode(ide_drive_t *drive, u8 pio)
+static void palm_bk3710_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned int cycle_time;
int is_slave = drive->dn & 1;
ide_drive_t *mate;
- void __iomem *base = (void *)drive->hwif->dma_base;
+ void __iomem *base = (void *)hwif->dma_base;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
/*
* Obtain the drive PIO data for tuning the Palm Chip registers
diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
index 65ba823..9546fe2 100644
--- a/drivers/ide/pdc202xx_new.c
+++ b/drivers/ide/pdc202xx_new.c
@@ -129,11 +129,11 @@ static struct udma_timing {
{ 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */
};
-static void pdcnew_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void pdcnew_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
+ const u8 speed = drive->dma_mode;
/*
* IDE core issues SETFEATURES_XFER to the drive first (thanks to
@@ -167,11 +167,11 @@ static void pdcnew_set_dma_mode(ide_drive_t *drive, const u8 speed)
}
}
-static void pdcnew_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void pdcnew_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
if (max_dma_rate(dev) == 4) {
set_indexed_reg(hwif, 0x0c + adj, pio_timings[pio].reg0c);
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 35161dd..c5f3841 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2006-2007, 2009 MontaVista Software, Inc.
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
+ * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
*
* Portions Copyright (C) 1999 Promise Technology, Inc.
* Author: Frank Tiernan (frankt@promise.com)
@@ -21,23 +21,15 @@
#define DRV_NAME "pdc202xx_old"
-static void pdc_old_disable_66MHz_clock(ide_hwif_t *);
-
-static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed)
+static void pdc202xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 drive_pci = 0x60 + (drive->dn << 2);
+ const u8 speed = drive->dma_mode;
u8 AP = 0, BP = 0, CP = 0;
u8 TA = 0, TB = 0, TC = 0;
- /*
- * TODO: do this once per channel
- */
- if (dev->device != PCI_DEVICE_ID_PROMISE_20246)
- pdc_old_disable_66MHz_clock(hwif);
-
pci_read_config_byte(dev, drive_pci, &AP);
pci_read_config_byte(dev, drive_pci + 1, &BP);
pci_read_config_byte(dev, drive_pci + 2, &CP);
@@ -84,9 +76,10 @@ static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed)
}
}
-static void pdc202xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void pdc202xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- pdc202xx_set_mode(drive, XFER_PIO_0 + pio);
+ drive->dma_mode = drive->pio_mode;
+ pdc202xx_set_mode(hwif, drive);
}
static int pdc202xx_test_irq(ide_hwif_t *hwif)
@@ -100,13 +93,13 @@ static int pdc202xx_test_irq(ide_hwif_t *hwif)
* bit 7: error, bit 6: interrupting,
* bit 5: FIFO full, bit 4: FIFO empty
*/
- return ((sc1d & 0x50) == 0x40) ? 1 : 0;
+ return ((sc1d & 0x50) == 0x50) ? 1 : 0;
} else {
/*
* bit 3: error, bit 2: interrupting,
* bit 1: FIFO full, bit 0: FIFO empty
*/
- return ((sc1d & 0x05) == 0x04) ? 1 : 0;
+ return ((sc1d & 0x05) == 0x05) ? 1 : 0;
}
}
@@ -145,6 +138,11 @@ static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif)
outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg);
}
+static void pdc2026x_init_hwif(ide_hwif_t *hwif)
+{
+ pdc_old_disable_66MHz_clock(hwif);
+}
+
static void pdc202xx_dma_start(ide_drive_t *drive)
{
if (drive->current_speed > XFER_UDMA_2)
@@ -261,6 +259,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
{ \
.name = DRV_NAME, \
.init_chipset = init_chipset_pdc202xx, \
+ .init_hwif = pdc2026x_init_hwif, \
.port_ops = &pdc2026x_port_ops, \
.dma_ops = &pdc2026x_dma_ops, \
.host_flags = IDE_HFLAGS_PDC202XX, \
@@ -356,6 +355,6 @@ static void __exit pdc202xx_ide_exit(void)
module_init(pdc202xx_ide_init);
module_exit(pdc202xx_ide_exit);
-MODULE_AUTHOR("Andre Hedrick, Frank Tiernan");
+MODULE_AUTHOR("Andre Hedrick, Frank Tiernan, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for older Promise IDE");
MODULE_LICENSE("GPL");
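Per the comments in the pdc202xx_test_irq() hunk above, 0x50 masks the "interrupting" (bit 6) and "FIFO empty" (bit 4) status bits for the primary channel; the fix claims the interrupt only when both are set, i.e. the channel raised an interrupt and its FIFO has drained. A quick standalone check of the predicate:

#include <stdio.h>

static int test_irq(unsigned char sc1d)
{
	/* bit 6: interrupting, bit 4: FIFO empty (primary channel) */
	return (sc1d & 0x50) == 0x50;
}

int main(void)
{
	printf("irq + fifo empty: %d\n", test_irq(0x50));	/* claimed */
	printf("irq, fifo busy  : %d\n", test_irq(0x40));	/* not yet */
	printf("no irq          : %d\n", test_irq(0x10));	/* not ours */
	return 0;
}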
diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
index bf14f39..1bdca49 100644
--- a/drivers/ide/piix.c
+++ b/drivers/ide/piix.c
@@ -59,15 +59,14 @@ static int no_piix_dma;
/**
* piix_set_pio_mode - set host controller for PIO mode
+ * @port: port
* @drive: drive
- * @pio: PIO mode number
*
* Set the interface PIO mode based upon the settings done by AMI BIOS.
*/
-static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void piix_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
int is_slave = drive->dn & 1;
int master_port = hwif->channel ? 0x42 : 0x40;
@@ -77,6 +76,7 @@ static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio)
u8 slave_data;
static DEFINE_SPINLOCK(tune_lock);
int control = 0;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
/* ISP RTC */
static const u8 timings[][2]= {
@@ -127,16 +127,15 @@ static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio)
/**
* piix_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Set a PIIX host controller to the desired DMA mode. This involves
* programming the right timing data into the PCI configuration space.
*/
-static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void piix_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 maslave = hwif->channel ? 0x42 : 0x40;
int a_speed = 3 << (drive->dn * 4);
@@ -147,6 +146,7 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
int sitre;
u16 reg4042, reg4a;
u8 reg48, reg54, reg55;
+ const u8 speed = drive->dma_mode;
pci_read_config_word(dev, maslave, &reg4042);
sitre = (reg4042 & 0x4000) ? 1 : 0;
@@ -176,7 +176,6 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
} else {
const u8 mwdma_to_pio[] = { 0, 3, 4 };
- u8 pio;
if (reg48 & u_flag)
pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
@@ -188,11 +187,12 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
if (speed >= XFER_MW_DMA_0)
- pio = mwdma_to_pio[speed - XFER_MW_DMA_0];
+ drive->pio_mode =
+ mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
else
- pio = 2; /* only SWDMA2 is allowed */
+ drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
- piix_set_pio_mode(drive, pio);
+ piix_set_pio_mode(hwif, drive);
}
}
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 7a4e788..850ee45 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -496,12 +496,11 @@ static void pmac_write_devctl(ide_hwif_t *hwif, u8 ctl)
/*
* Old tuning functions (called on hdparm -p), sets up drive PIO timings
*/
-static void
-pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void pmac_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
pmac_ide_hwif_t *pmif =
(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
u32 *timings, t;
unsigned accessTicks, recTicks;
@@ -778,14 +777,14 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
#endif
}
-static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void pmac_ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
pmac_ide_hwif_t *pmif =
(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
int ret = 0;
u32 *timings, *timings2, tl[2];
u8 unit = drive->dn & 1;
+ const u8 speed = drive->dma_mode;
timings = &pmif->timings[unit];
timings2 = &pmif->timings[unit+2];
@@ -1651,8 +1650,8 @@ pmac_ide_dma_test_irq (ide_drive_t *drive)
if ((status & FLUSH) == 0)
break;
if (++timeout > 100) {
- printk(KERN_WARNING "ide%d, ide_dma_test_irq \
- timeout flushing channel\n", hwif->index);
+ printk(KERN_WARNING "ide%d, ide_dma_test_irq timeout flushing channel\n",
+ hwif->index);
break;
}
}
diff --git a/drivers/ide/qd65xx.c b/drivers/ide/qd65xx.c
index 74696ed..3f0244f 100644
--- a/drivers/ide/qd65xx.c
+++ b/drivers/ide/qd65xx.c
@@ -189,15 +189,13 @@ static void qd_set_timing (ide_drive_t *drive, u8 timing)
printk(KERN_DEBUG "%s: %#x\n", drive->name, timing);
}
-static void qd6500_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void qd6500_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
u16 *id = drive->id;
int active_time = 175;
int recovery_time = 415; /* worst case values from the dos driver */
- /*
- * FIXME: use "pio" value
- */
+ /* FIXME: use drive->pio_mode value */
if (!qd_find_disk_type(drive, &active_time, &recovery_time) &&
(id[ATA_ID_OLD_PIO_MODES] & 0xff) && (id[ATA_ID_FIELD_VALID] & 2) &&
id[ATA_ID_EIDE_PIO] >= 240) {
@@ -211,9 +209,9 @@ static void qd6500_set_pio_mode(ide_drive_t *drive, const u8 pio)
active_time, recovery_time));
}
-static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void qd6580_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
unsigned int cycle_time;
int active_time = 175;
diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
index d467478..134f1fd 100644
--- a/drivers/ide/sc1200.c
+++ b/drivers/ide/sc1200.c
@@ -122,13 +122,13 @@ out:
return mask;
}
-static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode)
+static void sc1200_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned int reg, timings;
unsigned short pci_clock;
unsigned int basereg = hwif->channel ? 0x50 : 0x40;
+ const u8 mode = drive->dma_mode;
static const u32 udma_timing[3][3] = {
{ 0x00921250, 0x00911140, 0x00911030 },
@@ -193,10 +193,10 @@ static int sc1200_dma_end(ide_drive_t *drive)
* will have valid default PIO timings set up before we get here.
*/
-static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void sc1200_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
int mode = -1;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
/*
* bad abuse of ->set_pio_mode interface
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
index 1104bb3..b7f5b0c 100644
--- a/drivers/ide/scc_pata.c
+++ b/drivers/ide/scc_pata.c
@@ -199,16 +199,15 @@ scc_ide_outsl(unsigned long port, void *addr, u32 count)
/**
* scc_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* Load the timing settings for this device mode into the
* controller.
*/
-static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void scc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct scc_ports *ports = ide_get_hwifdata(hwif);
unsigned long ctl_base = ports->ctl;
unsigned long cckctrl_port = ctl_base + 0xff0;
@@ -216,6 +215,7 @@ static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
unsigned long pioct_port = ctl_base + 0x004;
unsigned long reg;
int offset;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
reg = in_be32((void __iomem *)cckctrl_port);
if (reg & CCKCTRL_ATACLKOEN) {
@@ -231,16 +231,15 @@ static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
/**
* scc_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Load the timing settings for this device mode into the
* controller.
*/
-static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void scc_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct scc_ports *ports = ide_get_hwifdata(hwif);
unsigned long ctl_base = ports->ctl;
unsigned long cckctrl_port = ctl_base + 0xff0;
@@ -254,6 +253,7 @@ static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
int offset, idx;
unsigned long reg;
unsigned long jcactsel;
+ const u8 speed = drive->dma_mode;
reg = in_be32((void __iomem *)cckctrl_port);
if (reg & CCKCTRL_ATACLKOEN) {
@@ -872,20 +872,18 @@ static struct pci_driver scc_pci_driver = {
.remove = __devexit_p(scc_remove),
};
-static int scc_ide_init(void)
+static int __init scc_ide_init(void)
{
return ide_pci_register_driver(&scc_pci_driver);
}
-module_init(scc_ide_init);
-/* -- No exit code?
-static void scc_ide_exit(void)
+static void __exit scc_ide_exit(void)
{
- ide_pci_unregister_driver(&scc_pci_driver);
+ pci_unregister_driver(&scc_pci_driver);
}
-module_exit(scc_ide_exit);
- */
+module_init(scc_ide_init);
+module_exit(scc_ide_exit);
MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
MODULE_LICENSE("GPL");
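The scc_pata hunk above resurrects the exit path that had been commented out: the exit handler now calls pci_unregister_driver() (the old dead code referenced a non-existent ide_pci_unregister_driver()), and both handlers gain __init/__exit annotations so their code can be discarded when unneeded. A generic sketch of the fixed module plumbing (not the SCC driver itself; the pci_driver fields are assumed to be filled in elsewhere):

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver example_pci_driver = {
	.name	= "example",
	/* .id_table / .probe / .remove assumed filled in elsewhere */
};

static int __init example_init(void)
{
	return pci_register_driver(&example_pci_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");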
diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
index b6554ef..35fb8da 100644
--- a/drivers/ide/serverworks.c
+++ b/drivers/ide/serverworks.c
@@ -2,7 +2,7 @@
* Copyright (C) 1998-2000 Michel Aubry
* Copyright (C) 1998-2000 Andrzej Krzysztofowicz
* Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
+ * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
* Portions copyright (c) 2001 Sun Microsystems
*
*
@@ -52,8 +52,6 @@ static const char *svwks_bad_ata100[] = {
NULL
};
-static struct pci_dev *isa_dev;
-
static int check_in_drive_lists (ide_drive_t *drive, const char **list)
{
char *m = (char *)&drive->id[ATA_ID_PROD];
@@ -67,26 +65,14 @@ static int check_in_drive_lists (ide_drive_t *drive, const char **list)
static u8 svwks_udma_filter(ide_drive_t *drive)
{
struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u8 mask = 0;
- if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
+ if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) {
return 0x1f;
- if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
- u32 reg = 0;
- if (isa_dev)
- pci_read_config_dword(isa_dev, 0x64, &reg);
-
- /*
- * Don't enable UDMA on disk devices for the moment
- */
- if(drive->media == ide_disk)
- return 0;
- /* Check the OSB4 DMA33 enable bit */
- return ((reg & 0x00004000) == 0x00004000) ? 0x07 : 0;
} else if (dev->revision < SVWKS_CSB5_REVISION_NEW) {
return 0x07;
- } else if (dev->revision >= SVWKS_CSB5_REVISION_NEW) {
- u8 btr = 0, mode;
+ } else {
+ u8 btr = 0, mode, mask;
+
pci_read_config_byte(dev, 0x5A, &btr);
mode = btr & 0x3;
@@ -101,13 +87,9 @@ static u8 svwks_udma_filter(ide_drive_t *drive)
case 1: mask = 0x07; break;
default: mask = 0x00; break;
}
- }
- if (((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
- (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) &&
- (!(PCI_FUNC(dev->devfn) & 1)))
- mask = 0x1f;
- return mask;
+ return mask;
+ }
}
static u8 svwks_csb_check (struct pci_dev *dev)
@@ -124,12 +106,13 @@ static u8 svwks_csb_check (struct pci_dev *dev)
return 0;
}
-static void svwks_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void svwks_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
static const u8 pio_modes[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
static const u8 drive_pci[] = { 0x41, 0x40, 0x43, 0x42 };
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]);
@@ -145,14 +128,14 @@ static void svwks_set_pio_mode(ide_drive_t *drive, const u8 pio)
}
}
-static void svwks_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void svwks_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
static const u8 udma_modes[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
static const u8 dma_modes[] = { 0x77, 0x21, 0x20 };
static const u8 drive_pci2[] = { 0x45, 0x44, 0x47, 0x46 };
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
+ const u8 speed = drive->dma_mode;
u8 unit = drive->dn & 1;
u8 ultra_enable = 0, ultra_timing = 0, dma_timing = 0;
@@ -185,8 +168,9 @@ static int init_chipset_svwks(struct pci_dev *dev)
/* OSB4 : South Bridge and IDE */
if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
- isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
- PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
+ struct pci_dev *isa_dev =
+ pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+ PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
if (isa_dev) {
pci_read_config_dword(isa_dev, 0x64, &reg);
reg &= ~0x00002000; /* disable 600ns interrupt mask */
@@ -195,6 +179,7 @@ static int init_chipset_svwks(struct pci_dev *dev)
"enabled.\n", pci_name(dev));
reg |= 0x00004000; /* enable UDMA/33 support */
pci_write_config_dword(isa_dev, 0x64, reg);
+ pci_dev_put(isa_dev);
}
}
@@ -343,7 +328,6 @@ static u8 svwks_cable_detect(ide_hwif_t *hwif)
static const struct ide_port_ops osb4_port_ops = {
.set_pio_mode = svwks_set_pio_mode,
.set_dma_mode = svwks_set_dma_mode,
- .udma_filter = svwks_udma_filter,
};
static const struct ide_port_ops svwks_port_ops = {
@@ -460,6 +444,6 @@ static void __exit svwks_ide_exit(void)
module_init(svwks_ide_init);
module_exit(svwks_ide_exit);
-MODULE_AUTHOR("Michael Aubry. Andrzej Krzysztofowicz, Andre Hedrick");
+MODULE_AUTHOR("Michael Aubry. Andrzej Krzysztofowicz, Andre Hedrick, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for Serverworks OSB4/CSB5/CSB6 IDE");
MODULE_LICENSE("GPL");
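The serverworks changes above drop the long-lived global isa_dev: pci_get_device() takes a reference, so a lookup confined to one function must balance it with pci_dev_put() rather than parking the pointer for later. A kernel-style sketch of the pattern (fragment, using the same vendor/device IDs as the hunk):

#include <linux/pci.h>

static void example_poke_companion(void)
{
	struct pci_dev *isa_dev =
		pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
			       PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
	u32 reg;

	if (isa_dev) {
		pci_read_config_dword(isa_dev, 0x64, &reg);
		pci_write_config_dword(isa_dev, 0x64, reg | 0x00004000);
		pci_dev_put(isa_dev);	/* balance pci_get_device() */
	}
}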
diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c
index b7d61dc..e3ea591 100644
--- a/drivers/ide/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -255,7 +255,7 @@ static int sgiioc4_dma_end(ide_drive_t *drive)
return dma_stat;
}
-static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void sgiioc4_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
}
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
index d95df52..ddeda44 100644
--- a/drivers/ide/siimage.c
+++ b/drivers/ide/siimage.c
@@ -229,19 +229,18 @@ static u8 sil_sata_udma_filter(ide_drive_t *drive)
/**
* sil_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* Load the timing settings for this device mode into the
* controller.
*/
-static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
+static void sil_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
static const u16 tf_speed[] = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 };
static const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
ide_drive_t *pair = ide_get_pair_dev(drive);
u32 speedt = 0;
@@ -249,6 +248,7 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
unsigned long addr = siimage_seldev(drive, 0x04);
unsigned long tfaddr = siimage_selreg(hwif, 0x02);
unsigned long base = (unsigned long)hwif->hwif_data;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
u8 tf_pio = pio;
u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
u8 addr_mask = hwif->channel ? (mmio ? 0xF4 : 0x84)
@@ -258,7 +258,7 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
/* trim *taskfile* PIO to the slowest of the master/slave */
if (pair) {
- u8 pair_pio = ide_get_best_pio_mode(pair, 255, 4);
+ u8 pair_pio = pair->pio_mode - XFER_PIO_0;
if (pair_pio < tf_pio)
tf_pio = pair_pio;
@@ -289,19 +289,18 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
/**
* sil_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Tune the SiI chipset for the desired DMA mode.
*/
-static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void sil_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
static const u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 };
static const u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 };
static const u16 dma[] = { 0x2208, 0x10C2, 0x10C1 };
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long base = (unsigned long)hwif->hwif_data;
u16 ultra = 0, multi = 0;
@@ -311,6 +310,7 @@ static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
: (mmio ? 0xB4 : 0x80);
unsigned long ma = siimage_seldev(drive, 0x08);
unsigned long ua = siimage_seldev(drive, 0x0C);
+ const u8 speed = drive->dma_mode;
scsc = sil_ioread8 (dev, base + (mmio ? 0x4A : 0x8A));
mode = sil_ioread8 (dev, base + addr_mask);
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
index 4687060..db7f4e7 100644
--- a/drivers/ide/sis5513.c
+++ b/drivers/ide/sis5513.c
@@ -290,10 +290,10 @@ static void config_drive_art_rwp(ide_drive_t *drive)
pci_write_config_byte(dev, 0x4b, rw_prefetch);
}
-static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void sis_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
config_drive_art_rwp(drive);
- sis_program_timings(drive, XFER_PIO_0 + pio);
+ sis_program_timings(drive, drive->pio_mode);
}
static void sis_ata133_program_udma_timings(ide_drive_t *drive, const u8 mode)
@@ -340,8 +340,10 @@ static void sis_program_udma_timings(ide_drive_t *drive, const u8 mode)
sis_ata33_program_udma_timings(drive, mode);
}
-static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void sis_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
+ const u8 speed = drive->dma_mode;
+
if (speed >= XFER_UDMA_0)
sis_program_udma_timings(drive, speed);
else
diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
index 3c2bbf0..f21dc2a 100644
--- a/drivers/ide/sl82c105.c
+++ b/drivers/ide/sl82c105.c
@@ -63,12 +63,13 @@ static unsigned int get_pio_timings(ide_drive_t *drive, u8 pio)
/*
* Configure the chipset for PIO mode.
*/
-static void sl82c105_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void sl82c105_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long timings = (unsigned long)ide_get_drivedata(drive);
int reg = 0x44 + drive->dn * 4;
u16 drv_ctrl;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
drv_ctrl = get_pio_timings(drive, pio);
@@ -91,11 +92,12 @@ static void sl82c105_set_pio_mode(ide_drive_t *drive, const u8 pio)
/*
* Configure the chipset for DMA mode.
*/
-static void sl82c105_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void sl82c105_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
static u16 mwdma_timings[] = {0x0707, 0x0201, 0x0200};
unsigned long timings = (unsigned long)ide_get_drivedata(drive);
u16 drv_ctrl;
+ const u8 speed = drive->dma_mode;
drv_ctrl = mwdma_timings[speed - XFER_MW_DMA_0];
diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
index 1ccfb40..864ffe0 100644
--- a/drivers/ide/slc90e66.c
+++ b/drivers/ide/slc90e66.c
@@ -18,9 +18,8 @@
static DEFINE_SPINLOCK(slc90e66_lock);
-static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void slc90e66_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
int is_slave = drive->dn & 1;
int master_port = hwif->channel ? 0x42 : 0x40;
@@ -29,6 +28,8 @@ static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio)
u16 master_data;
u8 slave_data;
int control = 0;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
+
/* ISP RTC */
static const u8 timings[][2] = {
{ 0, 0 },
@@ -71,14 +72,14 @@ static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio)
spin_unlock_irqrestore(&slc90e66_lock, flags);
}
-static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void slc90e66_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 maslave = hwif->channel ? 0x42 : 0x40;
int sitre = 0, a_speed = 7 << (drive->dn * 4);
int u_speed = 0, u_flag = 1 << drive->dn;
u16 reg4042, reg44, reg48, reg4a;
+ const u8 speed = drive->dma_mode;
pci_read_config_word(dev, maslave, &reg4042);
sitre = (reg4042 & 0x4000) ? 1 : 0;
@@ -98,7 +99,6 @@ static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed)
}
} else {
const u8 mwdma_to_pio[] = { 0, 3, 4 };
- u8 pio;
if (reg48 & u_flag)
pci_write_config_word(dev, 0x48, reg48 & ~u_flag);
@@ -106,11 +106,12 @@ static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
if (speed >= XFER_MW_DMA_0)
- pio = mwdma_to_pio[speed - XFER_MW_DMA_0];
+ drive->pio_mode =
+ mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
else
- pio = 2; /* only SWDMA2 is allowed */
+ drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
- slc90e66_set_pio_mode(drive, pio);
+ slc90e66_set_pio_mode(hwif, drive);
}
}
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
index 05a93d6..e444d24 100644
--- a/drivers/ide/tc86c001.c
+++ b/drivers/ide/tc86c001.c
@@ -13,11 +13,11 @@
#define DRV_NAME "tc86c001"
-static void tc86c001_set_mode(ide_drive_t *drive, const u8 speed)
+static void tc86c001_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
unsigned long scr_port = hwif->config_data + (drive->dn ? 0x02 : 0x00);
u16 mode, scr = inw(scr_port);
+ const u8 speed = drive->dma_mode;
switch (speed) {
case XFER_UDMA_4: mode = 0x00c0; break;
@@ -41,9 +41,10 @@ static void tc86c001_set_mode(ide_drive_t *drive, const u8 speed)
outw(scr, scr_port);
}
-static void tc86c001_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void tc86c001_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- tc86c001_set_mode(drive, XFER_PIO_0 + pio);
+ drive->dma_mode = drive->pio_mode;
+ tc86c001_set_mode(hwif, drive);
}
/*
diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
index 8773c3b..7953447 100644
--- a/drivers/ide/triflex.c
+++ b/drivers/ide/triflex.c
@@ -34,9 +34,8 @@
#define DRV_NAME "triflex"
-static void triflex_set_mode(ide_drive_t *drive, const u8 speed)
+static void triflex_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u32 triflex_timings = 0;
u16 timing = 0;
@@ -44,7 +43,7 @@ static void triflex_set_mode(ide_drive_t *drive, const u8 speed)
pci_read_config_dword(dev, channel_offset, &triflex_timings);
- switch(speed) {
+ switch (drive->dma_mode) {
case XFER_MW_DMA_2:
timing = 0x0103;
break;
@@ -82,9 +81,10 @@ static void triflex_set_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_dword(dev, channel_offset, triflex_timings);
}
-static void triflex_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void triflex_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- triflex_set_mode(drive, XFER_PIO_0 + pio);
+ drive->dma_mode = drive->pio_mode;
+ triflex_set_mode(hwif, drive);
}
static const struct ide_port_ops triflex_port_ops = {
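
The slc90e66, tc86c001 and triflex hunks above all apply one mechanical conversion: the ->set_pio_mode/->set_dma_mode port methods now take the hwif explicitly and read the target mode from the drive (drive->pio_mode, drive->dma_mode) instead of receiving it as an argument, and PIO-only wrappers stage pio_mode into dma_mode before delegating. A minimal sketch of the converted pair for a hypothetical "foo" host driver, assuming the post-conversion prototypes in <linux/ide.h>:

    static void foo_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
    {
            const u8 speed = drive->dma_mode;  /* was the old 'speed' argument */

            /* program the chipset timing registers for 'speed' here */
    }

    static void foo_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
    {
            /* PIO-only wrapper: reuse the DMA path for XFER_PIO_n values */
            drive->dma_mode = drive->pio_mode;
            foo_set_dma_mode(hwif, drive);
    }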
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
index fd59c0d..1d80f1f 100644
--- a/drivers/ide/tx4938ide.c
+++ b/drivers/ide/tx4938ide.c
@@ -56,16 +56,15 @@ static void tx4938ide_tune_ebusc(unsigned int ebus_ch,
&tx4938_ebuscptr->cr[ebus_ch]);
}
-static void tx4938ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void tx4938ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct tx4938ide_platform_info *pdata = hwif->dev->platform_data;
- u8 safe = pio;
+ u8 safe = drive->pio_mode - XFER_PIO_0;
ide_drive_t *pair;
pair = ide_get_pair_dev(drive);
if (pair)
- safe = min(safe, ide_get_best_pio_mode(pair, 255, 5));
+ safe = min(safe, pair->pio_mode - XFER_PIO_0);
tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, safe);
}
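
Both TX49xx hunks (this one and the tx4939ide one below) drop ide_get_best_pio_mode() on the pair device in favor of the pair's already-programmed pio_mode. A hedged sketch of the clamping idiom, assuming the IDE core has stored the negotiated mode in pair->pio_mode before the port method runs (helper name hypothetical):

    static u8 foo_safe_pio(ide_drive_t *drive)
    {
            u8 safe = drive->pio_mode - XFER_PIO_0;
            ide_drive_t *pair = ide_get_pair_dev(drive);

            /* shared timing registers: obey the slower drive on the cable */
            if (pair)
                    safe = min_t(u8, safe, pair->pio_mode - XFER_PIO_0);
            return safe;
    }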
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 64b58ec..3c73677 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -104,17 +104,17 @@ static void tx4939ide_writeb(u8 val, void __iomem *base, u32 reg)
#define TX4939IDE_BASE(hwif) ((void __iomem *)(hwif)->extra_base)
-static void tx4939ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void tx4939ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
int is_slave = drive->dn;
u32 mask, val;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
u8 safe = pio;
ide_drive_t *pair;
pair = ide_get_pair_dev(drive);
if (pair)
- safe = min(safe, ide_get_best_pio_mode(pair, 255, 4));
+ safe = min(safe, pair->pio_mode - XFER_PIO_0);
/*
* Update Command Transfer Mode for master/slave and Data
* Transfer Mode for this drive.
@@ -125,10 +125,10 @@ static void tx4939ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
/* tx4939ide_tf_load_fixup() will set the Sys_Ctl register */
}
-static void tx4939ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
+static void tx4939ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
u32 mask, val;
+ const u8 mode = drive->dma_mode;
/* Update Data Transfer Mode for this drive. */
if (mode >= XFER_UDMA_0)
diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c
index 60f936e..47adcd0 100644
--- a/drivers/ide/umc8672.c
+++ b/drivers/ide/umc8672.c
@@ -104,10 +104,11 @@ static void umc_set_speeds(u8 speeds[])
speeds[0], speeds[1], speeds[2], speeds[3]);
}
-static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void umc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif, *mate = hwif->mate;
+ ide_hwif_t *mate = hwif->mate;
unsigned long uninitialized_var(flags);
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
printk("%s: setting umc8672 to PIO mode%d (speed %d)\n",
drive->name, pio, pio_to_umc[pio]);
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index 028de26..e65d010 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -6,7 +6,7 @@
* vt8235, vt8237, vt8237a
*
* Copyright (c) 2000-2002 Vojtech Pavlik
- * Copyright (c) 2007 Bartlomiej Zolnierkiewicz
+ * Copyright (c) 2007-2010 Bartlomiej Zolnierkiewicz
*
* Based on the work of:
* Michel Aubry
@@ -54,6 +54,11 @@
#define VIA_NO_UNMASK 0x08 /* Doesn't work with IRQ unmasking on */
#define VIA_BAD_ID 0x10 /* Has wrong vendor ID (0x1107) */
#define VIA_BAD_AST 0x20 /* Don't touch Address Setup Timing */
+#define VIA_SATA_PATA 0x80 /* SATA/PATA combined configuration */
+
+enum {
+ VIA_IDFLAG_SINGLE = (1 << 1), /* single channel controller */
+};
/*
* VIA SouthBridge chips.
@@ -67,11 +72,13 @@ static struct via_isa_bridge {
u8 udma_mask;
u8 flags;
} via_isa_bridges[] = {
- { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+ { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
+ { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
+ { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
+ { "vt8261", PCI_DEVICE_ID_VIA_8261, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+ { "vt6415", PCI_DEVICE_ID_VIA_6410, 0x00, 0xff, ATA_UDMA6, VIA_BAD_AST },
{ "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
@@ -92,6 +99,7 @@ static struct via_isa_bridge {
{ "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, 0x00, VIA_SET_FIFO },
{ "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK },
{ "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
+ { "vtxxxx", PCI_DEVICE_ID_VIA_ANON, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ NULL }
};
@@ -102,6 +110,7 @@ struct via82cxxx_dev
{
struct via_isa_bridge *via_config;
unsigned int via_80w;
+ u8 cached_device[2];
};
/**
@@ -137,30 +146,45 @@ static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing)
case ATA_UDMA4: t = timing->udma ? (0xe8 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x0f; break;
case ATA_UDMA5: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break;
case ATA_UDMA6: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break;
- default: return;
}
- pci_write_config_byte(dev, VIA_UDMA_TIMING + (3 - dn), t);
+ /* Program UDMA timing only if the controller is UDMA capable */
+ if (vdev->via_config->udma_mask) {
+ u8 udma_etc;
+
+ pci_read_config_byte(dev, VIA_UDMA_TIMING + 3 - dn, &udma_etc);
+
+ /* clear transfer mode bit */
+ udma_etc &= ~0x20;
+
+ if (timing->udma) {
+ /* preserve 80-wire cable detection bit */
+ udma_etc &= 0x10;
+ udma_etc |= t;
+ }
+
+ pci_write_config_byte(dev, VIA_UDMA_TIMING + 3 - dn, udma_etc);
+ }
}
/**
* via_set_drive - configure transfer mode
+ * @hwif: port
* @drive: Drive to set up
- * @speed: desired speed
*
* via_set_drive() computes timing values and configures the chipset to
* the desired transfer mode. It can also be called by upper layers.
*/
-static void via_set_drive(ide_drive_t *drive, const u8 speed)
+static void via_set_drive(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
ide_drive_t *peer = ide_get_pair_dev(drive);
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
struct via82cxxx_dev *vdev = host->host_priv;
struct ide_timing t, p;
unsigned int T, UT;
+ const u8 speed = drive->dma_mode;
T = 1000000000 / via_clock;
@@ -175,7 +199,7 @@ static void via_set_drive(ide_drive_t *drive, const u8 speed)
ide_timing_compute(drive, speed, &t, T, UT);
if (peer) {
- ide_timing_compute(peer, peer->current_speed, &p, T, UT);
+ ide_timing_compute(peer, peer->pio_mode, &p, T, UT);
ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT);
}
@@ -184,22 +208,24 @@ static void via_set_drive(ide_drive_t *drive, const u8 speed)
/**
* via_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* A callback from the upper layers for PIO-only tuning.
*/
-static void via_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void via_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- via_set_drive(drive, XFER_PIO_0 + pio);
+ drive->dma_mode = drive->pio_mode;
+ via_set_drive(hwif, drive);
}
static struct via_isa_bridge *via_config_find(struct pci_dev **isa)
{
struct via_isa_bridge *via_config;
- for (via_config = via_isa_bridges; via_config->id; via_config++)
+ for (via_config = via_isa_bridges;
+ via_config->id != PCI_DEVICE_ID_VIA_ANON; via_config++)
if ((*isa = pci_get_device(PCI_VENDOR_ID_VIA +
!!(via_config->flags & VIA_BAD_ID),
via_config->id, NULL))) {
@@ -362,6 +388,9 @@ static u8 via82cxxx_cable_detect(ide_hwif_t *hwif)
if (via_cable_override(pdev))
return ATA_CBL_PATA40_SHORT;
+ if ((vdev->via_config->flags & VIA_SATA_PATA) && hwif->channel == 0)
+ return ATA_CBL_SATA;
+
if ((vdev->via_80w >> hwif->channel) & 1)
return ATA_CBL_PATA80;
else
@@ -374,10 +403,66 @@ static const struct ide_port_ops via_port_ops = {
.cable_detect = via82cxxx_cable_detect,
};
+static void via_write_devctl(ide_hwif_t *hwif, u8 ctl)
+{
+ struct via82cxxx_dev *vdev = hwif->host->host_priv;
+
+ outb(ctl, hwif->io_ports.ctl_addr);
+ outb(vdev->cached_device[hwif->channel], hwif->io_ports.device_addr);
+}
+
+static void __via_dev_select(ide_drive_t *drive, u8 select)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct via82cxxx_dev *vdev = hwif->host->host_priv;
+
+ outb(select, hwif->io_ports.device_addr);
+ vdev->cached_device[hwif->channel] = select;
+}
+
+static void via_dev_select(ide_drive_t *drive)
+{
+ __via_dev_select(drive, drive->select | ATA_DEVICE_OBS);
+}
+
+static void via_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+
+ if (valid & IDE_VALID_FEATURE)
+ outb(tf->feature, io_ports->feature_addr);
+ if (valid & IDE_VALID_NSECT)
+ outb(tf->nsect, io_ports->nsect_addr);
+ if (valid & IDE_VALID_LBAL)
+ outb(tf->lbal, io_ports->lbal_addr);
+ if (valid & IDE_VALID_LBAM)
+ outb(tf->lbam, io_ports->lbam_addr);
+ if (valid & IDE_VALID_LBAH)
+ outb(tf->lbah, io_ports->lbah_addr);
+ if (valid & IDE_VALID_DEVICE)
+ __via_dev_select(drive, tf->device);
+}
+
+const struct ide_tp_ops via_tp_ops = {
+ .exec_command = ide_exec_command,
+ .read_status = ide_read_status,
+ .read_altstatus = ide_read_altstatus,
+ .write_devctl = via_write_devctl,
+
+ .dev_select = via_dev_select,
+ .tf_load = via_tf_load,
+ .tf_read = ide_tf_read,
+
+ .input_data = ide_input_data,
+ .output_data = ide_output_data,
+};
+
static const struct ide_port_info via82cxxx_chipset __devinitdata = {
.name = DRV_NAME,
.init_chipset = init_chipset_via82cxxx,
.enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
+ .tp_ops = &via_tp_ops,
.port_ops = &via_port_ops,
.host_flags = IDE_HFLAG_PIO_NO_BLACKLIST |
IDE_HFLAG_POST_SET_MODE |
@@ -402,11 +487,6 @@ static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_i
* Find the ISA bridge and check we know what it is.
*/
via_config = via_config_find(&isa);
- if (!via_config->id) {
- printk(KERN_WARNING DRV_NAME " %s: unknown chipset, skipping\n",
- pci_name(dev));
- return -ENODEV;
- }
/*
* Print the boot message.
@@ -436,10 +516,13 @@ static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_i
via_clock = 33333;
}
- if (idx == 0)
- d.host_flags |= IDE_HFLAG_NO_AUTODMA;
- else
+ if (idx == 1)
d.enablebits[1].reg = d.enablebits[0].reg = 0;
+ else
+ d.host_flags |= IDE_HFLAG_NO_AUTODMA;
+
+ if (idx == VIA_IDFLAG_SINGLE)
+ d.host_flags |= IDE_HFLAG_SINGLE;
if ((via_config->flags & VIA_NO_UNMASK) == 0)
d.host_flags |= IDE_HFLAG_UNMASK_IRQS;
@@ -475,8 +558,9 @@ static const struct pci_device_id via_pci_tbl[] = {
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), 0 },
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), 0 },
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_CX700_IDE), 0 },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), 0 },
+ { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), VIA_IDFLAG_SINGLE },
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), 1 },
+ { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6415), 1 },
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 },
{ 0, },
};
@@ -504,6 +588,6 @@ static void __exit via_ide_exit(void)
module_init(via_ide_init);
module_exit(via_ide_exit);
-MODULE_AUTHOR("Vojtech Pavlik, Michel Aubry, Jeff Garzik, Andre Hedrick");
+MODULE_AUTHOR("Vojtech Pavlik, Bartlomiej Zolnierkiewicz, Michel Aubry, Jeff Garzik, Andre Hedrick");
MODULE_DESCRIPTION("PCI driver module for VIA IDE");
MODULE_LICENSE("GPL");
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index dd0db67..975adce 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -20,6 +20,7 @@ config INFINIBAND_USER_MAD
config INFINIBAND_USER_ACCESS
tristate "InfiniBand userspace access (verbs and CM)"
+ select ANON_INODES
---help---
Userspace InfiniBand access support. This enables the
kernel side of userspace verbs and the userspace
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f504c9b..1b09b73 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1215,15 +1215,18 @@ static void ib_ucm_release_dev(struct device *dev)
ucm_dev = container_of(dev, struct ib_ucm_device, dev);
cdev_del(&ucm_dev->cdev);
- clear_bit(ucm_dev->devnum, dev_map);
+ if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
+ clear_bit(ucm_dev->devnum, dev_map);
+ else
+ clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
kfree(ucm_dev);
}
static const struct file_operations ucm_fops = {
- .owner = THIS_MODULE,
- .open = ib_ucm_open,
+ .owner = THIS_MODULE,
+ .open = ib_ucm_open,
.release = ib_ucm_close,
- .write = ib_ucm_write,
+ .write = ib_ucm_write,
.poll = ib_ucm_poll,
};
@@ -1237,8 +1240,32 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
+static dev_t overflow_maj;
+static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
+static int find_overflow_devnum(void)
+{
+ int ret;
+
+ if (!overflow_maj) {
+ ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
+ "infiniband_cm");
+ if (ret) {
+ printk(KERN_ERR "ucm: couldn't register dynamic device number\n");
+ return ret;
+ }
+ }
+
+ ret = find_first_zero_bit(overflow_map, IB_UCM_MAX_DEVICES);
+ if (ret >= IB_UCM_MAX_DEVICES)
+ return -1;
+
+ return ret;
+}
+
static void ib_ucm_add_one(struct ib_device *device)
{
+ int devnum;
+ dev_t base;
struct ib_ucm_device *ucm_dev;
if (!device->alloc_ucontext ||
@@ -1251,16 +1278,25 @@ static void ib_ucm_add_one(struct ib_device *device)
ucm_dev->ib_dev = device;
- ucm_dev->devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
- if (ucm_dev->devnum >= IB_UCM_MAX_DEVICES)
- goto err;
-
- set_bit(ucm_dev->devnum, dev_map);
+ devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
+ if (devnum >= IB_UCM_MAX_DEVICES) {
+ devnum = find_overflow_devnum();
+ if (devnum < 0)
+ goto err;
+
+ ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES;
+ base = devnum + overflow_maj;
+ set_bit(devnum, overflow_map);
+ } else {
+ ucm_dev->devnum = devnum;
+ base = devnum + IB_UCM_BASE_DEV;
+ set_bit(devnum, dev_map);
+ }
cdev_init(&ucm_dev->cdev, &ucm_fops);
ucm_dev->cdev.owner = THIS_MODULE;
kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum);
- if (cdev_add(&ucm_dev->cdev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1))
+ if (cdev_add(&ucm_dev->cdev, base, 1))
goto err;
ucm_dev->dev.class = &cm_class;
@@ -1281,7 +1317,10 @@ err_dev:
device_unregister(&ucm_dev->dev);
err_cdev:
cdev_del(&ucm_dev->cdev);
- clear_bit(ucm_dev->devnum, dev_map);
+ if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
+ clear_bit(devnum, dev_map);
+ else
+ clear_bit(devnum, overflow_map);
err:
kfree(ucm_dev);
return;
@@ -1340,6 +1379,8 @@ static void __exit ib_ucm_cleanup(void)
ib_unregister_client(&ucm_client);
class_remove_file(&cm_class, &class_attr_abi_version);
unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
+ if (overflow_maj)
+ unregister_chrdev_region(overflow_maj, IB_UCM_MAX_DEVICES);
idr_destroy(&ctx_id_table);
}
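
The overflow scheme keeps the original static region for the first IB_UCM_MAX_DEVICES devices and lazily allocates one extra dynamic major for the rest, so existing /dev numbering is unchanged. An illustration (not driver code) of the resulting dev_t arithmetic; overflow_maj is the dev_t that alloc_chrdev_region() stored above:

    static dev_t ucm_devt(int devnum)
    {
            if (devnum < IB_UCM_MAX_DEVICES)
                    return IB_UCM_BASE_DEV + devnum;        /* static major */
            return overflow_maj + (devnum - IB_UCM_MAX_DEVICES);
    }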
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 8ec7876..650b501 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -181,6 +181,7 @@ static const struct ib_field deth_table[] = {
* ib_ud_header_init - Initialize UD header structure
* @payload_bytes:Length of packet payload
* @grh_present:GRH flag (if non-zero, GRH will be included)
+ * @immediate_present: specify if immediate data should be used
* @header:Structure to initialize
*
* ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header,
@@ -191,21 +192,13 @@ static const struct ib_field deth_table[] = {
*/
void ib_ud_header_init(int payload_bytes,
int grh_present,
+ int immediate_present,
struct ib_ud_header *header)
{
- int header_len;
u16 packet_length;
memset(header, 0, sizeof *header);
- header_len =
- IB_LRH_BYTES +
- IB_BTH_BYTES +
- IB_DETH_BYTES;
- if (grh_present) {
- header_len += IB_GRH_BYTES;
- }
-
header->lrh.link_version = 0;
header->lrh.link_next_header =
grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
@@ -231,7 +224,8 @@ void ib_ud_header_init(int payload_bytes,
header->lrh.packet_length = cpu_to_be16(packet_length);
- if (header->immediate_present)
+ header->immediate_present = immediate_present;
+ if (immediate_present)
header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
else
header->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
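
With immediate_present decided at init time, callers no longer poke header->immediate_present after the fact. A hedged sketch of an updated send path (function and variable names hypothetical):

    static void foo_build_header(int send_size, int grh_present,
                                 struct ib_send_wr *wr,
                                 struct ib_ud_header *hdr)
    {
            int has_imm = (wr->opcode == IB_WR_SEND_WITH_IMM);

            ib_ud_header_init(send_size, grh_present, has_imm, hdr);
            /* hdr->bth.opcode is already SEND_ONLY or
             * SEND_ONLY_WITH_IMMEDIATE at this point */
    }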
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 6f7c096..4f906f0 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -136,7 +136,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
down_write(&current->mm->mmap_sem);
locked = npages + current->mm->locked_vm;
- lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
ret = -ENOMEM;
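
rlimit() is the accessor that wraps the current->signal->rlim[...] dereference; the same substitution appears in ipath_user_pages.c below. Equivalent spelling, as a sketch:

    #include <linux/sched.h>        /* rlimit() */

    static unsigned long memlock_limit_pages(void)
    {
            /* old: current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT */
            return rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
    }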
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 7de0296..02d360c 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -65,12 +65,9 @@ enum {
};
/*
- * Our lifetime rules for these structs are the following: each time a
- * device special file is opened, we look up the corresponding struct
- * ib_umad_port by minor in the umad_port[] table while holding the
- * port_lock. If this lookup succeeds, we take a reference on the
- * ib_umad_port's struct ib_umad_device while still holding the
- * port_lock; if the lookup fails, we fail the open(). We drop these
+ * Our lifetime rules for these structs are the following: each time a
+ * device special file is opened, we take a reference on the
+ * ib_umad_port's struct ib_umad_device. We drop these
* references in the corresponding close().
*
* In addition to references coming from open character devices, there
@@ -78,19 +75,14 @@ enum {
* module's reference taken when allocating the ib_umad_device in
* ib_umad_add_one().
*
- * When destroying an ib_umad_device, we clear all of its
- * ib_umad_ports from umad_port[] while holding port_lock before
- * dropping the module's reference to the ib_umad_device. This is
- * always safe because any open() calls will either succeed and obtain
- * a reference before we clear the umad_port[] entries, or fail after
- * we clear the umad_port[] entries.
+ * When destroying an ib_umad_device, we drop the module's reference.
*/
struct ib_umad_port {
- struct cdev *cdev;
+ struct cdev cdev;
struct device *dev;
- struct cdev *sm_cdev;
+ struct cdev sm_cdev;
struct device *sm_dev;
struct semaphore sm_sem;
@@ -136,7 +128,6 @@ static struct class *umad_class;
static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
static DEFINE_SPINLOCK(port_lock);
-static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS];
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
static void ib_umad_add_one(struct ib_device *device);
@@ -496,8 +487,8 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
ah_attr.ah_flags = IB_AH_GRH;
memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
ah_attr.grh.sgid_index = packet->mad.hdr.gid_index;
- ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
- ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
+ ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
+ ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
}
@@ -528,9 +519,9 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
goto err_ah;
}
- packet->msg->ah = ah;
+ packet->msg->ah = ah;
packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
- packet->msg->retries = packet->mad.hdr.retries;
+ packet->msg->retries = packet->mad.hdr.retries;
packet->msg->context[0] = packet;
/* Copy MAD header. Any RMPP header is already in place. */
@@ -779,15 +770,11 @@ static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
/*
* ib_umad_open() does not need the BKL:
*
- * - umad_port[] accesses are protected by port_lock, the
- * ib_umad_port structures are properly reference counted, and
+ * - the ib_umad_port structures are properly reference counted, and
* everything else is purely local to the file being created, so
* races against other open calls are not a problem;
* - the ioctl method does not affect any global state outside of the
* file structure being operated on;
- * - the port is added to umad_port[] as the last part of module
- * initialization so the open method will either immediately run
- * -ENXIO, or all required initialization will be done.
*/
static int ib_umad_open(struct inode *inode, struct file *filp)
{
@@ -795,13 +782,10 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
struct ib_umad_file *file;
int ret = 0;
- spin_lock(&port_lock);
- port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE];
+ port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
if (port)
kref_get(&port->umad_dev->ref);
- spin_unlock(&port_lock);
-
- if (!port)
+ else
return -ENXIO;
mutex_lock(&port->file_mutex);
@@ -872,16 +856,16 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
}
static const struct file_operations umad_fops = {
- .owner = THIS_MODULE,
- .read = ib_umad_read,
- .write = ib_umad_write,
- .poll = ib_umad_poll,
+ .owner = THIS_MODULE,
+ .read = ib_umad_read,
+ .write = ib_umad_write,
+ .poll = ib_umad_poll,
.unlocked_ioctl = ib_umad_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = ib_umad_compat_ioctl,
+ .compat_ioctl = ib_umad_compat_ioctl,
#endif
- .open = ib_umad_open,
- .release = ib_umad_close
+ .open = ib_umad_open,
+ .release = ib_umad_close
};
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -892,13 +876,10 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
};
int ret;
- spin_lock(&port_lock);
- port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE - IB_UMAD_MAX_PORTS];
+ port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);
if (port)
kref_get(&port->umad_dev->ref);
- spin_unlock(&port_lock);
-
- if (!port)
+ else
return -ENXIO;
if (filp->f_flags & O_NONBLOCK) {
@@ -949,8 +930,8 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
}
static const struct file_operations umad_sm_fops = {
- .owner = THIS_MODULE,
- .open = ib_umad_sm_open,
+ .owner = THIS_MODULE,
+ .open = ib_umad_sm_open,
.release = ib_umad_sm_close
};
@@ -990,16 +971,51 @@ static ssize_t show_abi_version(struct class *class, char *buf)
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
+static dev_t overflow_maj;
+static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
+static int find_overflow_devnum(void)
+{
+ int ret;
+
+ if (!overflow_maj) {
+ ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
+ "infiniband_mad");
+ if (ret) {
+ printk(KERN_ERR "user_mad: couldn't register dynamic device number\n");
+ return ret;
+ }
+ }
+
+ ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
+ if (ret >= IB_UMAD_MAX_PORTS)
+ return -1;
+
+ return ret;
+}
+
static int ib_umad_init_port(struct ib_device *device, int port_num,
struct ib_umad_port *port)
{
+ int devnum;
+ dev_t base;
+
spin_lock(&port_lock);
- port->dev_num = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
- if (port->dev_num >= IB_UMAD_MAX_PORTS) {
+ devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
+ if (devnum >= IB_UMAD_MAX_PORTS) {
spin_unlock(&port_lock);
- return -1;
+ devnum = find_overflow_devnum();
+ if (devnum < 0)
+ return -1;
+
+ spin_lock(&port_lock);
+ port->dev_num = devnum + IB_UMAD_MAX_PORTS;
+ base = devnum + overflow_maj;
+ set_bit(devnum, overflow_map);
+ } else {
+ port->dev_num = devnum;
+ base = devnum + base_dev;
+ set_bit(devnum, dev_map);
}
- set_bit(port->dev_num, dev_map);
spin_unlock(&port_lock);
port->ib_dev = device;
@@ -1008,17 +1024,14 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
mutex_init(&port->file_mutex);
INIT_LIST_HEAD(&port->file_list);
- port->cdev = cdev_alloc();
- if (!port->cdev)
- return -1;
- port->cdev->owner = THIS_MODULE;
- port->cdev->ops = &umad_fops;
- kobject_set_name(&port->cdev->kobj, "umad%d", port->dev_num);
- if (cdev_add(port->cdev, base_dev + port->dev_num, 1))
+ cdev_init(&port->cdev, &umad_fops);
+ port->cdev.owner = THIS_MODULE;
+ kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
+ if (cdev_add(&port->cdev, base, 1))
goto err_cdev;
port->dev = device_create(umad_class, device->dma_device,
- port->cdev->dev, port,
+ port->cdev.dev, port,
"umad%d", port->dev_num);
if (IS_ERR(port->dev))
goto err_cdev;
@@ -1028,17 +1041,15 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
if (device_create_file(port->dev, &dev_attr_port))
goto err_dev;
- port->sm_cdev = cdev_alloc();
- if (!port->sm_cdev)
- goto err_dev;
- port->sm_cdev->owner = THIS_MODULE;
- port->sm_cdev->ops = &umad_sm_fops;
- kobject_set_name(&port->sm_cdev->kobj, "issm%d", port->dev_num);
- if (cdev_add(port->sm_cdev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1))
+ base += IB_UMAD_MAX_PORTS;
+ cdev_init(&port->sm_cdev, &umad_sm_fops);
+ port->sm_cdev.owner = THIS_MODULE;
+ kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
+ if (cdev_add(&port->sm_cdev, base, 1))
goto err_sm_cdev;
port->sm_dev = device_create(umad_class, device->dma_device,
- port->sm_cdev->dev, port,
+ port->sm_cdev.dev, port,
"issm%d", port->dev_num);
if (IS_ERR(port->sm_dev))
goto err_sm_cdev;
@@ -1048,24 +1059,23 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
if (device_create_file(port->sm_dev, &dev_attr_port))
goto err_sm_dev;
- spin_lock(&port_lock);
- umad_port[port->dev_num] = port;
- spin_unlock(&port_lock);
-
return 0;
err_sm_dev:
- device_destroy(umad_class, port->sm_cdev->dev);
+ device_destroy(umad_class, port->sm_cdev.dev);
err_sm_cdev:
- cdev_del(port->sm_cdev);
+ cdev_del(&port->sm_cdev);
err_dev:
- device_destroy(umad_class, port->cdev->dev);
+ device_destroy(umad_class, port->cdev.dev);
err_cdev:
- cdev_del(port->cdev);
- clear_bit(port->dev_num, dev_map);
+ cdev_del(&port->cdev);
+ if (port->dev_num < IB_UMAD_MAX_PORTS)
+ clear_bit(devnum, dev_map);
+ else
+ clear_bit(devnum, overflow_map);
return -1;
}
@@ -1079,15 +1089,11 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
dev_set_drvdata(port->dev, NULL);
dev_set_drvdata(port->sm_dev, NULL);
- device_destroy(umad_class, port->cdev->dev);
- device_destroy(umad_class, port->sm_cdev->dev);
+ device_destroy(umad_class, port->cdev.dev);
+ device_destroy(umad_class, port->sm_cdev.dev);
- cdev_del(port->cdev);
- cdev_del(port->sm_cdev);
-
- spin_lock(&port_lock);
- umad_port[port->dev_num] = NULL;
- spin_unlock(&port_lock);
+ cdev_del(&port->cdev);
+ cdev_del(&port->sm_cdev);
mutex_lock(&port->file_mutex);
@@ -1106,7 +1112,10 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
mutex_unlock(&port->file_mutex);
- clear_bit(port->dev_num, dev_map);
+ if (port->dev_num < IB_UMAD_MAX_PORTS)
+ clear_bit(port->dev_num, dev_map);
+ else
+ clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
}
static void ib_umad_add_one(struct ib_device *device)
@@ -1214,6 +1223,8 @@ static void __exit ib_umad_cleanup(void)
ib_unregister_client(&umad_client);
class_destroy(umad_class);
unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
+ if (overflow_maj)
+ unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
}
module_init(ib_umad_init);
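
Embedding the cdev in struct ib_umad_port is what lets open() drop both port_lock and the umad_port[] minor table: the VFS hands us inode->i_cdev, and container_of() recovers the enclosing port. The core idiom, as a hedged sketch:

    static int foo_open(struct inode *inode, struct file *filp)
    {
            struct ib_umad_port *port;

            /* inode->i_cdev is the cdev embedded in the port, so no
             * lock and no global lookup table are needed */
            port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
            kref_get(&port->umad_dev->ref);
            filp->private_data = port;
            return 0;
    }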
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b3ea958..a078e56 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -41,6 +41,7 @@
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/completion.h>
+#include <linux/cdev.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
@@ -69,23 +70,23 @@
struct ib_uverbs_device {
struct kref ref;
+ int num_comp_vectors;
struct completion comp;
- int devnum;
- struct cdev *cdev;
struct device *dev;
struct ib_device *ib_dev;
- int num_comp_vectors;
+ int devnum;
+ struct cdev cdev;
};
struct ib_uverbs_event_file {
struct kref ref;
+ int is_async;
struct ib_uverbs_file *uverbs_file;
spinlock_t lock;
+ int is_closed;
wait_queue_head_t poll_wait;
struct fasync_struct *async_queue;
struct list_head event_list;
- int is_async;
- int is_closed;
};
struct ib_uverbs_file {
@@ -145,7 +146,7 @@ extern struct idr ib_uverbs_srq_idr;
void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
- int is_async, int *fd);
+ int is_async);
struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);
void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 112d397..f71cf13 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -301,10 +301,15 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
resp.num_comp_vectors = file->device->num_comp_vectors;
- filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd);
+ ret = get_unused_fd();
+ if (ret < 0)
+ goto err_free;
+ resp.async_fd = ret;
+
+ filp = ib_uverbs_alloc_event_file(file, 1);
if (IS_ERR(filp)) {
ret = PTR_ERR(filp);
- goto err_free;
+ goto err_fd;
}
if (copy_to_user((void __user *) (unsigned long) cmd.response,
@@ -332,9 +337,11 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
return in_len;
err_file:
- put_unused_fd(resp.async_fd);
fput(filp);
+err_fd:
+ put_unused_fd(resp.async_fd);
+
err_free:
ibdev->dealloc_ucontext(ucontext);
@@ -715,6 +722,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
struct ib_uverbs_create_comp_channel cmd;
struct ib_uverbs_create_comp_channel_resp resp;
struct file *filp;
+ int ret;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -722,9 +730,16 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd);
- if (IS_ERR(filp))
+ ret = get_unused_fd();
+ if (ret < 0)
+ return ret;
+ resp.fd = ret;
+
+ filp = ib_uverbs_alloc_event_file(file, 0);
+ if (IS_ERR(filp)) {
+ put_unused_fd(resp.fd);
return PTR_ERR(filp);
+ }
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp)) {
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5f284ff..4fa2e65 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -42,8 +42,8 @@
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
-#include <linux/mount.h>
#include <linux/cdev.h>
+#include <linux/anon_inodes.h>
#include <asm/uaccess.h>
@@ -53,8 +53,6 @@ MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");
-#define INFINIBANDEVENTFS_MAGIC 0x49426576 /* "IBev" */
-
enum {
IB_UVERBS_MAJOR = 231,
IB_UVERBS_BASE_MINOR = 192,
@@ -75,44 +73,41 @@ DEFINE_IDR(ib_uverbs_qp_idr);
DEFINE_IDR(ib_uverbs_srq_idr);
static DEFINE_SPINLOCK(map_lock);
-static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
int out_len) = {
- [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context,
- [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device,
- [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port,
- [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd,
- [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd,
- [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr,
- [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr,
+ [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context,
+ [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device,
+ [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port,
+ [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd,
+ [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd,
+ [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr,
+ [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr,
[IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
- [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq,
- [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq,
- [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq,
- [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq,
- [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq,
- [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp,
- [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp,
- [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp,
- [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp,
- [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send,
- [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv,
- [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv,
- [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah,
- [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah,
- [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast,
- [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast,
- [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq,
- [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
- [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq,
- [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
+ [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq,
+ [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq,
+ [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq,
+ [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq,
+ [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq,
+ [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp,
+ [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp,
+ [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp,
+ [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp,
+ [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send,
+ [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv,
+ [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv,
+ [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah,
+ [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah,
+ [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast,
+ [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast,
+ [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq,
+ [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
+ [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq,
+ [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
};
-static struct vfsmount *uverbs_event_mnt;
-
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device);
@@ -370,7 +365,7 @@ static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
static const struct file_operations uverbs_event_fops = {
.owner = THIS_MODULE,
- .read = ib_uverbs_event_read,
+ .read = ib_uverbs_event_read,
.poll = ib_uverbs_event_poll,
.release = ib_uverbs_event_close,
.fasync = ib_uverbs_event_fasync
@@ -489,12 +484,10 @@ void ib_uverbs_event_handler(struct ib_event_handler *handler,
}
struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
- int is_async, int *fd)
+ int is_async)
{
struct ib_uverbs_event_file *ev_file;
- struct path path;
struct file *filp;
- int ret;
ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL);
if (!ev_file)
@@ -509,38 +502,12 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
ev_file->is_async = is_async;
ev_file->is_closed = 0;
- *fd = get_unused_fd();
- if (*fd < 0) {
- ret = *fd;
- goto err;
- }
-
- /*
- * fops_get() can't fail here, because we're coming from a
- * system call on a uverbs file, which will already have a
- * module reference.
- */
- path.mnt = uverbs_event_mnt;
- path.dentry = uverbs_event_mnt->mnt_root;
- path_get(&path);
- filp = alloc_file(&path, FMODE_READ, fops_get(&uverbs_event_fops));
- if (!filp) {
- ret = -ENFILE;
- goto err_fd;
- }
-
- filp->private_data = ev_file;
+ filp = anon_inode_getfile("[infinibandevent]", &uverbs_event_fops,
+ ev_file, O_RDONLY);
+ if (IS_ERR(filp))
+ kfree(ev_file);
return filp;
-
-err_fd:
- fops_put(&uverbs_event_fops);
- path_put(&path);
- put_unused_fd(*fd);
-
-err:
- kfree(ev_file);
- return ERR_PTR(ret);
}
/*
@@ -617,14 +584,12 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
/*
* ib_uverbs_open() does not need the BKL:
*
- * - dev_table[] accesses are protected by map_lock, the
- * ib_uverbs_device structures are properly reference counted, and
+ * - the ib_uverbs_device structures are properly reference counted and
* everything else is purely local to the file being created, so
* races against other open calls are not a problem;
* - there is no ioctl method to race against;
- * - the device is added to dev_table[] as the last part of module
- * initialization, the open method will either immediately run
- * -ENXIO, or all required initialization will be done.
+ * - the open method will either immediately return -ENXIO, or all
+ * required initialization will be done.
*/
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
@@ -632,13 +597,10 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
struct ib_uverbs_file *file;
int ret;
- spin_lock(&map_lock);
- dev = dev_table[iminor(inode) - IB_UVERBS_BASE_MINOR];
+ dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
if (dev)
kref_get(&dev->ref);
- spin_unlock(&map_lock);
-
- if (!dev)
+ else
return -ENXIO;
if (!try_module_get(dev->ib_dev->owner)) {
@@ -685,17 +647,17 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
}
static const struct file_operations uverbs_fops = {
- .owner = THIS_MODULE,
- .write = ib_uverbs_write,
- .open = ib_uverbs_open,
+ .owner = THIS_MODULE,
+ .write = ib_uverbs_write,
+ .open = ib_uverbs_open,
.release = ib_uverbs_close
};
static const struct file_operations uverbs_mmap_fops = {
- .owner = THIS_MODULE,
- .write = ib_uverbs_write,
+ .owner = THIS_MODULE,
+ .write = ib_uverbs_write,
.mmap = ib_uverbs_mmap,
- .open = ib_uverbs_open,
+ .open = ib_uverbs_open,
.release = ib_uverbs_close
};
@@ -735,8 +697,38 @@ static ssize_t show_abi_version(struct class *class, char *buf)
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
+static dev_t overflow_maj;
+static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);
+
+/*
+ * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
+ * requesting a new major number and doubling the number of max devices we
+ * support. It's stupid, but simple.
+ */
+static int find_overflow_devnum(void)
+{
+ int ret;
+
+ if (!overflow_maj) {
+ ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
+ "infiniband_verbs");
+ if (ret) {
+ printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n");
+ return ret;
+ }
+ }
+
+ ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
+ if (ret >= IB_UVERBS_MAX_DEVICES)
+ return -1;
+
+ return ret;
+}
+
static void ib_uverbs_add_one(struct ib_device *device)
{
+ int devnum;
+ dev_t base;
struct ib_uverbs_device *uverbs_dev;
if (!device->alloc_ucontext)
@@ -750,28 +742,36 @@ static void ib_uverbs_add_one(struct ib_device *device)
init_completion(&uverbs_dev->comp);
spin_lock(&map_lock);
- uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
- if (uverbs_dev->devnum >= IB_UVERBS_MAX_DEVICES) {
+ devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
+ if (devnum >= IB_UVERBS_MAX_DEVICES) {
spin_unlock(&map_lock);
- goto err;
+ devnum = find_overflow_devnum();
+ if (devnum < 0)
+ goto err;
+
+ spin_lock(&map_lock);
+ uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
+ base = devnum + overflow_maj;
+ set_bit(devnum, overflow_map);
+ } else {
+ uverbs_dev->devnum = devnum;
+ base = devnum + IB_UVERBS_BASE_DEV;
+ set_bit(devnum, dev_map);
}
- set_bit(uverbs_dev->devnum, dev_map);
spin_unlock(&map_lock);
uverbs_dev->ib_dev = device;
uverbs_dev->num_comp_vectors = device->num_comp_vectors;
- uverbs_dev->cdev = cdev_alloc();
- if (!uverbs_dev->cdev)
- goto err;
- uverbs_dev->cdev->owner = THIS_MODULE;
- uverbs_dev->cdev->ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
- kobject_set_name(&uverbs_dev->cdev->kobj, "uverbs%d", uverbs_dev->devnum);
- if (cdev_add(uverbs_dev->cdev, IB_UVERBS_BASE_DEV + uverbs_dev->devnum, 1))
+ cdev_init(&uverbs_dev->cdev, NULL);
+ uverbs_dev->cdev.owner = THIS_MODULE;
+ uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
+ kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
+ if (cdev_add(&uverbs_dev->cdev, base, 1))
goto err_cdev;
uverbs_dev->dev = device_create(uverbs_class, device->dma_device,
- uverbs_dev->cdev->dev, uverbs_dev,
+ uverbs_dev->cdev.dev, uverbs_dev,
"uverbs%d", uverbs_dev->devnum);
if (IS_ERR(uverbs_dev->dev))
goto err_cdev;
@@ -781,20 +781,19 @@ static void ib_uverbs_add_one(struct ib_device *device)
if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
goto err_class;
- spin_lock(&map_lock);
- dev_table[uverbs_dev->devnum] = uverbs_dev;
- spin_unlock(&map_lock);
-
ib_set_client_data(device, &uverbs_client, uverbs_dev);
return;
err_class:
- device_destroy(uverbs_class, uverbs_dev->cdev->dev);
+ device_destroy(uverbs_class, uverbs_dev->cdev.dev);
err_cdev:
- cdev_del(uverbs_dev->cdev);
- clear_bit(uverbs_dev->devnum, dev_map);
+ cdev_del(&uverbs_dev->cdev);
+ if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
+ clear_bit(devnum, dev_map);
+ else
+ clear_bit(devnum, overflow_map);
err:
kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
@@ -811,35 +810,19 @@ static void ib_uverbs_remove_one(struct ib_device *device)
return;
dev_set_drvdata(uverbs_dev->dev, NULL);
- device_destroy(uverbs_class, uverbs_dev->cdev->dev);
- cdev_del(uverbs_dev->cdev);
+ device_destroy(uverbs_class, uverbs_dev->cdev.dev);
+ cdev_del(&uverbs_dev->cdev);
- spin_lock(&map_lock);
- dev_table[uverbs_dev->devnum] = NULL;
- spin_unlock(&map_lock);
-
- clear_bit(uverbs_dev->devnum, dev_map);
+ if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
+ clear_bit(uverbs_dev->devnum, dev_map);
+ else
+ clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
wait_for_completion(&uverbs_dev->comp);
kfree(uverbs_dev);
}
-static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data,
- struct vfsmount *mnt)
-{
- return get_sb_pseudo(fs_type, "infinibandevent:", NULL,
- INFINIBANDEVENTFS_MAGIC, mnt);
-}
-
-static struct file_system_type uverbs_event_fs = {
- /* No owner field so module can be unloaded */
- .name = "infinibandeventfs",
- .get_sb = uverbs_event_get_sb,
- .kill_sb = kill_litter_super
-};
-
static int __init ib_uverbs_init(void)
{
int ret;
@@ -864,33 +847,14 @@ static int __init ib_uverbs_init(void)
goto out_class;
}
- ret = register_filesystem(&uverbs_event_fs);
- if (ret) {
- printk(KERN_ERR "user_verbs: couldn't register infinibandeventfs\n");
- goto out_class;
- }
-
- uverbs_event_mnt = kern_mount(&uverbs_event_fs);
- if (IS_ERR(uverbs_event_mnt)) {
- ret = PTR_ERR(uverbs_event_mnt);
- printk(KERN_ERR "user_verbs: couldn't mount infinibandeventfs\n");
- goto out_fs;
- }
-
ret = ib_register_client(&uverbs_client);
if (ret) {
printk(KERN_ERR "user_verbs: couldn't register client\n");
- goto out_mnt;
+ goto out_class;
}
return 0;
-out_mnt:
- mntput(uverbs_event_mnt);
-
-out_fs:
- unregister_filesystem(&uverbs_event_fs);
-
out_class:
class_destroy(uverbs_class);
@@ -904,10 +868,10 @@ out:
static void __exit ib_uverbs_cleanup(void)
{
ib_unregister_client(&uverbs_client);
- mntput(uverbs_event_mnt);
- unregister_filesystem(&uverbs_event_fs);
class_destroy(uverbs_class);
unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
+ if (overflow_maj)
+ unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
idr_destroy(&ib_uverbs_pd_idr);
idr_destroy(&ib_uverbs_mr_idr);
idr_destroy(&ib_uverbs_mw_idr);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 0677fc7..a28e862 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -109,7 +109,6 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
udelay(1);
if (i++ > 1000000) {
- BUG_ON(1);
printk(KERN_ERR "%s: stalled rnic\n",
rdev_p->dev_name);
return -EIO;
@@ -155,7 +154,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
}
-int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
+int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
{
struct rdma_cq_setup setup;
int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
@@ -163,12 +162,12 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
if (!cq->cqid)
return -ENOMEM;
- cq->sw_queue = kzalloc(size, GFP_KERNEL);
- if (!cq->sw_queue)
- return -ENOMEM;
- cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << (cq->size_log2)) *
- sizeof(struct t3_cqe),
+ if (kernel) {
+ cq->sw_queue = kzalloc(size, GFP_KERNEL);
+ if (!cq->sw_queue)
+ return -ENOMEM;
+ }
+ cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
&(cq->dma_addr), GFP_KERNEL);
if (!cq->queue) {
kfree(cq->sw_queue);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index f3d440c..073373c 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -53,7 +53,7 @@
#define T3_MAX_PBL_SIZE 256
#define T3_MAX_RQ_SIZE 1024
#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 8192
+#define T3_MAX_CQ_DEPTH 262144
#define T3_MAX_NUM_STAG (1<<15)
#define T3_MAX_MR_SIZE 0x100000000ULL
#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
@@ -157,7 +157,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev);
void cxio_rdev_close(struct cxio_rdev *rdev);
int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
enum t3_cq_opcode op, u32 credit);
-int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
+int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index a197a5b..15073b2 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -730,7 +730,22 @@ struct t3_cq {
static inline void cxio_set_wq_in_error(struct t3_wq *wq)
{
- wq->queue->wq_in_err.err = 1;
+ wq->queue->wq_in_err.err |= 1;
+}
+
+static inline void cxio_disable_wq_db(struct t3_wq *wq)
+{
+ wq->queue->wq_in_err.err |= 2;
+}
+
+static inline void cxio_enable_wq_db(struct t3_wq *wq)
+{
+ wq->queue->wq_in_err.err &= ~2;
+}
+
+static inline int cxio_wq_db_enabled(struct t3_wq *wq)
+{
+ return !(wq->queue->wq_in_err.err & 2);
}
static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
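
wq_in_err.err changes from a boolean into a small bit mask, which is why plain assignment became read-modify-write. An illustration of the encoding, with bit meanings inferred from the helpers above:

    /* bit 0: queue is in error (set once, never cleared)
     * bit 1: doorbell writes suppressed (toggled for flow control) */
    wq->queue->wq_in_err.err |= 1;  /* error, but keep the DB bit */
    wq->queue->wq_in_err.err |= 2;  /* disable doorbells           */
    wq->queue->wq_in_err.err &= ~2; /* re-enable, keep error bit   */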
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index b0ea010..ee1d8b4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -65,6 +65,46 @@ struct cxgb3_client t3c_client = {
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(dev_mutex);
+static int disable_qp_db(int id, void *p, void *data)
+{
+ struct iwch_qp *qhp = p;
+
+ cxio_disable_wq_db(&qhp->wq);
+ return 0;
+}
+
+static int enable_qp_db(int id, void *p, void *data)
+{
+ struct iwch_qp *qhp = p;
+
+ if (data)
+ ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid);
+ cxio_enable_wq_db(&qhp->wq);
+ return 0;
+}
+
+static void disable_dbs(struct iwch_dev *rnicp)
+{
+ spin_lock_irq(&rnicp->lock);
+ idr_for_each(&rnicp->qpidr, disable_qp_db, NULL);
+ spin_unlock_irq(&rnicp->lock);
+}
+
+static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
+{
+ spin_lock_irq(&rnicp->lock);
+ idr_for_each(&rnicp->qpidr, enable_qp_db,
+ (void *)(unsigned long)ring_db);
+ spin_unlock_irq(&rnicp->lock);
+}
+
+static void iwch_db_drop_task(struct work_struct *work)
+{
+ struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
+ db_drop_task.work);
+ enable_dbs(rnicp, 1);
+}
+
static void rnic_init(struct iwch_dev *rnicp)
{
PDBG("%s iwch_dev %p\n", __func__, rnicp);
@@ -72,6 +112,7 @@ static void rnic_init(struct iwch_dev *rnicp)
idr_init(&rnicp->qpidr);
idr_init(&rnicp->mmidr);
spin_lock_init(&rnicp->lock);
+ INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
@@ -147,6 +188,8 @@ static void close_rnic_dev(struct t3cdev *tdev)
mutex_lock(&dev_mutex);
list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
if (dev->rdev.t3cdev_p == tdev) {
+ dev->rdev.flags = CXIO_ERROR_FATAL;
+ cancel_delayed_work_sync(&dev->db_drop_task);
list_del(&dev->entry);
iwch_unregister_device(dev);
cxio_rdev_close(&dev->rdev);
@@ -165,7 +208,8 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
struct cxio_rdev *rdev = tdev->ulp;
struct iwch_dev *rnicp;
struct ib_event event;
- u32 portnum = port_id + 1;
+ u32 portnum = port_id + 1;
+ int dispatch = 0;
if (!rdev)
return;
@@ -174,21 +218,49 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
case OFFLOAD_STATUS_DOWN: {
rdev->flags = CXIO_ERROR_FATAL;
event.event = IB_EVENT_DEVICE_FATAL;
+ dispatch = 1;
break;
}
case OFFLOAD_PORT_DOWN: {
event.event = IB_EVENT_PORT_ERR;
+ dispatch = 1;
break;
}
case OFFLOAD_PORT_UP: {
event.event = IB_EVENT_PORT_ACTIVE;
+ dispatch = 1;
+ break;
+ }
+ case OFFLOAD_DB_FULL: {
+ disable_dbs(rnicp);
+ break;
+ }
+ case OFFLOAD_DB_EMPTY: {
+ enable_dbs(rnicp, 1);
+ break;
+ }
+ case OFFLOAD_DB_DROP: {
+ unsigned long delay = 1000;
+ unsigned short r;
+
+ disable_dbs(rnicp);
+ get_random_bytes(&r, 2);
+ delay += r & 1023;
+
+ /*
+ * delay is between 1000-2023 usecs.
+ */
+ schedule_delayed_work(&rnicp->db_drop_task,
+ usecs_to_jiffies(delay));
break;
}
}
- event.device = &rnicp->ibdev;
- event.element.port_num = portnum;
- ib_dispatch_event(&event);
+ if (dispatch) {
+ event.device = &rnicp->ibdev;
+ event.element.port_num = portnum;
+ ib_dispatch_event(&event);
+ }
return;
}
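
The OFFLOAD_DB_DROP backoff is 1000 us plus uniform jitter: r & 1023 lies in 0..1023, giving the 1000-2023 us window the comment mentions. Presumably the jitter keeps multiple RNICs from re-ringing their doorbells in lockstep after a shared congestion event. Restating the arithmetic from the hunk:

    unsigned long delay = 1000;     /* base, in usecs */
    unsigned short r;

    get_random_bytes(&r, 2);
    delay += r & 1023;              /* uniform over 0..1023 */
    schedule_delayed_work(&rnicp->db_drop_task, usecs_to_jiffies(delay));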
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index 8473550..a1c4457 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -36,6 +36,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
+#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
@@ -110,6 +111,7 @@ struct iwch_dev {
struct idr mmidr;
spinlock_t lock;
struct list_head entry;
+ struct delayed_work db_drop_task;
};
static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index ed71755..47b35c6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -187,7 +187,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
entries = roundup_pow_of_two(entries);
chp->cq.size_log2 = ilog2(entries);
- if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
+ if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
kfree(chp);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 3eb8cec..b4d893d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -452,7 +452,8 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
++(qhp->wq.sq_wptr);
}
spin_unlock_irqrestore(&qhp->lock, flag);
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+ if (cxio_wq_db_enabled(&qhp->wq))
+ ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
out:
if (err)
@@ -514,7 +515,8 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
num_wrs--;
}
spin_unlock_irqrestore(&qhp->lock, flag);
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+ if (cxio_wq_db_enabled(&qhp->wq))
+ ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
out:
if (err)
@@ -597,7 +599,8 @@ int iwch_bind_mw(struct ib_qp *qp,
++(qhp->wq.sq_wptr);
spin_unlock_irqrestore(&qhp->lock, flag);
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+ if (cxio_wq_db_enabled(&qhp->wq))
+ ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
return err;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 42be0b1..b2b6fea 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -548,11 +548,10 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
struct ehca_eq *eq = &shca->eq;
struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
u64 eqe_value, ret;
- unsigned long flags;
int eqe_cnt, i;
int eq_empty = 0;
- spin_lock_irqsave(&eq->irq_spinlock, flags);
+ spin_lock(&eq->irq_spinlock);
if (is_irq) {
const int max_query_cnt = 100;
int query_cnt = 0;
@@ -643,7 +642,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
} while (1);
unlock_irq_spinlock:
- spin_unlock_irqrestore(&eq->irq_spinlock, flags);
+ spin_unlock(&eq->irq_spinlock);
}
void ehca_tasklet_eq(unsigned long data)
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 0338f1f..b105f66 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -55,9 +55,7 @@ static struct kmem_cache *qp_cache;
/*
* attributes not supported by query qp
*/
-#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_MAX_DEST_RD_ATOMIC | \
- IB_QP_MAX_QP_RD_ATOMIC | \
- IB_QP_ACCESS_FLAGS | \
+#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS | \
IB_QP_EN_SQD_ASYNC_NOTIFY)
/*
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 8c1213f..dba8f9f 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -222,7 +222,7 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
{
int ret;
- if (!port_num || port_num > ibdev->phys_port_cnt)
+ if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
return IB_MAD_RESULT_FAILURE;
/* accept only pma request */
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 82878e3..eb7d59a 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -59,8 +59,7 @@ static int __get_user_pages(unsigned long start_page, size_t num_pages,
size_t got;
int ret;
- lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >>
- PAGE_SHIFT;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (num_pages > lock_limit) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 2a97c96..ae75389 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1214,7 +1214,7 @@ out:
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
void *wqe, unsigned *mlx_seg_len)
{
- struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev;
+ struct ib_device *ib_dev = sqp->qp.ibqp.device;
struct mlx4_wqe_mlx_seg *mlx = wqe;
struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
@@ -1228,7 +1228,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
for (i = 0; i < wr->num_sge; ++i)
send_size += wr->sg_list[i].length;
- ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), &sqp->ud_header);
+ ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), 0, &sqp->ud_header);
sqp->ud_header.lrh.service_level =
be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index c10576f..d2d172e 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1494,7 +1494,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
u16 pkey;
ib_ud_header_init(256, /* assume a MAD */
- mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
+ mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
&sqp->ud_header);
err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b9d09ba..4272c52 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -110,6 +110,7 @@ static unsigned int sysfs_idx_addr;
static struct pci_device_id nes_pci_table[] = {
{PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID},
+ {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR, PCI_ANY_ID, PCI_ANY_ID},
{0}
};
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 9884056..cc78fee 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -64,8 +64,9 @@

* NetEffect PCI vendor id and NE020 PCI device ids.
*/
#ifndef PCI_VENDOR_ID_NETEFFECT /* not in pci.ids yet */
-#define PCI_VENDOR_ID_NETEFFECT 0x1678
-#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100
+#define PCI_VENDOR_ID_NETEFFECT 0x1678
+#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100
+#define PCI_DEVICE_ID_NETEFFECT_NE020_KR 0x0110
#endif
#define NE020_REV 4
@@ -193,8 +194,8 @@ extern u32 cm_packets_created;
extern u32 cm_packets_received;
extern u32 cm_packets_dropped;
extern u32 cm_packets_retrans;
-extern u32 cm_listens_created;
-extern u32 cm_listens_destroyed;
+extern atomic_t cm_listens_created;
+extern atomic_t cm_listens_destroyed;
extern u32 cm_backlog_drops;
extern atomic_t cm_loopbacks;
extern atomic_t cm_nodes_created;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 39468c2..2a49ee4 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -67,8 +67,8 @@ u32 cm_packets_dropped;
u32 cm_packets_retrans;
u32 cm_packets_created;
u32 cm_packets_received;
-u32 cm_listens_created;
-u32 cm_listens_destroyed;
+atomic_t cm_listens_created;
+atomic_t cm_listens_destroyed;
u32 cm_backlog_drops;
atomic_t cm_loopbacks;
atomic_t cm_nodes_created;
@@ -1011,9 +1011,10 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
event.cm_info.loc_port =
loopback->loc_port;
event.cm_info.cm_id = loopback->cm_id;
+ add_ref_cm_node(loopback);
+ loopback->state = NES_CM_STATE_CLOSED;
cm_event_connect_error(&event);
cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
- loopback->state = NES_CM_STATE_CLOSED;
rem_ref_cm_node(cm_node->cm_core,
cm_node);
@@ -1042,7 +1043,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
kfree(listener);
listener = NULL;
ret = 0;
- cm_listens_destroyed++;
+ atomic_inc(&cm_listens_destroyed);
} else {
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
}
@@ -3172,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
return err;
}
- cm_listens_created++;
+ atomic_inc(&cm_listens_created);
}
cm_id->add_ref(cm_id);
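
The listener counters are bumped from connection-management paths that can run concurrently, so the plain u32 variables become atomic_t: writers use atomic_inc() and the ethtool reader in nes_nic.c (further down) switches to atomic_read(). A minimal sketch of why the conversion matters:

    /* Before: a u32 increment is a read-modify-write, so two CPUs
     * racing on it can lose an update. */
    u32 listens;
    listens++;                         /* unsafe without a lock */

    /* After: the increment is indivisible and readable lock-free. */
    atomic_t alisten = ATOMIC_INIT(0);
    int count;
    atomic_inc(&alisten);              /* writer, any context */
    count = atomic_read(&alisten);     /* reader, e.g. ethtool stats */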
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index b1c2cbb..ce7f538 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -748,16 +748,28 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
if (hw_rev != NE020_REV) {
/* init serdes 0 */
- if (wide_ppm_offset && (nesadapter->phy_type[0] == NES_PHY_TYPE_CX4))
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA);
- else
+ switch (nesadapter->phy_type[0]) {
+ case NES_PHY_TYPE_CX4:
+ if (wide_ppm_offset)
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA);
+ else
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
+ break;
+ case NES_PHY_TYPE_KR:
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000);
+ break;
+ case NES_PHY_TYPE_PUMA_1G:
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
-
- if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0);
sds |= 0x00000100;
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds);
+ break;
+ default:
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
+ break;
}
+
if (!OneG_Mode)
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
@@ -778,6 +790,9 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
if (wide_ppm_offset)
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000FFFAA);
break;
+ case NES_PHY_TYPE_KR:
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000);
+ break;
case NES_PHY_TYPE_PUMA_1G:
sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
sds |= 0x000000100;
@@ -1279,115 +1294,115 @@ int nes_destroy_cqp(struct nes_device *nesdev)
/**
- * nes_init_phy
+ * nes_init_1g_phy
*/
-int nes_init_phy(struct nes_device *nesdev)
+int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
{
- struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 counter = 0;
- u32 sds;
- u32 mac_index = nesdev->mac_index;
- u32 tx_config = 0;
u16 phy_data;
- u32 temp_phy_data = 0;
- u32 temp_phy_data2 = 0;
- u8 phy_type = nesadapter->phy_type[mac_index];
- u8 phy_index = nesadapter->phy_index[mac_index];
-
- if ((nesadapter->OneG_Mode) &&
- (phy_type != NES_PHY_TYPE_PUMA_1G)) {
- nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
- if (phy_type == NES_PHY_TYPE_1G) {
- tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
- tx_config &= 0xFFFFFFE3;
- tx_config |= 0x04;
- nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
- }
+ int ret = 0;
- nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000);
+ nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data);
+ nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000);
- /* Reset the PHY */
- nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000);
- udelay(100);
- counter = 0;
- do {
- nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- if (counter++ > 100)
- break;
- } while (phy_data & 0x8000);
-
- /* Setting no phy loopback */
- phy_data &= 0xbfff;
- phy_data |= 0x1140;
- nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
+ /* Reset the PHY */
+ nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000);
+ udelay(100);
+ counter = 0;
+ do {
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data);
- nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data);
-
- /* Setting the interrupt mask */
- nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee);
- nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
+ if (counter++ > 100) {
+ ret = -1;
+ break;
+ }
+ } while (phy_data & 0x8000);
+
+ /* Setting no phy loopback */
+ phy_data &= 0xbfff;
+ phy_data |= 0x1140;
+ nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
+ nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
+ nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data);
+ nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data);
+
+ /* Setting the interrupt mask */
+ nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
+ nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee);
+ nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
+
+ /* turning on flow control */
+ nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
+ nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00);
+ nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
+
+ /* Clear Half duplex */
+ nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
+ nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100));
+ nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
+
+ nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
+ nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300);
+
+ return ret;
+}
- /* turning on flow control */
- nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00);
- nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
- /* Clear Half duplex */
- nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100));
- nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
+/**
+ * nes_init_2025_phy
+ */
+int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
+{
+ u32 temp_phy_data = 0;
+ u32 temp_phy_data2 = 0;
+ u32 counter = 0;
+ u32 sds;
+ u32 mac_index = nesdev->mac_index;
+ int ret = 0;
+ unsigned int first_attempt = 1;
- nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300);
+ /* Check firmware heartbeat */
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
+ temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ udelay(1500);
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
+ temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- return 0;
+ if (temp_phy_data != temp_phy_data2) {
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
+ temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ if ((temp_phy_data & 0xff) > 0x20)
+ return 0;
+ printk(PFX "Reinitialize external PHY\n");
}
- if ((phy_type == NES_PHY_TYPE_IRIS) ||
- (phy_type == NES_PHY_TYPE_ARGUS) ||
- (phy_type == NES_PHY_TYPE_SFP_D)) {
- /* setup 10G MDIO operation */
- tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
- tx_config &= 0xFFFFFFE3;
- tx_config |= 0x15;
- nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
- }
- if ((phy_type == NES_PHY_TYPE_ARGUS) ||
- (phy_type == NES_PHY_TYPE_SFP_D)) {
- u32 first_time = 1;
+ /* no heartbeat, configure the PHY */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
- /* Check firmware heartbeat */
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- udelay(1500);
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
- temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ switch (phy_type) {
+ case NES_PHY_TYPE_ARGUS:
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
- if (temp_phy_data != temp_phy_data2) {
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- if ((temp_phy_data & 0xff) > 0x20)
- return 0;
- printk(PFX "Reinitializing PHY\n");
- }
+ /* setup LEDs */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009);
+ break;
- /* no heartbeat, configure the PHY */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000);
+ case NES_PHY_TYPE_SFP_D:
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
- if (phy_type == NES_PHY_TYPE_ARGUS) {
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
- } else {
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
- }
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
@@ -1395,71 +1410,136 @@ int nes_init_phy(struct nes_device *nesdev)
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007);
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A);
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009);
+ break;
+
+ case NES_PHY_TYPE_KR:
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0010);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0080);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
+
+ /* setup LEDs */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x000B);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x0003);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0004);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0022, 0x406D);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0023, 0x0020);
+ break;
+ }
+
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528);
- /* Bring PHY out of reset */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002);
+ /* Bring PHY out of reset */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002);
- /* Check for heartbeat */
- counter = 0;
- mdelay(690);
+ /* Check for heartbeat */
+ counter = 0;
+ mdelay(690);
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
+ temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ do {
+ if (counter++ > 150) {
+ printk(PFX "No PHY heartbeat\n");
+ break;
+ }
+ mdelay(1);
nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
+ temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ } while ((temp_phy_data2 == temp_phy_data));
+
+ /* wait for tracking */
+ counter = 0;
+ do {
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- do {
- if (counter++ > 150) {
- printk(PFX "No PHY heartbeat\n");
+ if (counter++ > 300) {
+ if (((temp_phy_data & 0xff) == 0x0) && first_attempt) {
+ first_attempt = 0;
+ counter = 0;
+ /* reset AMCC PHY and try again */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040);
+ continue;
+ } else {
+ ret = 1;
break;
}
- mdelay(1);
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
- temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- } while ((temp_phy_data2 == temp_phy_data));
-
- /* wait for tracking */
- counter = 0;
- do {
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- if (counter++ > 300) {
- if (((temp_phy_data & 0xff) == 0x0) && first_time) {
- first_time = 0;
- counter = 0;
- /* reset AMCC PHY and try again */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040);
- continue;
- } else {
- printk(PFX "PHY did not track\n");
- break;
- }
- }
- mdelay(10);
- } while ((temp_phy_data & 0xff) < 0x30);
-
- /* setup signal integrity */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032);
+ }
+ mdelay(10);
+ } while ((temp_phy_data & 0xff) < 0x30);
+
+ /* setup signal integrity */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032);
+ if (phy_type == NES_PHY_TYPE_KR) {
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x000C);
+ } else {
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x0002);
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc314, 0x0063);
+ }
+
+ /* reset serdes */
+ sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200);
+ sds |= 0x1;
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds);
+ sds &= 0xfffffffe;
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds);
+
+ counter = 0;
+ while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
+ && (counter++ < 5000))
+ ;
+
+ return ret;
+}
+
- /* reset serdes */
- sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 +
- mac_index * 0x200);
- sds |= 0x1;
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 +
- mac_index * 0x200, sds);
- sds &= 0xfffffffe;
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 +
- mac_index * 0x200, sds);
-
- counter = 0;
- while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
- && (counter++ < 5000))
- ;
+/**
+ * nes_init_phy
+ */
+int nes_init_phy(struct nes_device *nesdev)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 mac_index = nesdev->mac_index;
+ u32 tx_config = 0;
+ unsigned long flags;
+ u8 phy_type = nesadapter->phy_type[mac_index];
+ u8 phy_index = nesadapter->phy_index[mac_index];
+ int ret = 0;
+
+ tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
+ if (phy_type == NES_PHY_TYPE_1G) {
+ /* setup 1G MDIO operation */
+ tx_config &= 0xFFFFFFE3;
+ tx_config |= 0x04;
+ } else {
+ /* setup 10G MDIO operation */
+ tx_config &= 0xFFFFFFE3;
+ tx_config |= 0x15;
}
- return 0;
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
+
+ spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+
+ switch (phy_type) {
+ case NES_PHY_TYPE_1G:
+ ret = nes_init_1g_phy(nesdev, phy_type, phy_index);
+ break;
+ case NES_PHY_TYPE_ARGUS:
+ case NES_PHY_TYPE_SFP_D:
+ case NES_PHY_TYPE_KR:
+ ret = nes_init_2025_phy(nesdev, phy_type, phy_index);
+ break;
+ }
+
+ spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+
+ return ret;
}
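
The "wait for tracking" loop in nes_init_2025_phy() above is a bounded poll with a single retry: poll up to 300 times, and on the first timeout with a zero status reset the AMCC PHY and restart the count; a second timeout now returns failure instead of only printing "PHY did not track". Reduced to its shape, with read_status() and reset_phy() as illustrative stand-ins:

    /* Bounded poll with one reset-and-retry; helpers are stand-ins. */
    static int wait_for_tracking(void)
    {
    	unsigned int first_attempt = 1;
    	u32 counter = 0, status;

    	do {
    		status = read_status() & 0xff;
    		if (counter++ > 300) {
    			if (status == 0 && first_attempt) {
    				first_attempt = 0;
    				counter = 0;
    				reset_phy();    /* one full PHY reset */
    				continue;
    			}
    			return 1;               /* second timeout: give up */
    		}
    		mdelay(10);
    	} while (status < 0x30);
    	return 0;                               /* tracking achieved */
    }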
@@ -2460,23 +2540,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
}
} else {
switch (nesadapter->phy_type[mac_index]) {
- case NES_PHY_TYPE_IRIS:
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- u32temp = 20;
- do {
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
- phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- if ((phy_data == temp_phy_data) || (!(--u32temp)))
- break;
- temp_phy_data = phy_data;
- } while (1);
- nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
- __func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
- break;
-
case NES_PHY_TYPE_ARGUS:
case NES_PHY_TYPE_SFP_D:
+ case NES_PHY_TYPE_KR:
/* clear the alarms */
nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008);
nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001);
@@ -3352,8 +3418,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
u16 async_event_id;
u8 tcp_state;
u8 iwarp_state;
- int must_disconn = 1;
- int must_terminate = 0;
struct ib_event ibevent;
nes_debug(NES_DBG_AEQ, "\n");
@@ -3367,6 +3431,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
BUG_ON(!context);
}
+ /* context is nesqp unless async_event_id == CQ ERROR */
+ nesqp = (struct nes_qp *)(unsigned long)context;
async_event_id = (u16)aeq_info;
tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
@@ -3378,8 +3444,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
switch (async_event_id) {
case NES_AEQE_AEID_LLP_FIN_RECEIVED:
- nesqp = (struct nes_qp *)(unsigned long)context;
-
if (nesqp->term_flags)
return; /* Ignore it, wait for close complete */
@@ -3394,79 +3458,48 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
async_event_id, nesqp->last_aeq, tcp_state);
}
- if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
- (nesqp->ibqp_state != IB_QPS_RTS)) {
- /* FIN Received but tcp state or IB state moved on,
- should expect a close complete */
- return;
- }
-
+ break;
case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
- nesqp = (struct nes_qp *)(unsigned long)context;
if (nesqp->term_flags) {
nes_terminate_done(nesqp, 0);
return;
}
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_CLOSING, 0, 0);
+ nes_cm_disconn(nesqp);
+ break;
- case NES_AEQE_AEID_LLP_CONNECTION_RESET:
case NES_AEQE_AEID_RESET_SENT:
- nesqp = (struct nes_qp *)(unsigned long)context;
- if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
- tcp_state = NES_AEQE_TCP_STATE_CLOSED;
- }
+ tcp_state = NES_AEQE_TCP_STATE_CLOSED;
spin_lock_irqsave(&nesqp->lock, flags);
nesqp->hw_iwarp_state = iwarp_state;
nesqp->hw_tcp_state = tcp_state;
nesqp->last_aeq = async_event_id;
-
- if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
- (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
- nesqp->hte_added = 0;
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
- }
-
- if ((nesqp->ibqp_state == IB_QPS_RTS) &&
- ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
- (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
- switch (nesqp->hw_iwarp_state) {
- case NES_AEQE_IWARP_STATE_RTS:
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
- nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
- break;
- case NES_AEQE_IWARP_STATE_TERMINATE:
- must_disconn = 0; /* terminate path takes care of disconn */
- if (nesqp->term_flags == 0)
- must_terminate = 1;
- break;
- }
- } else {
- if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) {
- /* FIN Received but ib state not RTS,
- close complete will be on its way */
- must_disconn = 0;
- }
- }
+ nesqp->hte_added = 0;
spin_unlock_irqrestore(&nesqp->lock, flags);
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+ nes_cm_disconn(nesqp);
+ break;
- if (must_terminate)
- nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
- else if (must_disconn) {
- if (next_iwarp_state) {
- nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n",
- nesqp->hwqp.qp_id, next_iwarp_state);
- nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
- }
- nes_cm_disconn(nesqp);
- }
+ case NES_AEQE_AEID_LLP_CONNECTION_RESET:
+ if (atomic_read(&nesqp->close_timer_started))
+ return;
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_cm_disconn(nesqp);
break;
case NES_AEQE_AEID_TERMINATE_SENT:
- nesqp = (struct nes_qp *)(unsigned long)context;
nes_terminate_send_fin(nesdev, nesqp, aeqe);
break;
case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
- nesqp = (struct nes_qp *)(unsigned long)context;
nes_terminate_received(nesdev, nesqp, aeqe);
break;
@@ -3480,7 +3513,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
case NES_AEQE_AEID_AMP_TO_WRAP:
- nesqp = (struct nes_qp *)(unsigned long)context;
+ printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_ACCESS_ERR\n",
+ nesqp->hwqp.qp_id, async_event_id);
nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR);
break;
@@ -3488,7 +3522,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
- nesqp = (struct nes_qp *)(unsigned long)context;
if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) {
aeq_info &= 0xffff0000;
aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE;
@@ -3530,7 +3563,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
case NES_AEQE_AEID_STAG_ZERO_INVALID:
case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST:
case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
- nesqp = (struct nes_qp *)(unsigned long)context;
+ printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_FATAL\n",
+ nesqp->hwqp.qp_id, async_event_id);
nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
break;
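
Previously every case began by re-casting the 64-bit completion context to a nes_qp pointer; the patch hoists that cast to the top of the function, justified by the new comment that context is the QP for every event except CQ errors. The double cast through unsigned long is the usual way to narrow a u64 handle back to a pointer without warnings on 32-bit builds:

    /* u64 handle -> pointer: casting through unsigned long keeps the
     * narrowing explicit and warning-free on 32-bit kernels. */
    static inline struct nes_qp *aeqe_ctx_to_qp(u64 context)
    {
    	return (struct nes_qp *)(unsigned long)context;
    }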
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 084be0e..9b1e7f8 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -37,12 +37,12 @@
#define NES_PHY_TYPE_CX4 1
#define NES_PHY_TYPE_1G 2
-#define NES_PHY_TYPE_IRIS 3
#define NES_PHY_TYPE_ARGUS 4
#define NES_PHY_TYPE_PUMA_1G 5
#define NES_PHY_TYPE_PUMA_10G 6
#define NES_PHY_TYPE_GLADIUS 7
#define NES_PHY_TYPE_SFP_D 8
+#define NES_PHY_TYPE_KR 9
#define NES_MULTICAST_PF_MAX 8
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 9384f5d..a1d79b6 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1243,8 +1243,8 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
target_stat_values[++index] = cm_packets_received;
target_stat_values[++index] = cm_packets_dropped;
target_stat_values[++index] = cm_packets_retrans;
- target_stat_values[++index] = cm_listens_created;
- target_stat_values[++index] = cm_listens_destroyed;
+ target_stat_values[++index] = atomic_read(&cm_listens_created);
+ target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
target_stat_values[++index] = cm_backlog_drops;
target_stat_values[++index] = atomic_read(&cm_loopbacks);
target_stat_values[++index] = atomic_read(&cm_nodes_created);
@@ -1474,9 +1474,9 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
}
return 0;
}
- if ((phy_type == NES_PHY_TYPE_IRIS) ||
- (phy_type == NES_PHY_TYPE_ARGUS) ||
- (phy_type == NES_PHY_TYPE_SFP_D)) {
+ if ((phy_type == NES_PHY_TYPE_ARGUS) ||
+ (phy_type == NES_PHY_TYPE_SFP_D) ||
+ (phy_type == NES_PHY_TYPE_KR)) {
et_cmd->transceiver = XCVR_EXTERNAL;
et_cmd->port = PORT_FIBRE;
et_cmd->supported = SUPPORTED_FIBRE;
@@ -1596,8 +1596,7 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
struct net_device *netdev;
struct nic_qp_map *curr_qp_map;
u32 u32temp;
- u16 phy_data;
- u16 temp_phy_data;
+ u8 phy_type = nesdev->nesadapter->phy_type[nesdev->mac_index];
netdev = alloc_etherdev(sizeof(struct nes_vnic));
if (!netdev) {
@@ -1705,65 +1704,23 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
if ((nesdev->netdev_count == 0) &&
((PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index) ||
- ((nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) &&
+ ((phy_type == NES_PHY_TYPE_PUMA_1G) &&
(((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
- /*
- * nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n",
- * NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesvnic->logical_port & 1)));
- */
u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200 * (nesdev->mac_index & 1)));
- if (nesdev->nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G) {
+ if (phy_type != NES_PHY_TYPE_PUMA_1G) {
u32temp |= 0x00200000;
nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200 * (nesdev->mac_index & 1)), u32temp);
}
- u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
- (0x200 * (nesdev->mac_index & 1)));
-
- if ((u32temp&0x0f1f0000) == 0x0f0f0000) {
- if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_IRIS) {
- nes_init_phy(nesdev);
- nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1);
- temp_phy_data = (u16)nes_read_indexed(nesdev,
- NES_IDX_MAC_MDIO_CONTROL);
- u32temp = 20;
- do {
- nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1);
- phy_data = (u16)nes_read_indexed(nesdev,
- NES_IDX_MAC_MDIO_CONTROL);
- if ((phy_data == temp_phy_data) || (!(--u32temp)))
- break;
- temp_phy_data = phy_data;
- } while (1);
- if (phy_data & 4) {
- nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
- nesvnic->linkup = 1;
- } else {
- nes_debug(NES_DBG_INIT, "The Link is DOWN!!.\n");
- }
- } else {
- nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
- nesvnic->linkup = 1;
- }
- } else if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) {
- nes_debug(NES_DBG_INIT, "mac_index=%d, logical_port=%d, u32temp=0x%04X, PCI_FUNC=%d\n",
- nesdev->mac_index, nesvnic->logical_port, u32temp, PCI_FUNC(nesdev->pcidev->devfn));
- if (((nesdev->mac_index < 2) && ((u32temp&0x01010000) == 0x01010000)) ||
- ((nesdev->mac_index > 1) && ((u32temp&0x02020000) == 0x02020000))) {
- nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
- nesvnic->linkup = 1;
- }
- }
/* clear the MAC interrupt status, assumes direct logical to physical mapping */
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp);
nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index), u32temp);
- if (nesdev->nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_IRIS)
- nes_init_phy(nesdev);
+ nes_init_phy(nesdev);
}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 64d3136..815725f 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -228,7 +228,7 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
/* Check for SQ overflow */
if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
spin_unlock_irqrestore(&nesqp->lock, flags);
- return -EINVAL;
+ return -ENOMEM;
}
wqe = &nesqp->hwqp.sq_vbase[head];
@@ -3294,7 +3294,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
/* Check for SQ overflow */
if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
- err = -EINVAL;
+ err = -ENOMEM;
break;
}
@@ -3577,7 +3577,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
}
/* Check for RQ overflow */
if (((head + (2 * qsize) - nesqp->hwqp.rq_tail) % qsize) == (qsize - 1)) {
- err = -EINVAL;
+ err = -ENOMEM;
break;
}
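
All three nes_verbs.c hunks retag the same condition: a full send or receive ring is a transient out-of-resources state, so -ENOMEM fits better than -EINVAL, which signals a malformed request. The check itself detects fullness in a ring that reserves one slot to tell full from empty; adding 2*qsize keeps the unsigned subtraction non-negative before the modulo. A standalone, runnable sketch:

    #include <stdio.h>

    /* One-slot-reserved ring: full when (head - tail) mod qsize
     * equals qsize - 1, i.e. only the reserved slot remains. */
    static int ring_full(unsigned head, unsigned tail, unsigned qsize)
    {
    	return ((head + 2 * qsize - tail) % qsize) == (qsize - 1);
    }

    int main(void)
    {
    	printf("%d\n", ring_full(2, 3, 8)); /* 7 in flight -> 1 (full) */
    	printf("%d\n", ring_full(3, 3, 8)); /* empty       -> 0        */
    	return 0;
    }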
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index e9795f6..d10b4ec 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -55,9 +55,7 @@ static int ipoib_get_coalesce(struct net_device *dev,
struct ipoib_dev_priv *priv = netdev_priv(dev);
coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs;
- coal->tx_coalesce_usecs = priv->ethtool.coalesce_usecs;
coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
- coal->tx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
return 0;
}
@@ -69,10 +67,8 @@ static int ipoib_set_coalesce(struct net_device *dev,
int ret;
/*
- * Since IPoIB uses a single CQ for both rx and tx, we assume
- * that rx params dictate the configuration. These values are
- * saved in the private data and returned when ipoib_get_coalesce()
- * is called.
+ * These values are saved in the private data and returned
+ * when ipoib_get_coalesce() is called
*/
if (coal->rx_coalesce_usecs > 0xffff ||
coal->rx_max_coalesced_frames > 0xffff)
@@ -85,8 +81,6 @@ static int ipoib_set_coalesce(struct net_device *dev,
return ret;
}
- coal->tx_coalesce_usecs = coal->rx_coalesce_usecs;
- coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames;
priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs;
priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 5f7a6fc..71237f8f 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -128,6 +128,28 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
return 0;
}
+int iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc)
+{
+ struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+ struct iser_device *device = iser_conn->ib_conn->device;
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ u64 dma_addr;
+
+ dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(device->ib_device, dma_addr))
+ return -ENOMEM;
+
+ tx_desc->dma_addr = dma_addr;
+ tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
+ tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
+ tx_desc->tx_sg[0].lkey = device->mr->lkey;
+
+ iser_task->headers_initialized = 1;
+ iser_task->iser_conn = iser_conn;
+ return 0;
+}
/**
* iscsi_iser_task_init - Initialize task
* @task: iscsi task
@@ -137,17 +159,17 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
static int
iscsi_iser_task_init(struct iscsi_task *task)
{
- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
+ if (!iser_task->headers_initialized)
+ if (iser_initialize_task_headers(task, &iser_task->desc))
+ return -ENOMEM;
+
/* mgmt task */
- if (!task->sc) {
- iser_task->desc.data = task->data;
+ if (!task->sc)
return 0;
- }
iser_task->command_sent = 0;
- iser_task->iser_conn = iser_conn;
iser_task_rdma_init(iser_task);
return 0;
}
@@ -168,7 +190,7 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
{
int error = 0;
- iser_dbg("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
+ iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt);
error = iser_send_control(conn, task);
@@ -178,9 +200,6 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
* - if yes, the task is recycled at iscsi_complete_pdu
* - if no, the task is recycled at iser_snd_completion
*/
- if (error && error != -ENOBUFS)
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
-
return error;
}
@@ -232,7 +251,7 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
task->imm_count, task->unsol_r2t.data_length);
}
- iser_dbg("task deq [cid %d itt 0x%x]\n",
+ iser_dbg("ctask xmit [cid %d itt 0x%x]\n",
conn->id, task->itt);
/* Send the cmd PDU */
@@ -248,8 +267,6 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
error = iscsi_iser_task_xmit_unsol_data(conn, task);
iscsi_iser_task_xmit_exit:
- if (error && error != -ENOBUFS)
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return error;
}
@@ -283,7 +300,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
* due to issues with the login code re iser semantics
* this is not set in iscsi_conn_setup - FIXME
*/
- conn->max_recv_dlength = 128;
+ conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN;
iser_conn = conn->dd_data;
conn->dd_data = iser_conn;
@@ -401,7 +418,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct Scsi_Host *shost;
struct iser_conn *ib_conn;
- shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 1);
+ shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost)
return NULL;
shost->transportt = iscsi_iser_scsi_transport;
@@ -675,7 +692,7 @@ static int __init iser_init(void)
memset(&ig, 0, sizeof(struct iser_global));
ig.desc_cache = kmem_cache_create("iser_descriptors",
- sizeof (struct iser_desc),
+ sizeof(struct iser_tx_desc),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (ig.desc_cache == NULL)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 9d529ca..036934c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -102,9 +102,9 @@
#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
* SCSI_TMFUNC(2), LOGOUT(1) */
-#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \
- ISER_MAX_RX_MISC_PDUS + \
- ISER_MAX_TX_MISC_PDUS)
+#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX)
+
+#define ISER_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2)
/* the max TX (send) WR supported by the iSER QP is defined by *
* max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
@@ -132,6 +132,12 @@ struct iser_hdr {
__be64 read_va;
} __attribute__((packed));
+/* Constant PDU lengths calculations */
+#define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
+
+#define ISER_RECV_DATA_SEG_LEN 128
+#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
+#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
/* Length of an object name string */
#define ISER_OBJECT_NAME_SIZE 64
@@ -187,51 +193,43 @@ struct iser_regd_buf {
struct iser_mem_reg reg; /* memory registration info */
void *virt_addr;
struct iser_device *device; /* device->device for dma_unmap */
- u64 dma_addr; /* if non zero, addr for dma_unmap */
enum dma_data_direction direction; /* direction for dma_unmap */
unsigned int data_size;
- atomic_t ref_count; /* refcount, freed when dec to 0 */
-};
-
-#define MAX_REGD_BUF_VECTOR_LEN 2
-
-struct iser_dto {
- struct iscsi_iser_task *task;
- struct iser_conn *ib_conn;
- int notify_enable;
-
- /* vector of registered buffers */
- unsigned int regd_vector_len;
- struct iser_regd_buf *regd[MAX_REGD_BUF_VECTOR_LEN];
-
- /* offset into the registered buffer may be specified */
- unsigned int offset[MAX_REGD_BUF_VECTOR_LEN];
-
- /* a smaller size may be specified, if 0, then full size is used */
- unsigned int used_sz[MAX_REGD_BUF_VECTOR_LEN];
};
enum iser_desc_type {
- ISCSI_RX,
ISCSI_TX_CONTROL ,
ISCSI_TX_SCSI_COMMAND,
ISCSI_TX_DATAOUT
};
-struct iser_desc {
+struct iser_tx_desc {
struct iser_hdr iser_header;
struct iscsi_hdr iscsi_header;
- struct iser_regd_buf hdr_regd_buf;
- void *data; /* used by RX & TX_CONTROL */
- struct iser_regd_buf data_regd_buf; /* used by RX & TX_CONTROL */
enum iser_desc_type type;
- struct iser_dto dto;
+ u64 dma_addr;
+ /* sg[0] points to iser/iscsi headers; sg[1] optionally points to
+ immediate data, unsolicited data-out or control (login, text) */
+ struct ib_sge tx_sg[2];
+ int num_sge;
};
+#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
+ sizeof(u64) + sizeof(struct ib_sge)))
+struct iser_rx_desc {
+ struct iser_hdr iser_header;
+ struct iscsi_hdr iscsi_header;
+ char data[ISER_RECV_DATA_SEG_LEN];
+ u64 dma_addr;
+ struct ib_sge rx_sg;
+ char pad[ISER_RX_PAD_SIZE];
+} __attribute__((packed));
+
struct iser_device {
struct ib_device *ib_device;
struct ib_pd *pd;
- struct ib_cq *cq;
+ struct ib_cq *rx_cq;
+ struct ib_cq *tx_cq;
struct ib_mr *mr;
struct tasklet_struct cq_tasklet;
struct list_head ig_list; /* entry in ig devices list */
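
The new iser_rx_desc layout above is padded to a fixed footprint: ISER_RX_PAD_SIZE is 256 minus the headers, the 128-byte data segment, the dma_addr and the ib_sge, so the rx_descs array gets a 256-byte (power-of-two) stride. A compile-time check one might add, hypothetical and not part of the patch:

    /* Hypothetical assertion: the pad sizes the packed descriptor
     * to exactly 256 bytes. */
    BUILD_BUG_ON(sizeof(struct iser_rx_desc) != 256);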
@@ -250,15 +248,18 @@ struct iser_conn {
struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
int disc_evt_flag; /* disconn event delivered */
wait_queue_head_t wait; /* waitq for conn/disconn */
- atomic_t post_recv_buf_count; /* posted rx count */
+ int post_recv_buf_count; /* posted rx count */
atomic_t post_send_buf_count; /* posted tx count */
- atomic_t unexpected_pdu_count;/* count of received *
- * unexpected pdus *
- * not yet retired */
char name[ISER_OBJECT_NAME_SIZE];
struct iser_page_vec *page_vec; /* represents SG-to-FMR maps;
* map operations serialized as tx is */
struct list_head conn_list; /* entry in ig conn list */
+
+ char *login_buf;
+ u64 login_dma;
+ unsigned int rx_desc_head;
+ struct iser_rx_desc *rx_descs;
+ struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
};
struct iscsi_iser_conn {
@@ -267,7 +268,7 @@ struct iscsi_iser_conn {
};
struct iscsi_iser_task {
- struct iser_desc desc;
+ struct iser_tx_desc desc;
struct iscsi_iser_conn *iser_conn;
enum iser_task_status status;
int command_sent; /* set if command sent */
@@ -275,6 +276,7 @@ struct iscsi_iser_task {
struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */
struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/
struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */
+ int headers_initialized;
};
struct iser_page_vec {
@@ -322,22 +324,17 @@ void iser_conn_put(struct iser_conn *ib_conn);
void iser_conn_terminate(struct iser_conn *ib_conn);
-void iser_rcv_completion(struct iser_desc *desc,
- unsigned long dto_xfer_len);
+void iser_rcv_completion(struct iser_rx_desc *desc,
+ unsigned long dto_xfer_len,
+ struct iser_conn *ib_conn);
-void iser_snd_completion(struct iser_desc *desc);
+void iser_snd_completion(struct iser_tx_desc *desc, struct iser_conn *ib_conn);
void iser_task_rdma_init(struct iscsi_iser_task *task);
void iser_task_rdma_finalize(struct iscsi_iser_task *task);
-void iser_dto_buffs_release(struct iser_dto *dto);
-
-int iser_regd_buff_release(struct iser_regd_buf *regd_buf);
-
-void iser_reg_single(struct iser_device *device,
- struct iser_regd_buf *regd_buf,
- enum dma_data_direction direction);
+void iser_free_rx_descriptors(struct iser_conn *ib_conn);
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
@@ -356,11 +353,9 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
void iser_unreg_mem(struct iser_mem_reg *mem_reg);
-int iser_post_recv(struct iser_desc *rx_desc);
-int iser_post_send(struct iser_desc *tx_desc);
-
-int iser_conn_state_comp(struct iser_conn *ib_conn,
- enum iser_ib_conn_state comp);
+int iser_post_recvl(struct iser_conn *ib_conn);
+int iser_post_recvm(struct iser_conn *ib_conn, int count);
+int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc);
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
@@ -368,4 +363,6 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
enum dma_data_direction dma_dir);
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
+int iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc);
#endif
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 9de6402..0b9ef07 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -39,29 +39,6 @@
#include "iscsi_iser.h"
-/* Constant PDU lengths calculations */
-#define ISER_TOTAL_HEADERS_LEN (sizeof (struct iser_hdr) + \
- sizeof (struct iscsi_hdr))
-
-/* iser_dto_add_regd_buff - increments the reference count for *
- * the registered buffer & adds it to the DTO object */
-static void iser_dto_add_regd_buff(struct iser_dto *dto,
- struct iser_regd_buf *regd_buf,
- unsigned long use_offset,
- unsigned long use_size)
-{
- int add_idx;
-
- atomic_inc(&regd_buf->ref_count);
-
- add_idx = dto->regd_vector_len;
- dto->regd[add_idx] = regd_buf;
- dto->used_sz[add_idx] = use_size;
- dto->offset[add_idx] = use_offset;
-
- dto->regd_vector_len++;
-}
-
/* Register user buffer memory and initialize passive rdma
* dto descriptor. Total data size is stored in
* iser_task->data[ISER_DIR_IN].data_len
@@ -122,9 +99,9 @@ iser_prepare_write_cmd(struct iscsi_task *task,
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_regd_buf *regd_buf;
int err;
- struct iser_dto *send_dto = &iser_task->desc.dto;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
+ struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];
err = iser_dma_map_task_data(iser_task,
buf_out,
@@ -163,135 +140,100 @@ iser_prepare_write_cmd(struct iscsi_task *task,
if (imm_sz > 0) {
iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
task->itt, imm_sz);
- iser_dto_add_regd_buff(send_dto,
- regd_buf,
- 0,
- imm_sz);
+ tx_dsg->addr = regd_buf->reg.va;
+ tx_dsg->length = imm_sz;
+ tx_dsg->lkey = regd_buf->reg.lkey;
+ iser_task->desc.num_sge = 2;
}
return 0;
}
-/**
- * iser_post_receive_control - allocates, initializes and posts receive DTO.
- */
-static int iser_post_receive_control(struct iscsi_conn *conn)
+/* creates a new tx descriptor and adds header regd buffer */
+static void iser_create_send_desc(struct iser_conn *ib_conn,
+ struct iser_tx_desc *tx_desc)
{
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
- struct iser_desc *rx_desc;
- struct iser_regd_buf *regd_hdr;
- struct iser_regd_buf *regd_data;
- struct iser_dto *recv_dto = NULL;
- struct iser_device *device = iser_conn->ib_conn->device;
- int rx_data_size, err;
- int posts, outstanding_unexp_pdus;
-
- /* for the login sequence we must support rx of upto 8K; login is done
- * after conn create/bind (connect) and conn stop/bind (reconnect),
- * what's common for both schemes is that the connection is not started
- */
- if (conn->c_stage != ISCSI_CONN_STARTED)
- rx_data_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
- else /* FIXME till user space sets conn->max_recv_dlength correctly */
- rx_data_size = 128;
-
- outstanding_unexp_pdus =
- atomic_xchg(&iser_conn->ib_conn->unexpected_pdu_count, 0);
-
- /*
- * in addition to the response buffer, replace those consumed by
- * unexpected pdus.
- */
- for (posts = 0; posts < 1 + outstanding_unexp_pdus; posts++) {
- rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
- if (rx_desc == NULL) {
- iser_err("Failed to alloc desc for post recv %d\n",
- posts);
- err = -ENOMEM;
- goto post_rx_cache_alloc_failure;
- }
- rx_desc->type = ISCSI_RX;
- rx_desc->data = kmalloc(rx_data_size, GFP_NOIO);
- if (rx_desc->data == NULL) {
- iser_err("Failed to alloc data buf for post recv %d\n",
- posts);
- err = -ENOMEM;
- goto post_rx_kmalloc_failure;
- }
-
- recv_dto = &rx_desc->dto;
- recv_dto->ib_conn = iser_conn->ib_conn;
- recv_dto->regd_vector_len = 0;
+ struct iser_device *device = ib_conn->device;
- regd_hdr = &rx_desc->hdr_regd_buf;
- memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
- regd_hdr->device = device;
- regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */
- regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;
+ ib_dma_sync_single_for_cpu(device->ib_device,
+ tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
- iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE);
-
- iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0);
+ memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
+ tx_desc->iser_header.flags = ISER_VER;
- regd_data = &rx_desc->data_regd_buf;
- memset(regd_data, 0, sizeof(struct iser_regd_buf));
- regd_data->device = device;
- regd_data->virt_addr = rx_desc->data;
- regd_data->data_size = rx_data_size;
+ tx_desc->num_sge = 1;
- iser_reg_single(device, regd_data, DMA_FROM_DEVICE);
+ if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
+ tx_desc->tx_sg[0].lkey = device->mr->lkey;
+ iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
+ }
+}
- iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0);
- err = iser_post_recv(rx_desc);
- if (err) {
- iser_err("Failed iser_post_recv for post %d\n", posts);
- goto post_rx_post_recv_failure;
- }
+int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
+{
+ int i, j;
+ u64 dma_addr;
+ struct iser_rx_desc *rx_desc;
+ struct ib_sge *rx_sg;
+ struct iser_device *device = ib_conn->device;
+
+ ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS *
+ sizeof(struct iser_rx_desc), GFP_KERNEL);
+ if (!ib_conn->rx_descs)
+ goto rx_desc_alloc_fail;
+
+ rx_desc = ib_conn->rx_descs;
+
+ for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++) {
+ dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(device->ib_device, dma_addr))
+ goto rx_desc_dma_map_failed;
+
+ rx_desc->dma_addr = dma_addr;
+
+ rx_sg = &rx_desc->rx_sg;
+ rx_sg->addr = rx_desc->dma_addr;
+ rx_sg->length = ISER_RX_PAYLOAD_SIZE;
+ rx_sg->lkey = device->mr->lkey;
}
- /* all posts successful */
- return 0;
-post_rx_post_recv_failure:
- iser_dto_buffs_release(recv_dto);
- kfree(rx_desc->data);
-post_rx_kmalloc_failure:
- kmem_cache_free(ig.desc_cache, rx_desc);
-post_rx_cache_alloc_failure:
- if (posts > 0) {
- /*
- * response buffer posted, but did not replace all unexpected
- * pdu recv bufs. Ignore error, retry occurs next send
- */
- outstanding_unexp_pdus -= (posts - 1);
- err = 0;
- }
- atomic_add(outstanding_unexp_pdus,
- &iser_conn->ib_conn->unexpected_pdu_count);
+ ib_conn->rx_desc_head = 0;
+ return 0;
- return err;
+rx_desc_dma_map_failed:
+ rx_desc = ib_conn->rx_descs;
+ for (j = 0; j < i; j++, rx_desc++)
+ ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ kfree(ib_conn->rx_descs);
+ ib_conn->rx_descs = NULL;
+rx_desc_alloc_fail:
+ iser_err("failed allocating rx descriptors / data buffers\n");
+ return -ENOMEM;
}
-/* creates a new tx descriptor and adds header regd buffer */
-static void iser_create_send_desc(struct iscsi_iser_conn *iser_conn,
- struct iser_desc *tx_desc)
+void iser_free_rx_descriptors(struct iser_conn *ib_conn)
{
- struct iser_regd_buf *regd_hdr = &tx_desc->hdr_regd_buf;
- struct iser_dto *send_dto = &tx_desc->dto;
+ int i;
+ struct iser_rx_desc *rx_desc;
+ struct iser_device *device = ib_conn->device;
- memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
- regd_hdr->device = iser_conn->ib_conn->device;
- regd_hdr->virt_addr = tx_desc; /* == &tx_desc->iser_header */
- regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;
+ if (ib_conn->login_buf) {
+ ib_dma_unmap_single(device->ib_device, ib_conn->login_dma,
+ ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+ kfree(ib_conn->login_buf);
+ }
- send_dto->ib_conn = iser_conn->ib_conn;
- send_dto->notify_enable = 1;
- send_dto->regd_vector_len = 0;
+ if (!ib_conn->rx_descs)
+ return;
- memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
- tx_desc->iser_header.flags = ISER_VER;
-
- iser_dto_add_regd_buff(send_dto, regd_hdr, 0, 0);
+ rx_desc = ib_conn->rx_descs;
+ for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)
+ ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ kfree(ib_conn->rx_descs);
}
/**
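
iser_alloc_rx_descriptors() above uses the standard allocate-then-unwind shape: if mapping descriptor i fails, the error path unmaps exactly the i descriptors that succeeded before freeing the array, and iser_free_rx_descriptors() mirrors the same unmap loop on teardown. The generic pattern, with map_one() and unmap_one() as stand-ins:

    /* Allocate/unwind shape: undo only what succeeded. */
    static int map_all(struct sketch_desc *d, int n)
    {
    	int i;

    	for (i = 0; i < n; i++)
    		if (map_one(&d[i]))
    			goto unwind;
    	return 0;
    unwind:
    	while (i--)
    		unmap_one(&d[i]);   /* exactly the i successful maps */
    	return -ENOMEM;
    }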
@@ -301,46 +243,23 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
- int i;
- /*
- * FIXME this value should be declared to the target during login with
- * the MaxOutstandingUnexpectedPDUs key when supported
- */
- int initial_post_recv_bufs_num = ISER_MAX_RX_MISC_PDUS;
-
- iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num);
+ iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
/* Check that there is no posted recv or send buffers left - */
/* they must be consumed during the login phase */
- BUG_ON(atomic_read(&iser_conn->ib_conn->post_recv_buf_count) != 0);
+ BUG_ON(iser_conn->ib_conn->post_recv_buf_count != 0);
BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
- /* Initial post receive buffers */
- for (i = 0; i < initial_post_recv_bufs_num; i++) {
- if (iser_post_receive_control(conn) != 0) {
- iser_err("Failed to post recv bufs at:%d conn:0x%p\n",
- i, conn);
- return -ENOMEM;
- }
- }
- iser_dbg("Posted %d post recv bufs, conn:0x%p\n", i, conn);
- return 0;
-}
+ if (iser_alloc_rx_descriptors(iser_conn->ib_conn))
+ return -ENOMEM;
-static int
-iser_check_xmit(struct iscsi_conn *conn, void *task)
-{
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ /* Initial post receive buffers */
+ if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
+ return -ENOMEM;
- if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
- ISER_QP_MAX_REQ_DTOS) {
- iser_dbg("%ld can't xmit task %p\n",jiffies,task);
- return -ENOBUFS;
- }
return 0;
}
-
/**
* iser_send_command - send command PDU
*/
@@ -349,27 +268,18 @@ int iser_send_command(struct iscsi_conn *conn,
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_dto *send_dto = NULL;
unsigned long edtl;
- int err = 0;
+ int err;
struct iser_data_buf *data_buf;
struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr;
struct scsi_cmnd *sc = task->sc;
-
- if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
- iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
- return -EPERM;
- }
- if (iser_check_xmit(conn, task))
- return -ENOBUFS;
+ struct iser_tx_desc *tx_desc = &iser_task->desc;
edtl = ntohl(hdr->data_length);
/* build the tx desc regd header and add it to the tx desc dto */
- iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
- send_dto = &iser_task->desc.dto;
- send_dto->task = iser_task;
- iser_create_send_desc(iser_conn, &iser_task->desc);
+ tx_desc->type = ISCSI_TX_SCSI_COMMAND;
+ iser_create_send_desc(iser_conn->ib_conn, tx_desc);
if (hdr->flags & ISCSI_FLAG_CMD_READ)
data_buf = &iser_task->data[ISER_DIR_IN];
@@ -398,23 +308,13 @@ int iser_send_command(struct iscsi_conn *conn,
goto send_command_error;
}
- iser_reg_single(iser_conn->ib_conn->device,
- send_dto->regd[0], DMA_TO_DEVICE);
-
- if (iser_post_receive_control(conn) != 0) {
- iser_err("post_recv failed!\n");
- err = -ENOMEM;
- goto send_command_error;
- }
-
iser_task->status = ISER_TASK_STATUS_STARTED;
- err = iser_post_send(&iser_task->desc);
+ err = iser_post_send(iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
send_command_error:
- iser_dto_buffs_release(send_dto);
iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
return err;
}
@@ -428,20 +328,13 @@ int iser_send_data_out(struct iscsi_conn *conn,
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_desc *tx_desc = NULL;
- struct iser_dto *send_dto = NULL;
+ struct iser_tx_desc *tx_desc = NULL;
+ struct iser_regd_buf *regd_buf;
unsigned long buf_offset;
unsigned long data_seg_len;
uint32_t itt;
int err = 0;
-
- if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
- iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
- return -EPERM;
- }
-
- if (iser_check_xmit(conn, task))
- return -ENOBUFS;
+ struct ib_sge *tx_dsg;
itt = (__force uint32_t)hdr->itt;
data_seg_len = ntoh24(hdr->dlength);
@@ -450,28 +343,25 @@ int iser_send_data_out(struct iscsi_conn *conn,
iser_dbg("%s itt %d dseg_len %d offset %d\n",
__func__,(int)itt,(int)data_seg_len,(int)buf_offset);
- tx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
+ tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
if (tx_desc == NULL) {
iser_err("Failed to alloc desc for post dataout\n");
return -ENOMEM;
}
tx_desc->type = ISCSI_TX_DATAOUT;
+ tx_desc->iser_header.flags = ISER_VER;
memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
- /* build the tx desc regd header and add it to the tx desc dto */
- send_dto = &tx_desc->dto;
- send_dto->task = iser_task;
- iser_create_send_desc(iser_conn, tx_desc);
-
- iser_reg_single(iser_conn->ib_conn->device,
- send_dto->regd[0], DMA_TO_DEVICE);
+ /* build the tx desc */
+ iser_initialize_task_headers(task, tx_desc);
- /* all data was registered for RDMA, we can use the lkey */
- iser_dto_add_regd_buff(send_dto,
- &iser_task->rdma_regd[ISER_DIR_OUT],
- buf_offset,
- data_seg_len);
+ regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
+ tx_dsg = &tx_desc->tx_sg[1];
+ tx_dsg->addr = regd_buf->reg.va + buf_offset;
+ tx_dsg->length = data_seg_len;
+ tx_dsg->lkey = regd_buf->reg.lkey;
+ tx_desc->num_sge = 2;
if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
iser_err("Offset:%ld & DSL:%ld in Data-Out "
@@ -485,12 +375,11 @@ int iser_send_data_out(struct iscsi_conn *conn,
itt, buf_offset, data_seg_len);
- err = iser_post_send(tx_desc);
+ err = iser_post_send(iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
send_data_out_error:
- iser_dto_buffs_release(send_dto);
kmem_cache_free(ig.desc_cache, tx_desc);
iser_err("conn %p failed err %d\n",conn, err);
return err;
@@ -501,64 +390,44 @@ int iser_send_control(struct iscsi_conn *conn,
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_desc *mdesc = &iser_task->desc;
- struct iser_dto *send_dto = NULL;
+ struct iser_tx_desc *mdesc = &iser_task->desc;
unsigned long data_seg_len;
int err = 0;
- struct iser_regd_buf *regd_buf;
struct iser_device *device;
- unsigned char opcode;
-
- if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
- iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
- return -EPERM;
- }
-
- if (iser_check_xmit(conn, task))
- return -ENOBUFS;
/* build the tx desc regd header and add it to the tx desc dto */
mdesc->type = ISCSI_TX_CONTROL;
- send_dto = &mdesc->dto;
- send_dto->task = NULL;
- iser_create_send_desc(iser_conn, mdesc);
+ iser_create_send_desc(iser_conn->ib_conn, mdesc);
device = iser_conn->ib_conn->device;
- iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
-
data_seg_len = ntoh24(task->hdr->dlength);
if (data_seg_len > 0) {
- regd_buf = &mdesc->data_regd_buf;
- memset(regd_buf, 0, sizeof(struct iser_regd_buf));
- regd_buf->device = device;
- regd_buf->virt_addr = task->data;
- regd_buf->data_size = task->data_count;
- iser_reg_single(device, regd_buf,
- DMA_TO_DEVICE);
- iser_dto_add_regd_buff(send_dto, regd_buf,
- 0,
- data_seg_len);
+ struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
+ if (task != conn->login_task) {
+ iser_err("data present on non login task!!!\n");
+ goto send_control_error;
+ }
+ memcpy(iser_conn->ib_conn->login_buf, task->data,
+ task->data_count);
+ tx_dsg->addr = iser_conn->ib_conn->login_dma;
+ tx_dsg->length = data_seg_len;
+ tx_dsg->lkey = device->mr->lkey;
+ mdesc->num_sge = 2;
}
- opcode = task->hdr->opcode & ISCSI_OPCODE_MASK;
-
- /* post recv buffer for response if one is expected */
- if (!(opcode == ISCSI_OP_NOOP_OUT && task->hdr->itt == RESERVED_ITT)) {
- if (iser_post_receive_control(conn) != 0) {
- iser_err("post_rcv_buff failed!\n");
- err = -ENOMEM;
+ if (task == conn->login_task) {
+ err = iser_post_recvl(iser_conn->ib_conn);
+ if (err)
goto send_control_error;
- }
}
- err = iser_post_send(mdesc);
+ err = iser_post_send(iser_conn->ib_conn, mdesc);
if (!err)
return 0;
send_control_error:
- iser_dto_buffs_release(send_dto);
iser_err("conn %p failed err %d\n",conn, err);
return err;
}
@@ -566,104 +435,71 @@ send_control_error:
/**
* iser_rcv_dto_completion - recv DTO completion
*/
-void iser_rcv_completion(struct iser_desc *rx_desc,
- unsigned long dto_xfer_len)
+void iser_rcv_completion(struct iser_rx_desc *rx_desc,
+ unsigned long rx_xfer_len,
+ struct iser_conn *ib_conn)
{
- struct iser_dto *dto = &rx_desc->dto;
- struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
- struct iscsi_task *task;
- struct iscsi_iser_task *iser_task;
+ struct iscsi_iser_conn *conn = ib_conn->iser_conn;
struct iscsi_hdr *hdr;
- char *rx_data = NULL;
- int rx_data_len = 0;
- unsigned char opcode;
-
- hdr = &rx_desc->iscsi_header;
+ u64 rx_dma;
+ int rx_buflen, outstanding, count, err;
+
+ /* differentiate between login and all other PDUs */
+ if ((char *)rx_desc == ib_conn->login_buf) {
+ rx_dma = ib_conn->login_dma;
+ rx_buflen = ISER_RX_LOGIN_SIZE;
+ } else {
+ rx_dma = rx_desc->dma_addr;
+ rx_buflen = ISER_RX_PAYLOAD_SIZE;
+ }
- iser_dbg("op 0x%x itt 0x%x\n", hdr->opcode,hdr->itt);
+ ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
+ rx_buflen, DMA_FROM_DEVICE);
- if (dto_xfer_len > ISER_TOTAL_HEADERS_LEN) { /* we have data */
- rx_data_len = dto_xfer_len - ISER_TOTAL_HEADERS_LEN;
- rx_data = dto->regd[1]->virt_addr;
- rx_data += dto->offset[1];
- }
+ hdr = &rx_desc->iscsi_header;
- opcode = hdr->opcode & ISCSI_OPCODE_MASK;
-
- if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
- spin_lock(&conn->iscsi_conn->session->lock);
- task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
- if (task)
- __iscsi_get_task(task);
- spin_unlock(&conn->iscsi_conn->session->lock);
-
- if (!task)
- iser_err("itt can't be matched to task!!! "
- "conn %p opcode %d itt %d\n",
- conn->iscsi_conn, opcode, hdr->itt);
- else {
- iser_task = task->dd_data;
- iser_dbg("itt %d task %p\n",hdr->itt, task);
- iser_task->status = ISER_TASK_STATUS_COMPLETED;
- iser_task_rdma_finalize(iser_task);
- iscsi_put_task(task);
- }
- }
- iser_dto_buffs_release(dto);
+ iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
+ hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));
- iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
+ iscsi_iser_recv(conn->iscsi_conn, hdr,
+ rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN);
- kfree(rx_desc->data);
- kmem_cache_free(ig.desc_cache, rx_desc);
+ ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
+ rx_buflen, DMA_FROM_DEVICE);
/* decrementing conn->post_recv_buf_count only --after-- freeing the *
* task eliminates the need to worry about tasks which are completed in *
* parallel to the execution of iser_conn_term. So the code that waits *
* for the posted rx bufs refcount to become zero handles everything */
- atomic_dec(&conn->ib_conn->post_recv_buf_count);
+ conn->ib_conn->post_recv_buf_count--;
- /*
- * if an unexpected PDU was received then the recv wr consumed must
- * be replaced, this is done in the next send of a control-type PDU
- */
- if (opcode == ISCSI_OP_NOOP_IN && hdr->itt == RESERVED_ITT) {
- /* nop-in with itt = 0xffffffff */
- atomic_inc(&conn->ib_conn->unexpected_pdu_count);
- }
- else if (opcode == ISCSI_OP_ASYNC_EVENT) {
- /* asyncronous message */
- atomic_inc(&conn->ib_conn->unexpected_pdu_count);
+ if (rx_dma == ib_conn->login_dma)
+ return;
+
+ outstanding = ib_conn->post_recv_buf_count;
+ if (outstanding + ISER_MIN_POSTED_RX <= ISER_QP_MAX_RECV_DTOS) {
+ count = min(ISER_QP_MAX_RECV_DTOS - outstanding,
+ ISER_MIN_POSTED_RX);
+ err = iser_post_recvm(ib_conn, count);
+ if (err)
+ iser_err("posting %d rx bufs err %d\n", count, err);
}
- /* a reject PDU consumes the recv buf posted for the response */
}
-void iser_snd_completion(struct iser_desc *tx_desc)
+void iser_snd_completion(struct iser_tx_desc *tx_desc,
+ struct iser_conn *ib_conn)
{
- struct iser_dto *dto = &tx_desc->dto;
- struct iser_conn *ib_conn = dto->ib_conn;
- struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
- struct iscsi_conn *conn = iser_conn->iscsi_conn;
struct iscsi_task *task;
- int resume_tx = 0;
-
- iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
-
- iser_dto_buffs_release(dto);
+ struct iser_device *device = ib_conn->device;
- if (tx_desc->type == ISCSI_TX_DATAOUT)
+ if (tx_desc->type == ISCSI_TX_DATAOUT) {
+ ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
kmem_cache_free(ig.desc_cache, tx_desc);
-
- if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
- ISER_QP_MAX_REQ_DTOS)
- resume_tx = 1;
+ }
atomic_dec(&ib_conn->post_send_buf_count);
- if (resume_tx) {
- iser_dbg("%ld resuming tx\n",jiffies);
- iscsi_conn_queue_work(conn);
- }
-
if (tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is valid given the libiscsi dd_data allocation */
task = (void *) ((long)(void *)tx_desc -
@@ -692,7 +528,6 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
- int deferred;
int is_rdma_aligned = 1;
struct iser_regd_buf *regd;
@@ -710,32 +545,17 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
if (iser_task->dir[ISER_DIR_IN]) {
regd = &iser_task->rdma_regd[ISER_DIR_IN];
- deferred = iser_regd_buff_release(regd);
- if (deferred) {
- iser_err("%d references remain for BUF-IN rdma reg\n",
- atomic_read(&regd->ref_count));
- }
+ if (regd->reg.is_fmr)
+ iser_unreg_mem(&regd->reg);
}
if (iser_task->dir[ISER_DIR_OUT]) {
regd = &iser_task->rdma_regd[ISER_DIR_OUT];
- deferred = iser_regd_buff_release(regd);
- if (deferred) {
- iser_err("%d references remain for BUF-OUT rdma reg\n",
- atomic_read(&regd->ref_count));
- }
+ if (regd->reg.is_fmr)
+ iser_unreg_mem(&regd->reg);
}
/* if the data was unaligned, it was already unmapped and then copied */
if (is_rdma_aligned)
iser_dma_unmap_task_data(iser_task);
}
-
-void iser_dto_buffs_release(struct iser_dto *dto)
-{
- int i;
-
- for (i = 0; i < dto->regd_vector_len; i++)
- iser_regd_buff_release(dto->regd[i]);
-}
-
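A note on the new receive flow above: RX buffers are now reposted in batches rather than one per completion — nothing is replenished until at least ISER_MIN_POSTED_RX slots have drained, which amortizes the cost of ib_post_recv(). A minimal sketch of the replenish computation (the constants are illustrative stand-ins for the driver's real values in iscsi_iser.h):

        /* Sketch: batched replenishment of the RX ring. */
        #define QP_MAX_RECV_DTOS        128     /* assumed ring capacity */
        #define MIN_POSTED_RX           16      /* assumed batch size */

        static int rx_refill_count(int outstanding)
        {
                /* wait until a full batch worth of slots is free */
                if (outstanding + MIN_POSTED_RX > QP_MAX_RECV_DTOS)
                        return 0;
                return min(QP_MAX_RECV_DTOS - outstanding, MIN_POSTED_RX);
        }

Since the guard guarantees QP_MAX_RECV_DTOS - outstanding >= MIN_POSTED_RX, the min() always yields one full batch, exactly as in iser_rcv_completion() above.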
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 274c883..fb88d68 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -41,62 +41,6 @@
#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
/**
- * Decrements the reference count for the
- * registered buffer & releases it
- *
- * returns 0 if released, 1 if deferred
- */
-int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
-{
- struct ib_device *dev;
-
- if ((atomic_read(&regd_buf->ref_count) == 0) ||
- atomic_dec_and_test(&regd_buf->ref_count)) {
- /* if we used the dma mr, unreg is just NOP */
- if (regd_buf->reg.is_fmr)
- iser_unreg_mem(&regd_buf->reg);
-
- if (regd_buf->dma_addr) {
- dev = regd_buf->device->ib_device;
- ib_dma_unmap_single(dev,
- regd_buf->dma_addr,
- regd_buf->data_size,
- regd_buf->direction);
- }
- /* else this regd buf is associated with task which we */
- /* dma_unmap_single/sg later */
- return 0;
- } else {
- iser_dbg("Release deferred, regd.buff: 0x%p\n", regd_buf);
- return 1;
- }
-}
-
-/**
- * iser_reg_single - fills registered buffer descriptor with
- * registration information
- */
-void iser_reg_single(struct iser_device *device,
- struct iser_regd_buf *regd_buf,
- enum dma_data_direction direction)
-{
- u64 dma_addr;
-
- dma_addr = ib_dma_map_single(device->ib_device,
- regd_buf->virt_addr,
- regd_buf->data_size, direction);
- BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));
-
- regd_buf->reg.lkey = device->mr->lkey;
- regd_buf->reg.len = regd_buf->data_size;
- regd_buf->reg.va = dma_addr;
- regd_buf->reg.is_fmr = 0;
-
- regd_buf->dma_addr = dma_addr;
- regd_buf->direction = direction;
-}
-
-/**
* iser_start_rdma_unaligned_sg
*/
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
@@ -109,10 +53,10 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
unsigned long cmd_data_len = data->data_len;
if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
- mem = (void *)__get_free_pages(GFP_NOIO,
+ mem = (void *)__get_free_pages(GFP_ATOMIC,
ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
else
- mem = kmalloc(cmd_data_len, GFP_NOIO);
+ mem = kmalloc(cmd_data_len, GFP_ATOMIC);
if (mem == NULL) {
iser_err("Failed to allocate mem size %d %d for copying sglist\n",
@@ -474,9 +418,5 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
return err;
}
}
-
- /* take a reference on this regd buf such that it will not be released *
- * (eg in send dto completion) before we get the scsi response */
- atomic_inc(&regd_buf->ref_count);
return 0;
}
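For reference, the unaligned-SG bounce path above selects its allocator by size; a condensed sketch of that strategy (the threshold mirrors ISER_KMALLOC_THRESHOLD, and the GFP flags are whatever the calling context permits — the patch switches them to GFP_ATOMIC):

        static void *alloc_bounce_buf(unsigned long len, gfp_t gfp)
        {
                if (len > 0x20000)      /* 128K - kmalloc limit */
                        return (void *)__get_free_pages(gfp,
                                ilog2(roundup_pow_of_two(len)) - PAGE_SHIFT);
                return kmalloc(len, gfp);
        }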
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 8579f32..308d17b 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -37,9 +37,8 @@
#include "iscsi_iser.h"
#define ISCSI_ISER_MAX_CONN 8
-#define ISER_MAX_CQ_LEN ((ISER_QP_MAX_RECV_DTOS + \
- ISER_QP_MAX_REQ_DTOS) * \
- ISCSI_ISER_MAX_CONN)
+#define ISER_MAX_RX_CQ_LEN (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
+#define ISER_MAX_TX_CQ_LEN (ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN)
static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
@@ -67,15 +66,23 @@ static int iser_create_device_ib_res(struct iser_device *device)
if (IS_ERR(device->pd))
goto pd_err;
- device->cq = ib_create_cq(device->ib_device,
+ device->rx_cq = ib_create_cq(device->ib_device,
iser_cq_callback,
iser_cq_event_callback,
(void *)device,
- ISER_MAX_CQ_LEN, 0);
- if (IS_ERR(device->cq))
- goto cq_err;
+ ISER_MAX_RX_CQ_LEN, 0);
+ if (IS_ERR(device->rx_cq))
+ goto rx_cq_err;
- if (ib_req_notify_cq(device->cq, IB_CQ_NEXT_COMP))
+ device->tx_cq = ib_create_cq(device->ib_device,
+ NULL, iser_cq_event_callback,
+ (void *)device,
+ ISER_MAX_TX_CQ_LEN, 0);
+
+ if (IS_ERR(device->tx_cq))
+ goto tx_cq_err;
+
+ if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
goto cq_arm_err;
tasklet_init(&device->cq_tasklet,
@@ -93,8 +100,10 @@ static int iser_create_device_ib_res(struct iser_device *device)
dma_mr_err:
tasklet_kill(&device->cq_tasklet);
cq_arm_err:
- ib_destroy_cq(device->cq);
-cq_err:
+ ib_destroy_cq(device->tx_cq);
+tx_cq_err:
+ ib_destroy_cq(device->rx_cq);
+rx_cq_err:
ib_dealloc_pd(device->pd);
pd_err:
iser_err("failed to allocate an IB resource\n");
@@ -112,11 +121,13 @@ static void iser_free_device_ib_res(struct iser_device *device)
tasklet_kill(&device->cq_tasklet);
(void)ib_dereg_mr(device->mr);
- (void)ib_destroy_cq(device->cq);
+ (void)ib_destroy_cq(device->tx_cq);
+ (void)ib_destroy_cq(device->rx_cq);
(void)ib_dealloc_pd(device->pd);
device->mr = NULL;
- device->cq = NULL;
+ device->tx_cq = NULL;
+ device->rx_cq = NULL;
device->pd = NULL;
}
@@ -129,13 +140,23 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
{
struct iser_device *device;
struct ib_qp_init_attr init_attr;
- int ret;
+ int ret = -ENOMEM;
struct ib_fmr_pool_param params;
BUG_ON(ib_conn->device == NULL);
device = ib_conn->device;
+ ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
+ if (!ib_conn->login_buf) {
+ ret = -ENOMEM;
+ goto alloc_err;
+ }
+
+ ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
+ (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
+ DMA_FROM_DEVICE);
+
ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
(sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
GFP_KERNEL);
@@ -169,12 +190,12 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
init_attr.event_handler = iser_qp_event_callback;
init_attr.qp_context = (void *)ib_conn;
- init_attr.send_cq = device->cq;
- init_attr.recv_cq = device->cq;
+ init_attr.send_cq = device->tx_cq;
+ init_attr.recv_cq = device->rx_cq;
init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
- init_attr.cap.max_send_sge = MAX_REGD_BUF_VECTOR_LEN;
- init_attr.cap.max_recv_sge = 2;
+ init_attr.cap.max_send_sge = 2;
+ init_attr.cap.max_recv_sge = 1;
init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr.qp_type = IB_QPT_RC;
@@ -192,6 +213,7 @@ qp_err:
(void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
fmr_pool_err:
kfree(ib_conn->page_vec);
+ kfree(ib_conn->login_buf);
alloc_err:
iser_err("unable to alloc mem or create resource, err %d\n", ret);
return ret;
@@ -278,17 +300,6 @@ static void iser_device_try_release(struct iser_device *device)
mutex_unlock(&ig.device_list_mutex);
}
-int iser_conn_state_comp(struct iser_conn *ib_conn,
- enum iser_ib_conn_state comp)
-{
- int ret;
-
- spin_lock_bh(&ib_conn->lock);
- ret = (ib_conn->state == comp);
- spin_unlock_bh(&ib_conn->lock);
- return ret;
-}
-
static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
enum iser_ib_conn_state comp,
enum iser_ib_conn_state exch)
@@ -314,7 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
mutex_lock(&ig.connlist_mutex);
list_del(&ib_conn->conn_list);
mutex_unlock(&ig.connlist_mutex);
-
+ iser_free_rx_descriptors(ib_conn);
iser_free_ib_conn_res(ib_conn);
ib_conn->device = NULL;
/* on EVENT_ADDR_ERROR there's no device yet for this conn */
@@ -442,7 +453,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
ISCSI_ERR_CONN_FAILED);
/* Complete the termination process if no posts are pending */
- if ((atomic_read(&ib_conn->post_recv_buf_count) == 0) &&
+ if (ib_conn->post_recv_buf_count == 0 &&
(atomic_read(&ib_conn->post_send_buf_count) == 0)) {
ib_conn->state = ISER_CONN_DOWN;
wake_up_interruptible(&ib_conn->wait);
@@ -489,9 +500,8 @@ void iser_conn_init(struct iser_conn *ib_conn)
{
ib_conn->state = ISER_CONN_INIT;
init_waitqueue_head(&ib_conn->wait);
- atomic_set(&ib_conn->post_recv_buf_count, 0);
+ ib_conn->post_recv_buf_count = 0;
atomic_set(&ib_conn->post_send_buf_count, 0);
- atomic_set(&ib_conn->unexpected_pdu_count, 0);
atomic_set(&ib_conn->refcount, 1);
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
@@ -626,136 +636,97 @@ void iser_unreg_mem(struct iser_mem_reg *reg)
reg->mem_h = NULL;
}
-/**
- * iser_dto_to_iov - builds IOV from a dto descriptor
- */
-static void iser_dto_to_iov(struct iser_dto *dto, struct ib_sge *iov, int iov_len)
+int iser_post_recvl(struct iser_conn *ib_conn)
{
- int i;
- struct ib_sge *sge;
- struct iser_regd_buf *regd_buf;
-
- if (dto->regd_vector_len > iov_len) {
- iser_err("iov size %d too small for posting dto of len %d\n",
- iov_len, dto->regd_vector_len);
- BUG();
- }
+ struct ib_recv_wr rx_wr, *rx_wr_failed;
+ struct ib_sge sge;
+ int ib_ret;
- for (i = 0; i < dto->regd_vector_len; i++) {
- sge = &iov[i];
- regd_buf = dto->regd[i];
-
- sge->addr = regd_buf->reg.va;
- sge->length = regd_buf->reg.len;
- sge->lkey = regd_buf->reg.lkey;
-
- if (dto->used_sz[i] > 0) /* Adjust size */
- sge->length = dto->used_sz[i];
-
- /* offset and length should not exceed the regd buf length */
- if (sge->length + dto->offset[i] > regd_buf->reg.len) {
- iser_err("Used len:%ld + offset:%d, exceed reg.buf.len:"
- "%ld in dto:0x%p [%d], va:0x%08lX\n",
- (unsigned long)sge->length, dto->offset[i],
- (unsigned long)regd_buf->reg.len, dto, i,
- (unsigned long)sge->addr);
- BUG();
- }
+ sge.addr = ib_conn->login_dma;
+ sge.length = ISER_RX_LOGIN_SIZE;
+ sge.lkey = ib_conn->device->mr->lkey;
- sge->addr += dto->offset[i]; /* Adjust offset */
+ rx_wr.wr_id = (unsigned long)ib_conn->login_buf;
+ rx_wr.sg_list = &sge;
+ rx_wr.num_sge = 1;
+ rx_wr.next = NULL;
+
+ ib_conn->post_recv_buf_count++;
+ ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
+ if (ib_ret) {
+ iser_err("ib_post_recv failed ret=%d\n", ib_ret);
+ ib_conn->post_recv_buf_count--;
}
+ return ib_ret;
}
-/**
- * iser_post_recv - Posts a receive buffer.
- *
- * returns 0 on success, -1 on failure
- */
-int iser_post_recv(struct iser_desc *rx_desc)
+int iser_post_recvm(struct iser_conn *ib_conn, int count)
{
- int ib_ret, ret_val = 0;
- struct ib_recv_wr recv_wr, *recv_wr_failed;
- struct ib_sge iov[2];
- struct iser_conn *ib_conn;
- struct iser_dto *recv_dto = &rx_desc->dto;
-
- /* Retrieve conn */
- ib_conn = recv_dto->ib_conn;
-
- iser_dto_to_iov(recv_dto, iov, 2);
+ struct ib_recv_wr *rx_wr, *rx_wr_failed;
+ int i, ib_ret;
+ unsigned int my_rx_head = ib_conn->rx_desc_head;
+ struct iser_rx_desc *rx_desc;
+
+ for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
+ rx_desc = &ib_conn->rx_descs[my_rx_head];
+ rx_wr->wr_id = (unsigned long)rx_desc;
+ rx_wr->sg_list = &rx_desc->rx_sg;
+ rx_wr->num_sge = 1;
+ rx_wr->next = rx_wr + 1;
+ my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1);
+ }
- recv_wr.next = NULL;
- recv_wr.sg_list = iov;
- recv_wr.num_sge = recv_dto->regd_vector_len;
- recv_wr.wr_id = (unsigned long)rx_desc;
+ rx_wr--;
+ rx_wr->next = NULL; /* mark end of work requests list */
- atomic_inc(&ib_conn->post_recv_buf_count);
- ib_ret = ib_post_recv(ib_conn->qp, &recv_wr, &recv_wr_failed);
+ ib_conn->post_recv_buf_count += count;
+ ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
if (ib_ret) {
iser_err("ib_post_recv failed ret=%d\n", ib_ret);
- atomic_dec(&ib_conn->post_recv_buf_count);
- ret_val = -1;
- }
-
- return ret_val;
+ ib_conn->post_recv_buf_count -= count;
+ } else
+ ib_conn->rx_desc_head = my_rx_head;
+ return ib_ret;
}
+
/**
* iser_start_send - Initiate a Send DTO operation
*
* returns 0 on success, -1 on failure
*/
-int iser_post_send(struct iser_desc *tx_desc)
+int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
- int ib_ret, ret_val = 0;
+ int ib_ret;
struct ib_send_wr send_wr, *send_wr_failed;
- struct ib_sge iov[MAX_REGD_BUF_VECTOR_LEN];
- struct iser_conn *ib_conn;
- struct iser_dto *dto = &tx_desc->dto;
- ib_conn = dto->ib_conn;
-
- iser_dto_to_iov(dto, iov, MAX_REGD_BUF_VECTOR_LEN);
+ ib_dma_sync_single_for_device(ib_conn->device->ib_device,
+ tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
send_wr.next = NULL;
send_wr.wr_id = (unsigned long)tx_desc;
- send_wr.sg_list = iov;
- send_wr.num_sge = dto->regd_vector_len;
+ send_wr.sg_list = tx_desc->tx_sg;
+ send_wr.num_sge = tx_desc->num_sge;
send_wr.opcode = IB_WR_SEND;
- send_wr.send_flags = dto->notify_enable ? IB_SEND_SIGNALED : 0;
+ send_wr.send_flags = IB_SEND_SIGNALED;
atomic_inc(&ib_conn->post_send_buf_count);
ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
if (ib_ret) {
- iser_err("Failed to start SEND DTO, dto: 0x%p, IOV len: %d\n",
- dto, dto->regd_vector_len);
iser_err("ib_post_send failed, ret:%d\n", ib_ret);
atomic_dec(&ib_conn->post_send_buf_count);
- ret_val = -1;
}
-
- return ret_val;
+ return ib_ret;
}
-static void iser_handle_comp_error(struct iser_desc *desc)
+static void iser_handle_comp_error(struct iser_tx_desc *desc,
+ struct iser_conn *ib_conn)
{
- struct iser_dto *dto = &desc->dto;
- struct iser_conn *ib_conn = dto->ib_conn;
-
- iser_dto_buffs_release(dto);
-
- if (desc->type == ISCSI_RX) {
- kfree(desc->data);
+ if (desc && desc->type == ISCSI_TX_DATAOUT)
kmem_cache_free(ig.desc_cache, desc);
- atomic_dec(&ib_conn->post_recv_buf_count);
- } else { /* type is TX control/command/dataout */
- if (desc->type == ISCSI_TX_DATAOUT)
- kmem_cache_free(ig.desc_cache, desc);
- atomic_dec(&ib_conn->post_send_buf_count);
- }
- if (atomic_read(&ib_conn->post_recv_buf_count) == 0 &&
+ if (ib_conn->post_recv_buf_count == 0 &&
atomic_read(&ib_conn->post_send_buf_count) == 0) {
/* getting here when the state is UP means that the conn is *
* being terminated asynchronously from the iSCSI layer's *
@@ -774,32 +745,74 @@ static void iser_handle_comp_error(struct iser_desc *desc)
}
}
+static int iser_drain_tx_cq(struct iser_device *device)
+{
+ struct ib_cq *cq = device->tx_cq;
+ struct ib_wc wc;
+ struct iser_tx_desc *tx_desc;
+ struct iser_conn *ib_conn;
+ int completed_tx = 0;
+
+ while (ib_poll_cq(cq, 1, &wc) == 1) {
+ tx_desc = (struct iser_tx_desc *) (unsigned long) wc.wr_id;
+ ib_conn = wc.qp->qp_context;
+ if (wc.status == IB_WC_SUCCESS) {
+ if (wc.opcode == IB_WC_SEND)
+ iser_snd_completion(tx_desc, ib_conn);
+ else
+ iser_err("expected opcode %d got %d\n",
+ IB_WC_SEND, wc.opcode);
+ } else {
+ iser_err("tx id %llx status %d vend_err %x\n",
+ wc.wr_id, wc.status, wc.vendor_err);
+ atomic_dec(&ib_conn->post_send_buf_count);
+ iser_handle_comp_error(tx_desc, ib_conn);
+ }
+ completed_tx++;
+ }
+ return completed_tx;
+}
+
+
static void iser_cq_tasklet_fn(unsigned long data)
{
struct iser_device *device = (struct iser_device *)data;
- struct ib_cq *cq = device->cq;
+ struct ib_cq *cq = device->rx_cq;
struct ib_wc wc;
- struct iser_desc *desc;
+ struct iser_rx_desc *desc;
unsigned long xfer_len;
+ struct iser_conn *ib_conn;
+ int completed_tx, completed_rx;
+ completed_tx = completed_rx = 0;
while (ib_poll_cq(cq, 1, &wc) == 1) {
- desc = (struct iser_desc *) (unsigned long) wc.wr_id;
+ desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
BUG_ON(desc == NULL);
-
+ ib_conn = wc.qp->qp_context;
if (wc.status == IB_WC_SUCCESS) {
- if (desc->type == ISCSI_RX) {
+ if (wc.opcode == IB_WC_RECV) {
xfer_len = (unsigned long)wc.byte_len;
- iser_rcv_completion(desc, xfer_len);
- } else /* type == ISCSI_TX_CONTROL/SCSI_CMD/DOUT */
- iser_snd_completion(desc);
+ iser_rcv_completion(desc, xfer_len, ib_conn);
+ } else
+ iser_err("expected opcode %d got %d\n",
+ IB_WC_RECV, wc.opcode);
} else {
- iser_err("comp w. error op %d status %d\n",desc->type,wc.status);
- iser_handle_comp_error(desc);
+ if (wc.status != IB_WC_WR_FLUSH_ERR)
+ iser_err("rx id %llx status %d vend_err %x\n",
+ wc.wr_id, wc.status, wc.vendor_err);
+ ib_conn->post_recv_buf_count--;
+ iser_handle_comp_error(NULL, ib_conn);
}
+ completed_rx++;
+ if (!(completed_rx & 63))
+ completed_tx += iser_drain_tx_cq(device);
}
/* #warning "it is assumed here that arming CQ only once it's empty" *
* " would not cause interrupts to be missed" */
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+
+ completed_tx += iser_drain_tx_cq(device);
+ iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
}
static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
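iser_post_recvm() above leans on two idioms worth noting: work requests are chained through ->next so a single ib_post_recv() posts the whole batch, and the ring head wraps with a mask, which requires ISER_QP_MAX_RECV_DTOS to be a power of two. A minimal sketch with illustrative names:

        static void chain_and_wrap(struct ib_recv_wr *wr, int count,
                                   unsigned int *head, unsigned int ring_size)
        {
                int i;

                for (i = 0; i < count; i++) {
                        wr[i].next = &wr[i + 1];
                        /* ring_size must be a power of two for the mask */
                        *head = (*head + 1) & (ring_size - 1);
                }
                wr[count - 1].next = NULL;      /* terminate the chain */
        }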
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 54c8fe2..ed3f9eb 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -80,7 +80,8 @@ MODULE_PARM_DESC(mellanox_workarounds,
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
-static void srp_completion(struct ib_cq *cq, void *target_ptr);
+static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
+static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
static struct scsi_transport_template *ib_srp_transport_template;
@@ -227,14 +228,21 @@ static int srp_create_target_ib(struct srp_target_port *target)
if (!init_attr)
return -ENOMEM;
- target->cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_completion, NULL, target, SRP_CQ_SIZE, 0);
- if (IS_ERR(target->cq)) {
- ret = PTR_ERR(target->cq);
- goto out;
+ target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+ srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
+ if (IS_ERR(target->recv_cq)) {
+ ret = PTR_ERR(target->recv_cq);
+ goto err;
}
- ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);
+ target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+ srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
+ if (IS_ERR(target->send_cq)) {
+ ret = PTR_ERR(target->send_cq);
+ goto err_recv_cq;
+ }
+
+ ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
init_attr->event_handler = srp_qp_event;
init_attr->cap.max_send_wr = SRP_SQ_SIZE;
@@ -243,24 +251,32 @@ static int srp_create_target_ib(struct srp_target_port *target)
init_attr->cap.max_send_sge = 1;
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
init_attr->qp_type = IB_QPT_RC;
- init_attr->send_cq = target->cq;
- init_attr->recv_cq = target->cq;
+ init_attr->send_cq = target->send_cq;
+ init_attr->recv_cq = target->recv_cq;
target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
if (IS_ERR(target->qp)) {
ret = PTR_ERR(target->qp);
- ib_destroy_cq(target->cq);
- goto out;
+ goto err_send_cq;
}
ret = srp_init_qp(target, target->qp);
- if (ret) {
- ib_destroy_qp(target->qp);
- ib_destroy_cq(target->cq);
- goto out;
- }
+ if (ret)
+ goto err_qp;
-out:
+ kfree(init_attr);
+ return 0;
+
+err_qp:
+ ib_destroy_qp(target->qp);
+
+err_send_cq:
+ ib_destroy_cq(target->send_cq);
+
+err_recv_cq:
+ ib_destroy_cq(target->recv_cq);
+
+err:
kfree(init_attr);
return ret;
}
@@ -270,7 +286,8 @@ static void srp_free_target_ib(struct srp_target_port *target)
int i;
ib_destroy_qp(target->qp);
- ib_destroy_cq(target->cq);
+ ib_destroy_cq(target->send_cq);
+ ib_destroy_cq(target->recv_cq);
for (i = 0; i < SRP_RQ_SIZE; ++i)
srp_free_iu(target->srp_host, target->rx_ring[i]);
@@ -568,7 +585,9 @@ static int srp_reconnect_target(struct srp_target_port *target)
if (ret)
goto err;
- while (ib_poll_cq(target->cq, 1, &wc) > 0)
+ while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
+ ; /* nothing */
+ while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
; /* nothing */
spin_lock_irq(target->scsi_host->host_lock);
@@ -851,7 +870,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
struct srp_iu *iu;
u8 opcode;
- iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
+ iu = target->rx_ring[wc->wr_id];
dev = target->srp_host->srp_dev->dev;
ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
@@ -898,7 +917,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
DMA_FROM_DEVICE);
}
-static void srp_completion(struct ib_cq *cq, void *target_ptr)
+static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
struct srp_target_port *target = target_ptr;
struct ib_wc wc;
@@ -907,17 +926,31 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
while (ib_poll_cq(cq, 1, &wc) > 0) {
if (wc.status) {
shost_printk(KERN_ERR, target->scsi_host,
- PFX "failed %s status %d\n",
- wc.wr_id & SRP_OP_RECV ? "receive" : "send",
+ PFX "failed receive status %d\n",
wc.status);
target->qp_in_error = 1;
break;
}
- if (wc.wr_id & SRP_OP_RECV)
- srp_handle_recv(target, &wc);
- else
- ++target->tx_tail;
+ srp_handle_recv(target, &wc);
+ }
+}
+
+static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
+{
+ struct srp_target_port *target = target_ptr;
+ struct ib_wc wc;
+
+ while (ib_poll_cq(cq, 1, &wc) > 0) {
+ if (wc.status) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "failed send status %d\n",
+ wc.status);
+ target->qp_in_error = 1;
+ break;
+ }
+
+ ++target->tx_tail;
}
}
@@ -930,7 +963,7 @@ static int __srp_post_recv(struct srp_target_port *target)
int ret;
next = target->rx_head & (SRP_RQ_SIZE - 1);
- wr.wr_id = next | SRP_OP_RECV;
+ wr.wr_id = next;
iu = target->rx_ring[next];
list.addr = iu->dma;
@@ -970,6 +1003,8 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
{
s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
+ srp_send_completion(target->send_cq, target);
+
if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
return NULL;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index e185b90..5a80eac 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -60,7 +60,6 @@ enum {
SRP_RQ_SHIFT = 6,
SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
SRP_SQ_SIZE = SRP_RQ_SIZE - 1,
- SRP_CQ_SIZE = SRP_SQ_SIZE + SRP_RQ_SIZE,
SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1),
@@ -69,8 +68,6 @@ enum {
SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4
};
-#define SRP_OP_RECV (1 << 31)
-
enum srp_target_state {
SRP_TARGET_LIVE,
SRP_TARGET_CONNECTING,
@@ -133,7 +130,8 @@ struct srp_target_port {
int path_query_id;
struct ib_cm_id *cm_id;
- struct ib_cq *cq;
+ struct ib_cq *recv_cq;
+ struct ib_cq *send_cq;
struct ib_qp *qp;
int max_ti_iu_len;
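With the SRP_OP_RECV wr_id flag gone, SRP now tells send and receive completions apart purely by which CQ they arrive on, and flow-controls sends with free-running tx_head/tx_tail counters. A sketch of the wrap-safe credit test implied by __srp_get_tx_iu(), assuming unsigned counters:

        /* head and tail increment forever; unsigned subtraction stays
         * correct across wraparound, so no masking is needed here. */
        static int tx_credit_available(unsigned int tx_head,
                                       unsigned int tx_tail,
                                       unsigned int sq_size)
        {
                return tx_head - tx_tail < sq_size;
        }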
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 509c8f3..70ffbd0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4680,7 +4680,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
{
unsigned long cpu;
struct page *spare_page;
- struct raid5_percpu *allcpus;
+ struct raid5_percpu __percpu *allcpus;
void *scribble;
int err;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index dd70835..0f86f5e 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -405,7 +405,7 @@ struct raid5_private_data {
* lists and performing address
* conversions
*/
- } *percpu;
+ } __percpu *percpu;
size_t scribble_len; /* size of scribble region must be
* associated with conf to handle
* cpu hotplug while reshaping
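The __percpu markings above are sparse annotations rather than behavioral changes: they let static checkers flag direct dereferences of per-cpu pointers, which must instead go through the per-cpu accessors. An illustrative pattern (requires linux/percpu.h):

        struct foo { int n; };

        static void percpu_demo(void)
        {
                struct foo __percpu *p = alloc_percpu(struct foo);
                int cpu;

                if (!p)
                        return;
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(p, cpu)->n = 0;     /* per-cpu access */
                free_percpu(p);
        }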
diff --git a/drivers/media/dvb/firewire/firedtv-fw.c b/drivers/media/dvb/firewire/firedtv-fw.c
index 7a3de16..75afe4f 100644
--- a/drivers/media/dvb/firewire/firedtv-fw.c
+++ b/drivers/media/dvb/firewire/firedtv-fw.c
@@ -239,47 +239,18 @@ static const struct fw_address_region fcp_region = {
};
/* Adjust the template string if models with longer names appear. */
-#define MAX_MODEL_NAME_LEN ((int)DIV_ROUND_UP(sizeof("FireDTV ????"), 4))
-
-static size_t model_name(u32 *directory, __be32 *buffer)
-{
- struct fw_csr_iterator ci;
- int i, length, key, value, last_key = 0;
- u32 *block = NULL;
-
- fw_csr_iterator_init(&ci, directory);
- while (fw_csr_iterator_next(&ci, &key, &value)) {
- if (last_key == CSR_MODEL &&
- key == (CSR_DESCRIPTOR | CSR_LEAF))
- block = ci.p - 1 + value;
- last_key = key;
- }
-
- if (block == NULL)
- return 0;
-
- length = min((int)(block[0] >> 16) - 2, MAX_MODEL_NAME_LEN);
- if (length <= 0)
- return 0;
-
- /* fast-forward to text string */
- block += 3;
-
- for (i = 0; i < length; i++)
- buffer[i] = cpu_to_be32(block[i]);
-
- return length * 4;
-}
+#define MAX_MODEL_NAME_LEN sizeof("FireDTV ????")
static int node_probe(struct device *dev)
{
struct firedtv *fdtv;
- __be32 name[MAX_MODEL_NAME_LEN];
+ char name[MAX_MODEL_NAME_LEN];
int name_len, err;
- name_len = model_name(fw_unit(dev)->directory, name);
+ name_len = fw_csr_string(fw_unit(dev)->directory, CSR_MODEL,
+ name, sizeof(name));
- fdtv = fdtv_alloc(dev, &backend, (char *)name, name_len);
+ fdtv = fdtv_alloc(dev, &backend, name, name_len >= 0 ? name_len : 0);
if (!fdtv)
return -ENOMEM;
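The open-coded CSR directory walk is replaced above by the generic fw_csr_string() helper, which copies the string leaf for a given key into a caller buffer and returns its length, or a negative errno when the leaf is absent — hence the name_len >= 0 guard. A usage sketch under that assumption:

        static int model_name_len(struct fw_unit *unit, char *buf, size_t size)
        {
                int len = fw_csr_string(unit->directory, CSR_MODEL, buf, size);

                return len >= 0 ? len : 0;      /* missing leaf -> empty name */
        }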
diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c
index 9b413a3..0f50508 100644
--- a/drivers/media/video/dabusb.c
+++ b/drivers/media/video/dabusb.c
@@ -616,10 +616,12 @@ static int dabusb_open (struct inode *inode, struct file *file)
{
int devnum = iminor(inode);
pdabusb_t s;
+ int r;
if (devnum < DABUSB_MINOR || devnum >= (DABUSB_MINOR + NRDABUSB))
return -EIO;
+ lock_kernel();
s = &dabusb[devnum - DABUSB_MINOR];
dbg("dabusb_open");
@@ -634,6 +636,7 @@ static int dabusb_open (struct inode *inode, struct file *file)
msleep_interruptible(500);
if (signal_pending (current)) {
+ unlock_kernel();
return -EAGAIN;
}
mutex_lock(&s->mutex);
@@ -641,6 +644,7 @@ static int dabusb_open (struct inode *inode, struct file *file)
if (usb_set_interface (s->usbdev, _DABUSB_IF, 1) < 0) {
mutex_unlock(&s->mutex);
dev_err(&s->usbdev->dev, "set_interface failed\n");
+ unlock_kernel();
return -EINVAL;
}
s->opened = 1;
@@ -649,7 +653,9 @@ static int dabusb_open (struct inode *inode, struct file *file)
file->f_pos = 0;
file->private_data = s;
- return nonseekable_open(inode, file);
+ r = nonseekable_open(inode, file);
+ unlock_kernel();
+ return r;
}
static int dabusb_release (struct inode *inode, struct file *file)
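The lock_kernel()/unlock_kernel() calls added above follow the BKL push-down pattern: with the VFS no longer taking the big kernel lock around ->open(), a driver that still depends on its serialization must take it itself and drop it on every exit path. A skeleton of the pattern, where do_foo_open() is a hypothetical helper:

        static int foo_open(struct inode *inode, struct file *file)
        {
                int r;

                lock_kernel();
                r = do_foo_open(inode, file);   /* hypothetical body */
                unlock_kernel();                /* drop on every path */
                return r;
        }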
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index f537555..3fab78b 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -37,6 +37,7 @@
#include <linux/gfp.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
+#include <linux/kfifo.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
@@ -47,19 +48,9 @@
#define UART_NR 8 /* Number of UARTs this driver can handle */
-#define UART_XMIT_SIZE PAGE_SIZE
+#define FIFO_SIZE PAGE_SIZE
#define WAKEUP_CHARS 256
-#define circ_empty(circ) ((circ)->head == (circ)->tail)
-#define circ_clear(circ) ((circ)->head = (circ)->tail = 0)
-
-#define circ_chars_pending(circ) \
- (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE))
-
-#define circ_chars_free(circ) \
- (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE))
-
-
struct uart_icount {
__u32 cts;
__u32 dsr;
@@ -82,7 +73,7 @@ struct sdio_uart_port {
struct mutex func_lock;
struct task_struct *in_sdio_uart_irq;
unsigned int regs_offset;
- struct circ_buf xmit;
+ struct kfifo xmit_fifo;
spinlock_t write_lock;
struct uart_icount icount;
unsigned int uartclk;
@@ -105,6 +96,8 @@ static int sdio_uart_add_port(struct sdio_uart_port *port)
kref_init(&port->kref);
mutex_init(&port->func_lock);
spin_lock_init(&port->write_lock);
+ if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL))
+ return -ENOMEM;
spin_lock(&sdio_uart_table_lock);
for (index = 0; index < UART_NR; index++) {
@@ -140,6 +133,7 @@ static void sdio_uart_port_destroy(struct kref *kref)
{
struct sdio_uart_port *port =
container_of(kref, struct sdio_uart_port, kref);
+ kfifo_free(&port->xmit_fifo);
kfree(port);
}
@@ -456,9 +450,11 @@ static void sdio_uart_receive_chars(struct sdio_uart_port *port,
static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
{
- struct circ_buf *xmit = &port->xmit;
+ struct kfifo *xmit = &port->xmit_fifo;
int count;
struct tty_struct *tty;
+ u8 iobuf[16];
+ int len;
if (port->x_char) {
sdio_out(port, UART_TX, port->x_char);
@@ -469,27 +465,25 @@ static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
tty = tty_port_tty_get(&port->port);
- if (tty == NULL || circ_empty(xmit) ||
+ if (tty == NULL || !kfifo_len(xmit) ||
tty->stopped || tty->hw_stopped) {
sdio_uart_stop_tx(port);
tty_kref_put(tty);
return;
}
- count = 16;
- do {
- sdio_out(port, UART_TX, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ len = kfifo_out_locked(xmit, iobuf, 16, &port->write_lock);
+ for (count = 0; count < len; count++) {
+ sdio_out(port, UART_TX, iobuf[count]);
port->icount.tx++;
- if (circ_empty(xmit))
- break;
- } while (--count > 0);
+ }
- if (circ_chars_pending(xmit) < WAKEUP_CHARS)
+ len = kfifo_len(xmit);
+ if (len < WAKEUP_CHARS) {
tty_wakeup(tty);
-
- if (circ_empty(xmit))
- sdio_uart_stop_tx(port);
+ if (len == 0)
+ sdio_uart_stop_tx(port);
+ }
tty_kref_put(tty);
}
@@ -632,7 +626,6 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
{
struct sdio_uart_port *port =
container_of(tport, struct sdio_uart_port, port);
- unsigned long page;
int ret;
/*
@@ -641,22 +634,17 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
*/
set_bit(TTY_IO_ERROR, &tty->flags);
- /* Initialise and allocate the transmit buffer. */
- page = __get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
- port->xmit.buf = (unsigned char *)page;
- circ_clear(&port->xmit);
+ kfifo_reset(&port->xmit_fifo);
ret = sdio_uart_claim_func(port);
if (ret)
- goto err1;
+ return ret;
ret = sdio_enable_func(port->func);
if (ret)
- goto err2;
+ goto err1;
ret = sdio_claim_irq(port->func, sdio_uart_irq);
if (ret)
- goto err3;
+ goto err2;
/*
* Clear the FIFO buffers and disable them.
@@ -700,12 +688,10 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
sdio_uart_release_func(port);
return 0;
-err3:
- sdio_disable_func(port->func);
err2:
- sdio_uart_release_func(port);
+ sdio_disable_func(port->func);
err1:
- free_page((unsigned long)port->xmit.buf);
+ sdio_uart_release_func(port);
return ret;
}
@@ -727,7 +713,7 @@ static void sdio_uart_shutdown(struct tty_port *tport)
ret = sdio_uart_claim_func(port);
if (ret)
- goto skip;
+ return;
sdio_uart_stop_rx(port);
@@ -749,10 +735,6 @@ static void sdio_uart_shutdown(struct tty_port *tport)
sdio_disable_func(port->func);
sdio_uart_release_func(port);
-
-skip:
- /* Free the transmit buffer page. */
- free_page((unsigned long)port->xmit.buf);
}
/**
@@ -822,27 +804,12 @@ static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
int count)
{
struct sdio_uart_port *port = tty->driver_data;
- struct circ_buf *circ = &port->xmit;
- int c, ret = 0;
+ int ret;
if (!port->func)
return -ENODEV;
- spin_lock(&port->write_lock);
- while (1) {
- c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
- if (count < c)
- c = count;
- if (c <= 0)
- break;
- memcpy(circ->buf + circ->head, buf, c);
- circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
- buf += c;
- count -= c;
- ret += c;
- }
- spin_unlock(&port->write_lock);
-
+ ret = kfifo_in_locked(&port->xmit_fifo, buf, count, &port->write_lock);
if (!(port->ier & UART_IER_THRI)) {
int err = sdio_uart_claim_func(port);
if (!err) {
@@ -859,13 +826,13 @@ static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
static int sdio_uart_write_room(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
- return port ? circ_chars_free(&port->xmit) : 0;
+ return FIFO_SIZE - kfifo_len(&port->xmit_fifo);
}
static int sdio_uart_chars_in_buffer(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
- return port ? circ_chars_pending(&port->xmit) : 0;
+ return kfifo_len(&port->xmit_fifo);
}
static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
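The conversion above swaps the hand-rolled circular buffer for the generic kfifo; the *_locked variants take a caller-supplied spinlock, so the driver's existing write_lock is reused unchanged. A minimal, self-contained sketch of the same producer/consumer pattern:

        #include <linux/kfifo.h>
        #include <linux/spinlock.h>

        static struct kfifo fifo;
        static DEFINE_SPINLOCK(fifo_lock);

        static int demo_init(void)
        {
                /* the requested size is rounded up to a power of two */
                return kfifo_alloc(&fifo, PAGE_SIZE, GFP_KERNEL);
        }

        static void demo_io(void)
        {
                u8 buf[16];
                unsigned int n;

                kfifo_in_locked(&fifo, "data", 4, &fifo_lock);
                n = kfifo_out_locked(&fifo, buf, sizeof(buf), &fifo_lock);
        }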
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 3e8618b..4cd7f42 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -264,6 +264,10 @@ struct adapter {
struct work_struct fatal_error_handler_task;
struct work_struct link_fault_handler_task;
+ struct work_struct db_full_task;
+ struct work_struct db_empty_task;
+ struct work_struct db_drop_task;
+
struct dentry *debugfs_root;
struct mutex mdio_lock;
@@ -335,6 +339,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
unsigned char *data);
irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
+extern struct workqueue_struct *cxgb3_wq;
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 6fd968a..3e453e1 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -45,6 +45,7 @@
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
+#include <linux/sched.h>
#include <asm/uaccess.h>
#include "common.h"
@@ -140,7 +141,7 @@ MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
* will block keventd as it needs the rtnl lock, and we'll deadlock waiting
* for our work to complete. Get our own work queue to solve this.
*/
-static struct workqueue_struct *cxgb3_wq;
+struct workqueue_struct *cxgb3_wq;
/**
* link_report - show link status and link speed/duplex
@@ -586,6 +587,19 @@ static void setup_rss(struct adapter *adap)
V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
+static void ring_dbs(struct adapter *adap)
+{
+ int i, j;
+
+ for (i = 0; i < SGE_QSETS; i++) {
+ struct sge_qset *qs = &adap->sge.qs[i];
+
+ if (qs->adap)
+ for (j = 0; j < SGE_TXQ_PER_SET; j++)
+ t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
+ }
+}
+
static void init_napi(struct adapter *adap)
{
int i;
@@ -2750,6 +2764,42 @@ static void t3_adap_check_task(struct work_struct *work)
spin_unlock_irq(&adapter->work_lock);
}
+static void db_full_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_full_task);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
+}
+
+static void db_empty_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_empty_task);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
+}
+
+static void db_drop_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_drop_task);
+ unsigned long delay = 1000;
+ unsigned short r;
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
+
+ /*
+ * Sleep a while before ringing the driver qset dbs.
+ * The delay is between 1000 and 2023 usecs.
+ */
+ get_random_bytes(&r, 2);
+ delay += r & 1023;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(delay));
+ ring_dbs(adapter);
+}
+
/*
* Processes external (PHY) interrupts in process context.
*/
@@ -3218,6 +3268,11 @@ static int __devinit init_one(struct pci_dev *pdev,
INIT_LIST_HEAD(&adapter->adapter_list);
INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
+
+ INIT_WORK(&adapter->db_full_task, db_full_task);
+ INIT_WORK(&adapter->db_empty_task, db_empty_task);
+ INIT_WORK(&adapter->db_drop_task, db_drop_task);
+
INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 670aa62..929c298 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -73,7 +73,10 @@ enum {
OFFLOAD_STATUS_UP,
OFFLOAD_STATUS_DOWN,
OFFLOAD_PORT_DOWN,
- OFFLOAD_PORT_UP
+ OFFLOAD_PORT_UP,
+ OFFLOAD_DB_FULL,
+ OFFLOAD_DB_EMPTY,
+ OFFLOAD_DB_DROP
};
struct cxgb3_client {
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 1b5327b..cb42353 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -254,6 +254,22 @@
#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
+#define S_HIPRIORITYDBFULL 7
+#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
+#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
+
+#define S_HIPRIORITYDBEMPTY 6
+#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
+#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
+
+#define S_LOPRIORITYDBFULL 5
+#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
+#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
+
+#define S_LOPRIORITYDBEMPTY 4
+#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
+#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
+
#define S_RSPQDISABLED 3
#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
#define F_RSPQDISABLED V_RSPQDISABLED(1U)
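The new definitions follow the S_/V_/F_ convention used throughout regs.h: S_x names the field's bit offset, V_x(v) shifts a value into position, and F_x is the ready-made single-bit mask. For a hypothetical field at bit 7:

        #define S_EXAMPLEFIELD    7
        #define V_EXAMPLEFIELD(x) ((x) << S_EXAMPLEFIELD)
        #define F_EXAMPLEFIELD    V_EXAMPLEFIELD(1U)

        /* test: status & F_EXAMPLEFIELD; set: reg |= V_EXAMPLEFIELD(1) */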
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 0482059..78e265b 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -42,6 +42,7 @@
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
+#include "cxgb3_offload.h"
#define USE_GTS 0
@@ -2841,8 +2842,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
}
if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
- CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
- status & F_HIPIODRBDROPERR ? "high" : "lo");
+ queue_work(cxgb3_wq, &adapter->db_drop_task);
+
+ if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
+ queue_work(cxgb3_wq, &adapter->db_full_task);
+
+ if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
+ queue_work(cxgb3_wq, &adapter->db_empty_task);
t3_write_reg(adapter, A_SG_INT_CAUSE, status);
if (status & SGE_FATALERR)
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 3ab9f51..95a8ba0 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1433,7 +1433,10 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
- F_HIRCQPARITYERROR)
+ F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
+ F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
+ F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
+ F_LOPIODRBDROPERR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
F_NFASRCHFAIL)
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index ad113b0..0950fa4 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2908,6 +2908,7 @@ enum parport_pc_pci_cards {
netmos_9805,
netmos_9815,
netmos_9901,
+ netmos_9865,
quatech_sppxp100,
};
@@ -2989,6 +2990,7 @@ static struct parport_pc_pci {
/* netmos_9805 */ { 1, { { 0, -1 }, } },
/* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
/* netmos_9901 */ { 1, { { 0, -1 }, } },
+ /* netmos_9865 */ { 1, { { 0, -1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
};
@@ -3092,6 +3094,10 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
0xA000, 0x2000, 0, 0, netmos_9901 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x1000, 0, 0, netmos_9865 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x2000, 0, 0, netmos_9865 },
/* Quatech SPPXP-100 Parallel port PCI ExpressCard */
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index ec73294..e2dc289 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -40,7 +40,7 @@ static void update_resources (struct bus_node *bus_cur, int type, int rangeno);
static int once_over (void);
static int remove_ranges (struct bus_node *, struct bus_node *);
static int update_bridge_ranges (struct bus_node **);
-static int add_range (int type, struct range_node *, struct bus_node *);
+static int add_bus_range (int type, struct range_node *, struct bus_node *);
static void fix_resources (struct bus_node *);
static struct bus_node *find_bus_wprev (u8, struct bus_node **, u8);
@@ -133,7 +133,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
newrange->rangeno = 1;
else {
/* need to insert our range */
- add_range (flag, newrange, newbus);
+ add_bus_range (flag, newrange, newbus);
debug ("%d resource Primary Bus inserted on bus %x [%x - %x]\n", flag, newbus->busno, newrange->start, newrange->end);
}
@@ -384,7 +384,7 @@ int __init ibmphp_rsrc_init (void)
* Input: type of the resource, range to add, current bus
* Output: 0 or -1, bus and range ptrs
********************************************************************************/
-static int add_range (int type, struct range_node *range, struct bus_node *bus_cur)
+static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur)
{
struct range_node *range_cur = NULL;
struct range_node *range_prev;
@@ -455,7 +455,7 @@ static int add_range (int type, struct range_node *range, struct bus_node *bus_c
/*******************************************************************************
* This routine goes through the list of resources of type 'type' and updates
- * the range numbers that they correspond to. It was called from add_range fnc
+ * the range numbers that they correspond to. It was called from add_bus_range fnc
*
* Input: bus, type of the resource, the rangeno starting from which to update
******************************************************************************/
@@ -1999,7 +1999,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
if (bus_sec->noIORanges > 0) {
if (!range_exists_already (range, bus_sec, IO)) {
- add_range (IO, range, bus_sec);
+ add_bus_range (IO, range, bus_sec);
++bus_sec->noIORanges;
} else {
kfree (range);
@@ -2048,7 +2048,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
if (bus_sec->noMemRanges > 0) {
if (!range_exists_already (range, bus_sec, MEM)) {
- add_range (MEM, range, bus_sec);
+ add_bus_range (MEM, range, bus_sec);
++bus_sec->noMemRanges;
} else {
kfree (range);
@@ -2102,7 +2102,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
if (bus_sec->noPFMemRanges > 0) {
if (!range_exists_already (range, bus_sec, PFMEM)) {
- add_range (PFMEM, range, bus_sec);
+ add_bus_range (PFMEM, range, bus_sec);
++bus_sec->noPFMemRanges;
} else {
kfree (range);
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 0a6601c..d189e47 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -51,17 +51,23 @@ config PCMCIA_LOAD_CIS
config PCMCIA_IOCTL
bool "PCMCIA control ioctl (obsolete)"
- depends on PCMCIA
+ depends on PCMCIA && ARM && !SMP && !PREEMPT
default y
help
If you say Y here, the deprecated ioctl interface to the PCMCIA
- subsystem will be built. It is needed by cardmgr and cardctl
- (pcmcia-cs) to function properly.
+ subsystem will be built. It is needed by the deprecated pcmcia-cs
+ tools (cardmgr, cardctl) to function properly.
You should use the new pcmciautils package instead (see
<file:Documentation/Changes> for location and details).
- If unsure, say Y.
+ This config option will most likely be removed from kernel 2.6.35,
+ and the associated code from kernel 2.6.36.
+
+ As the PCMCIA ioctl is not locking safe, it depends on !SMP and
+ !PREEMPT.
+
+ If unsure, say N.
config CARDBUS
bool "32-bit CardBus support"
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index ac0686e..e6ab2a4 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -71,7 +71,7 @@ int __ref cb_alloc(struct pcmcia_socket *s)
unsigned int max, pass;
s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
- pci_fixup_cardbus(bus);
+ pci_fixup_cardbus(bus);
max = bus->secondary;
for (pass = 0; pass < 2; pass++)
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 2f3622d..f230f65 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -54,46 +54,44 @@ static const u_int exponent[] = {
/* Upper limit on reasonable # of tuples */
#define MAX_TUPLES 200
-/*====================================================================*/
-
-/* Parameters that can be set with 'insmod' */
-
/* 16-bit CIS? */
static int cis_width;
module_param(cis_width, int, 0444);
void release_cis_mem(struct pcmcia_socket *s)
{
- mutex_lock(&s->ops_mutex);
- if (s->cis_mem.flags & MAP_ACTIVE) {
- s->cis_mem.flags &= ~MAP_ACTIVE;
- s->ops->set_mem_map(s, &s->cis_mem);
- if (s->cis_mem.res) {
- release_resource(s->cis_mem.res);
- kfree(s->cis_mem.res);
- s->cis_mem.res = NULL;
+ mutex_lock(&s->ops_mutex);
+ if (s->cis_mem.flags & MAP_ACTIVE) {
+ s->cis_mem.flags &= ~MAP_ACTIVE;
+ s->ops->set_mem_map(s, &s->cis_mem);
+ if (s->cis_mem.res) {
+ release_resource(s->cis_mem.res);
+ kfree(s->cis_mem.res);
+ s->cis_mem.res = NULL;
+ }
+ iounmap(s->cis_virt);
+ s->cis_virt = NULL;
}
- iounmap(s->cis_virt);
- s->cis_virt = NULL;
- }
- mutex_unlock(&s->ops_mutex);
+ mutex_unlock(&s->ops_mutex);
}
-/*
- * Map the card memory at "card_offset" into virtual space.
+/**
+ * set_cis_map() - map the card memory at "card_offset" into virtual space.
+ *
* If flags & MAP_ATTRIB, map the attribute space, otherwise
* map the memory space.
*
* Must be called with ops_mutex held.
*/
-static void __iomem *
-set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flags)
+static void __iomem *set_cis_map(struct pcmcia_socket *s,
+ unsigned int card_offset, unsigned int flags)
{
pccard_mem_map *mem = &s->cis_mem;
int ret;
if (!(s->features & SS_CAP_STATIC_MAP) && (mem->res == NULL)) {
- mem->res = pcmcia_find_mem_region(0, s->map_size, s->map_size, 0, s);
+ mem->res = pcmcia_find_mem_region(0, s->map_size,
+ s->map_size, 0, s);
if (mem->res == NULL) {
dev_printk(KERN_NOTICE, &s->dev,
"cs: unable to map card memory!\n");
@@ -124,165 +122,170 @@ set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flag
return s->cis_virt;
}
-/*======================================================================
-
- Low-level functions to read and write CIS memory. I think the
- write routine is only useful for writing one-byte registers.
-
-======================================================================*/
/* Bits in attr field */
#define IS_ATTR 1
#define IS_INDIRECT 8
+/**
+ * pcmcia_read_cis_mem() - low-level function to read CIS memory
+ */
int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
u_int len, void *ptr)
{
- void __iomem *sys, *end;
- unsigned char *buf = ptr;
-
- dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
-
- mutex_lock(&s->ops_mutex);
- if (attr & IS_INDIRECT) {
- /* Indirect accesses use a bunch of special registers at fixed
- locations in common memory */
- u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
- if (attr & IS_ATTR) {
- addr *= 2;
- flags = ICTRL0_AUTOINC;
- }
+ void __iomem *sys, *end;
+ unsigned char *buf = ptr;
- sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0));
- if (!sys) {
- dev_dbg(&s->dev, "could not map memory\n");
- memset(ptr, 0xff, len);
- mutex_unlock(&s->ops_mutex);
- return -1;
- }
+ dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
- writeb(flags, sys+CISREG_ICTRL0);
- writeb(addr & 0xff, sys+CISREG_IADDR0);
- writeb((addr>>8) & 0xff, sys+CISREG_IADDR1);
- writeb((addr>>16) & 0xff, sys+CISREG_IADDR2);
- writeb((addr>>24) & 0xff, sys+CISREG_IADDR3);
- for ( ; len > 0; len--, buf++)
- *buf = readb(sys+CISREG_IDATA0);
- } else {
- u_int inc = 1, card_offset, flags;
-
- if (addr > CISTPL_MAX_CIS_SIZE)
- dev_dbg(&s->dev, "attempt to read CIS mem at addr %#x", addr);
-
- flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
- if (attr) {
- flags |= MAP_ATTRIB;
- inc++;
- addr *= 2;
- }
+ mutex_lock(&s->ops_mutex);
+ if (attr & IS_INDIRECT) {
+ /* Indirect accesses use a bunch of special registers at fixed
+ locations in common memory */
+ u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
+ if (attr & IS_ATTR) {
+ addr *= 2;
+ flags = ICTRL0_AUTOINC;
+ }
- card_offset = addr & ~(s->map_size-1);
- while (len) {
- sys = set_cis_map(s, card_offset, flags);
- if (!sys) {
- dev_dbg(&s->dev, "could not map memory\n");
- memset(ptr, 0xff, len);
- mutex_unlock(&s->ops_mutex);
- return -1;
- }
- end = sys + s->map_size;
- sys = sys + (addr & (s->map_size-1));
- for ( ; len > 0; len--, buf++, sys += inc) {
- if (sys == end)
- break;
- *buf = readb(sys);
- }
- card_offset += s->map_size;
- addr = 0;
+ sys = set_cis_map(s, 0, MAP_ACTIVE |
+ ((cis_width) ? MAP_16BIT : 0));
+ if (!sys) {
+ dev_dbg(&s->dev, "could not map memory\n");
+ memset(ptr, 0xff, len);
+ mutex_unlock(&s->ops_mutex);
+ return -1;
+ }
+
+ writeb(flags, sys+CISREG_ICTRL0);
+ writeb(addr & 0xff, sys+CISREG_IADDR0);
+ writeb((addr>>8) & 0xff, sys+CISREG_IADDR1);
+ writeb((addr>>16) & 0xff, sys+CISREG_IADDR2);
+ writeb((addr>>24) & 0xff, sys+CISREG_IADDR3);
+ for ( ; len > 0; len--, buf++)
+ *buf = readb(sys+CISREG_IDATA0);
+ } else {
+ u_int inc = 1, card_offset, flags;
+
+ if (addr > CISTPL_MAX_CIS_SIZE)
+ dev_dbg(&s->dev,
+ "attempt to read CIS mem at addr %#x", addr);
+
+ flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
+ if (attr) {
+ flags |= MAP_ATTRIB;
+ inc++;
+ addr *= 2;
+ }
+
+ card_offset = addr & ~(s->map_size-1);
+ while (len) {
+ sys = set_cis_map(s, card_offset, flags);
+ if (!sys) {
+ dev_dbg(&s->dev, "could not map memory\n");
+ memset(ptr, 0xff, len);
+ mutex_unlock(&s->ops_mutex);
+ return -1;
+ }
+ end = sys + s->map_size;
+ sys = sys + (addr & (s->map_size-1));
+ for ( ; len > 0; len--, buf++, sys += inc) {
+ if (sys == end)
+ break;
+ *buf = readb(sys);
+ }
+ card_offset += s->map_size;
+ addr = 0;
+ }
}
- }
- mutex_unlock(&s->ops_mutex);
- dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n",
- *(u_char *)(ptr+0), *(u_char *)(ptr+1),
- *(u_char *)(ptr+2), *(u_char *)(ptr+3));
- return 0;
+ mutex_unlock(&s->ops_mutex);
+ dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n",
+ *(u_char *)(ptr+0), *(u_char *)(ptr+1),
+ *(u_char *)(ptr+2), *(u_char *)(ptr+3));
+ return 0;
}
+/**
+ * pcmcia_write_cis_mem() - low-level function to write CIS memory
+ *
+ * Probably only useful for writing one-byte registers.
+ */
void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
u_int len, void *ptr)
{
- void __iomem *sys, *end;
- unsigned char *buf = ptr;
-
- dev_dbg(&s->dev, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
-
- mutex_lock(&s->ops_mutex);
- if (attr & IS_INDIRECT) {
- /* Indirect accesses use a bunch of special registers at fixed
- locations in common memory */
- u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
- if (attr & IS_ATTR) {
- addr *= 2;
- flags = ICTRL0_AUTOINC;
- }
+ void __iomem *sys, *end;
+ unsigned char *buf = ptr;
- sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0));
- if (!sys) {
- dev_dbg(&s->dev, "could not map memory\n");
- mutex_unlock(&s->ops_mutex);
- return; /* FIXME: Error */
- }
+ dev_dbg(&s->dev,
+ "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
- writeb(flags, sys+CISREG_ICTRL0);
- writeb(addr & 0xff, sys+CISREG_IADDR0);
- writeb((addr>>8) & 0xff, sys+CISREG_IADDR1);
- writeb((addr>>16) & 0xff, sys+CISREG_IADDR2);
- writeb((addr>>24) & 0xff, sys+CISREG_IADDR3);
- for ( ; len > 0; len--, buf++)
- writeb(*buf, sys+CISREG_IDATA0);
- } else {
- u_int inc = 1, card_offset, flags;
-
- flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
- if (attr & IS_ATTR) {
- flags |= MAP_ATTRIB;
- inc++;
- addr *= 2;
- }
+ mutex_lock(&s->ops_mutex);
+ if (attr & IS_INDIRECT) {
+ /* Indirect accesses use a bunch of special registers at fixed
+ locations in common memory */
+ u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
+ if (attr & IS_ATTR) {
+ addr *= 2;
+ flags = ICTRL0_AUTOINC;
+ }
- card_offset = addr & ~(s->map_size-1);
- while (len) {
- sys = set_cis_map(s, card_offset, flags);
- if (!sys) {
- dev_dbg(&s->dev, "could not map memory\n");
- mutex_unlock(&s->ops_mutex);
- return; /* FIXME: error */
- }
-
- end = sys + s->map_size;
- sys = sys + (addr & (s->map_size-1));
- for ( ; len > 0; len--, buf++, sys += inc) {
- if (sys == end)
- break;
- writeb(*buf, sys);
- }
- card_offset += s->map_size;
- addr = 0;
- }
- }
- mutex_unlock(&s->ops_mutex);
-}
+ sys = set_cis_map(s, 0, MAP_ACTIVE |
+ ((cis_width) ? MAP_16BIT : 0));
+ if (!sys) {
+ dev_dbg(&s->dev, "could not map memory\n");
+ mutex_unlock(&s->ops_mutex);
+ return; /* FIXME: Error */
+ }
+
+ writeb(flags, sys+CISREG_ICTRL0);
+ writeb(addr & 0xff, sys+CISREG_IADDR0);
+ writeb((addr>>8) & 0xff, sys+CISREG_IADDR1);
+ writeb((addr>>16) & 0xff, sys+CISREG_IADDR2);
+ writeb((addr>>24) & 0xff, sys+CISREG_IADDR3);
+ for ( ; len > 0; len--, buf++)
+ writeb(*buf, sys+CISREG_IDATA0);
+ } else {
+ u_int inc = 1, card_offset, flags;
+ flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
+ if (attr & IS_ATTR) {
+ flags |= MAP_ATTRIB;
+ inc++;
+ addr *= 2;
+ }
-/*======================================================================
+ card_offset = addr & ~(s->map_size-1);
+ while (len) {
+ sys = set_cis_map(s, card_offset, flags);
+ if (!sys) {
+ dev_dbg(&s->dev, "could not map memory\n");
+ mutex_unlock(&s->ops_mutex);
+ return; /* FIXME: error */
+ }
- This is a wrapper around read_cis_mem, with the same interface,
- but which caches information, for cards whose CIS may not be
- readable all the time.
+ end = sys + s->map_size;
+ sys = sys + (addr & (s->map_size-1));
+ for ( ; len > 0; len--, buf++, sys += inc) {
+ if (sys == end)
+ break;
+ writeb(*buf, sys);
+ }
+ card_offset += s->map_size;
+ addr = 0;
+ }
+ }
+ mutex_unlock(&s->ops_mutex);
+}
-======================================================================*/
+/**
+ * read_cis_cache() - read CIS memory or its associated cache
+ *
+ * This is a wrapper around read_cis_mem, with the same interface,
+ * but which caches information, for cards whose CIS may not be
+ * readable all the time.
+ */
static int read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr,
size_t len, void *ptr)
{
@@ -353,7 +356,6 @@ remove_cis_cache(struct pcmcia_socket *s, int attr, u_int addr, u_int len)
* This destroys the CIS cache but keeps any fake CIS alive. Must be
* called with ops_mutex held.
*/
-
void destroy_cis_cache(struct pcmcia_socket *s)
{
struct list_head *l, *n;
@@ -366,13 +368,9 @@ void destroy_cis_cache(struct pcmcia_socket *s)
}
}
-/*======================================================================
-
- This verifies if the CIS of a card matches what is in the CIS
- cache.
-
-======================================================================*/
-
+/**
+ * verify_cis_cache() - does the CIS match what is in the CIS cache?
+ */
int verify_cis_cache(struct pcmcia_socket *s)
{
struct cis_cache_entry *cis;
@@ -404,13 +402,12 @@ int verify_cis_cache(struct pcmcia_socket *s)
return 0;
}
-/*======================================================================
-
- For really bad cards, we provide a facility for uploading a
- replacement CIS.
-
-======================================================================*/
-
+/**
+ * pcmcia_replace_cis() - use a replacement CIS instead of the card's CIS
+ *
+ * For really bad cards, we provide a facility for uploading a
+ * replacement CIS.
+ */
int pcmcia_replace_cis(struct pcmcia_socket *s,
const u8 *data, const size_t len)
{
@@ -433,17 +430,13 @@ int pcmcia_replace_cis(struct pcmcia_socket *s,
return 0;
}
-/*======================================================================
-
- The high-level CIS tuple services
-
-======================================================================*/
+/* The high-level CIS tuple services */
typedef struct tuple_flags {
- u_int link_space:4;
- u_int has_link:1;
- u_int mfc_fn:3;
- u_int space:4;
+ u_int link_space:4;
+ u_int has_link:1;
+ u_int mfc_fn:3;
+ u_int space:4;
} tuple_flags;
#define LINK_SPACE(f) (((tuple_flags *)(&(f)))->link_space)
@@ -451,982 +444,961 @@ typedef struct tuple_flags {
#define MFC_FN(f) (((tuple_flags *)(&(f)))->mfc_fn)
#define SPACE(f) (((tuple_flags *)(&(f)))->space)
-int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, tuple_t *tuple)
+int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function,
+ tuple_t *tuple)
{
- if (!s)
- return -EINVAL;
-
- if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
- return -ENODEV;
- tuple->TupleLink = tuple->Flags = 0;
-
- /* Assume presence of a LONGLINK_C to address 0 */
- tuple->CISOffset = tuple->LinkOffset = 0;
- SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1;
-
- if ((s->functions > 1) && !(tuple->Attributes & TUPLE_RETURN_COMMON)) {
- cisdata_t req = tuple->DesiredTuple;
- tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
- if (pccard_get_next_tuple(s, function, tuple) == 0) {
- tuple->DesiredTuple = CISTPL_LINKTARGET;
- if (pccard_get_next_tuple(s, function, tuple) != 0)
- return -ENOSPC;
- } else
- tuple->CISOffset = tuple->TupleLink = 0;
- tuple->DesiredTuple = req;
- }
- return pccard_get_next_tuple(s, function, tuple);
+ if (!s)
+ return -EINVAL;
+
+ if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
+ return -ENODEV;
+ tuple->TupleLink = tuple->Flags = 0;
+
+ /* Assume presence of a LONGLINK_C to address 0 */
+ tuple->CISOffset = tuple->LinkOffset = 0;
+ SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1;
+
+ if ((s->functions > 1) && !(tuple->Attributes & TUPLE_RETURN_COMMON)) {
+ cisdata_t req = tuple->DesiredTuple;
+ tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
+ if (pccard_get_next_tuple(s, function, tuple) == 0) {
+ tuple->DesiredTuple = CISTPL_LINKTARGET;
+ if (pccard_get_next_tuple(s, function, tuple) != 0)
+ return -ENOSPC;
+ } else
+ tuple->CISOffset = tuple->TupleLink = 0;
+ tuple->DesiredTuple = req;
+ }
+ return pccard_get_next_tuple(s, function, tuple);
}
static int follow_link(struct pcmcia_socket *s, tuple_t *tuple)
{
- u_char link[5];
- u_int ofs;
- int ret;
-
- if (MFC_FN(tuple->Flags)) {
- /* Get indirect link from the MFC tuple */
- ret = read_cis_cache(s, LINK_SPACE(tuple->Flags),
- tuple->LinkOffset, 5, link);
- if (ret)
+ u_char link[5];
+ u_int ofs;
+ int ret;
+
+ if (MFC_FN(tuple->Flags)) {
+ /* Get indirect link from the MFC tuple */
+ ret = read_cis_cache(s, LINK_SPACE(tuple->Flags),
+ tuple->LinkOffset, 5, link);
+ if (ret)
+ return -1;
+ ofs = get_unaligned_le32(link + 1);
+ SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR);
+ /* Move to the next indirect link */
+ tuple->LinkOffset += 5;
+ MFC_FN(tuple->Flags)--;
+ } else if (HAS_LINK(tuple->Flags)) {
+ ofs = tuple->LinkOffset;
+ SPACE(tuple->Flags) = LINK_SPACE(tuple->Flags);
+ HAS_LINK(tuple->Flags) = 0;
+ } else
return -1;
- ofs = get_unaligned_le32(link + 1);
- SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR);
- /* Move to the next indirect link */
- tuple->LinkOffset += 5;
- MFC_FN(tuple->Flags)--;
- } else if (HAS_LINK(tuple->Flags)) {
- ofs = tuple->LinkOffset;
- SPACE(tuple->Flags) = LINK_SPACE(tuple->Flags);
- HAS_LINK(tuple->Flags) = 0;
- } else {
- return -1;
- }
- if (SPACE(tuple->Flags)) {
- /* This is ugly, but a common CIS error is to code the long
- link offset incorrectly, so we check the right spot... */
+
+ if (SPACE(tuple->Flags)) {
+ /* This is ugly, but a common CIS error is to code the long
+ link offset incorrectly, so we check the right spot... */
+ ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
+ if (ret)
+ return -1;
+ if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) &&
+ (strncmp(link+2, "CIS", 3) == 0))
+ return ofs;
+ remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5);
+ /* Then, we try the wrong spot... */
+ ofs = ofs >> 1;
+ }
ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
if (ret)
return -1;
if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) &&
- (strncmp(link+2, "CIS", 3) == 0))
- return ofs;
+ (strncmp(link+2, "CIS", 3) == 0))
+ return ofs;
remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5);
- /* Then, we try the wrong spot... */
- ofs = ofs >> 1;
- }
- ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
- if (ret)
- return -1;
- if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) &&
- (strncmp(link+2, "CIS", 3) == 0))
- return ofs;
- remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5);
- return -1;
+ return -1;
}
-int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_t *tuple)
+int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function,
+ tuple_t *tuple)
{
- u_char link[2], tmp;
- int ofs, i, attr;
- int ret;
-
- if (!s)
- return -EINVAL;
- if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
- return -ENODEV;
-
- link[1] = tuple->TupleLink;
- ofs = tuple->CISOffset + tuple->TupleLink;
- attr = SPACE(tuple->Flags);
-
- for (i = 0; i < MAX_TUPLES; i++) {
- if (link[1] == 0xff) {
- link[0] = CISTPL_END;
- } else {
- ret = read_cis_cache(s, attr, ofs, 2, link);
- if (ret)
- return -1;
- if (link[0] == CISTPL_NULL) {
- ofs++; continue;
- }
- }
+ u_char link[2], tmp;
+ int ofs, i, attr;
+ int ret;
- /* End of chain? Follow long link if possible */
- if (link[0] == CISTPL_END) {
- ofs = follow_link(s, tuple);
- if (ofs < 0)
- return -ENOSPC;
- attr = SPACE(tuple->Flags);
- ret = read_cis_cache(s, attr, ofs, 2, link);
- if (ret)
- return -1;
- }
+ if (!s)
+ return -EINVAL;
+ if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
+ return -ENODEV;
- /* Is this a link tuple? Make a note of it */
- if ((link[0] == CISTPL_LONGLINK_A) ||
- (link[0] == CISTPL_LONGLINK_C) ||
- (link[0] == CISTPL_LONGLINK_MFC) ||
- (link[0] == CISTPL_LINKTARGET) ||
- (link[0] == CISTPL_INDIRECT) ||
- (link[0] == CISTPL_NO_LINK)) {
- switch (link[0]) {
- case CISTPL_LONGLINK_A:
- HAS_LINK(tuple->Flags) = 1;
- LINK_SPACE(tuple->Flags) = attr | IS_ATTR;
- ret = read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset);
- if (ret)
- return -1;
- break;
- case CISTPL_LONGLINK_C:
- HAS_LINK(tuple->Flags) = 1;
- LINK_SPACE(tuple->Flags) = attr & ~IS_ATTR;
- ret = read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset);
- if (ret)
- return -1;
- break;
- case CISTPL_INDIRECT:
- HAS_LINK(tuple->Flags) = 1;
- LINK_SPACE(tuple->Flags) = IS_ATTR | IS_INDIRECT;
- tuple->LinkOffset = 0;
- break;
- case CISTPL_LONGLINK_MFC:
- tuple->LinkOffset = ofs + 3;
- LINK_SPACE(tuple->Flags) = attr;
- if (function == BIND_FN_ALL) {
- /* Follow all the MFC links */
- ret = read_cis_cache(s, attr, ofs+2, 1, &tmp);
- if (ret)
- return -1;
- MFC_FN(tuple->Flags) = tmp;
- } else {
- /* Follow exactly one of the links */
- MFC_FN(tuple->Flags) = 1;
- tuple->LinkOffset += function * 5;
+ link[1] = tuple->TupleLink;
+ ofs = tuple->CISOffset + tuple->TupleLink;
+ attr = SPACE(tuple->Flags);
+
+ for (i = 0; i < MAX_TUPLES; i++) {
+ if (link[1] == 0xff)
+ link[0] = CISTPL_END;
+ else {
+ ret = read_cis_cache(s, attr, ofs, 2, link);
+ if (ret)
+ return -1;
+ if (link[0] == CISTPL_NULL) {
+ ofs++;
+ continue;
+ }
}
- break;
- case CISTPL_NO_LINK:
- HAS_LINK(tuple->Flags) = 0;
- break;
- }
- if ((tuple->Attributes & TUPLE_RETURN_LINK) &&
- (tuple->DesiredTuple == RETURN_FIRST_TUPLE))
- break;
- } else
- if (tuple->DesiredTuple == RETURN_FIRST_TUPLE)
- break;
- if (link[0] == tuple->DesiredTuple)
- break;
- ofs += link[1] + 2;
- }
- if (i == MAX_TUPLES) {
- dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n");
- return -ENOSPC;
- }
-
- tuple->TupleCode = link[0];
- tuple->TupleLink = link[1];
- tuple->CISOffset = ofs + 2;
- return 0;
-}
+ /* End of chain? Follow long link if possible */
+ if (link[0] == CISTPL_END) {
+ ofs = follow_link(s, tuple);
+ if (ofs < 0)
+ return -ENOSPC;
+ attr = SPACE(tuple->Flags);
+ ret = read_cis_cache(s, attr, ofs, 2, link);
+ if (ret)
+ return -1;
+ }
-/*====================================================================*/
+ /* Is this a link tuple? Make a note of it */
+ if ((link[0] == CISTPL_LONGLINK_A) ||
+ (link[0] == CISTPL_LONGLINK_C) ||
+ (link[0] == CISTPL_LONGLINK_MFC) ||
+ (link[0] == CISTPL_LINKTARGET) ||
+ (link[0] == CISTPL_INDIRECT) ||
+ (link[0] == CISTPL_NO_LINK)) {
+ switch (link[0]) {
+ case CISTPL_LONGLINK_A:
+ HAS_LINK(tuple->Flags) = 1;
+ LINK_SPACE(tuple->Flags) = attr | IS_ATTR;
+ ret = read_cis_cache(s, attr, ofs+2, 4,
+ &tuple->LinkOffset);
+ if (ret)
+ return -1;
+ break;
+ case CISTPL_LONGLINK_C:
+ HAS_LINK(tuple->Flags) = 1;
+ LINK_SPACE(tuple->Flags) = attr & ~IS_ATTR;
+ ret = read_cis_cache(s, attr, ofs+2, 4,
+ &tuple->LinkOffset);
+ if (ret)
+ return -1;
+ break;
+ case CISTPL_INDIRECT:
+ HAS_LINK(tuple->Flags) = 1;
+ LINK_SPACE(tuple->Flags) = IS_ATTR |
+ IS_INDIRECT;
+ tuple->LinkOffset = 0;
+ break;
+ case CISTPL_LONGLINK_MFC:
+ tuple->LinkOffset = ofs + 3;
+ LINK_SPACE(tuple->Flags) = attr;
+ if (function == BIND_FN_ALL) {
+ /* Follow all the MFC links */
+ ret = read_cis_cache(s, attr, ofs+2,
+ 1, &tmp);
+ if (ret)
+ return -1;
+ MFC_FN(tuple->Flags) = tmp;
+ } else {
+ /* Follow exactly one of the links */
+ MFC_FN(tuple->Flags) = 1;
+ tuple->LinkOffset += function * 5;
+ }
+ break;
+ case CISTPL_NO_LINK:
+ HAS_LINK(tuple->Flags) = 0;
+ break;
+ }
+ if ((tuple->Attributes & TUPLE_RETURN_LINK) &&
+ (tuple->DesiredTuple == RETURN_FIRST_TUPLE))
+ break;
+ } else
+ if (tuple->DesiredTuple == RETURN_FIRST_TUPLE)
+ break;
+
+ if (link[0] == tuple->DesiredTuple)
+ break;
+ ofs += link[1] + 2;
+ }
+ if (i == MAX_TUPLES) {
+ dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n");
+ return -ENOSPC;
+ }
-#define _MIN(a, b) (((a) < (b)) ? (a) : (b))
+ tuple->TupleCode = link[0];
+ tuple->TupleLink = link[1];
+ tuple->CISOffset = ofs + 2;
+ return 0;
+}
int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple)
{
- u_int len;
- int ret;
+ u_int len;
+ int ret;
- if (!s)
- return -EINVAL;
+ if (!s)
+ return -EINVAL;
- if (tuple->TupleLink < tuple->TupleOffset)
- return -ENOSPC;
- len = tuple->TupleLink - tuple->TupleOffset;
- tuple->TupleDataLen = tuple->TupleLink;
- if (len == 0)
+ if (tuple->TupleLink < tuple->TupleOffset)
+ return -ENOSPC;
+ len = tuple->TupleLink - tuple->TupleOffset;
+ tuple->TupleDataLen = tuple->TupleLink;
+ if (len == 0)
+ return 0;
+ ret = read_cis_cache(s, SPACE(tuple->Flags),
+ tuple->CISOffset + tuple->TupleOffset,
+ min(len, (u_int) tuple->TupleDataMax),
+ tuple->TupleData);
+ if (ret)
+ return -1;
return 0;
- ret = read_cis_cache(s, SPACE(tuple->Flags),
- tuple->CISOffset + tuple->TupleOffset,
- _MIN(len, tuple->TupleDataMax), tuple->TupleData);
- if (ret)
- return -1;
- return 0;
}
-/*======================================================================
-
- Parsing routines for individual tuples
-
-======================================================================*/
+/* Parsing routines for individual tuples */
static int parse_device(tuple_t *tuple, cistpl_device_t *device)
{
- int i;
- u_char scale;
- u_char *p, *q;
+ int i;
+ u_char scale;
+ u_char *p, *q;
- p = (u_char *)tuple->TupleData;
- q = p + tuple->TupleDataLen;
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
- device->ndev = 0;
- for (i = 0; i < CISTPL_MAX_DEVICES; i++) {
+ device->ndev = 0;
+ for (i = 0; i < CISTPL_MAX_DEVICES; i++) {
- if (*p == 0xff)
- break;
- device->dev[i].type = (*p >> 4);
- device->dev[i].wp = (*p & 0x08) ? 1 : 0;
- switch (*p & 0x07) {
- case 0:
- device->dev[i].speed = 0;
- break;
- case 1:
- device->dev[i].speed = 250;
- break;
- case 2:
- device->dev[i].speed = 200;
- break;
- case 3:
- device->dev[i].speed = 150;
- break;
- case 4:
- device->dev[i].speed = 100;
- break;
- case 7:
- if (++p == q)
- return -EINVAL;
- device->dev[i].speed = SPEED_CVT(*p);
- while (*p & 0x80)
+ if (*p == 0xff)
+ break;
+ device->dev[i].type = (*p >> 4);
+ device->dev[i].wp = (*p & 0x08) ? 1 : 0;
+ switch (*p & 0x07) {
+ case 0:
+ device->dev[i].speed = 0;
+ break;
+ case 1:
+ device->dev[i].speed = 250;
+ break;
+ case 2:
+ device->dev[i].speed = 200;
+ break;
+ case 3:
+ device->dev[i].speed = 150;
+ break;
+ case 4:
+ device->dev[i].speed = 100;
+ break;
+ case 7:
if (++p == q)
return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
+ device->dev[i].speed = SPEED_CVT(*p);
+ while (*p & 0x80)
+ if (++p == q)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
- if (++p == q)
- return -EINVAL;
- if (*p == 0xff)
- break;
- scale = *p & 7;
- if (scale == 7)
- return -EINVAL;
- device->dev[i].size = ((*p >> 3) + 1) * (512 << (scale*2));
- device->ndev++;
- if (++p == q)
- break;
- }
+ if (++p == q)
+ return -EINVAL;
+ if (*p == 0xff)
+ break;
+ scale = *p & 7;
+ if (scale == 7)
+ return -EINVAL;
+ device->dev[i].size = ((*p >> 3) + 1) * (512 << (scale*2));
+ device->ndev++;
+ if (++p == q)
+ break;
+ }
- return 0;
+ return 0;
}
-/*====================================================================*/
static int parse_checksum(tuple_t *tuple, cistpl_checksum_t *csum)
{
- u_char *p;
- if (tuple->TupleDataLen < 5)
- return -EINVAL;
- p = (u_char *) tuple->TupleData;
- csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2;
- csum->len = get_unaligned_le16(p + 2);
- csum->sum = *(p + 4);
- return 0;
+ u_char *p;
+ if (tuple->TupleDataLen < 5)
+ return -EINVAL;
+ p = (u_char *) tuple->TupleData;
+ csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2;
+ csum->len = get_unaligned_le16(p + 2);
+ csum->sum = *(p + 4);
+ return 0;
}
-/*====================================================================*/
static int parse_longlink(tuple_t *tuple, cistpl_longlink_t *link)
{
- if (tuple->TupleDataLen < 4)
- return -EINVAL;
- link->addr = get_unaligned_le32(tuple->TupleData);
- return 0;
+ if (tuple->TupleDataLen < 4)
+ return -EINVAL;
+ link->addr = get_unaligned_le32(tuple->TupleData);
+ return 0;
}
-/*====================================================================*/
-static int parse_longlink_mfc(tuple_t *tuple,
- cistpl_longlink_mfc_t *link)
+static int parse_longlink_mfc(tuple_t *tuple, cistpl_longlink_mfc_t *link)
{
- u_char *p;
- int i;
-
- p = (u_char *)tuple->TupleData;
-
- link->nfn = *p; p++;
- if (tuple->TupleDataLen <= link->nfn*5)
- return -EINVAL;
- for (i = 0; i < link->nfn; i++) {
- link->fn[i].space = *p; p++;
- link->fn[i].addr = get_unaligned_le32(p);
- p += 4;
- }
- return 0;
+ u_char *p;
+ int i;
+
+ p = (u_char *)tuple->TupleData;
+
+ link->nfn = *p; p++;
+ if (tuple->TupleDataLen <= link->nfn*5)
+ return -EINVAL;
+ for (i = 0; i < link->nfn; i++) {
+ link->fn[i].space = *p; p++;
+ link->fn[i].addr = get_unaligned_le32(p);
+ p += 4;
+ }
+ return 0;
}
-/*====================================================================*/
static int parse_strings(u_char *p, u_char *q, int max,
char *s, u_char *ofs, u_char *found)
{
- int i, j, ns;
+ int i, j, ns;
- if (p == q)
- return -EINVAL;
- ns = 0; j = 0;
- for (i = 0; i < max; i++) {
- if (*p == 0xff)
- break;
- ofs[i] = j;
- ns++;
- for (;;) {
- s[j++] = (*p == 0xff) ? '\0' : *p;
- if ((*p == '\0') || (*p == 0xff))
- break;
- if (++p == q)
- return -EINVAL;
+ if (p == q)
+ return -EINVAL;
+ ns = 0; j = 0;
+ for (i = 0; i < max; i++) {
+ if (*p == 0xff)
+ break;
+ ofs[i] = j;
+ ns++;
+ for (;;) {
+ s[j++] = (*p == 0xff) ? '\0' : *p;
+ if ((*p == '\0') || (*p == 0xff))
+ break;
+ if (++p == q)
+ return -EINVAL;
+ }
+ if ((*p == 0xff) || (++p == q))
+ break;
}
- if ((*p == 0xff) || (++p == q))
- break;
- }
- if (found) {
- *found = ns;
- return 0;
- } else {
+ if (found) {
+ *found = ns;
+ return 0;
+ }
+
return (ns == max) ? 0 : -EINVAL;
- }
}
-/*====================================================================*/
static int parse_vers_1(tuple_t *tuple, cistpl_vers_1_t *vers_1)
{
- u_char *p, *q;
+ u_char *p, *q;
- p = (u_char *)tuple->TupleData;
- q = p + tuple->TupleDataLen;
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
- vers_1->major = *p; p++;
- vers_1->minor = *p; p++;
- if (p >= q)
- return -EINVAL;
+ vers_1->major = *p; p++;
+ vers_1->minor = *p; p++;
+ if (p >= q)
+ return -EINVAL;
- return parse_strings(p, q, CISTPL_VERS_1_MAX_PROD_STRINGS,
- vers_1->str, vers_1->ofs, &vers_1->ns);
+ return parse_strings(p, q, CISTPL_VERS_1_MAX_PROD_STRINGS,
+ vers_1->str, vers_1->ofs, &vers_1->ns);
}
-/*====================================================================*/
static int parse_altstr(tuple_t *tuple, cistpl_altstr_t *altstr)
{
- u_char *p, *q;
+ u_char *p, *q;
- p = (u_char *)tuple->TupleData;
- q = p + tuple->TupleDataLen;
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
- return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS,
- altstr->str, altstr->ofs, &altstr->ns);
+ return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS,
+ altstr->str, altstr->ofs, &altstr->ns);
}
-/*====================================================================*/
static int parse_jedec(tuple_t *tuple, cistpl_jedec_t *jedec)
{
- u_char *p, *q;
- int nid;
+ u_char *p, *q;
+ int nid;
- p = (u_char *)tuple->TupleData;
- q = p + tuple->TupleDataLen;
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
- for (nid = 0; nid < CISTPL_MAX_DEVICES; nid++) {
- if (p > q-2)
- break;
- jedec->id[nid].mfr = p[0];
- jedec->id[nid].info = p[1];
- p += 2;
- }
- jedec->nid = nid;
- return 0;
+ for (nid = 0; nid < CISTPL_MAX_DEVICES; nid++) {
+ if (p > q-2)
+ break;
+ jedec->id[nid].mfr = p[0];
+ jedec->id[nid].info = p[1];
+ p += 2;
+ }
+ jedec->nid = nid;
+ return 0;
}
-/*====================================================================*/
static int parse_manfid(tuple_t *tuple, cistpl_manfid_t *m)
{
- if (tuple->TupleDataLen < 4)
- return -EINVAL;
- m->manf = get_unaligned_le16(tuple->TupleData);
- m->card = get_unaligned_le16(tuple->TupleData + 2);
- return 0;
+ if (tuple->TupleDataLen < 4)
+ return -EINVAL;
+ m->manf = get_unaligned_le16(tuple->TupleData);
+ m->card = get_unaligned_le16(tuple->TupleData + 2);
+ return 0;
}
-/*====================================================================*/
static int parse_funcid(tuple_t *tuple, cistpl_funcid_t *f)
{
- u_char *p;
- if (tuple->TupleDataLen < 2)
- return -EINVAL;
- p = (u_char *)tuple->TupleData;
- f->func = p[0];
- f->sysinit = p[1];
- return 0;
+ u_char *p;
+ if (tuple->TupleDataLen < 2)
+ return -EINVAL;
+ p = (u_char *)tuple->TupleData;
+ f->func = p[0];
+ f->sysinit = p[1];
+ return 0;
}
-/*====================================================================*/
static int parse_funce(tuple_t *tuple, cistpl_funce_t *f)
{
- u_char *p;
- int i;
- if (tuple->TupleDataLen < 1)
- return -EINVAL;
- p = (u_char *)tuple->TupleData;
- f->type = p[0];
- for (i = 1; i < tuple->TupleDataLen; i++)
- f->data[i-1] = p[i];
- return 0;
+ u_char *p;
+ int i;
+ if (tuple->TupleDataLen < 1)
+ return -EINVAL;
+ p = (u_char *)tuple->TupleData;
+ f->type = p[0];
+ for (i = 1; i < tuple->TupleDataLen; i++)
+ f->data[i-1] = p[i];
+ return 0;
}
-/*====================================================================*/
static int parse_config(tuple_t *tuple, cistpl_config_t *config)
{
- int rasz, rmsz, i;
- u_char *p;
-
- p = (u_char *)tuple->TupleData;
- rasz = *p & 0x03;
- rmsz = (*p & 0x3c) >> 2;
- if (tuple->TupleDataLen < rasz+rmsz+4)
- return -EINVAL;
- config->last_idx = *(++p);
- p++;
- config->base = 0;
- for (i = 0; i <= rasz; i++)
- config->base += p[i] << (8*i);
- p += rasz+1;
- for (i = 0; i < 4; i++)
- config->rmask[i] = 0;
- for (i = 0; i <= rmsz; i++)
- config->rmask[i>>2] += p[i] << (8*(i%4));
- config->subtuples = tuple->TupleDataLen - (rasz+rmsz+4);
- return 0;
+ int rasz, rmsz, i;
+ u_char *p;
+
+ p = (u_char *)tuple->TupleData;
+ rasz = *p & 0x03;
+ rmsz = (*p & 0x3c) >> 2;
+ if (tuple->TupleDataLen < rasz+rmsz+4)
+ return -EINVAL;
+ config->last_idx = *(++p);
+ p++;
+ config->base = 0;
+ for (i = 0; i <= rasz; i++)
+ config->base += p[i] << (8*i);
+ p += rasz+1;
+ for (i = 0; i < 4; i++)
+ config->rmask[i] = 0;
+ for (i = 0; i <= rmsz; i++)
+ config->rmask[i>>2] += p[i] << (8*(i%4));
+ config->subtuples = tuple->TupleDataLen - (rasz+rmsz+4);
+ return 0;
}
-/*======================================================================
+/* The following routines are all used to parse the nightmarish
+ * config table entries.
+ */
+
+static u_char *parse_power(u_char *p, u_char *q, cistpl_power_t *pwr)
+{
+ int i;
+ u_int scale;
- The following routines are all used to parse the nightmarish
- config table entries.
+ if (p == q)
+ return NULL;
+ pwr->present = *p;
+ pwr->flags = 0;
+ p++;
+ for (i = 0; i < 7; i++)
+ if (pwr->present & (1<<i)) {
+ if (p == q)
+ return NULL;
+ pwr->param[i] = POWER_CVT(*p);
+ scale = POWER_SCALE(*p);
+ while (*p & 0x80) {
+ if (++p == q)
+ return NULL;
+ if ((*p & 0x7f) < 100)
+ pwr->param[i] +=
+ (*p & 0x7f) * scale / 100;
+ else if (*p == 0x7d)
+ pwr->flags |= CISTPL_POWER_HIGHZ_OK;
+ else if (*p == 0x7e)
+ pwr->param[i] = 0;
+ else if (*p == 0x7f)
+ pwr->flags |= CISTPL_POWER_HIGHZ_REQ;
+ else
+ return NULL;
+ }
+ p++;
+ }
+ return p;
+}
-======================================================================*/
-static u_char *parse_power(u_char *p, u_char *q,
- cistpl_power_t *pwr)
+static u_char *parse_timing(u_char *p, u_char *q, cistpl_timing_t *timing)
{
- int i;
- u_int scale;
-
- if (p == q)
- return NULL;
- pwr->present = *p;
- pwr->flags = 0;
- p++;
- for (i = 0; i < 7; i++)
- if (pwr->present & (1<<i)) {
- if (p == q)
- return NULL;
- pwr->param[i] = POWER_CVT(*p);
- scale = POWER_SCALE(*p);
- while (*p & 0x80) {
+ u_char scale;
+
+ if (p == q)
+ return NULL;
+ scale = *p;
+ if ((scale & 3) != 3) {
if (++p == q)
return NULL;
- if ((*p & 0x7f) < 100)
- pwr->param[i] += (*p & 0x7f) * scale / 100;
- else if (*p == 0x7d)
- pwr->flags |= CISTPL_POWER_HIGHZ_OK;
- else if (*p == 0x7e)
- pwr->param[i] = 0;
- else if (*p == 0x7f)
- pwr->flags |= CISTPL_POWER_HIGHZ_REQ;
- else
- return NULL;
- }
- p++;
- }
- return p;
+ timing->wait = SPEED_CVT(*p);
+ timing->waitscale = exponent[scale & 3];
+ } else
+ timing->wait = 0;
+ scale >>= 2;
+ if ((scale & 7) != 7) {
+ if (++p == q)
+ return NULL;
+ timing->ready = SPEED_CVT(*p);
+ timing->rdyscale = exponent[scale & 7];
+ } else
+ timing->ready = 0;
+ scale >>= 3;
+ if (scale != 7) {
+ if (++p == q)
+ return NULL;
+ timing->reserved = SPEED_CVT(*p);
+ timing->rsvscale = exponent[scale];
+ } else
+ timing->reserved = 0;
+ p++;
+ return p;
}
-/*====================================================================*/
-static u_char *parse_timing(u_char *p, u_char *q,
- cistpl_timing_t *timing)
+static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
{
- u_char scale;
+ int i, j, bsz, lsz;
- if (p == q)
- return NULL;
- scale = *p;
- if ((scale & 3) != 3) {
- if (++p == q)
- return NULL;
- timing->wait = SPEED_CVT(*p);
- timing->waitscale = exponent[scale & 3];
- } else
- timing->wait = 0;
- scale >>= 2;
- if ((scale & 7) != 7) {
- if (++p == q)
+ if (p == q)
return NULL;
- timing->ready = SPEED_CVT(*p);
- timing->rdyscale = exponent[scale & 7];
- } else
- timing->ready = 0;
- scale >>= 3;
- if (scale != 7) {
+ io->flags = *p;
+
+ if (!(*p & 0x80)) {
+ io->nwin = 1;
+ io->win[0].base = 0;
+ io->win[0].len = (1 << (io->flags & CISTPL_IO_LINES_MASK));
+ return p+1;
+ }
+
if (++p == q)
return NULL;
- timing->reserved = SPEED_CVT(*p);
- timing->rsvscale = exponent[scale];
- } else
- timing->reserved = 0;
- p++;
- return p;
-}
-
-/*====================================================================*/
+ io->nwin = (*p & 0x0f) + 1;
+ bsz = (*p & 0x30) >> 4;
+ if (bsz == 3)
+ bsz++;
+ lsz = (*p & 0xc0) >> 6;
+ if (lsz == 3)
+ lsz++;
+ p++;
-static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
-{
- int i, j, bsz, lsz;
-
- if (p == q)
- return NULL;
- io->flags = *p;
-
- if (!(*p & 0x80)) {
- io->nwin = 1;
- io->win[0].base = 0;
- io->win[0].len = (1 << (io->flags & CISTPL_IO_LINES_MASK));
- return p+1;
- }
-
- if (++p == q)
- return NULL;
- io->nwin = (*p & 0x0f) + 1;
- bsz = (*p & 0x30) >> 4;
- if (bsz == 3)
- bsz++;
- lsz = (*p & 0xc0) >> 6;
- if (lsz == 3)
- lsz++;
- p++;
-
- for (i = 0; i < io->nwin; i++) {
- io->win[i].base = 0;
- io->win[i].len = 1;
- for (j = 0; j < bsz; j++, p++) {
- if (p == q)
- return NULL;
- io->win[i].base += *p << (j*8);
- }
- for (j = 0; j < lsz; j++, p++) {
- if (p == q)
- return NULL;
- io->win[i].len += *p << (j*8);
+ for (i = 0; i < io->nwin; i++) {
+ io->win[i].base = 0;
+ io->win[i].len = 1;
+ for (j = 0; j < bsz; j++, p++) {
+ if (p == q)
+ return NULL;
+ io->win[i].base += *p << (j*8);
+ }
+ for (j = 0; j < lsz; j++, p++) {
+ if (p == q)
+ return NULL;
+ io->win[i].len += *p << (j*8);
+ }
}
- }
- return p;
+ return p;
}
-/*====================================================================*/
static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem)
{
- int i, j, asz, lsz, has_ha;
- u_int len, ca, ha;
-
- if (p == q)
- return NULL;
-
- mem->nwin = (*p & 0x07) + 1;
- lsz = (*p & 0x18) >> 3;
- asz = (*p & 0x60) >> 5;
- has_ha = (*p & 0x80);
- if (++p == q)
- return NULL;
-
- for (i = 0; i < mem->nwin; i++) {
- len = ca = ha = 0;
- for (j = 0; j < lsz; j++, p++) {
- if (p == q)
- return NULL;
- len += *p << (j*8);
- }
- for (j = 0; j < asz; j++, p++) {
- if (p == q)
- return NULL;
- ca += *p << (j*8);
+ int i, j, asz, lsz, has_ha;
+ u_int len, ca, ha;
+
+ if (p == q)
+ return NULL;
+
+ mem->nwin = (*p & 0x07) + 1;
+ lsz = (*p & 0x18) >> 3;
+ asz = (*p & 0x60) >> 5;
+ has_ha = (*p & 0x80);
+ if (++p == q)
+ return NULL;
+
+ for (i = 0; i < mem->nwin; i++) {
+ len = ca = ha = 0;
+ for (j = 0; j < lsz; j++, p++) {
+ if (p == q)
+ return NULL;
+ len += *p << (j*8);
+ }
+ for (j = 0; j < asz; j++, p++) {
+ if (p == q)
+ return NULL;
+ ca += *p << (j*8);
+ }
+ if (has_ha)
+ for (j = 0; j < asz; j++, p++) {
+ if (p == q)
+ return NULL;
+ ha += *p << (j*8);
+ }
+ mem->win[i].len = len << 8;
+ mem->win[i].card_addr = ca << 8;
+ mem->win[i].host_addr = ha << 8;
}
- if (has_ha)
- for (j = 0; j < asz; j++, p++) {
- if (p == q)
- return NULL;
- ha += *p << (j*8);
- }
- mem->win[i].len = len << 8;
- mem->win[i].card_addr = ca << 8;
- mem->win[i].host_addr = ha << 8;
- }
- return p;
+ return p;
}
-/*====================================================================*/
static u_char *parse_irq(u_char *p, u_char *q, cistpl_irq_t *irq)
{
- if (p == q)
- return NULL;
- irq->IRQInfo1 = *p; p++;
- if (irq->IRQInfo1 & IRQ_INFO2_VALID) {
- if (p+2 > q)
+ if (p == q)
return NULL;
- irq->IRQInfo2 = (p[1]<<8) + p[0];
- p += 2;
- }
- return p;
+ irq->IRQInfo1 = *p; p++;
+ if (irq->IRQInfo1 & IRQ_INFO2_VALID) {
+ if (p+2 > q)
+ return NULL;
+ irq->IRQInfo2 = (p[1]<<8) + p[0];
+ p += 2;
+ }
+ return p;
}
-/*====================================================================*/
static int parse_cftable_entry(tuple_t *tuple,
cistpl_cftable_entry_t *entry)
{
- u_char *p, *q, features;
-
- p = tuple->TupleData;
- q = p + tuple->TupleDataLen;
- entry->index = *p & 0x3f;
- entry->flags = 0;
- if (*p & 0x40)
- entry->flags |= CISTPL_CFTABLE_DEFAULT;
- if (*p & 0x80) {
- if (++p == q)
- return -EINVAL;
- if (*p & 0x10)
- entry->flags |= CISTPL_CFTABLE_BVDS;
- if (*p & 0x20)
- entry->flags |= CISTPL_CFTABLE_WP;
+ u_char *p, *q, features;
+
+ p = tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+ entry->index = *p & 0x3f;
+ entry->flags = 0;
if (*p & 0x40)
- entry->flags |= CISTPL_CFTABLE_RDYBSY;
- if (*p & 0x80)
- entry->flags |= CISTPL_CFTABLE_MWAIT;
- entry->interface = *p & 0x0f;
- } else
- entry->interface = 0;
-
- /* Process optional features */
- if (++p == q)
- return -EINVAL;
- features = *p; p++;
-
- /* Power options */
- if ((features & 3) > 0) {
- p = parse_power(p, q, &entry->vcc);
- if (p == NULL)
- return -EINVAL;
- } else
- entry->vcc.present = 0;
- if ((features & 3) > 1) {
- p = parse_power(p, q, &entry->vpp1);
- if (p == NULL)
- return -EINVAL;
- } else
- entry->vpp1.present = 0;
- if ((features & 3) > 2) {
- p = parse_power(p, q, &entry->vpp2);
- if (p == NULL)
- return -EINVAL;
- } else
- entry->vpp2.present = 0;
+ entry->flags |= CISTPL_CFTABLE_DEFAULT;
+ if (*p & 0x80) {
+ if (++p == q)
+ return -EINVAL;
+ if (*p & 0x10)
+ entry->flags |= CISTPL_CFTABLE_BVDS;
+ if (*p & 0x20)
+ entry->flags |= CISTPL_CFTABLE_WP;
+ if (*p & 0x40)
+ entry->flags |= CISTPL_CFTABLE_RDYBSY;
+ if (*p & 0x80)
+ entry->flags |= CISTPL_CFTABLE_MWAIT;
+ entry->interface = *p & 0x0f;
+ } else
+ entry->interface = 0;
- /* Timing options */
- if (features & 0x04) {
- p = parse_timing(p, q, &entry->timing);
- if (p == NULL)
- return -EINVAL;
- } else {
- entry->timing.wait = 0;
- entry->timing.ready = 0;
- entry->timing.reserved = 0;
- }
-
- /* I/O window options */
- if (features & 0x08) {
- p = parse_io(p, q, &entry->io);
- if (p == NULL)
+ /* Process optional features */
+ if (++p == q)
return -EINVAL;
- } else
- entry->io.nwin = 0;
+ features = *p; p++;
- /* Interrupt options */
- if (features & 0x10) {
- p = parse_irq(p, q, &entry->irq);
- if (p == NULL)
- return -EINVAL;
- } else
- entry->irq.IRQInfo1 = 0;
-
- switch (features & 0x60) {
- case 0x00:
- entry->mem.nwin = 0;
- break;
- case 0x20:
- entry->mem.nwin = 1;
- entry->mem.win[0].len = get_unaligned_le16(p) << 8;
- entry->mem.win[0].card_addr = 0;
- entry->mem.win[0].host_addr = 0;
- p += 2;
- if (p > q)
- return -EINVAL;
- break;
- case 0x40:
- entry->mem.nwin = 1;
- entry->mem.win[0].len = get_unaligned_le16(p) << 8;
- entry->mem.win[0].card_addr = get_unaligned_le16(p + 2) << 8;
- entry->mem.win[0].host_addr = 0;
- p += 4;
- if (p > q)
- return -EINVAL;
- break;
- case 0x60:
- p = parse_mem(p, q, &entry->mem);
- if (p == NULL)
- return -EINVAL;
- break;
- }
+ /* Power options */
+ if ((features & 3) > 0) {
+ p = parse_power(p, q, &entry->vcc);
+ if (p == NULL)
+ return -EINVAL;
+ } else
+ entry->vcc.present = 0;
+ if ((features & 3) > 1) {
+ p = parse_power(p, q, &entry->vpp1);
+ if (p == NULL)
+ return -EINVAL;
+ } else
+ entry->vpp1.present = 0;
+ if ((features & 3) > 2) {
+ p = parse_power(p, q, &entry->vpp2);
+ if (p == NULL)
+ return -EINVAL;
+ } else
+ entry->vpp2.present = 0;
- /* Misc features */
- if (features & 0x80) {
- if (p == q)
- return -EINVAL;
- entry->flags |= (*p << 8);
- while (*p & 0x80)
- if (++p == q)
- return -EINVAL;
- p++;
- }
+ /* Timing options */
+ if (features & 0x04) {
+ p = parse_timing(p, q, &entry->timing);
+ if (p == NULL)
+ return -EINVAL;
+ } else {
+ entry->timing.wait = 0;
+ entry->timing.ready = 0;
+ entry->timing.reserved = 0;
+ }
- entry->subtuples = q-p;
+ /* I/O window options */
+ if (features & 0x08) {
+ p = parse_io(p, q, &entry->io);
+ if (p == NULL)
+ return -EINVAL;
+ } else
+ entry->io.nwin = 0;
+
+ /* Interrupt options */
+ if (features & 0x10) {
+ p = parse_irq(p, q, &entry->irq);
+ if (p == NULL)
+ return -EINVAL;
+ } else
+ entry->irq.IRQInfo1 = 0;
+
+ switch (features & 0x60) {
+ case 0x00:
+ entry->mem.nwin = 0;
+ break;
+ case 0x20:
+ entry->mem.nwin = 1;
+ entry->mem.win[0].len = get_unaligned_le16(p) << 8;
+ entry->mem.win[0].card_addr = 0;
+ entry->mem.win[0].host_addr = 0;
+ p += 2;
+ if (p > q)
+ return -EINVAL;
+ break;
+ case 0x40:
+ entry->mem.nwin = 1;
+ entry->mem.win[0].len = get_unaligned_le16(p) << 8;
+ entry->mem.win[0].card_addr = get_unaligned_le16(p + 2) << 8;
+ entry->mem.win[0].host_addr = 0;
+ p += 4;
+ if (p > q)
+ return -EINVAL;
+ break;
+ case 0x60:
+ p = parse_mem(p, q, &entry->mem);
+ if (p == NULL)
+ return -EINVAL;
+ break;
+ }
+
+ /* Misc features */
+ if (features & 0x80) {
+ if (p == q)
+ return -EINVAL;
+ entry->flags |= (*p << 8);
+ while (*p & 0x80)
+ if (++p == q)
+ return -EINVAL;
+ p++;
+ }
+
+ entry->subtuples = q-p;
- return 0;
+ return 0;
}
-/*====================================================================*/
static int parse_device_geo(tuple_t *tuple, cistpl_device_geo_t *geo)
{
- u_char *p, *q;
- int n;
+ u_char *p, *q;
+ int n;
- p = (u_char *)tuple->TupleData;
- q = p + tuple->TupleDataLen;
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
- for (n = 0; n < CISTPL_MAX_DEVICES; n++) {
- if (p > q-6)
- break;
- geo->geo[n].buswidth = p[0];
- geo->geo[n].erase_block = 1 << (p[1]-1);
- geo->geo[n].read_block = 1 << (p[2]-1);
- geo->geo[n].write_block = 1 << (p[3]-1);
- geo->geo[n].partition = 1 << (p[4]-1);
- geo->geo[n].interleave = 1 << (p[5]-1);
- p += 6;
- }
- geo->ngeo = n;
- return 0;
+ for (n = 0; n < CISTPL_MAX_DEVICES; n++) {
+ if (p > q-6)
+ break;
+ geo->geo[n].buswidth = p[0];
+ geo->geo[n].erase_block = 1 << (p[1]-1);
+ geo->geo[n].read_block = 1 << (p[2]-1);
+ geo->geo[n].write_block = 1 << (p[3]-1);
+ geo->geo[n].partition = 1 << (p[4]-1);
+ geo->geo[n].interleave = 1 << (p[5]-1);
+ p += 6;
+ }
+ geo->ngeo = n;
+ return 0;
}
-/*====================================================================*/
static int parse_vers_2(tuple_t *tuple, cistpl_vers_2_t *v2)
{
- u_char *p, *q;
-
- if (tuple->TupleDataLen < 10)
- return -EINVAL;
-
- p = tuple->TupleData;
- q = p + tuple->TupleDataLen;
-
- v2->vers = p[0];
- v2->comply = p[1];
- v2->dindex = get_unaligned_le16(p + 2);
- v2->vspec8 = p[6];
- v2->vspec9 = p[7];
- v2->nhdr = p[8];
- p += 9;
- return parse_strings(p, q, 2, v2->str, &v2->vendor, NULL);
+ u_char *p, *q;
+
+ if (tuple->TupleDataLen < 10)
+ return -EINVAL;
+
+ p = tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+
+ v2->vers = p[0];
+ v2->comply = p[1];
+ v2->dindex = get_unaligned_le16(p + 2);
+ v2->vspec8 = p[6];
+ v2->vspec9 = p[7];
+ v2->nhdr = p[8];
+ p += 9;
+ return parse_strings(p, q, 2, v2->str, &v2->vendor, NULL);
}
-/*====================================================================*/
static int parse_org(tuple_t *tuple, cistpl_org_t *org)
{
- u_char *p, *q;
- int i;
-
- p = tuple->TupleData;
- q = p + tuple->TupleDataLen;
- if (p == q)
- return -EINVAL;
- org->data_org = *p;
- if (++p == q)
- return -EINVAL;
- for (i = 0; i < 30; i++) {
- org->desc[i] = *p;
- if (*p == '\0')
- break;
+ u_char *p, *q;
+ int i;
+
+ p = tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+ if (p == q)
+ return -EINVAL;
+ org->data_org = *p;
if (++p == q)
return -EINVAL;
- }
- return 0;
+ for (i = 0; i < 30; i++) {
+ org->desc[i] = *p;
+ if (*p == '\0')
+ break;
+ if (++p == q)
+ return -EINVAL;
+ }
+ return 0;
}
-/*====================================================================*/
static int parse_format(tuple_t *tuple, cistpl_format_t *fmt)
{
- u_char *p;
+ u_char *p;
- if (tuple->TupleDataLen < 10)
- return -EINVAL;
+ if (tuple->TupleDataLen < 10)
+ return -EINVAL;
- p = tuple->TupleData;
+ p = tuple->TupleData;
- fmt->type = p[0];
- fmt->edc = p[1];
- fmt->offset = get_unaligned_le32(p + 2);
- fmt->length = get_unaligned_le32(p + 6);
+ fmt->type = p[0];
+ fmt->edc = p[1];
+ fmt->offset = get_unaligned_le32(p + 2);
+ fmt->length = get_unaligned_le32(p + 6);
- return 0;
+ return 0;
}
-/*====================================================================*/
int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse)
{
- int ret = 0;
-
- if (tuple->TupleDataLen > tuple->TupleDataMax)
- return -EINVAL;
- switch (tuple->TupleCode) {
- case CISTPL_DEVICE:
- case CISTPL_DEVICE_A:
- ret = parse_device(tuple, &parse->device);
- break;
- case CISTPL_CHECKSUM:
- ret = parse_checksum(tuple, &parse->checksum);
- break;
- case CISTPL_LONGLINK_A:
- case CISTPL_LONGLINK_C:
- ret = parse_longlink(tuple, &parse->longlink);
- break;
- case CISTPL_LONGLINK_MFC:
- ret = parse_longlink_mfc(tuple, &parse->longlink_mfc);
- break;
- case CISTPL_VERS_1:
- ret = parse_vers_1(tuple, &parse->version_1);
- break;
- case CISTPL_ALTSTR:
- ret = parse_altstr(tuple, &parse->altstr);
- break;
- case CISTPL_JEDEC_A:
- case CISTPL_JEDEC_C:
- ret = parse_jedec(tuple, &parse->jedec);
- break;
- case CISTPL_MANFID:
- ret = parse_manfid(tuple, &parse->manfid);
- break;
- case CISTPL_FUNCID:
- ret = parse_funcid(tuple, &parse->funcid);
- break;
- case CISTPL_FUNCE:
- ret = parse_funce(tuple, &parse->funce);
- break;
- case CISTPL_CONFIG:
- ret = parse_config(tuple, &parse->config);
- break;
- case CISTPL_CFTABLE_ENTRY:
- ret = parse_cftable_entry(tuple, &parse->cftable_entry);
- break;
- case CISTPL_DEVICE_GEO:
- case CISTPL_DEVICE_GEO_A:
- ret = parse_device_geo(tuple, &parse->device_geo);
- break;
- case CISTPL_VERS_2:
- ret = parse_vers_2(tuple, &parse->vers_2);
- break;
- case CISTPL_ORG:
- ret = parse_org(tuple, &parse->org);
- break;
- case CISTPL_FORMAT:
- case CISTPL_FORMAT_A:
- ret = parse_format(tuple, &parse->format);
- break;
- case CISTPL_NO_LINK:
- case CISTPL_LINKTARGET:
- ret = 0;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret)
- pr_debug("parse_tuple failed %d\n", ret);
- return ret;
+ int ret = 0;
+
+ if (tuple->TupleDataLen > tuple->TupleDataMax)
+ return -EINVAL;
+ switch (tuple->TupleCode) {
+ case CISTPL_DEVICE:
+ case CISTPL_DEVICE_A:
+ ret = parse_device(tuple, &parse->device);
+ break;
+ case CISTPL_CHECKSUM:
+ ret = parse_checksum(tuple, &parse->checksum);
+ break;
+ case CISTPL_LONGLINK_A:
+ case CISTPL_LONGLINK_C:
+ ret = parse_longlink(tuple, &parse->longlink);
+ break;
+ case CISTPL_LONGLINK_MFC:
+ ret = parse_longlink_mfc(tuple, &parse->longlink_mfc);
+ break;
+ case CISTPL_VERS_1:
+ ret = parse_vers_1(tuple, &parse->version_1);
+ break;
+ case CISTPL_ALTSTR:
+ ret = parse_altstr(tuple, &parse->altstr);
+ break;
+ case CISTPL_JEDEC_A:
+ case CISTPL_JEDEC_C:
+ ret = parse_jedec(tuple, &parse->jedec);
+ break;
+ case CISTPL_MANFID:
+ ret = parse_manfid(tuple, &parse->manfid);
+ break;
+ case CISTPL_FUNCID:
+ ret = parse_funcid(tuple, &parse->funcid);
+ break;
+ case CISTPL_FUNCE:
+ ret = parse_funce(tuple, &parse->funce);
+ break;
+ case CISTPL_CONFIG:
+ ret = parse_config(tuple, &parse->config);
+ break;
+ case CISTPL_CFTABLE_ENTRY:
+ ret = parse_cftable_entry(tuple, &parse->cftable_entry);
+ break;
+ case CISTPL_DEVICE_GEO:
+ case CISTPL_DEVICE_GEO_A:
+ ret = parse_device_geo(tuple, &parse->device_geo);
+ break;
+ case CISTPL_VERS_2:
+ ret = parse_vers_2(tuple, &parse->vers_2);
+ break;
+ case CISTPL_ORG:
+ ret = parse_org(tuple, &parse->org);
+ break;
+ case CISTPL_FORMAT:
+ case CISTPL_FORMAT_A:
+ ret = parse_format(tuple, &parse->format);
+ break;
+ case CISTPL_NO_LINK:
+ case CISTPL_LINKTARGET:
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret)
+ pr_debug("parse_tuple failed %d\n", ret);
+ return ret;
}
EXPORT_SYMBOL(pcmcia_parse_tuple);
-/*======================================================================
- This is used internally by Card Services to look up CIS stuff.
-
-======================================================================*/
-
-int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function, cisdata_t code, void *parse)
+/**
+ * pccard_read_tuple() - internal CIS tuple access
+ * @s: the struct pcmcia_socket where the card is inserted
+ * @function: the device function to look at
+ * @code: which CIS code shall we look for?
+ * @parse: buffer the tuple shall be parsed into (or NULL if no parsing is wanted)
+ *
+ * pccard_read_tuple() reads out one tuple and attempts to parse it.
+ */
+int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function,
+ cisdata_t code, void *parse)
{
- tuple_t tuple;
- cisdata_t *buf;
- int ret;
-
- buf = kmalloc(256, GFP_KERNEL);
- if (buf == NULL) {
- dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
- return -ENOMEM;
- }
- tuple.DesiredTuple = code;
- tuple.Attributes = 0;
- if (function == BIND_FN_ALL)
- tuple.Attributes = TUPLE_RETURN_COMMON;
- ret = pccard_get_first_tuple(s, function, &tuple);
- if (ret != 0)
- goto done;
- tuple.TupleData = buf;
- tuple.TupleOffset = 0;
- tuple.TupleDataMax = 255;
- ret = pccard_get_tuple_data(s, &tuple);
- if (ret != 0)
- goto done;
- ret = pcmcia_parse_tuple(&tuple, parse);
+ tuple_t tuple;
+ cisdata_t *buf;
+ int ret;
+
+ buf = kmalloc(256, GFP_KERNEL);
+ if (buf == NULL) {
+ dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
+ return -ENOMEM;
+ }
+ tuple.DesiredTuple = code;
+ tuple.Attributes = 0;
+ if (function == BIND_FN_ALL)
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ ret = pccard_get_first_tuple(s, function, &tuple);
+ if (ret != 0)
+ goto done;
+ tuple.TupleData = buf;
+ tuple.TupleOffset = 0;
+ tuple.TupleDataMax = 255;
+ ret = pccard_get_tuple_data(s, &tuple);
+ if (ret != 0)
+ goto done;
+ ret = pcmcia_parse_tuple(&tuple, parse);
done:
- kfree(buf);
- return ret;
+ kfree(buf);
+ return ret;
}
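As a usage sketch (a hypothetical helper, not part of this patch), the reindented tuple services above compose exactly the way pccard_read_tuple() does: fetch the first matching tuple, read its data, then parse it. This assumes a valid struct pcmcia_socket and the declarations from cistpl.h:

static int example_read_manfid(struct pcmcia_socket *s, cistpl_manfid_t *m)
{
	tuple_t tuple;
	cisdata_t buf[255];
	cisparse_t parse;
	int ret;

	/* look for CISTPL_MANFID in the common CIS, any function */
	tuple.DesiredTuple = CISTPL_MANFID;
	tuple.Attributes = TUPLE_RETURN_COMMON;
	ret = pccard_get_first_tuple(s, BIND_FN_ALL, &tuple);
	if (ret)
		return ret;

	tuple.TupleData = buf;
	tuple.TupleOffset = 0;
	tuple.TupleDataMax = sizeof(buf);
	ret = pccard_get_tuple_data(s, &tuple);
	if (ret)
		return ret;

	ret = pcmcia_parse_tuple(&tuple, &parse);
	if (!ret)
		*m = parse.manfid;	/* union member filled by parse_manfid() */
	return ret;
}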
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 3889cf0..9254ab0 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -42,7 +42,6 @@ struct db1x_pcmcia_sock {
int nr; /* socket number */
void *virt_io;
- /* the "pseudo" addresses of the PCMCIA space. */
phys_addr_t phys_io;
phys_addr_t phys_attr;
phys_addr_t phys_mem;
@@ -437,7 +436,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
* This includes IRQs for Carddetection/ejection, the card
* itself and optional status change detection.
* Also, the memory areas covered by a socket. For these
- * we require the 32bit "pseudo" addresses (see the au1000.h
+ * we require the real 36bit addresses (see the au1000.h
* header for more information).
*/
@@ -459,11 +458,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
ret = -ENODEV;
- /*
- * pseudo-attr: The 32bit address of the PCMCIA attribute space
- * for this socket (usually the 36bit address shifted 4 to the
- * right).
- */
+ /* 36bit PCMCIA Attribute area address */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr");
if (!r) {
printk(KERN_ERR "pcmcia%d has no 'pseudo-attr' resource!\n",
@@ -472,10 +467,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
}
sock->phys_attr = r->start;
- /*
- * pseudo-mem: The 32bit address of the PCMCIA memory space for
- * this socket (usually the 36bit address shifted 4 to the right)
- */
+ /* 36bit PCMCIA Memory area address */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem");
if (!r) {
printk(KERN_ERR "pcmcia%d has no 'pseudo-mem' resource!\n",
@@ -484,10 +476,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
}
sock->phys_mem = r->start;
- /*
- * pseudo-io: The 32bit address of the PCMCIA IO space for this
- * socket (usually the 36bit address shifted 4 to the right).
- */
+ /* 36bit PCMCIA IO area address */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io");
if (!r) {
printk(KERN_ERR "pcmcia%d has no 'pseudo-io' resource!\n",
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index e1741cd..7c20491 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -48,23 +48,13 @@ MODULE_AUTHOR("Jun Komuro <komurojun-mbn@nifty.com>");
* Specifies the interrupt delivery mode. The default (1) is to use PCI
* interrupts; a value of 0 selects ISA interrupts. This must be set for
* correct operation of PCI card readers.
- *
- * irq_list=i,j,...
- * This list limits the set of interrupts that can be used by PCMCIA
- * cards.
- * The default list is 3,4,5,7,9,10,11.
- * (irq_list parameter is not used, if irq_mode = 1)
*/
static int irq_mode = 1; /* 0 = ISA interrupt, 1 = PCI interrupt */
-static int irq_list[16];
-static unsigned int irq_list_count = 0;
module_param(irq_mode, int, 0444);
-module_param_array(irq_list, int, &irq_list_count, 0444);
MODULE_PARM_DESC(irq_mode,
"interrupt delivery mode. 0 = ISA, 1 = PCI. default is 1");
-MODULE_PARM_DESC(irq_list, "interrupts that can be used by PCMCIA cards");
static DEFINE_SPINLOCK(port_lock);
@@ -605,13 +595,7 @@ static u_int __devinit pd6729_isa_scan(void)
return 0;
}
- if (irq_list_count == 0)
- mask0 = 0xffff;
- else
- for (i = mask0 = 0; i < irq_list_count; i++)
- mask0 |= (1<<irq_list[i]);
-
- mask0 &= PD67_MASK;
+ mask0 = PD67_MASK;
/* just find interrupts that aren't in use */
for (i = 0; i < 16; i++)
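A sketch of what the simplified scan now amounts to (irq_ok() stands in for the driver's actual probe logic and is assumed here): every IRQ allowed by PD67_MASK is probed, with no user-supplied list narrowing the set first.

static u_int example_scan(void)
{
	u_int i, mask = 0;

	/* probe every IRQ the PD67xx can route */
	for (i = 0; i < 16; i++)
		if ((PD67_MASK & (1 << i)) && irq_ok(i))
			mask |= 1 << i;
	return mask;
}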
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index e6f7d41..452c83b 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -79,9 +79,8 @@ static resource_size_t pcmcia_align(void *align_data,
#ifdef CONFIG_X86
if (res->flags & IORESOURCE_IO) {
- if (start & 0x300) {
+ if (start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
- }
}
#endif
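The condition kept above rounds any I/O range overlapping the legacy 0x100-0x3ff window up to the next 1 KiB boundary; a stand-alone sketch of the arithmetic:

static unsigned long example_align_io(unsigned long start)
{
	/* e.g. 0x150 -> 0x400, 0x731 -> 0x800; 0x400 stays 0x400 */
	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ffUL;
	return start;
}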
diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c
index 61560cd..f9009d3 100644
--- a/drivers/pcmcia/xxs1500_ss.c
+++ b/drivers/pcmcia/xxs1500_ss.c
@@ -218,11 +218,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev)
ret = -ENODEV;
- /*
- * pseudo-attr: The 32bit address of the PCMCIA attribute space
- * for this socket (usually the 36bit address shifted 4 to the
- * right).
- */
+ /* 36bit PCMCIA Attribute area address */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr");
if (!r) {
dev_err(&pdev->dev, "missing 'pcmcia-attr' resource!\n");
@@ -230,10 +226,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev)
}
sock->phys_attr = r->start;
- /*
- * pseudo-mem: The 32bit address of the PCMCIA memory space for
- * this socket (usually the 36bit address shifted 4 to the right)
- */
+ /* 36bit PCMCIA Memory area address */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem");
if (!r) {
dev_err(&pdev->dev, "missing 'pcmcia-mem' resource!\n");
@@ -241,10 +234,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev)
}
sock->phys_mem = r->start;
- /*
- * pseudo-io: The 32bit address of the PCMCIA IO space for this
- * socket (usually the 36bit address shifted 4 to the right).
- */
+ /* 36bit PCMCIA IO area address */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io");
if (!r) {
dev_err(&pdev->dev, "missing 'pcmcia-io' resource!\n");
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index b85375f..967c766 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1408,10 +1408,10 @@ static struct pci_device_id yenta_table[] = {
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7510, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7610, TI12XX),
- CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_710, TI12XX),
- CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_712, TI12XX),
- CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_720, TI12XX),
- CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_722, TI12XX),
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_710, ENE),
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_712, ENE),
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_720, ENE),
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_722, ENE),
CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, ENE),
CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, ENE),
CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, ENE),
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 6848f21..cd2ee6f 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -59,6 +59,8 @@ config ASUS_LAPTOP
select NEW_LEDS
select BACKLIGHT_CLASS_DEVICE
depends on INPUT
+ depends on RFKILL || RFKILL = n
+ select INPUT_SPARSEKMAP
---help---
This is the new Linux driver for Asus laptops. It may also support some
MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
@@ -177,6 +179,7 @@ config COMPAL_LAPTOP
tristate "Compal Laptop Extras"
depends on ACPI
depends on BACKLIGHT_CLASS_DEVICE
+ depends on RFKILL
---help---
This is a driver for laptops built by Compal:
@@ -320,9 +323,15 @@ config THINKPAD_ACPI_VIDEO
server running, phase of the moon, and the current mood of
Schroedinger's cat. If you can use X.org's RandR to control
your ThinkPad's video output ports instead of this feature,
- don't think twice: do it and say N here to save some memory.
+ don't think twice: do it and say N here to save memory and avoid
+ bad interactions with X.org.
+
+ NOTE: access to this feature is limited to processes with the
+ CAP_SYS_ADMIN capability, to avoid local DoS issues in platforms
+ where it interacts badly with X.org.
- If you are not sure, say Y here.
+ If you are not sure, say Y here but do try to check if you could
+ be using X.org RandR instead.
config THINKPAD_ACPI_HOTKEY_POLL
bool "Support NVRAM polling for hot keys"
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 61a1c75..791fcf3 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -45,58 +45,23 @@
#include <linux/fb.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/rfkill.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
-#include <asm/uaccess.h>
-#include <linux/input.h>
-
-#define ASUS_LAPTOP_VERSION "0.42"
-
-#define ASUS_HOTK_NAME "Asus Laptop Support"
-#define ASUS_HOTK_CLASS "hotkey"
-#define ASUS_HOTK_DEVICE_NAME "Hotkey"
-#define ASUS_HOTK_FILE KBUILD_MODNAME
-#define ASUS_HOTK_PREFIX "\\_SB.ATKD."
+#define ASUS_LAPTOP_VERSION "0.42"
-/*
- * Some events we use, same for all Asus
- */
-#define ATKD_BR_UP 0x10
-#define ATKD_BR_DOWN 0x20
-#define ATKD_LCD_ON 0x33
-#define ATKD_LCD_OFF 0x34
-
-/*
- * Known bits returned by \_SB.ATKD.HWRS
- */
-#define WL_HWRS 0x80
-#define BT_HWRS 0x100
-
-/*
- * Flags for hotk status
- * WL_ON and BT_ON are also used for wireless_status()
- */
-#define WL_ON 0x01 /* internal Wifi */
-#define BT_ON 0x02 /* internal Bluetooth */
-#define MLED_ON 0x04 /* mail LED */
-#define TLED_ON 0x08 /* touchpad LED */
-#define RLED_ON 0x10 /* Record LED */
-#define PLED_ON 0x20 /* Phone LED */
-#define GLED_ON 0x40 /* Gaming LED */
-#define LCD_ON 0x80 /* LCD backlight */
-#define GPS_ON 0x100 /* GPS */
-#define KEY_ON 0x200 /* Keyboard backlight */
-
-#define ASUS_LOG ASUS_HOTK_FILE ": "
-#define ASUS_ERR KERN_ERR ASUS_LOG
-#define ASUS_WARNING KERN_WARNING ASUS_LOG
-#define ASUS_NOTICE KERN_NOTICE ASUS_LOG
-#define ASUS_INFO KERN_INFO ASUS_LOG
-#define ASUS_DEBUG KERN_DEBUG ASUS_LOG
+#define ASUS_LAPTOP_NAME "Asus Laptop Support"
+#define ASUS_LAPTOP_CLASS "hotkey"
+#define ASUS_LAPTOP_DEVICE_NAME "Hotkey"
+#define ASUS_LAPTOP_FILE KBUILD_MODNAME
+#define ASUS_LAPTOP_PREFIX "\\_SB.ATKD."
MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary");
-MODULE_DESCRIPTION(ASUS_HOTK_NAME);
+MODULE_DESCRIPTION(ASUS_LAPTOP_NAME);
MODULE_LICENSE("GPL");
/*
@@ -113,225 +78,209 @@ static uint wapf = 1;
module_param(wapf, uint, 0644);
MODULE_PARM_DESC(wapf, "WAPF value");
-#define ASUS_HANDLE(object, paths...) \
- static acpi_handle object##_handle = NULL; \
- static char *object##_paths[] = { paths }
+static int wlan_status = 1;
+static int bluetooth_status = 1;
+
+module_param(wlan_status, int, 0644);
+MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot "
+ "(0 = disabled, 1 = enabled, -1 = don't do anything). "
+ "default is 1");
+
+module_param(bluetooth_status, int, 0644);
+MODULE_PARM_DESC(bluetooth_status, "Set the bluetooth status on boot "
+ "(0 = disabled, 1 = enabled, -1 = don't do anything). "
+ "default is 1");
+
+/*
+ * Some events we use, same for all Asus
+ */
+#define ATKD_BR_UP 0x10 /* (event & ~ATKD_BR_UP) = brightness level */
+#define ATKD_BR_DOWN 0x20 /* (event & ~ATKD_BR_DOWN) = brightness level */
+#define ATKD_BR_MIN ATKD_BR_UP
+#define ATKD_BR_MAX (ATKD_BR_DOWN | 0xF) /* 0x2f */
+#define ATKD_LCD_ON 0x33
+#define ATKD_LCD_OFF 0x34
+
+/*
+ * Known bits returned by \_SB.ATKD.HWRS
+ */
+#define WL_HWRS 0x80
+#define BT_HWRS 0x100
+
+/*
+ * Flags for hotk status
+ * WL_ON and BT_ON are also used for wireless_status()
+ */
+#define WL_RSTS 0x01 /* internal Wifi */
+#define BT_RSTS 0x02 /* internal Bluetooth */
/* LED */
-ASUS_HANDLE(mled_set, ASUS_HOTK_PREFIX "MLED");
-ASUS_HANDLE(tled_set, ASUS_HOTK_PREFIX "TLED");
-ASUS_HANDLE(rled_set, ASUS_HOTK_PREFIX "RLED"); /* W1JC */
-ASUS_HANDLE(pled_set, ASUS_HOTK_PREFIX "PLED"); /* A7J */
-ASUS_HANDLE(gled_set, ASUS_HOTK_PREFIX "GLED"); /* G1, G2 (probably) */
+#define METHOD_MLED "MLED"
+#define METHOD_TLED "TLED"
+#define METHOD_RLED "RLED" /* W1JC */
+#define METHOD_PLED "PLED" /* A7J */
+#define METHOD_GLED "GLED" /* G1, G2 (probably) */
/* LEDD */
-ASUS_HANDLE(ledd_set, ASUS_HOTK_PREFIX "SLCM");
+#define METHOD_LEDD "SLCM"
/*
* Bluetooth and WLAN
* WLED and BLED are not handled like other XLED, because in some dsdt
* they also control the WLAN/Bluetooth device.
*/
-ASUS_HANDLE(wl_switch, ASUS_HOTK_PREFIX "WLED");
-ASUS_HANDLE(bt_switch, ASUS_HOTK_PREFIX "BLED");
-ASUS_HANDLE(wireless_status, ASUS_HOTK_PREFIX "RSTS"); /* All new models */
+#define METHOD_WLAN "WLED"
+#define METHOD_BLUETOOTH "BLED"
+#define METHOD_WL_STATUS "RSTS"
/* Brightness */
-ASUS_HANDLE(brightness_set, ASUS_HOTK_PREFIX "SPLV");
-ASUS_HANDLE(brightness_get, ASUS_HOTK_PREFIX "GPLV");
+#define METHOD_BRIGHTNESS_SET "SPLV"
+#define METHOD_BRIGHTNESS_GET "GPLV"
/* Backlight */
-ASUS_HANDLE(lcd_switch, "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
- "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */
- "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
- "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */
- "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */
- "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */
- "\\_SB.PCI0.PX40.Q10", /* S1x */
- "\\Q10"); /* A2x, L2D, L3D, M2E */
+static acpi_handle lcd_switch_handle;
+static const char *lcd_switch_paths[] = {
+ "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
+ "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */
+ "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
+ "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */
+ "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */
+ "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */
+ "\\_SB.PCI0.PX40.Q10", /* S1x */
+ "\\Q10"}; /* A2x, L2D, L3D, M2E */
/* Display */
-ASUS_HANDLE(display_set, ASUS_HOTK_PREFIX "SDSP");
-ASUS_HANDLE(display_get,
- /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */
- "\\_SB.PCI0.P0P1.VGA.GETD",
- /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */
- "\\_SB.PCI0.P0P2.VGA.GETD",
- /* A6V A6Q */
- "\\_SB.PCI0.P0P3.VGA.GETD",
- /* A6T, A6M */
- "\\_SB.PCI0.P0PA.VGA.GETD",
- /* L3C */
- "\\_SB.PCI0.PCI1.VGAC.NMAP",
- /* Z96F */
- "\\_SB.PCI0.VGA.GETD",
- /* A2D */
- "\\ACTD",
- /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */
- "\\ADVG",
- /* P30 */
- "\\DNXT",
- /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */
- "\\INFB",
- /* A3F A6F A3N A3L M6N W3N W6A */
- "\\SSTE");
-
-ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */
-ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */
+#define METHOD_SWITCH_DISPLAY "SDSP"
+
+static acpi_handle display_get_handle;
+static const char *display_get_paths[] = {
+ /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */
+ "\\_SB.PCI0.P0P1.VGA.GETD",
+ /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */
+ "\\_SB.PCI0.P0P2.VGA.GETD",
+ /* A6V A6Q */
+ "\\_SB.PCI0.P0P3.VGA.GETD",
+ /* A6T, A6M */
+ "\\_SB.PCI0.P0PA.VGA.GETD",
+ /* L3C */
+ "\\_SB.PCI0.PCI1.VGAC.NMAP",
+ /* Z96F */
+ "\\_SB.PCI0.VGA.GETD",
+ /* A2D */
+ "\\ACTD",
+ /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */
+ "\\ADVG",
+ /* P30 */
+ "\\DNXT",
+ /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */
+ "\\INFB",
+ /* A3F A6F A3N A3L M6N W3N W6A */
+ "\\SSTE"};
+
+#define METHOD_ALS_CONTROL "ALSC" /* Z71A Z71V */
+#define METHOD_ALS_LEVEL "ALSL" /* Z71A Z71V */
/* GPS */
/* R2H use different handle for GPS on/off */
-ASUS_HANDLE(gps_on, ASUS_HOTK_PREFIX "SDON"); /* R2H */
-ASUS_HANDLE(gps_off, ASUS_HOTK_PREFIX "SDOF"); /* R2H */
-ASUS_HANDLE(gps_status, ASUS_HOTK_PREFIX "GPST");
+#define METHOD_GPS_ON "SDON"
+#define METHOD_GPS_OFF "SDOF"
+#define METHOD_GPS_STATUS "GPST"
/* Keyboard light */
-ASUS_HANDLE(kled_set, ASUS_HOTK_PREFIX "SLKB");
-ASUS_HANDLE(kled_get, ASUS_HOTK_PREFIX "GLKB");
+#define METHOD_KBD_LIGHT_SET "SLKB"
+#define METHOD_KBD_LIGHT_GET "GLKB"
/*
- * This is the main structure, we can use it to store anything interesting
- * about the hotk device
+ * Define a specific led structure to keep the main structure clean
*/
-struct asus_hotk {
- char *name; /* laptop name */
- struct acpi_device *device; /* the device we are in */
- acpi_handle handle; /* the handle of the hotk device */
- char status; /* status of the hotk, for LEDs, ... */
- u32 ledd_status; /* status of the LED display */
- u8 light_level; /* light sensor level */
- u8 light_switch; /* light sensor switch value */
- u16 event_count[128]; /* count for each event TODO make this better */
- struct input_dev *inputdev;
- u16 *keycode_map;
+struct asus_led {
+ int wk;
+ struct work_struct work;
+ struct led_classdev led;
+ struct asus_laptop *asus;
+ const char *method;
};
/*
- * This header is made available to allow proper configuration given model,
- * revision number , ... this info cannot go in struct asus_hotk because it is
- * available before the hotk
- */
-static struct acpi_table_header *asus_info;
-
-/* The actual device the driver binds to */
-static struct asus_hotk *hotk;
-
-/*
- * The hotkey driver declaration
+ * This is the main structure; we can use it to store anything interesting
+ * about the hotk device
*/
-static const struct acpi_device_id asus_device_ids[] = {
- {"ATK0100", 0},
- {"ATK0101", 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, asus_device_ids);
+struct asus_laptop {
+ char *name; /* laptop name */
-static int asus_hotk_add(struct acpi_device *device);
-static int asus_hotk_remove(struct acpi_device *device, int type);
-static void asus_hotk_notify(struct acpi_device *device, u32 event);
+ struct acpi_table_header *dsdt_info;
+ struct platform_device *platform_device;
+ struct acpi_device *device; /* the device we are in */
+ struct backlight_device *backlight_device;
-static struct acpi_driver asus_hotk_driver = {
- .name = ASUS_HOTK_NAME,
- .class = ASUS_HOTK_CLASS,
- .owner = THIS_MODULE,
- .ids = asus_device_ids,
- .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
- .ops = {
- .add = asus_hotk_add,
- .remove = asus_hotk_remove,
- .notify = asus_hotk_notify,
- },
-};
+ struct input_dev *inputdev;
+ struct key_entry *keymap;
-/* The backlight device /sys/class/backlight */
-static struct backlight_device *asus_backlight_device;
+ struct asus_led mled;
+ struct asus_led tled;
+ struct asus_led rled;
+ struct asus_led pled;
+ struct asus_led gled;
+ struct asus_led kled;
+ struct workqueue_struct *led_workqueue;
-/*
- * The backlight class declaration
- */
-static int read_brightness(struct backlight_device *bd);
-static int update_bl_status(struct backlight_device *bd);
-static struct backlight_ops asusbl_ops = {
- .get_brightness = read_brightness,
- .update_status = update_bl_status,
-};
+ int wireless_status;
+ bool have_rsts;
+ int lcd_state;
-/*
- * These functions actually update the LED's, and are called from a
- * workqueue. By doing this as separate work rather than when the LED
- * subsystem asks, we avoid messing with the Asus ACPI stuff during a
- * potentially bad time, such as a timer interrupt.
- */
-static struct workqueue_struct *led_workqueue;
-
-#define ASUS_LED(object, ledname, max) \
- static void object##_led_set(struct led_classdev *led_cdev, \
- enum led_brightness value); \
- static enum led_brightness object##_led_get( \
- struct led_classdev *led_cdev); \
- static void object##_led_update(struct work_struct *ignored); \
- static int object##_led_wk; \
- static DECLARE_WORK(object##_led_work, object##_led_update); \
- static struct led_classdev object##_led = { \
- .name = "asus::" ledname, \
- .brightness_set = object##_led_set, \
- .brightness_get = object##_led_get, \
- .max_brightness = max \
- }
+ struct rfkill *gps_rfkill;
-ASUS_LED(mled, "mail", 1);
-ASUS_LED(tled, "touchpad", 1);
-ASUS_LED(rled, "record", 1);
-ASUS_LED(pled, "phone", 1);
-ASUS_LED(gled, "gaming", 1);
-ASUS_LED(kled, "kbd_backlight", 3);
-
-struct key_entry {
- char type;
- u8 code;
- u16 keycode;
+ acpi_handle handle; /* the handle of the hotk device */
+ u32 ledd_status; /* status of the LED display */
+ u8 light_level; /* light sensor level */
+ u8 light_switch; /* light sensor switch value */
+ u16 event_count[128]; /* count for each event TODO make this better */
+ u16 *keycode_map;
};
-enum { KE_KEY, KE_END };
-
-static struct key_entry asus_keymap[] = {
- {KE_KEY, 0x02, KEY_SCREENLOCK},
- {KE_KEY, 0x05, KEY_WLAN},
- {KE_KEY, 0x08, KEY_F13},
- {KE_KEY, 0x17, KEY_ZOOM},
- {KE_KEY, 0x1f, KEY_BATTERY},
- {KE_KEY, 0x30, KEY_VOLUMEUP},
- {KE_KEY, 0x31, KEY_VOLUMEDOWN},
- {KE_KEY, 0x32, KEY_MUTE},
- {KE_KEY, 0x33, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x34, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x40, KEY_PREVIOUSSONG},
- {KE_KEY, 0x41, KEY_NEXTSONG},
- {KE_KEY, 0x43, KEY_STOPCD},
- {KE_KEY, 0x45, KEY_PLAYPAUSE},
- {KE_KEY, 0x4c, KEY_MEDIA},
- {KE_KEY, 0x50, KEY_EMAIL},
- {KE_KEY, 0x51, KEY_WWW},
- {KE_KEY, 0x55, KEY_CALC},
- {KE_KEY, 0x5C, KEY_SCREENLOCK}, /* Screenlock */
- {KE_KEY, 0x5D, KEY_WLAN},
- {KE_KEY, 0x5E, KEY_WLAN},
- {KE_KEY, 0x5F, KEY_WLAN},
- {KE_KEY, 0x60, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x61, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x62, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x63, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x6B, KEY_F13}, /* Lock Touchpad */
- {KE_KEY, 0x82, KEY_CAMERA},
- {KE_KEY, 0x88, KEY_WLAN },
- {KE_KEY, 0x8A, KEY_PROG1},
- {KE_KEY, 0x95, KEY_MEDIA},
- {KE_KEY, 0x99, KEY_PHONE},
- {KE_KEY, 0xc4, KEY_KBDILLUMUP},
- {KE_KEY, 0xc5, KEY_KBDILLUMDOWN},
+static const struct key_entry asus_keymap[] = {
+ /* Lenovo SL Specific keycodes */
+ {KE_KEY, 0x02, { KEY_SCREENLOCK } },
+ {KE_KEY, 0x05, { KEY_WLAN } },
+ {KE_KEY, 0x08, { KEY_F13 } },
+ {KE_KEY, 0x17, { KEY_ZOOM } },
+ {KE_KEY, 0x1f, { KEY_BATTERY } },
+ /* End of Lenovo SL Specific keycodes */
+ {KE_KEY, 0x30, { KEY_VOLUMEUP } },
+ {KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
+ {KE_KEY, 0x32, { KEY_MUTE } },
+ {KE_KEY, 0x33, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x34, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
+ {KE_KEY, 0x41, { KEY_NEXTSONG } },
+ {KE_KEY, 0x43, { KEY_STOPCD } },
+ {KE_KEY, 0x45, { KEY_PLAYPAUSE } },
+ {KE_KEY, 0x4c, { KEY_MEDIA } },
+ {KE_KEY, 0x50, { KEY_EMAIL } },
+ {KE_KEY, 0x51, { KEY_WWW } },
+ {KE_KEY, 0x55, { KEY_CALC } },
+ {KE_KEY, 0x5C, { KEY_SCREENLOCK } }, /* Screenlock */
+ {KE_KEY, 0x5D, { KEY_WLAN } },
+ {KE_KEY, 0x5E, { KEY_WLAN } },
+ {KE_KEY, 0x5F, { KEY_WLAN } },
+ {KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x6B, { KEY_F13 } }, /* Lock Touchpad */
+ {KE_KEY, 0x7E, { KEY_BLUETOOTH } },
+ {KE_KEY, 0x7D, { KEY_BLUETOOTH } },
+ {KE_KEY, 0x82, { KEY_CAMERA } },
+ {KE_KEY, 0x88, { KEY_WLAN } },
+ {KE_KEY, 0x8A, { KEY_PROG1 } },
+ {KE_KEY, 0x95, { KEY_MEDIA } },
+ {KE_KEY, 0x99, { KEY_PHONE } },
+ {KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
+ {KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
{KE_END, 0},
};
+
/*
 * This function evaluates an ACPI method, given an int as a parameter. The
 * method is searched within the scope of the handle, which can be NULL. The output
@@ -339,8 +288,8 @@ static struct key_entry asus_keymap[] = {
*
 * returns 0 if the write is successful, -1 otherwise.
*/
-static int write_acpi_int(acpi_handle handle, const char *method, int val,
- struct acpi_buffer *output)
+static int write_acpi_int_ret(acpi_handle handle, const char *method, int val,
+ struct acpi_buffer *output)
{
struct acpi_object_list params; /* list of input parameters (an int) */
union acpi_object in_obj; /* the only param we use */
@@ -361,102 +310,82 @@ static int write_acpi_int(acpi_handle handle, const char *method, int val,
return -1;
}
-static int read_wireless_status(int mask)
+static int write_acpi_int(acpi_handle handle, const char *method, int val)
{
- unsigned long long status;
- acpi_status rv = AE_OK;
+ return write_acpi_int_ret(handle, method, val, NULL);
+}
+
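+/*
+ * Check that the ACPI method @method exists under @handle; if @ret is
+ * non-NULL, the method's handle is also stored there.
+ */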
+static int acpi_check_handle(acpi_handle handle, const char *method,
+ acpi_handle *ret)
+{
+ acpi_status status;
- if (!wireless_status_handle)
- return (hotk->status & mask) ? 1 : 0;
+ if (method == NULL)
+ return -ENODEV;
- rv = acpi_evaluate_integer(wireless_status_handle, NULL, NULL, &status);
- if (ACPI_FAILURE(rv))
- pr_warning("Error reading Wireless status\n");
- else
- return (status & mask) ? 1 : 0;
+ if (ret)
+ status = acpi_get_handle(handle, (char *)method,
+ ret);
+ else {
+ acpi_handle dummy;
- return (hotk->status & mask) ? 1 : 0;
+ status = acpi_get_handle(handle, (char *)method,
+ &dummy);
+ }
+
+ if (status != AE_OK) {
+ if (ret)
+ pr_warning("Error finding %s\n", method);
+ return -ENODEV;
+ }
+ return 0;
}
-static int read_gps_status(void)
+/* Generic LED function */
+static int asus_led_set(struct asus_laptop *asus, const char *method,
+ int value)
{
- unsigned long long status;
- acpi_status rv = AE_OK;
-
- rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status);
- if (ACPI_FAILURE(rv))
- pr_warning("Error reading GPS status\n");
+ if (!strcmp(method, METHOD_MLED))
+ value = !value;
+ else if (!strcmp(method, METHOD_GLED))
+ value = !value + 1;
else
- return status ? 1 : 0;
+ value = !!value;
- return (hotk->status & GPS_ON) ? 1 : 0;
+ return write_acpi_int(asus->handle, method, value);
}
-/* Generic LED functions */
-static int read_status(int mask)
+/*
+ * LEDs
+ */
+/* /sys/class/led handlers */
+static void asus_led_cdev_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
{
- /* There is a special method for both wireless devices */
- if (mask == BT_ON || mask == WL_ON)
- return read_wireless_status(mask);
- else if (mask == GPS_ON)
- return read_gps_status();
+ struct asus_led *led = container_of(led_cdev, struct asus_led, led);
+ struct asus_laptop *asus = led->asus;
- return (hotk->status & mask) ? 1 : 0;
+ led->wk = !!value;
+ queue_work(asus->led_workqueue, &led->work);
}
-static void write_status(acpi_handle handle, int out, int mask)
+static void asus_led_cdev_update(struct work_struct *work)
{
- hotk->status = (out) ? (hotk->status | mask) : (hotk->status & ~mask);
-
- switch (mask) {
- case MLED_ON:
- out = !(out & 0x1);
- break;
- case GLED_ON:
- out = (out & 0x1) + 1;
- break;
- case GPS_ON:
- handle = (out) ? gps_on_handle : gps_off_handle;
- out = 0x02;
- break;
- default:
- out &= 0x1;
- break;
- }
+ struct asus_led *led = container_of(work, struct asus_led, work);
+ struct asus_laptop *asus = led->asus;
- if (write_acpi_int(handle, NULL, out, NULL))
- pr_warning(" write failed %x\n", mask);
+ asus_led_set(asus, led->method, led->wk);
}
-/* /sys/class/led handlers */
-#define ASUS_LED_HANDLER(object, mask) \
- static void object##_led_set(struct led_classdev *led_cdev, \
- enum led_brightness value) \
- { \
- object##_led_wk = (value > 0) ? 1 : 0; \
- queue_work(led_workqueue, &object##_led_work); \
- } \
- static void object##_led_update(struct work_struct *ignored) \
- { \
- int value = object##_led_wk; \
- write_status(object##_set_handle, value, (mask)); \
- } \
- static enum led_brightness object##_led_get( \
- struct led_classdev *led_cdev) \
- { \
- return led_cdev->brightness; \
- }
-
-ASUS_LED_HANDLER(mled, MLED_ON);
-ASUS_LED_HANDLER(pled, PLED_ON);
-ASUS_LED_HANDLER(rled, RLED_ON);
-ASUS_LED_HANDLER(tled, TLED_ON);
-ASUS_LED_HANDLER(gled, GLED_ON);
+static enum led_brightness asus_led_cdev_get(struct led_classdev *led_cdev)
+{
+ return led_cdev->brightness;
+}
/*
- * Keyboard backlight
+ * Keyboard backlight (also a LED)
*/
-static int get_kled_lvl(void)
+static int asus_kled_lvl(struct asus_laptop *asus)
{
unsigned long long kblv;
struct acpi_object_list params;
@@ -468,75 +397,183 @@ static int get_kled_lvl(void)
in_obj.type = ACPI_TYPE_INTEGER;
in_obj.integer.value = 2;
- rv = acpi_evaluate_integer(kled_get_handle, NULL, &params, &kblv);
+ rv = acpi_evaluate_integer(asus->handle, METHOD_KBD_LIGHT_GET,
+ &params, &kblv);
if (ACPI_FAILURE(rv)) {
pr_warning("Error reading kled level\n");
- return 0;
+ return -ENODEV;
}
return kblv;
}
-static int set_kled_lvl(int kblv)
+static int asus_kled_set(struct asus_laptop *asus, int kblv)
{
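+ /* Bit 7 enables the keyboard backlight, bits 0-6 carry the level */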
if (kblv > 0)
kblv = (1 << 7) | (kblv & 0x7F);
else
kblv = 0;
- if (write_acpi_int(kled_set_handle, NULL, kblv, NULL)) {
+ if (write_acpi_int(asus->handle, METHOD_KBD_LIGHT_SET, kblv)) {
pr_warning("Keyboard LED display write failed\n");
return -EINVAL;
}
return 0;
}
-static void kled_led_set(struct led_classdev *led_cdev,
- enum led_brightness value)
+static void asus_kled_cdev_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
{
- kled_led_wk = value;
- queue_work(led_workqueue, &kled_led_work);
+ struct asus_led *led = container_of(led_cdev, struct asus_led, led);
+ struct asus_laptop *asus = led->asus;
+
+ led->wk = value;
+ queue_work(asus->led_workqueue, &led->work);
}
-static void kled_led_update(struct work_struct *ignored)
+static void asus_kled_cdev_update(struct work_struct *work)
{
- set_kled_lvl(kled_led_wk);
+ struct asus_led *led = container_of(work, struct asus_led, work);
+ struct asus_laptop *asus = led->asus;
+
+ asus_kled_set(asus, led->wk);
}
-static enum led_brightness kled_led_get(struct led_classdev *led_cdev)
+static enum led_brightness asus_kled_cdev_get(struct led_classdev *led_cdev)
{
- return get_kled_lvl();
+ struct asus_led *led = container_of(led_cdev, struct asus_led, led);
+ struct asus_laptop *asus = led->asus;
+
+ return asus_kled_lvl(asus);
}
-static int get_lcd_state(void)
+static void asus_led_exit(struct asus_laptop *asus)
{
- return read_status(LCD_ON);
+ if (asus->mled.led.dev)
+ led_classdev_unregister(&asus->mled.led);
+ if (asus->tled.led.dev)
+ led_classdev_unregister(&asus->tled.led);
+ if (asus->pled.led.dev)
+ led_classdev_unregister(&asus->pled.led);
+ if (asus->rled.led.dev)
+ led_classdev_unregister(&asus->rled.led);
+ if (asus->gled.led.dev)
+ led_classdev_unregister(&asus->gled.led);
+ if (asus->kled.led.dev)
+ led_classdev_unregister(&asus->kled.led);
+ if (asus->led_workqueue) {
+ destroy_workqueue(asus->led_workqueue);
+ asus->led_workqueue = NULL;
+ }
}
-static int set_lcd_state(int value)
+/* Generic LED registration helper for the simple on/off LEDs */
+static int asus_led_register(struct asus_laptop *asus,
+ struct asus_led *led,
+ const char *name, const char *method)
+{
+ struct led_classdev *led_cdev = &led->led;
+
+ if (!method || acpi_check_handle(asus->handle, method, NULL))
+ return 0; /* Led not present */
+
+ led->asus = asus;
+ led->method = method;
+
+ INIT_WORK(&led->work, asus_led_cdev_update);
+ led_cdev->name = name;
+ led_cdev->brightness_set = asus_led_cdev_set;
+ led_cdev->brightness_get = asus_led_cdev_get;
+ led_cdev->max_brightness = 1;
+ return led_classdev_register(&asus->platform_device->dev, led_cdev);
+}
+
+static int asus_led_init(struct asus_laptop *asus)
+{
+ int r;
+
+ /*
+ * Functions that actually update the LED's are called from a
+ * workqueue. By doing this as separate work rather than when the LED
+ * subsystem asks, we avoid messing with the Asus ACPI stuff during a
+ * potentially bad time, such as a timer interrupt.
+ */
+ asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
+ if (!asus->led_workqueue)
+ return -ENOMEM;
+
+ r = asus_led_register(asus, &asus->mled, "asus::mail", METHOD_MLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->tled, "asus::touchpad", METHOD_TLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->rled, "asus::record", METHOD_RLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->pled, "asus::phone", METHOD_PLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->gled, "asus::gaming", METHOD_GLED);
+ if (r)
+ goto error;
+ if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_GET, NULL)) {
+ struct asus_led *led = &asus->kled;
+ struct led_classdev *cdev = &led->led;
+
+ led->asus = asus;
+
+ INIT_WORK(&led->work, asus_kled_cdev_update);
+ cdev->name = "asus::kbd_backlight";
+ cdev->brightness_set = asus_kled_cdev_set;
+ cdev->brightness_get = asus_kled_cdev_get;
+ cdev->max_brightness = 3;
+ r = led_classdev_register(&asus->platform_device->dev, cdev);
+ }
+error:
+ if (r)
+ asus_led_exit(asus);
+ return r;
+}
+
+/*
+ * Backlight device
+ */
+static int asus_lcd_status(struct asus_laptop *asus)
+{
+ return asus->lcd_state;
+}
+
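+/*
+ * The LCD is switched through an EC query method that just toggles the
+ * panel and gives no way to read the state back, so the current state is
+ * cached in asus->lcd_state.
+ */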
+static int asus_lcd_set(struct asus_laptop *asus, int value)
{
int lcd = 0;
acpi_status status = 0;
- lcd = value ? 1 : 0;
+ lcd = !!value;
- if (lcd == get_lcd_state())
+ if (lcd == asus_lcd_status(asus))
return 0;
- if (lcd_switch_handle) {
- status = acpi_evaluate_object(lcd_switch_handle,
- NULL, NULL, NULL);
+ if (!lcd_switch_handle)
+ return -ENODEV;
+
+ status = acpi_evaluate_object(lcd_switch_handle,
+ NULL, NULL, NULL);
- if (ACPI_FAILURE(status))
- pr_warning("Error switching LCD\n");
+ if (ACPI_FAILURE(status)) {
+ pr_warning("Error switching LCD\n");
+ return -ENODEV;
}
- write_status(NULL, lcd, LCD_ON);
+ asus->lcd_state = lcd;
return 0;
}
-static void lcd_blank(int blank)
+static void lcd_blank(struct asus_laptop *asus, int blank)
{
- struct backlight_device *bd = asus_backlight_device;
+ struct backlight_device *bd = asus->backlight_device;
+
+ asus->lcd_state = (blank == FB_BLANK_UNBLANK);
if (bd) {
bd->props.power = blank;
@@ -544,44 +581,91 @@ static void lcd_blank(int blank)
}
}
-static int read_brightness(struct backlight_device *bd)
+static int asus_read_brightness(struct backlight_device *bd)
{
+ struct asus_laptop *asus = bl_get_data(bd);
unsigned long long value;
acpi_status rv = AE_OK;
- rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value);
+ rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
+ NULL, &value);
if (ACPI_FAILURE(rv))
pr_warning("Error reading brightness\n");
return value;
}
-static int set_brightness(struct backlight_device *bd, int value)
+static int asus_set_brightness(struct backlight_device *bd, int value)
{
- int ret = 0;
-
- value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
- /* 0 <= value <= 15 */
+ struct asus_laptop *asus = bl_get_data(bd);
- if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) {
+ if (write_acpi_int(asus->handle, METHOD_BRIGHTNESS_SET, value)) {
pr_warning("Error changing brightness\n");
- ret = -EIO;
+ return -EIO;
}
-
- return ret;
+ return 0;
}
static int update_bl_status(struct backlight_device *bd)
{
+ struct asus_laptop *asus = bl_get_data(bd);
int rv;
int value = bd->props.brightness;
- rv = set_brightness(bd, value);
+ rv = asus_set_brightness(bd, value);
if (rv)
return rv;
value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0;
- return set_lcd_state(value);
+ return asus_lcd_set(asus, value);
+}
+
+static struct backlight_ops asusbl_ops = {
+ .get_brightness = asus_read_brightness,
+ .update_status = update_bl_status,
+};
+
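+/*
+ * Ask the backlight core to re-read the brightness after a hotkey event;
+ * the previous brightness value is returned to the caller.
+ */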
+static int asus_backlight_notify(struct asus_laptop *asus)
+{
+ struct backlight_device *bd = asus->backlight_device;
+ int old = bd->props.brightness;
+
+ backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
+
+ return old;
+}
+
+static int asus_backlight_init(struct asus_laptop *asus)
+{
+ struct backlight_device *bd;
+ struct device *dev = &asus->platform_device->dev;
+
+ if (!acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) &&
+ lcd_switch_handle) {
+ bd = backlight_device_register(ASUS_LAPTOP_FILE, dev,
+ asus, &asusbl_ops);
+ if (IS_ERR(bd)) {
+ pr_err("Could not register asus backlight device\n");
+ asus->backlight_device = NULL;
+ return PTR_ERR(bd);
+ }
+
+ asus->backlight_device = bd;
+
+ bd->props.max_brightness = 15;
+ bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.brightness = asus_read_brightness(bd);
+ backlight_update_status(bd);
+ }
+ return 0;
+}
+
+static void asus_backlight_exit(struct asus_laptop *asus)
+{
+ if (asus->backlight_device)
+ backlight_device_unregister(asus->backlight_device);
+ asus->backlight_device = NULL;
}
/*
@@ -596,25 +680,26 @@ static int update_bl_status(struct backlight_device *bd)
static ssize_t show_infos(struct device *dev,
struct device_attribute *attr, char *page)
{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
int len = 0;
unsigned long long temp;
char buf[16]; /* enough for all info */
acpi_status rv = AE_OK;
/*
- * We use the easy way, we don't care of off and count, so we don't set eof
- * to 1
+ * We use the easy way; we don't care about off and count,
+ * so we don't set eof to 1
*/
- len += sprintf(page, ASUS_HOTK_NAME " " ASUS_LAPTOP_VERSION "\n");
- len += sprintf(page + len, "Model reference : %s\n", hotk->name);
+ len += sprintf(page, ASUS_LAPTOP_NAME " " ASUS_LAPTOP_VERSION "\n");
+ len += sprintf(page + len, "Model reference : %s\n", asus->name);
/*
* The SFUN method probably allows the original driver to get the list
* of features supported by a given model. For now, 0x0100 or 0x0800
* bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
* The significance of others is yet to be found.
*/
- rv = acpi_evaluate_integer(hotk->handle, "SFUN", NULL, &temp);
+ rv = acpi_evaluate_integer(asus->handle, "SFUN", NULL, &temp);
if (!ACPI_FAILURE(rv))
len += sprintf(page + len, "SFUN value : %#x\n",
(uint) temp);
@@ -624,7 +709,7 @@ static ssize_t show_infos(struct device *dev,
* The significance of others is yet to be found.
 * If we don't find the method, we assume the devices are present.
*/
- rv = acpi_evaluate_integer(hotk->handle, "HRWS", NULL, &temp);
+ rv = acpi_evaluate_integer(asus->handle, "HRWS", NULL, &temp);
if (!ACPI_FAILURE(rv))
len += sprintf(page + len, "HRWS value : %#x\n",
(uint) temp);
@@ -635,26 +720,26 @@ static ssize_t show_infos(struct device *dev,
* Note: since not all the laptops provide this method, errors are
* silently ignored.
*/
- rv = acpi_evaluate_integer(hotk->handle, "ASYM", NULL, &temp);
+ rv = acpi_evaluate_integer(asus->handle, "ASYM", NULL, &temp);
if (!ACPI_FAILURE(rv))
len += sprintf(page + len, "ASYM value : %#x\n",
(uint) temp);
- if (asus_info) {
- snprintf(buf, 16, "%d", asus_info->length);
+ if (asus->dsdt_info) {
+ snprintf(buf, 16, "%d", asus->dsdt_info->length);
len += sprintf(page + len, "DSDT length : %s\n", buf);
- snprintf(buf, 16, "%d", asus_info->checksum);
+ snprintf(buf, 16, "%d", asus->dsdt_info->checksum);
len += sprintf(page + len, "DSDT checksum : %s\n", buf);
- snprintf(buf, 16, "%d", asus_info->revision);
+ snprintf(buf, 16, "%d", asus->dsdt_info->revision);
len += sprintf(page + len, "DSDT revision : %s\n", buf);
- snprintf(buf, 7, "%s", asus_info->oem_id);
+ snprintf(buf, 7, "%s", asus->dsdt_info->oem_id);
len += sprintf(page + len, "OEM id : %s\n", buf);
- snprintf(buf, 9, "%s", asus_info->oem_table_id);
+ snprintf(buf, 9, "%s", asus->dsdt_info->oem_table_id);
len += sprintf(page + len, "OEM table id : %s\n", buf);
- snprintf(buf, 16, "%x", asus_info->oem_revision);
+ snprintf(buf, 16, "%x", asus->dsdt_info->oem_revision);
len += sprintf(page + len, "OEM revision : 0x%s\n", buf);
- snprintf(buf, 5, "%s", asus_info->asl_compiler_id);
+ snprintf(buf, 5, "%s", asus->dsdt_info->asl_compiler_id);
len += sprintf(page + len, "ASL comp vendor id : %s\n", buf);
- snprintf(buf, 16, "%x", asus_info->asl_compiler_revision);
+ snprintf(buf, 16, "%x", asus->dsdt_info->asl_compiler_revision);
len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf);
}
@@ -672,8 +757,9 @@ static int parse_arg(const char *buf, unsigned long count, int *val)
return count;
}
-static ssize_t store_status(const char *buf, size_t count,
- acpi_handle handle, int mask)
+static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
+ const char *buf, size_t count,
+ const char *method)
{
int rv, value;
int out = 0;
@@ -682,8 +768,8 @@ static ssize_t store_status(const char *buf, size_t count,
if (rv > 0)
out = value ? 1 : 0;
- write_status(handle, out, mask);
-
+ if (write_acpi_int(asus->handle, method, value))
+ return -ENODEV;
return rv;
}
@@ -693,67 +779,116 @@ static ssize_t store_status(const char *buf, size_t count,
static ssize_t show_ledd(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "0x%08x\n", hotk->ledd_status);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%08x\n", asus->ledd_status);
}
static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0) {
- if (write_acpi_int(ledd_set_handle, NULL, value, NULL))
+ if (write_acpi_int(asus->handle, METHOD_LEDD, value))
pr_warning("LED display write failed\n");
else
- hotk->ledd_status = (u32) value;
+ asus->ledd_status = (u32) value;
}
return rv;
}
/*
+ * Wireless
+ */
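+/*
+ * On models without the RSTS method, fall back to the wireless_status
+ * value cached by the driver.
+ */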
+static int asus_wireless_status(struct asus_laptop *asus, int mask)
+{
+ unsigned long long status;
+ acpi_status rv = AE_OK;
+
+ if (!asus->have_rsts)
+ return (asus->wireless_status & mask) ? 1 : 0;
+
+ rv = acpi_evaluate_integer(asus->handle, METHOD_WL_STATUS,
+ NULL, &status);
+ if (ACPI_FAILURE(rv)) {
+ pr_warning("Error reading Wireless status\n");
+ return -EINVAL;
+ }
+ return !!(status & mask);
+}
+
+/*
* WLAN
*/
+static int asus_wlan_set(struct asus_laptop *asus, int status)
+{
+ if (write_acpi_int(asus->handle, METHOD_WLAN, !!status)) {
+ pr_warning("Error setting wlan status to %d", status);
+ return -EIO;
+ }
+ return 0;
+}
+
static ssize_t show_wlan(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", read_status(WL_ON));
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS));
}
static ssize_t store_wlan(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- return store_status(buf, count, wl_switch_handle, WL_ON);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sysfs_acpi_set(asus, buf, count, METHOD_WLAN);
}
/*
* Bluetooth
*/
+static int asus_bluetooth_set(struct asus_laptop *asus, int status)
+{
+ if (write_acpi_int(asus->handle, METHOD_BLUETOOTH, !!status)) {
+ pr_warning("Error setting bluetooth status to %d", status);
+ return -EIO;
+ }
+ return 0;
+}
+
static ssize_t show_bluetooth(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", read_status(BT_ON));
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS));
}
static ssize_t store_bluetooth(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- return store_status(buf, count, bt_switch_handle, BT_ON);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH);
}
/*
* Display
*/
-static void set_display(int value)
+static void asus_set_display(struct asus_laptop *asus, int value)
{
/* no sanity check needed for now */
- if (write_acpi_int(display_set_handle, NULL, value, NULL))
+ if (write_acpi_int(asus->handle, METHOD_SWITCH_DISPLAY, value))
pr_warning("Error setting display\n");
return;
}
-static int read_display(void)
+static int read_display(struct asus_laptop *asus)
{
unsigned long long value = 0;
acpi_status rv = AE_OK;
@@ -769,7 +904,7 @@ static int read_display(void)
pr_warning("Error reading display status\n");
}
- value &= 0x0F; /* needed for some models, shouldn't hurt others */
+ value &= 0x0F; /* needed for some models, shouldn't hurt others */
return value;
}
@@ -781,7 +916,11 @@ static int read_display(void)
static ssize_t show_disp(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", read_display());
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ if (!display_get_handle)
+ return -ENODEV;
+ return sprintf(buf, "%d\n", read_display(asus));
}
/*
@@ -794,65 +933,72 @@ static ssize_t show_disp(struct device *dev,
static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0)
- set_display(value);
+ asus_set_display(asus, value);
return rv;
}
/*
* Light Sens
*/
-static void set_light_sens_switch(int value)
+static void asus_als_switch(struct asus_laptop *asus, int value)
{
- if (write_acpi_int(ls_switch_handle, NULL, value, NULL))
+ if (write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value))
pr_warning("Error setting light sensor switch\n");
- hotk->light_switch = value;
+ asus->light_switch = value;
}
static ssize_t show_lssw(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", hotk->light_switch);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus->light_switch);
}
static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0)
- set_light_sens_switch(value ? 1 : 0);
+ asus_als_switch(asus, value ? 1 : 0);
return rv;
}
-static void set_light_sens_level(int value)
+static void asus_als_level(struct asus_laptop *asus, int value)
{
- if (write_acpi_int(ls_level_handle, NULL, value, NULL))
+ if (write_acpi_int(asus->handle, METHOD_ALS_LEVEL, value))
pr_warning("Error setting light sensor level\n");
- hotk->light_level = value;
+ asus->light_level = value;
}
static ssize_t show_lslvl(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", hotk->light_level);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus->light_level);
}
static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0) {
value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
/* 0 <= value <= 15 */
- set_light_sens_level(value);
+ asus_als_level(asus, value);
}
return rv;
@@ -861,197 +1007,309 @@ static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
/*
* GPS
*/
+static int asus_gps_status(struct asus_laptop *asus)
+{
+ unsigned long long status;
+ acpi_status rv = AE_OK;
+
+ rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
+ NULL, &status);
+ if (ACPI_FAILURE(rv)) {
+ pr_warning("Error reading GPS status\n");
+ return -ENODEV;
+ }
+ return !!status;
+}
+
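+/* SDON/SDOF are separate on/off methods (R2H); both take the value 0x02 */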
+static int asus_gps_switch(struct asus_laptop *asus, int status)
+{
+ const char *meth = status ? METHOD_GPS_ON : METHOD_GPS_OFF;
+
+ if (write_acpi_int(asus->handle, meth, 0x02))
+ return -ENODEV;
+ return 0;
+}
+
static ssize_t show_gps(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", read_status(GPS_ON));
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_gps_status(asus));
}
static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- return store_status(buf, count, NULL, GPS_ON);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+ int rv, value;
+ int ret;
+
+ rv = parse_arg(buf, count, &value);
+ if (rv <= 0)
+ return -EINVAL;
+ ret = asus_gps_switch(asus, !!value);
+ if (ret)
+ return ret;
+ rfkill_set_sw_state(asus->gps_rfkill, !value);
+ return rv;
}
/*
- * Hotkey functions
+ * rfkill
*/
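+/* rfkill set_block callback: blocked == true means the GPS must be off */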
-static struct key_entry *asus_get_entry_by_scancode(int code)
+static int asus_gps_rfkill_set(void *data, bool blocked)
{
- struct key_entry *key;
-
- for (key = asus_keymap; key->type != KE_END; key++)
- if (code == key->code)
- return key;
+ struct asus_laptop *asus = data;
- return NULL;
+ return asus_gps_switch(asus, !blocked);
}
-static struct key_entry *asus_get_entry_by_keycode(int code)
-{
- struct key_entry *key;
-
- for (key = asus_keymap; key->type != KE_END; key++)
- if (code == key->keycode && key->type == KE_KEY)
- return key;
+static const struct rfkill_ops asus_gps_rfkill_ops = {
+ .set_block = asus_gps_rfkill_set,
+};
- return NULL;
+static void asus_rfkill_exit(struct asus_laptop *asus)
+{
+ if (asus->gps_rfkill) {
+ rfkill_unregister(asus->gps_rfkill);
+ rfkill_destroy(asus->gps_rfkill);
+ asus->gps_rfkill = NULL;
+ }
}
-static int asus_getkeycode(struct input_dev *dev, int scancode, int *keycode)
+static int asus_rfkill_init(struct asus_laptop *asus)
{
- struct key_entry *key = asus_get_entry_by_scancode(scancode);
+ int result;
- if (key && key->type == KE_KEY) {
- *keycode = key->keycode;
+ if (acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) ||
+ acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) ||
+ acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL))
return 0;
+
+ asus->gps_rfkill = rfkill_alloc("asus-gps", &asus->platform_device->dev,
+ RFKILL_TYPE_GPS,
+ &asus_gps_rfkill_ops, NULL);
+ if (!asus->gps_rfkill)
+ return -EINVAL;
+
+ result = rfkill_register(asus->gps_rfkill);
+ if (result) {
+ rfkill_destroy(asus->gps_rfkill);
+ asus->gps_rfkill = NULL;
}
- return -EINVAL;
+ return result;
}
-static int asus_setkeycode(struct input_dev *dev, int scancode, int keycode)
+/*
+ * Input device (i.e. hotkeys)
+ */
+static void asus_input_notify(struct asus_laptop *asus, int event)
{
- struct key_entry *key;
- int old_keycode;
+ if (asus->inputdev)
+ sparse_keymap_report_event(asus->inputdev, event, 1, true);
+}
- if (keycode < 0 || keycode > KEY_MAX)
- return -EINVAL;
+static int asus_input_init(struct asus_laptop *asus)
+{
+ struct input_dev *input;
+ int error;
- key = asus_get_entry_by_scancode(scancode);
- if (key && key->type == KE_KEY) {
- old_keycode = key->keycode;
- key->keycode = keycode;
- set_bit(keycode, dev->keybit);
- if (!asus_get_entry_by_keycode(old_keycode))
- clear_bit(old_keycode, dev->keybit);
+ input = input_allocate_device();
+ if (!input) {
+ pr_info("Unable to allocate input device\n");
return 0;
}
+ input->name = "Asus Laptop extra buttons";
+ input->phys = ASUS_LAPTOP_FILE "/input0";
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &asus->platform_device->dev;
+ input_set_drvdata(input, asus);
+
+ error = sparse_keymap_setup(input, asus_keymap, NULL);
+ if (error) {
+ pr_err("Unable to setup input device keymap\n");
+ goto err_device;
+ }
+ error = input_register_device(input);
+ if (error) {
+ pr_info("Unable to register input device\n");
+ goto err_keymap;
+ }
+
+ asus->inputdev = input;
+ return 0;
- return -EINVAL;
+err_keymap:
+ sparse_keymap_free(input);
+err_device:
+ input_free_device(input);
+ return error;
}
-static void asus_hotk_notify(struct acpi_device *device, u32 event)
+static void asus_input_exit(struct asus_laptop *asus)
{
- static struct key_entry *key;
- u16 count;
+ if (asus->inputdev) {
+ sparse_keymap_free(asus->inputdev);
+ input_unregister_device(asus->inputdev);
+ }
+}
- /* TODO Find a better way to handle events count. */
- if (!hotk)
- return;
+/*
+ * ACPI driver
+ */
+static void asus_acpi_notify(struct acpi_device *device, u32 event)
+{
+ struct asus_laptop *asus = acpi_driver_data(device);
+ u16 count;
/*
* We need to tell the backlight device when the backlight power is
* switched
*/
- if (event == ATKD_LCD_ON) {
- write_status(NULL, 1, LCD_ON);
- lcd_blank(FB_BLANK_UNBLANK);
- } else if (event == ATKD_LCD_OFF) {
- write_status(NULL, 0, LCD_ON);
- lcd_blank(FB_BLANK_POWERDOWN);
- }
+ if (event == ATKD_LCD_ON)
+ lcd_blank(asus, FB_BLANK_UNBLANK);
+ else if (event == ATKD_LCD_OFF)
+ lcd_blank(asus, FB_BLANK_POWERDOWN);
- count = hotk->event_count[event % 128]++;
- acpi_bus_generate_proc_event(hotk->device, event, count);
- acpi_bus_generate_netlink_event(hotk->device->pnp.device_class,
- dev_name(&hotk->device->dev), event,
+ /* TODO Find a better way to handle events count. */
+ count = asus->event_count[event % 128]++;
+ acpi_bus_generate_proc_event(asus->device, event, count);
+ acpi_bus_generate_netlink_event(asus->device->pnp.device_class,
+ dev_name(&asus->device->dev), event,
count);
- if (hotk->inputdev) {
- key = asus_get_entry_by_scancode(event);
- if (!key)
- return ;
-
- switch (key->type) {
- case KE_KEY:
- input_report_key(hotk->inputdev, key->keycode, 1);
- input_sync(hotk->inputdev);
- input_report_key(hotk->inputdev, key->keycode, 0);
- input_sync(hotk->inputdev);
- break;
+ /* Brightness events are special */
+ if (event >= ATKD_BR_MIN && event <= ATKD_BR_MAX) {
+
+ /* Ignore them completely if the acpi video driver is used */
+ if (asus->backlight_device != NULL) {
+ /* Update the backlight device. */
+ asus_backlight_notify(asus);
}
+ return;
}
+ asus_input_notify(asus, event);
}
-#define ASUS_CREATE_DEVICE_ATTR(_name) \
- struct device_attribute dev_attr_##_name = { \
- .attr = { \
- .name = __stringify(_name), \
- .mode = 0 }, \
- .show = NULL, \
- .store = NULL, \
+static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL);
+static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan);
+static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR, show_bluetooth,
+ store_bluetooth);
+static DEVICE_ATTR(display, S_IRUGO | S_IWUSR, show_disp, store_disp);
+static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd);
+static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl);
+static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw);
+static DEVICE_ATTR(gps, S_IRUGO | S_IWUSR, show_gps, store_gps);
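+/*
+ * The attributes above are created on the platform device, so they appear
+ * under /sys/devices/platform/asus_laptop/.
+ */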
+
+static void asus_sysfs_exit(struct asus_laptop *asus)
+{
+ struct platform_device *device = asus->platform_device;
+
+ device_remove_file(&device->dev, &dev_attr_infos);
+ device_remove_file(&device->dev, &dev_attr_wlan);
+ device_remove_file(&device->dev, &dev_attr_bluetooth);
+ device_remove_file(&device->dev, &dev_attr_display);
+ device_remove_file(&device->dev, &dev_attr_ledd);
+ device_remove_file(&device->dev, &dev_attr_ls_switch);
+ device_remove_file(&device->dev, &dev_attr_ls_level);
+ device_remove_file(&device->dev, &dev_attr_gps);
+}
+
+static int asus_sysfs_init(struct asus_laptop *asus)
+{
+ struct platform_device *device = asus->platform_device;
+ int err;
+
+ err = device_create_file(&device->dev, &dev_attr_infos);
+ if (err)
+ return err;
+
+ if (!acpi_check_handle(asus->handle, METHOD_WLAN, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_wlan);
+ if (err)
+ return err;
}
-#define ASUS_SET_DEVICE_ATTR(_name, _mode, _show, _store) \
- do { \
- dev_attr_##_name.attr.mode = _mode; \
- dev_attr_##_name.show = _show; \
- dev_attr_##_name.store = _store; \
- } while(0)
-
-static ASUS_CREATE_DEVICE_ATTR(infos);
-static ASUS_CREATE_DEVICE_ATTR(wlan);
-static ASUS_CREATE_DEVICE_ATTR(bluetooth);
-static ASUS_CREATE_DEVICE_ATTR(display);
-static ASUS_CREATE_DEVICE_ATTR(ledd);
-static ASUS_CREATE_DEVICE_ATTR(ls_switch);
-static ASUS_CREATE_DEVICE_ATTR(ls_level);
-static ASUS_CREATE_DEVICE_ATTR(gps);
-
-static struct attribute *asuspf_attributes[] = {
- &dev_attr_infos.attr,
- &dev_attr_wlan.attr,
- &dev_attr_bluetooth.attr,
- &dev_attr_display.attr,
- &dev_attr_ledd.attr,
- &dev_attr_ls_switch.attr,
- &dev_attr_ls_level.attr,
- &dev_attr_gps.attr,
- NULL
-};
+ if (!acpi_check_handle(asus->handle, METHOD_BLUETOOTH, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_bluetooth);
+ if (err)
+ return err;
+ }
-static struct attribute_group asuspf_attribute_group = {
- .attrs = asuspf_attributes
-};
+ if (!acpi_check_handle(asus->handle, METHOD_SWITCH_DISPLAY, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_display);
+ if (err)
+ return err;
+ }
-static struct platform_driver asuspf_driver = {
- .driver = {
- .name = ASUS_HOTK_FILE,
- .owner = THIS_MODULE,
- }
-};
+ if (!acpi_check_handle(asus->handle, METHOD_LEDD, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_ledd);
+ if (err)
+ return err;
+ }
-static struct platform_device *asuspf_device;
+ if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_ls_switch);
+ if (err)
+ return err;
+ err = device_create_file(&device->dev, &dev_attr_ls_level);
+ if (err)
+ return err;
+ }
-static void asus_hotk_add_fs(void)
-{
- ASUS_SET_DEVICE_ATTR(infos, 0444, show_infos, NULL);
+ if (!acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_gps);
+ if (err)
+ return err;
+ }
- if (wl_switch_handle)
- ASUS_SET_DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan);
+ return err;
+}
+
+static int asus_platform_init(struct asus_laptop *asus)
+{
+ int err;
- if (bt_switch_handle)
- ASUS_SET_DEVICE_ATTR(bluetooth, 0644,
- show_bluetooth, store_bluetooth);
+ asus->platform_device = platform_device_alloc(ASUS_LAPTOP_FILE, -1);
+ if (!asus->platform_device)
+ return -ENOMEM;
+ platform_set_drvdata(asus->platform_device, asus);
- if (display_set_handle && display_get_handle)
- ASUS_SET_DEVICE_ATTR(display, 0644, show_disp, store_disp);
- else if (display_set_handle)
- ASUS_SET_DEVICE_ATTR(display, 0200, NULL, store_disp);
+ err = platform_device_add(asus->platform_device);
+ if (err)
+ goto fail_platform_device;
- if (ledd_set_handle)
- ASUS_SET_DEVICE_ATTR(ledd, 0644, show_ledd, store_ledd);
+ err = asus_sysfs_init(asus);
+ if (err)
+ goto fail_sysfs;
+ return 0;
- if (ls_switch_handle && ls_level_handle) {
- ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl);
- ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw);
- }
+fail_sysfs:
+ asus_sysfs_exit(asus);
+ platform_device_del(asus->platform_device);
+fail_platform_device:
+ platform_device_put(asus->platform_device);
+ return err;
+}
- if (gps_status_handle && gps_on_handle && gps_off_handle)
- ASUS_SET_DEVICE_ATTR(gps, 0644, show_gps, store_gps);
+static void asus_platform_exit(struct asus_laptop *asus)
+{
+ asus_sysfs_exit(asus);
+ platform_device_unregister(asus->platform_device);
}
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = ASUS_LAPTOP_FILE,
+ .owner = THIS_MODULE,
+ }
+};
+
static int asus_handle_init(char *name, acpi_handle * handle,
char **paths, int num_paths)
{
@@ -1073,10 +1331,11 @@ static int asus_handle_init(char *name, acpi_handle * handle,
ARRAY_SIZE(object##_paths))
/*
- * This function is used to initialize the hotk with right values. In this
- * method, we can make all the detection we want, and modify the hotk struct
+ * This function is used to initialize the context with the right values. In
+ * this method, we can do all the detection we want, and modify the
+ * asus_laptop struct
*/
-static int asus_hotk_get_info(void)
+static int asus_laptop_get_info(struct asus_laptop *asus)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *model = NULL;
@@ -1089,22 +1348,21 @@ static int asus_hotk_get_info(void)
* models, but late enough to allow acpi_bus_register_driver() to fail
 * before doing anything ACPI-specific. Should we encounter a machine
 * which needs special handling (i.e. its hotkey device has a different
- * HID), this bit will be moved. A global variable asus_info contains
- * the DSDT header.
+ * HID), this bit will be moved.
*/
- status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
+ status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info);
if (ACPI_FAILURE(status))
pr_warning("Couldn't get the DSDT table header\n");
/* We have to write 0 on init this far for all ASUS models */
- if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
+ if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) {
pr_err("Hotkey initialization failed\n");
return -ENODEV;
}
/* This needs to be called for some laptops to init properly */
status =
- acpi_evaluate_integer(hotk->handle, "BSTS", NULL, &bsts_result);
+ acpi_evaluate_integer(asus->handle, "BSTS", NULL, &bsts_result);
if (ACPI_FAILURE(status))
pr_warning("Error calling BSTS\n");
else if (bsts_result)
@@ -1112,8 +1370,8 @@ static int asus_hotk_get_info(void)
(uint) bsts_result);
/* This too ... */
- write_acpi_int(hotk->handle, "CWAP", wapf, NULL);
-
+ if (write_acpi_int(asus->handle, "CWAP", wapf))
+ pr_err("Error calling CWAP(%d)\n", wapf);
/*
* Try to match the object returned by INIT to the specific model.
* Handle every possible object (or the lack of thereof) the DSDT
@@ -1134,397 +1392,210 @@ static int asus_hotk_get_info(void)
break;
}
}
- hotk->name = kstrdup(string, GFP_KERNEL);
- if (!hotk->name)
+ asus->name = kstrdup(string, GFP_KERNEL);
+ if (!asus->name)
return -ENOMEM;
if (*string)
pr_notice(" %s model detected\n", string);
- ASUS_HANDLE_INIT(mled_set);
- ASUS_HANDLE_INIT(tled_set);
- ASUS_HANDLE_INIT(rled_set);
- ASUS_HANDLE_INIT(pled_set);
- ASUS_HANDLE_INIT(gled_set);
-
- ASUS_HANDLE_INIT(ledd_set);
-
- ASUS_HANDLE_INIT(kled_set);
- ASUS_HANDLE_INIT(kled_get);
-
/*
 * The HWRS method returns information about the hardware.
* 0x80 bit is for WLAN, 0x100 for Bluetooth.
* The significance of others is yet to be found.
- * If we don't find the method, we assume the device are present.
*/
status =
- acpi_evaluate_integer(hotk->handle, "HRWS", NULL, &hwrs_result);
- if (ACPI_FAILURE(status))
- hwrs_result = WL_HWRS | BT_HWRS;
-
- if (hwrs_result & WL_HWRS)
- ASUS_HANDLE_INIT(wl_switch);
- if (hwrs_result & BT_HWRS)
- ASUS_HANDLE_INIT(bt_switch);
-
- ASUS_HANDLE_INIT(wireless_status);
+ acpi_evaluate_integer(asus->handle, "HRWS", NULL, &hwrs_result);
+ if (!ACPI_FAILURE(status))
+ pr_notice(" HRWS returned %x", (int)hwrs_result);
- ASUS_HANDLE_INIT(brightness_set);
- ASUS_HANDLE_INIT(brightness_get);
+ if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
+ asus->have_rsts = true;
+ /* Scheduled for removal */
ASUS_HANDLE_INIT(lcd_switch);
-
- ASUS_HANDLE_INIT(display_set);
ASUS_HANDLE_INIT(display_get);
- /*
- * There is a lot of models with "ALSL", but a few get
- * a real light sens, so we need to check it.
- */
- if (!ASUS_HANDLE_INIT(ls_switch))
- ASUS_HANDLE_INIT(ls_level);
-
- ASUS_HANDLE_INIT(gps_on);
- ASUS_HANDLE_INIT(gps_off);
- ASUS_HANDLE_INIT(gps_status);
-
kfree(model);
return AE_OK;
}
-static int asus_input_init(void)
-{
- const struct key_entry *key;
- int result;
+static bool asus_device_present;
- hotk->inputdev = input_allocate_device();
- if (!hotk->inputdev) {
- pr_info("Unable to allocate input device\n");
- return 0;
- }
- hotk->inputdev->name = "Asus Laptop extra buttons";
- hotk->inputdev->phys = ASUS_HOTK_FILE "/input0";
- hotk->inputdev->id.bustype = BUS_HOST;
- hotk->inputdev->getkeycode = asus_getkeycode;
- hotk->inputdev->setkeycode = asus_setkeycode;
-
- for (key = asus_keymap; key->type != KE_END; key++) {
- switch (key->type) {
- case KE_KEY:
- set_bit(EV_KEY, hotk->inputdev->evbit);
- set_bit(key->keycode, hotk->inputdev->keybit);
- break;
- }
- }
- result = input_register_device(hotk->inputdev);
- if (result) {
- pr_info("Unable to register input device\n");
- input_free_device(hotk->inputdev);
- }
- return result;
-}
-
-static int asus_hotk_check(void)
+static int __devinit asus_acpi_init(struct asus_laptop *asus)
{
int result = 0;
- result = acpi_bus_get_status(hotk->device);
+ result = acpi_bus_get_status(asus->device);
if (result)
return result;
-
- if (hotk->device->status.present) {
- result = asus_hotk_get_info();
- } else {
+ if (!asus->device->status.present) {
pr_err("Hotkey device not present, aborting\n");
- return -EINVAL;
+ return -ENODEV;
}
- return result;
-}
-
-static int asus_hotk_found;
-
-static int asus_hotk_add(struct acpi_device *device)
-{
- int result;
-
- pr_notice("Asus Laptop Support version %s\n",
- ASUS_LAPTOP_VERSION);
-
- hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL);
- if (!hotk)
- return -ENOMEM;
-
- hotk->handle = device->handle;
- strcpy(acpi_device_name(device), ASUS_HOTK_DEVICE_NAME);
- strcpy(acpi_device_class(device), ASUS_HOTK_CLASS);
- device->driver_data = hotk;
- hotk->device = device;
-
- result = asus_hotk_check();
+ result = asus_laptop_get_info(asus);
if (result)
- goto end;
-
- asus_hotk_add_fs();
-
- asus_hotk_found = 1;
+ return result;
/* WLED and BLED are on by default */
- write_status(bt_switch_handle, 1, BT_ON);
- write_status(wl_switch_handle, 1, WL_ON);
-
- /* If the h/w switch is off, we need to check the real status */
- write_status(NULL, read_status(BT_ON), BT_ON);
- write_status(NULL, read_status(WL_ON), WL_ON);
+ if (bluetooth_status >= 0)
+ asus_bluetooth_set(asus, !!bluetooth_status);
- /* LCD Backlight is on by default */
- write_status(NULL, 1, LCD_ON);
+ if (wlan_status >= 0)
+ asus_wlan_set(asus, !!wlan_status);
/* Keyboard Backlight is on by default */
- if (kled_set_handle)
- set_kled_lvl(1);
+ if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL))
+ asus_kled_set(asus, 1);
/* LED display is off by default */
- hotk->ledd_status = 0xFFF;
+ asus->ledd_status = 0xFFF;
/* Set initial values of light sensor and level */
- hotk->light_switch = 0; /* Default to light sensor disabled */
- hotk->light_level = 5; /* level 5 for sensor sensitivity */
+ asus->light_switch = 0; /* Default to light sensor disabled */
+ asus->light_level = 5; /* level 5 for sensor sensitivity */
- if (ls_switch_handle)
- set_light_sens_switch(hotk->light_switch);
-
- if (ls_level_handle)
- set_light_sens_level(hotk->light_level);
-
- /* GPS is on by default */
- write_status(NULL, 1, GPS_ON);
-
-end:
- if (result) {
- kfree(hotk->name);
- kfree(hotk);
+ if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
+ asus_als_switch(asus, asus->light_switch);
+ asus_als_level(asus, asus->light_level);
}
+ asus->lcd_state = 1; /* LCD should be on when the module loads */
return result;
}
-static int asus_hotk_remove(struct acpi_device *device, int type)
-{
- kfree(hotk->name);
- kfree(hotk);
-
- return 0;
-}
-
-static void asus_backlight_exit(void)
+static int __devinit asus_acpi_add(struct acpi_device *device)
{
- if (asus_backlight_device)
- backlight_device_unregister(asus_backlight_device);
-}
-
-#define ASUS_LED_UNREGISTER(object) \
- if (object##_led.dev) \
- led_classdev_unregister(&object##_led)
+ struct asus_laptop *asus;
+ int result;
-static void asus_led_exit(void)
-{
- destroy_workqueue(led_workqueue);
- ASUS_LED_UNREGISTER(mled);
- ASUS_LED_UNREGISTER(tled);
- ASUS_LED_UNREGISTER(pled);
- ASUS_LED_UNREGISTER(rled);
- ASUS_LED_UNREGISTER(gled);
- ASUS_LED_UNREGISTER(kled);
-}
+ pr_notice("Asus Laptop Support version %s\n",
+ ASUS_LAPTOP_VERSION);
+ asus = kzalloc(sizeof(struct asus_laptop), GFP_KERNEL);
+ if (!asus)
+ return -ENOMEM;
+ asus->handle = device->handle;
+ strcpy(acpi_device_name(device), ASUS_LAPTOP_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ASUS_LAPTOP_CLASS);
+ device->driver_data = asus;
+ asus->device = device;
-static void asus_input_exit(void)
-{
- if (hotk->inputdev)
- input_unregister_device(hotk->inputdev);
-}
+ result = asus_acpi_init(asus);
+ if (result)
+ goto fail_platform;
-static void __exit asus_laptop_exit(void)
-{
- asus_backlight_exit();
- asus_led_exit();
- asus_input_exit();
+ /*
+ * Register the platform device first. It is used as a parent for the
+ * sub-devices below.
+ */
+ result = asus_platform_init(asus);
+ if (result)
+ goto fail_platform;
- acpi_bus_unregister_driver(&asus_hotk_driver);
- sysfs_remove_group(&asuspf_device->dev.kobj, &asuspf_attribute_group);
- platform_device_unregister(asuspf_device);
- platform_driver_unregister(&asuspf_driver);
-}
+ if (!acpi_video_backlight_support()) {
+ result = asus_backlight_init(asus);
+ if (result)
+ goto fail_backlight;
+ } else
+ pr_info("Backlight controlled by ACPI video driver\n");
-static int asus_backlight_init(struct device *dev)
-{
- struct backlight_device *bd;
+ result = asus_input_init(asus);
+ if (result)
+ goto fail_input;
- if (brightness_set_handle && lcd_switch_handle) {
- bd = backlight_device_register(ASUS_HOTK_FILE, dev,
- NULL, &asusbl_ops);
- if (IS_ERR(bd)) {
- pr_err("Could not register asus backlight device\n");
- asus_backlight_device = NULL;
- return PTR_ERR(bd);
- }
+ result = asus_led_init(asus);
+ if (result)
+ goto fail_led;
- asus_backlight_device = bd;
+ result = asus_rfkill_init(asus);
+ if (result)
+ goto fail_rfkill;
- bd->props.max_brightness = 15;
- bd->props.brightness = read_brightness(NULL);
- bd->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(bd);
- }
+ asus_device_present = true;
return 0;
-}
-static int asus_led_register(acpi_handle handle,
- struct led_classdev *ldev, struct device *dev)
-{
- if (!handle)
- return 0;
+fail_rfkill:
+ asus_led_exit(asus);
+fail_led:
+ asus_input_exit(asus);
+fail_input:
+ asus_backlight_exit(asus);
+fail_backlight:
+ asus_platform_exit(asus);
+fail_platform:
+ kfree(asus->name);
+ kfree(asus);
- return led_classdev_register(dev, ldev);
+ return result;
}
-#define ASUS_LED_REGISTER(object, device) \
- asus_led_register(object##_set_handle, &object##_led, device)
-
-static int asus_led_init(struct device *dev)
+static int asus_acpi_remove(struct acpi_device *device, int type)
{
- int rv;
-
- rv = ASUS_LED_REGISTER(mled, dev);
- if (rv)
- goto out;
-
- rv = ASUS_LED_REGISTER(tled, dev);
- if (rv)
- goto out1;
-
- rv = ASUS_LED_REGISTER(rled, dev);
- if (rv)
- goto out2;
-
- rv = ASUS_LED_REGISTER(pled, dev);
- if (rv)
- goto out3;
-
- rv = ASUS_LED_REGISTER(gled, dev);
- if (rv)
- goto out4;
+ struct asus_laptop *asus = acpi_driver_data(device);
- if (kled_set_handle && kled_get_handle)
- rv = ASUS_LED_REGISTER(kled, dev);
- if (rv)
- goto out5;
-
- led_workqueue = create_singlethread_workqueue("led_workqueue");
- if (!led_workqueue)
- goto out6;
+ asus_backlight_exit(asus);
+ asus_rfkill_exit(asus);
+ asus_led_exit(asus);
+ asus_input_exit(asus);
+ asus_platform_exit(asus);
+ kfree(asus->name);
+ kfree(asus);
return 0;
-out6:
- rv = -ENOMEM;
- ASUS_LED_UNREGISTER(kled);
-out5:
- ASUS_LED_UNREGISTER(gled);
-out4:
- ASUS_LED_UNREGISTER(pled);
-out3:
- ASUS_LED_UNREGISTER(rled);
-out2:
- ASUS_LED_UNREGISTER(tled);
-out1:
- ASUS_LED_UNREGISTER(mled);
-out:
- return rv;
}
+static const struct acpi_device_id asus_device_ids[] = {
+ {"ATK0100", 0},
+ {"ATK0101", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, asus_device_ids);
+
+static struct acpi_driver asus_acpi_driver = {
+ .name = ASUS_LAPTOP_NAME,
+ .class = ASUS_LAPTOP_CLASS,
+ .owner = THIS_MODULE,
+ .ids = asus_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
+ .ops = {
+ .add = asus_acpi_add,
+ .remove = asus_acpi_remove,
+ .notify = asus_acpi_notify,
+ },
+};
+
static int __init asus_laptop_init(void)
{
int result;
- result = acpi_bus_register_driver(&asus_hotk_driver);
+ result = platform_driver_register(&platform_driver);
if (result < 0)
return result;
- /*
- * This is a bit of a kludge. We only want this module loaded
- * for ASUS systems, but there's currently no way to probe the
- * ACPI namespace for ASUS HIDs. So we just return failure if
- * we didn't find one, which will cause the module to be
- * unloaded.
- */
- if (!asus_hotk_found) {
- acpi_bus_unregister_driver(&asus_hotk_driver);
- return -ENODEV;
- }
-
- result = asus_input_init();
- if (result)
- goto fail_input;
-
- /* Register platform stuff */
- result = platform_driver_register(&asuspf_driver);
- if (result)
- goto fail_platform_driver;
-
- asuspf_device = platform_device_alloc(ASUS_HOTK_FILE, -1);
- if (!asuspf_device) {
- result = -ENOMEM;
- goto fail_platform_device1;
+ result = acpi_bus_register_driver(&asus_acpi_driver);
+ if (result < 0)
+ goto fail_acpi_driver;
+ if (!asus_device_present) {
+ result = -ENODEV;
+ goto fail_no_device;
}
-
- result = platform_device_add(asuspf_device);
- if (result)
- goto fail_platform_device2;
-
- result = sysfs_create_group(&asuspf_device->dev.kobj,
- &asuspf_attribute_group);
- if (result)
- goto fail_sysfs;
-
- result = asus_led_init(&asuspf_device->dev);
- if (result)
- goto fail_led;
-
- if (!acpi_video_backlight_support()) {
- result = asus_backlight_init(&asuspf_device->dev);
- if (result)
- goto fail_backlight;
- } else
- pr_info("Brightness ignored, must be controlled by "
- "ACPI video driver\n");
-
return 0;
-fail_backlight:
- asus_led_exit();
-
-fail_led:
- sysfs_remove_group(&asuspf_device->dev.kobj,
- &asuspf_attribute_group);
-
-fail_sysfs:
- platform_device_del(asuspf_device);
-
-fail_platform_device2:
- platform_device_put(asuspf_device);
-
-fail_platform_device1:
- platform_driver_unregister(&asuspf_driver);
-
-fail_platform_driver:
- asus_input_exit();
-
-fail_input:
-
+fail_no_device:
+ acpi_bus_unregister_driver(&asus_acpi_driver);
+fail_acpi_driver:
+ platform_driver_unregister(&platform_driver);
return result;
}
+static void __exit asus_laptop_exit(void)
+{
+ acpi_bus_unregister_driver(&asus_acpi_driver);
+ platform_driver_unregister(&platform_driver);
+}
+
module_init(asus_laptop_init);
module_exit(asus_laptop_exit);
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index c1d2aee..1381430 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1225,9 +1225,8 @@ static int asus_model_match(char *model)
else if (strncmp(model, "M2N", 3) == 0 ||
strncmp(model, "M3N", 3) == 0 ||
strncmp(model, "M5N", 3) == 0 ||
- strncmp(model, "M6N", 3) == 0 ||
strncmp(model, "S1N", 3) == 0 ||
- strncmp(model, "S5N", 3) == 0 || strncmp(model, "W1N", 3) == 0)
+ strncmp(model, "S5N", 3) == 0)
return xxN;
else if (strncmp(model, "M1", 2) == 0)
return M1A;
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 8cb20e4..035a7dd 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -507,6 +507,10 @@ static int cmpc_keys_codes[] = {
KEY_BRIGHTNESSDOWN,
KEY_BRIGHTNESSUP,
KEY_VENDOR,
+ KEY_UNKNOWN,
+ KEY_CAMERA,
+ KEY_BACK,
+ KEY_FORWARD,
KEY_MAX
};
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index b7f4d27..ef61497 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -132,8 +132,8 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = {
};
static struct calling_interface_buffer *buffer;
-struct page *bufferpage;
-DEFINE_MUTEX(buffer_mutex);
+static struct page *bufferpage;
+static DEFINE_MUTEX(buffer_mutex);
static int hwswitch_state;
@@ -580,6 +580,7 @@ static int __init dell_init(void)
fail_backlight:
i8042_remove_filter(dell_laptop_i8042_filter);
+ cancel_delayed_work_sync(&dell_rfkill_work);
fail_filter:
dell_cleanup_rfkill();
fail_rfkill:
@@ -597,12 +598,12 @@ fail_platform_driver:
static void __exit dell_exit(void)
{
- cancel_delayed_work_sync(&dell_rfkill_work);
i8042_remove_filter(dell_laptop_i8042_filter);
+ cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
dell_cleanup_rfkill();
if (platform_device) {
- platform_device_del(platform_device);
+ platform_device_unregister(platform_device);
platform_driver_unregister(&platform_driver);
}
kfree(da_tokens);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index e2be6bb..9a844ca 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -578,6 +578,8 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
struct pci_dev *dev;
struct pci_bus *bus;
bool blocked = eeepc_wlan_rfkill_blocked(eeepc);
+ bool absent;
+ u32 l;
if (eeepc->wlan_rfkill)
rfkill_set_sw_state(eeepc->wlan_rfkill, blocked);
@@ -591,6 +593,22 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
goto out_unlock;
}
+ if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
+ pr_err("Unable to read PCI config space?\n");
+ goto out_unlock;
+ }
+ absent = (l == 0xffffffff);
+
+ if (blocked != absent) {
+ pr_warning("BIOS says wireless lan is %s, "
+ "but the pci device is %s\n",
+ blocked ? "blocked" : "unblocked",
+ absent ? "absent" : "present");
+ pr_warning("skipped wireless hotplug as probably "
+ "inappropriate for this model\n");
+ goto out_unlock;
+ }
+
if (!blocked) {
dev = pci_get_slot(bus, 0);
if (dev) {
@@ -1277,7 +1295,8 @@ static void eeepc_dmi_check(struct eeepc_laptop *eeepc)
* hotplug code. In fact, current hotplug code seems to unplug another
* device...
*/
- if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0) {
+ if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0 ||
+ strcmp(model, "1005PE") == 0) {
eeepc->hotplug_disabled = true;
pr_info("wlan hotplug disabled\n");
}
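
The hotplug guard added above infers that the wireless device is absent
when the vendor/device dword reads back as all ones, and skips the
hotplug work when the BIOS rfkill state disagrees with what the bus
actually shows. A minimal sketch of the same presence test, assuming a
hypothetical bus pointer (an illustration, not the driver's code):

#include <linux/pci.h>

/* Sketch: an empty PCI slot reads 0xffffffff from PCI_VENDOR_ID. */
static bool example_slot_absent(struct pci_bus *bus)
{
	u32 l;

	if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l))
		return true;	/* config read failed; treat as absent */
	return l == 0xffffffff;	/* master-abort pattern: no device */
}
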
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index eb603f1..e7b0c3b 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -286,6 +286,7 @@ struct ibm_init_struct {
char param[32];
int (*init) (struct ibm_init_struct *);
+ mode_t base_procfs_mode;
struct ibm_struct *data;
};
@@ -2082,6 +2083,7 @@ static struct attribute_set *hotkey_dev_attributes;
static void tpacpi_driver_event(const unsigned int hkey_event);
static void hotkey_driver_event(const unsigned int scancode);
+static void hotkey_poll_setup(const bool may_warn);
/* HKEY.MHKG() return bits */
#define TP_HOTKEY_TABLET_MASK (1 << 3)
@@ -2264,6 +2266,8 @@ static int tpacpi_hotkey_driver_mask_set(const u32 mask)
rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) &
~hotkey_source_mask);
+ hotkey_poll_setup(true);
+
mutex_unlock(&hotkey_mutex);
return rc;
@@ -2548,7 +2552,7 @@ static void hotkey_poll_stop_sync(void)
}
/* call with hotkey_mutex held */
-static void hotkey_poll_setup(bool may_warn)
+static void hotkey_poll_setup(const bool may_warn)
{
const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask;
const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask;
@@ -2579,7 +2583,7 @@ static void hotkey_poll_setup(bool may_warn)
}
}
-static void hotkey_poll_setup_safe(bool may_warn)
+static void hotkey_poll_setup_safe(const bool may_warn)
{
mutex_lock(&hotkey_mutex);
hotkey_poll_setup(may_warn);
@@ -2597,7 +2601,11 @@ static void hotkey_poll_set_freq(unsigned int freq)
#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
-static void hotkey_poll_setup_safe(bool __unused)
+static void hotkey_poll_setup(const bool __unused)
+{
+}
+
+static void hotkey_poll_setup_safe(const bool __unused)
{
}
@@ -2607,16 +2615,11 @@ static int hotkey_inputdev_open(struct input_dev *dev)
{
switch (tpacpi_lifecycle) {
case TPACPI_LIFE_INIT:
- /*
- * hotkey_init will call hotkey_poll_setup_safe
- * at the appropriate moment
- */
- return 0;
- case TPACPI_LIFE_EXITING:
- return -EBUSY;
case TPACPI_LIFE_RUNNING:
hotkey_poll_setup_safe(false);
return 0;
+ case TPACPI_LIFE_EXITING:
+ return -EBUSY;
}
/* Should only happen if tpacpi_lifecycle is corrupt */
@@ -2627,7 +2630,7 @@ static int hotkey_inputdev_open(struct input_dev *dev)
static void hotkey_inputdev_close(struct input_dev *dev)
{
/* disable hotkey polling when possible */
- if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING &&
+ if (tpacpi_lifecycle != TPACPI_LIFE_EXITING &&
!(hotkey_source_mask & hotkey_driver_mask))
hotkey_poll_setup_safe(false);
}
@@ -3655,13 +3658,19 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
break;
case 3:
/* 0x3000-0x3FFF: bay-related wakeups */
- if (hkey == TP_HKEY_EV_BAYEJ_ACK) {
+ switch (hkey) {
+ case TP_HKEY_EV_BAYEJ_ACK:
hotkey_autosleep_ack = 1;
printk(TPACPI_INFO
"bay ejected\n");
hotkey_wakeup_hotunplug_complete_notify_change();
known_ev = true;
- } else {
+ break;
+ case TP_HKEY_EV_OPTDRV_EJ:
+ /* FIXME: kick libata if SATA link offline */
+ known_ev = true;
+ break;
+ default:
known_ev = false;
}
break;
@@ -3870,7 +3879,7 @@ enum {
TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */
TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */
TP_ACPI_BLUETOOTH_RESUMECTRL = 0x04, /* Bluetooth state at resume:
- off / last state */
+ 0 = disable, 1 = enable */
};
enum {
@@ -3916,10 +3925,11 @@ static int bluetooth_set_status(enum tpacpi_rfkill_state state)
}
#endif
- /* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */
- status = TP_ACPI_BLUETOOTH_RESUMECTRL;
if (state == TPACPI_RFK_RADIO_ON)
- status |= TP_ACPI_BLUETOOTH_RADIOSSW;
+ status = TP_ACPI_BLUETOOTH_RADIOSSW
+ | TP_ACPI_BLUETOOTH_RESUMECTRL;
+ else
+ status = 0;
if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
return -EIO;
@@ -4070,7 +4080,7 @@ enum {
TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */
TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */
TP_ACPI_WANCARD_RESUMECTRL = 0x04, /* Wan state at resume:
- off / last state */
+ 0 = disable, 1 = enable */
};
#define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw"
@@ -4107,10 +4117,11 @@ static int wan_set_status(enum tpacpi_rfkill_state state)
}
#endif
- /* We make sure to set TP_ACPI_WANCARD_RESUMECTRL */
- status = TP_ACPI_WANCARD_RESUMECTRL;
if (state == TPACPI_RFK_RADIO_ON)
- status |= TP_ACPI_WANCARD_RADIOSSW;
+ status = TP_ACPI_WANCARD_RADIOSSW
+ | TP_ACPI_WANCARD_RESUMECTRL;
+ else
+ status = 0;
if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
return -EIO;
@@ -4619,6 +4630,10 @@ static int video_read(struct seq_file *m)
return 0;
}
+ /* Even reads can crash X.org, so... */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
status = video_outputsw_get();
if (status < 0)
return status;
@@ -4652,6 +4667,10 @@ static int video_write(char *buf)
if (video_supported == TPACPI_VIDEO_NONE)
return -ENODEV;
+ /* Even reads can crash X.org, let alone writes... */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
enable = 0;
disable = 0;
@@ -6133,13 +6152,13 @@ static const struct tpacpi_quirk brightness_quirk_table[] __initconst = {
TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC), /* T43/p ATI */
/* Models with ATI GPUs that can use ECNVRAM */
- TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC),
+ TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC), /* R50,51 T40-42 */
TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
- TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
+ TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_EC), /* R52 */
TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
/* Models with Intel Extreme Graphics 2 */
- TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC),
+ TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC), /* X40 */
TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
@@ -6522,7 +6541,8 @@ static int volume_set_status(const u8 status)
return volume_set_status_ec(status);
}
-static int volume_set_mute_ec(const bool mute)
+/* returns < 0 on error, 0 on no change, 1 on change */
+static int __volume_set_mute_ec(const bool mute)
{
int rc;
u8 s, n;
@@ -6537,22 +6557,37 @@ static int volume_set_mute_ec(const bool mute)
n = (mute) ? s | TP_EC_AUDIO_MUTESW_MSK :
s & ~TP_EC_AUDIO_MUTESW_MSK;
- if (n != s)
+ if (n != s) {
rc = volume_set_status_ec(n);
+ if (!rc)
+ rc = 1;
+ }
unlock:
mutex_unlock(&volume_mutex);
return rc;
}
+static int volume_alsa_set_mute(const bool mute)
+{
+ dbg_printk(TPACPI_DBG_MIXER, "ALSA: trying to %smute\n",
+ (mute) ? "" : "un");
+ return __volume_set_mute_ec(mute);
+}
+
static int volume_set_mute(const bool mute)
{
+ int rc;
+
dbg_printk(TPACPI_DBG_MIXER, "trying to %smute\n",
(mute) ? "" : "un");
- return volume_set_mute_ec(mute);
+
+ rc = __volume_set_mute_ec(mute);
+ return (rc < 0) ? rc : 0;
}
-static int volume_set_volume_ec(const u8 vol)
+/* returns < 0 on error, 0 on no change, 1 on change */
+static int __volume_set_volume_ec(const u8 vol)
{
int rc;
u8 s, n;
@@ -6569,19 +6604,22 @@ static int volume_set_volume_ec(const u8 vol)
n = (s & ~TP_EC_AUDIO_LVL_MSK) | vol;
- if (n != s)
+ if (n != s) {
rc = volume_set_status_ec(n);
+ if (!rc)
+ rc = 1;
+ }
unlock:
mutex_unlock(&volume_mutex);
return rc;
}
-static int volume_set_volume(const u8 vol)
+static int volume_alsa_set_volume(const u8 vol)
{
dbg_printk(TPACPI_DBG_MIXER,
- "trying to set volume level to %hu\n", vol);
- return volume_set_volume_ec(vol);
+ "ALSA: trying to set volume level to %hu\n", vol);
+ return __volume_set_volume_ec(vol);
}
static void volume_alsa_notify_change(void)
@@ -6628,7 +6666,7 @@ static int volume_alsa_vol_get(struct snd_kcontrol *kcontrol,
static int volume_alsa_vol_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- return volume_set_volume(ucontrol->value.integer.value[0]);
+ return volume_alsa_set_volume(ucontrol->value.integer.value[0]);
}
#define volume_alsa_mute_info snd_ctl_boolean_mono_info
@@ -6651,7 +6689,7 @@ static int volume_alsa_mute_get(struct snd_kcontrol *kcontrol,
static int volume_alsa_mute_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- return volume_set_mute(!ucontrol->value.integer.value[0]);
+ return volume_alsa_set_mute(!ucontrol->value.integer.value[0]);
}
static struct snd_kcontrol_new volume_alsa_control_vol __devinitdata = {
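
The volume helpers above now use a tri-state return, < 0 on error, 0 on
no change, 1 on change, so the ALSA-facing wrappers can tell whether a
notification is warranted, while volume_set_mute() collapses the change
flag for callers that only care about errors. A minimal sketch of
consuming such a return (the notifier name is a hypothetical stand-in):

/* Sketch: consuming a tri-state (<0 error, 0 no change, 1 changed)
 * return. example_notify_change() is hypothetical. */
static int example_set_mute_and_notify(bool mute)
{
	int rc = __volume_set_mute_ec(mute);

	if (rc > 0)
		example_notify_change();
	return (rc < 0) ? rc : 0;	/* report only errors upward */
}
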
@@ -8477,9 +8515,10 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
"%s installed\n", ibm->name);
if (ibm->read) {
- mode_t mode;
+ mode_t mode = iibm->base_procfs_mode;
- mode = S_IRUGO;
+ if (!mode)
+ mode = S_IRUGO;
if (ibm->write)
mode |= S_IWUSR;
entry = proc_create_data(ibm->name, mode, proc_dir,
@@ -8670,6 +8709,7 @@ static struct ibm_init_struct ibms_init[] __initdata = {
#ifdef CONFIG_THINKPAD_ACPI_VIDEO
{
.init = video_init,
+ .base_procfs_mode = S_IRUSR,
.data = &video_driver_data,
},
#endif
@@ -9032,6 +9072,9 @@ static int __init thinkpad_acpi_module_init(void)
return ret;
}
}
+
+ tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
+
ret = input_register_device(tpacpi_inputdev);
if (ret < 0) {
printk(TPACPI_ERR "unable to register input device\n");
@@ -9041,7 +9084,6 @@ static int __init thinkpad_acpi_module_init(void)
tp_features.input_device_registered = 1;
}
- tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
return 0;
}
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 26c2117..405b969 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -814,21 +814,23 @@ static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
if (hci_result == HCI_SUCCESS) {
if (value == 0x100)
continue;
- else if (value & 0x80) {
- key = toshiba_acpi_get_entry_by_scancode
- (value & ~0x80);
- if (!key) {
- printk(MY_INFO "Unknown key %x\n",
- value & ~0x80);
- continue;
- }
- input_report_key(toshiba_acpi.hotkey_dev,
- key->keycode, 1);
- input_sync(toshiba_acpi.hotkey_dev);
- input_report_key(toshiba_acpi.hotkey_dev,
- key->keycode, 0);
- input_sync(toshiba_acpi.hotkey_dev);
+ /* act on key press; ignore key release */
+ if (value & 0x80)
+ continue;
+
+ key = toshiba_acpi_get_entry_by_scancode
+ (value);
+ if (!key) {
+ printk(MY_INFO "Unknown key %x\n",
+ value);
+ continue;
}
+ input_report_key(toshiba_acpi.hotkey_dev,
+ key->keycode, 1);
+ input_sync(toshiba_acpi.hotkey_dev);
+ input_report_key(toshiba_acpi.hotkey_dev,
+ key->keycode, 0);
+ input_sync(toshiba_acpi.hotkey_dev);
} else if (hci_result == HCI_NOT_SUPPORTED) {
/* This is a workaround for an unresolved issue on
* some machines where system events sporadically
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index d4b3d67..bf14672 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -98,10 +98,10 @@ config BATTERY_WM97XX
Say Y to enable support for battery measured by WM97xx aux port.
config BATTERY_BQ27x00
- tristate "BQ27200 battery driver"
+ tristate "BQ27x00 battery driver"
depends on I2C
help
- Say Y here to enable support for batteries with BQ27200(I2C) chip.
+ Say Y here to enable support for batteries with BQ27x00 (I2C) chips.
config BATTERY_DA9030
tristate "DA9030 battery driver"
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index 62bb981..bece33e 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -26,13 +26,22 @@
#include <linux/i2c.h>
#include <asm/unaligned.h>
-#define DRIVER_VERSION "1.0.0"
+#define DRIVER_VERSION "1.1.0"
#define BQ27x00_REG_TEMP 0x06
#define BQ27x00_REG_VOLT 0x08
-#define BQ27x00_REG_RSOC 0x0B /* Relative State-of-Charge */
#define BQ27x00_REG_AI 0x14
#define BQ27x00_REG_FLAGS 0x0A
+#define BQ27x00_REG_TTE 0x16
+#define BQ27x00_REG_TTF 0x18
+#define BQ27x00_REG_TTECP 0x26
+
+#define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */
+#define BQ27000_FLAG_CHGS BIT(7)
+
+#define BQ27500_REG_SOC 0x2c
+#define BQ27500_FLAG_DSC BIT(0)
+#define BQ27500_FLAG_FC BIT(9)
/* If the system has several batteries we need a different name for each
* of them...
@@ -46,25 +55,28 @@ struct bq27x00_access_methods {
struct bq27x00_device_info *di);
};
+enum bq27x00_chip { BQ27000, BQ27500 };
+
struct bq27x00_device_info {
struct device *dev;
int id;
- int voltage_uV;
- int current_uA;
- int temp_C;
- int charge_rsoc;
struct bq27x00_access_methods *bus;
struct power_supply bat;
+ enum bq27x00_chip chip;
struct i2c_client *client;
};
static enum power_supply_property bq27x00_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
};
/*
@@ -74,16 +86,11 @@ static enum power_supply_property bq27x00_battery_props[] = {
static int bq27x00_read(u8 reg, int *rt_value, int b_single,
struct bq27x00_device_info *di)
{
- int ret;
-
- ret = di->bus->read(reg, rt_value, b_single, di);
- *rt_value = be16_to_cpu(*rt_value);
-
- return ret;
+ return di->bus->read(reg, rt_value, b_single, di);
}
/*
- * Return the battery temperature in Celsius degrees
+ * Return the battery temperature in tenths of degree Celsius
* Or < 0 if something fails.
*/
static int bq27x00_battery_temperature(struct bq27x00_device_info *di)
@@ -97,7 +104,10 @@ static int bq27x00_battery_temperature(struct bq27x00_device_info *di)
return ret;
}
- return (temp >> 2) - 273;
+ if (di->chip == BQ27500)
+ return temp - 2731;
+ else
+ return ((temp >> 2) - 273) * 10;
}
/*
@@ -115,7 +125,7 @@ static int bq27x00_battery_voltage(struct bq27x00_device_info *di)
return ret;
}
- return volt;
+ return volt * 1000;
}
/*
@@ -134,16 +144,23 @@ static int bq27x00_battery_current(struct bq27x00_device_info *di)
dev_err(di->dev, "error reading current\n");
return 0;
}
- ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di);
- if (ret < 0) {
- dev_err(di->dev, "error reading flags\n");
- return 0;
- }
- if ((flags & (1 << 7)) != 0) {
- dev_dbg(di->dev, "negative current!\n");
- return -curr;
+
+ if (di->chip == BQ27500) {
+ /* bq27500 returns signed value */
+ curr = (int)(s16)curr;
+ } else {
+ ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di);
+ if (ret < 0) {
+ dev_err(di->dev, "error reading flags\n");
+ return 0;
+ }
+ if (flags & BQ27000_FLAG_CHGS) {
+ dev_dbg(di->dev, "negative current!\n");
+ curr = -curr;
+ }
}
- return curr;
+
+ return curr * 1000;
}
/*
@@ -155,13 +172,70 @@ static int bq27x00_battery_rsoc(struct bq27x00_device_info *di)
int ret;
int rsoc = 0;
- ret = bq27x00_read(BQ27x00_REG_RSOC, &rsoc, 1, di);
+ if (di->chip == BQ27500)
+ ret = bq27x00_read(BQ27500_REG_SOC, &rsoc, 0, di);
+ else
+ ret = bq27x00_read(BQ27000_REG_RSOC, &rsoc, 1, di);
if (ret) {
dev_err(di->dev, "error reading relative State-of-Charge\n");
return ret;
}
- return rsoc >> 8;
+ return rsoc;
+}
+
+static int bq27x00_battery_status(struct bq27x00_device_info *di,
+ union power_supply_propval *val)
+{
+ int flags = 0;
+ int status;
+ int ret;
+
+ ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di);
+ if (ret < 0) {
+ dev_err(di->dev, "error reading flags\n");
+ return ret;
+ }
+
+ if (di->chip == BQ27500) {
+ if (flags & BQ27500_FLAG_FC)
+ status = POWER_SUPPLY_STATUS_FULL;
+ else if (flags & BQ27500_FLAG_DSC)
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ } else {
+ if (flags & BQ27000_FLAG_CHGS)
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+
+ val->intval = status;
+ return 0;
+}
+
+/*
+ * Read a time register.
+ * Return < 0 if something fails.
+ */
+static int bq27x00_battery_time(struct bq27x00_device_info *di, int reg,
+ union power_supply_propval *val)
+{
+ int tval = 0;
+ int ret;
+
+ ret = bq27x00_read(reg, &tval, 0, di);
+ if (ret) {
+ dev_err(di->dev, "error reading register %02x\n", reg);
+ return ret;
+ }
+
+ if (tval == 65535)
+ return -ENODATA;
+
+ val->intval = tval * 60;
+ return 0;
}
#define to_bq27x00_device_info(x) container_of((x), \
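
The getters above normalise raw register values to the power-supply
class units: microvolts, microamps, tenths of a degree Celsius, and
seconds (a time register reading of 65535 maps to -ENODATA). The
bq27000 reports temperature in 0.25 K units, while the bq27500 reports
tenths of a Kelvin, hence the plain temp - 2731 in that branch. A
worked sketch of the arithmetic, using hypothetical raw readings:

/* Sketch: raw bq27x00 readings converted to power-supply-class units,
 * mirroring the getters above. All raw values are hypothetical. */
static void example_unit_conversions(void)
{
	int raw_temp = 1192;	/* bq27000 reports 0.25 K units */
	int raw_volt = 3700;	/* register reports millivolts */
	int raw_tte = 90;	/* time registers report minutes */

	int temp = ((raw_temp >> 2) - 273) * 10;	/* 250: 25.0 deg C */
	int volt = raw_volt * 1000;			/* 3700000 uV */
	int tte = raw_tte * 60;				/* 5400 s */

	(void)temp; (void)volt; (void)tte;	/* illustration only */
}
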
@@ -171,9 +245,13 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
+ int ret = 0;
struct bq27x00_device_info *di = to_bq27x00_device_info(psy);
switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = bq27x00_battery_status(di, val);
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
case POWER_SUPPLY_PROP_PRESENT:
val->intval = bq27x00_battery_voltage(di);
@@ -189,11 +267,20 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_TEMP:
val->intval = bq27x00_battery_temperature(di);
break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ ret = bq27x00_battery_time(di, BQ27x00_REG_TTE, val);
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+ ret = bq27x00_battery_time(di, BQ27x00_REG_TTECP, val);
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
+ ret = bq27x00_battery_time(di, BQ27x00_REG_TTF, val);
+ break;
default:
return -EINVAL;
}
- return 0;
+ return ret;
}
static void bq27x00_powersupply_init(struct bq27x00_device_info *di)
@@ -206,10 +293,10 @@ static void bq27x00_powersupply_init(struct bq27x00_device_info *di)
}
/*
- * BQ27200 specific code
+ * i2c specific code
*/
-static int bq27200_read(u8 reg, int *rt_value, int b_single,
+static int bq27x00_read_i2c(u8 reg, int *rt_value, int b_single,
struct bq27x00_device_info *di)
{
struct i2c_client *client = di->client;
@@ -238,7 +325,7 @@ static int bq27200_read(u8 reg, int *rt_value, int b_single,
err = i2c_transfer(client->adapter, msg, 1);
if (err >= 0) {
if (!b_single)
- *rt_value = get_unaligned_be16(data);
+ *rt_value = get_unaligned_le16(data);
else
*rt_value = data[0];
@@ -248,7 +335,7 @@ static int bq27200_read(u8 reg, int *rt_value, int b_single,
return err;
}
-static int bq27200_battery_probe(struct i2c_client *client,
+static int bq27x00_battery_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
char *name;
@@ -267,7 +354,7 @@ static int bq27200_battery_probe(struct i2c_client *client,
if (retval < 0)
return retval;
- name = kasprintf(GFP_KERNEL, "bq27200-%d", num);
+ name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
if (!name) {
dev_err(&client->dev, "failed to allocate device name\n");
retval = -ENOMEM;
@@ -281,6 +368,7 @@ static int bq27200_battery_probe(struct i2c_client *client,
goto batt_failed_2;
}
di->id = num;
+ di->chip = id->driver_data;
bus = kzalloc(sizeof(*bus), GFP_KERNEL);
if (!bus) {
@@ -293,7 +381,7 @@ static int bq27200_battery_probe(struct i2c_client *client,
i2c_set_clientdata(client, di);
di->dev = &client->dev;
di->bat.name = name;
- bus->read = &bq27200_read;
+ bus->read = &bq27x00_read_i2c;
di->bus = bus;
di->client = client;
@@ -323,7 +411,7 @@ batt_failed_1:
return retval;
}
-static int bq27200_battery_remove(struct i2c_client *client)
+static int bq27x00_battery_remove(struct i2c_client *client)
{
struct bq27x00_device_info *di = i2c_get_clientdata(client);
@@ -344,27 +432,28 @@ static int bq27200_battery_remove(struct i2c_client *client)
* Module stuff
*/
-static const struct i2c_device_id bq27200_id[] = {
- { "bq27200", 0 },
+static const struct i2c_device_id bq27x00_id[] = {
+ { "bq27200", BQ27000 }, /* bq27200 is same as bq27000, but with i2c */
+ { "bq27500", BQ27500 },
{},
};
-static struct i2c_driver bq27200_battery_driver = {
+static struct i2c_driver bq27x00_battery_driver = {
.driver = {
- .name = "bq27200-battery",
+ .name = "bq27x00-battery",
},
- .probe = bq27200_battery_probe,
- .remove = bq27200_battery_remove,
- .id_table = bq27200_id,
+ .probe = bq27x00_battery_probe,
+ .remove = bq27x00_battery_remove,
+ .id_table = bq27x00_id,
};
static int __init bq27x00_battery_init(void)
{
int ret;
- ret = i2c_add_driver(&bq27200_battery_driver);
+ ret = i2c_add_driver(&bq27x00_battery_driver);
if (ret)
- printk(KERN_ERR "Unable to register BQ27200 driver\n");
+ printk(KERN_ERR "Unable to register BQ27x00 driver\n");
return ret;
}
@@ -372,7 +461,7 @@ module_init(bq27x00_battery_init);
static void __exit bq27x00_battery_exit(void)
{
- i2c_del_driver(&bq27200_battery_driver);
+ i2c_del_driver(&bq27x00_battery_driver);
}
module_exit(bq27x00_battery_exit);
diff --git a/drivers/power/da9030_battery.c b/drivers/power/da9030_battery.c
index 3364198..a2e71f7 100644
--- a/drivers/power/da9030_battery.c
+++ b/drivers/power/da9030_battery.c
@@ -509,7 +509,7 @@ static int da9030_battery_probe(struct platform_device *pdev)
charger->master = pdev->dev.parent;
- /* 10 seconds between monotor runs unless platfrom defines other
+ /* 10 seconds between monitor runs unless platform defines other
interval */
charger->interval = msecs_to_jiffies(
(pdata->batmon_interval ? : 10) * 1000);
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index 6ea3cb5..23eed35 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -26,7 +26,7 @@
static DEFINE_MUTEX(bat_lock);
static struct work_struct bat_work;
-struct mutex work_lock;
+static struct mutex work_lock;
static int bat_status = POWER_SUPPLY_STATUS_UNKNOWN;
static struct wm97xx_batt_info *gpdata;
static enum power_supply_property *prop;
@@ -203,7 +203,7 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
goto err2;
ret = request_irq(gpio_to_irq(pdata->charge_gpio),
wm97xx_chrg_irq, IRQF_DISABLED,
- "AC Detect", 0);
+ "AC Detect", dev);
if (ret)
goto err2;
props++; /* POWER_SUPPLY_PROP_STATUS */
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 262f62e..834b484 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -27,6 +27,17 @@ config REGULATOR_DEBUG
help
Say yes here to enable debugging support.
+config REGULATOR_DUMMY
+ bool "Provide a dummy regulator if regulator lookups fail"
+ help
+	  If this option is enabled then when a regulator lookup fails
+	  and the board has not specified that it has provided full
+	  constraints, the regulator core will provide an always-enabled
+	  dummy regulator, allowing consumer drivers to continue.
+
+ A warning will be generated when this substitution is done.
+
config REGULATOR_FIXED_VOLTAGE
tristate "Fixed voltage regulator support"
help
@@ -69,6 +80,13 @@ config REGULATOR_MAX1586
regulator via I2C bus. The provided regulator is suitable
for PXA27x chips to control VCC_CORE and VCC_USIM voltages.
+config REGULATOR_MAX8649
+ tristate "Maxim 8649 voltage regulator"
+ depends on I2C
+ help
+ This driver controls a Maxim 8649 voltage output regulator via
+ I2C bus.
+
config REGULATOR_MAX8660
tristate "Maxim 8660/8661 voltage regulator"
depends on I2C
@@ -91,19 +109,26 @@ config REGULATOR_WM831X
of PMIC devices.
config REGULATOR_WM8350
- tristate "Wolfson Microelectroncis WM8350 AudioPlus PMIC"
+ tristate "Wolfson Microelectronics WM8350 AudioPlus PMIC"
depends on MFD_WM8350
help
This driver provides support for the voltage and current regulators
of the WM8350 AudioPlus PMIC.
config REGULATOR_WM8400
- tristate "Wolfson Microelectroncis WM8400 AudioPlus PMIC"
+ tristate "Wolfson Microelectronics WM8400 AudioPlus PMIC"
depends on MFD_WM8400
help
This driver provides support for the voltage regulators of the
WM8400 AudioPlus PMIC.
+config REGULATOR_WM8994
+ tristate "Wolfson Microelectronics WM8994 CODEC"
+ depends on MFD_WM8994
+ help
+ This driver provides support for the voltage regulators on the
+ WM8994 CODEC.
+
config REGULATOR_DA903X
tristate "Support regulators on Dialog Semiconductor DA9030/DA9034 PMIC"
depends on PMIC_DA903X
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b3c806c..e845b66 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -9,15 +9,18 @@ obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o
+obj-$(CONFIG_REGULATOR_DUMMY) += dummy.o
obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
+obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
+obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index b349db4..7de9509 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -561,7 +561,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
* for all the different regulators.
*/
-static int __init ab3100_regulators_probe(struct platform_device *pdev)
+static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
{
struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
struct ab3100 *ab3100 = platform_get_drvdata(pdev);
@@ -641,7 +641,7 @@ static int __init ab3100_regulators_probe(struct platform_device *pdev)
return 0;
}
-static int __exit ab3100_regulators_remove(struct platform_device *pdev)
+static int __devexit ab3100_regulators_remove(struct platform_device *pdev)
{
int i;
@@ -659,7 +659,7 @@ static struct platform_driver ab3100_regulators_driver = {
.owner = THIS_MODULE,
},
.probe = ab3100_regulators_probe,
- .remove = __exit_p(ab3100_regulators_remove),
+ .remove = __devexit_p(ab3100_regulators_remove),
};
static __init int ab3100_regulators_init(void)
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b60a4c9..c7bbe30 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -19,10 +19,13 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/suspend.h>
+#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include "dummy.h"
+
#define REGULATOR_VERSION "0.5"
static DEFINE_MUTEX(regulator_list_mutex);
@@ -1084,6 +1087,13 @@ overflow_err:
return NULL;
}
+static int _regulator_get_enable_time(struct regulator_dev *rdev)
+{
+ if (!rdev->desc->ops->enable_time)
+ return 0;
+ return rdev->desc->ops->enable_time(rdev);
+}
+
/* Internal regulator request function */
static struct regulator *_regulator_get(struct device *dev, const char *id,
int exclusive)
@@ -1115,6 +1125,22 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
goto found;
}
}
+
+#ifdef CONFIG_REGULATOR_DUMMY
+ if (!devname)
+ devname = "deviceless";
+
+ /* If the board didn't flag that it was fully constrained then
+ * substitute in a dummy regulator so consumers can continue.
+ */
+ if (!has_full_constraints) {
+ pr_warning("%s supply %s not found, using dummy regulator\n",
+ devname, id);
+ rdev = dummy_regulator_rdev;
+ goto found;
+ }
+#endif
+
mutex_unlock(&regulator_list_mutex);
return regulator;
@@ -1251,7 +1277,7 @@ static int _regulator_can_change_status(struct regulator_dev *rdev)
/* locks held by regulator_enable() */
static int _regulator_enable(struct regulator_dev *rdev)
{
- int ret;
+ int ret, delay;
/* do we need to enable the supply regulator first */
if (rdev->supply) {
@@ -1275,13 +1301,34 @@ static int _regulator_enable(struct regulator_dev *rdev)
if (!_regulator_can_change_status(rdev))
return -EPERM;
- if (rdev->desc->ops->enable) {
- ret = rdev->desc->ops->enable(rdev);
- if (ret < 0)
- return ret;
- } else {
+ if (!rdev->desc->ops->enable)
return -EINVAL;
+
+	/* Query before enabling in case the enable time is
+	 * configuration dependent. */
+ ret = _regulator_get_enable_time(rdev);
+ if (ret >= 0) {
+ delay = ret;
+ } else {
+ printk(KERN_WARNING
+ "%s: enable_time() failed for %s: %d\n",
+ __func__, rdev_get_name(rdev),
+ ret);
+ delay = 0;
}
+
+ /* Allow the regulator to ramp; it would be useful
+ * to extend this for bulk operations so that the
+ * regulators can ramp together. */
+ ret = rdev->desc->ops->enable(rdev);
+ if (ret < 0)
+ return ret;
+
+ if (delay >= 1000)
+ mdelay(delay / 1000);
+ else if (delay)
+ udelay(delay);
+
} else if (ret < 0) {
printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n",
__func__, rdev_get_name(rdev), ret);
@@ -1341,6 +1388,9 @@ static int _regulator_disable(struct regulator_dev *rdev)
__func__, rdev_get_name(rdev));
return ret;
}
+
+ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+ NULL);
}
/* decrease our supplies ref count and disable if required */
@@ -1399,8 +1449,8 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
return ret;
}
/* notify other consumers that power has been forced off */
- _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE,
- NULL);
+ _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
+ REGULATOR_EVENT_DISABLE, NULL);
}
/* decrease our supplies ref count and disable if required */
@@ -1434,9 +1484,9 @@ EXPORT_SYMBOL_GPL(regulator_force_disable);
static int _regulator_is_enabled(struct regulator_dev *rdev)
{
- /* sanity check */
+ /* If we don't know then assume that the regulator is always on */
if (!rdev->desc->ops->is_enabled)
- return -EINVAL;
+ return 1;
return rdev->desc->ops->is_enabled(rdev);
}
@@ -2451,8 +2501,15 @@ EXPORT_SYMBOL_GPL(regulator_get_init_drvdata);
static int __init regulator_init(void)
{
+ int ret;
+
printk(KERN_INFO "regulator: core version %s\n", REGULATOR_VERSION);
- return class_register(&regulator_class);
+
+ ret = class_register(&regulator_class);
+
+ regulator_dummy_init();
+
+ return ret;
}
/* init early to allow our consumers to complete system booting */
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
new file mode 100644
index 0000000..c7410bd
--- /dev/null
+++ b/drivers/regulator/dummy.c
@@ -0,0 +1,66 @@
+/*
+ * dummy.c
+ *
+ * Copyright 2010 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This is useful for systems with mixed controllable and
+ * non-controllable regulators, as well as for allowing testing on
+ * systems with no controllable regulators.
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#include "dummy.h"
+
+struct regulator_dev *dummy_regulator_rdev;
+
+static struct regulator_init_data dummy_initdata;
+
+static struct regulator_ops dummy_ops;
+
+static struct regulator_desc dummy_desc = {
+ .name = "dummy",
+ .id = -1,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .ops = &dummy_ops,
+};
+
+static struct platform_device *dummy_pdev;
+
+void __init regulator_dummy_init(void)
+{
+ int ret;
+
+ dummy_pdev = platform_device_alloc("reg-dummy", -1);
+ if (!dummy_pdev) {
+ pr_err("Failed to allocate dummy regulator device\n");
+ return;
+ }
+
+ ret = platform_device_add(dummy_pdev);
+ if (ret != 0) {
+ pr_err("Failed to register dummy regulator device: %d\n", ret);
+ platform_device_put(dummy_pdev);
+ return;
+ }
+
+ dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
+ &dummy_initdata, NULL);
+ if (IS_ERR(dummy_regulator_rdev)) {
+ ret = PTR_ERR(dummy_regulator_rdev);
+ pr_err("Failed to register regulator: %d\n", ret);
+ platform_device_unregister(dummy_pdev);
+ return;
+ }
+}
diff --git a/drivers/regulator/dummy.h b/drivers/regulator/dummy.h
new file mode 100644
index 0000000..3921c0e
--- /dev/null
+++ b/drivers/regulator/dummy.h
@@ -0,0 +1,31 @@
+/*
+ * dummy.h
+ *
+ * Copyright 2010 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This is useful for systems with mixed controllable and
+ * non-controllable regulators, as well as for allowing testing on
+ * systems with no controllable regulators.
+ */
+
+#ifndef _DUMMY_H
+#define _DUMMY_H
+
+struct regulator_dev;
+
+extern struct regulator_dev *dummy_regulator_rdev;
+
+#ifdef CONFIG_REGULATOR_DUMMY
+void __init regulator_dummy_init(void);
+#else
+static inline void regulator_dummy_init(void) { }
+#endif
+
+#endif
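
Together with the lookup fallback patched into core.c above, the dummy
regulator lets consumer drivers keep probing on boards that have not
declared full constraints. A hedged consumer-side sketch (the device
pointer and the "vcc_core" supply name are hypothetical):

#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Sketch: a consumer that tolerates boards without a real supply.
 * With CONFIG_REGULATOR_DUMMY, the core substitutes the always-on
 * dummy (and logs a warning) instead of failing the lookup. */
static int example_consumer_probe(struct device *dev)
{
	struct regulator *supply;

	supply = regulator_get(dev, "vcc_core");
	if (IS_ERR(supply))
		return PTR_ERR(supply);

	return regulator_enable(supply);	/* no-op on the dummy */
}
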
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index f9f516a..d11f762 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -24,14 +24,16 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/fixed.h>
#include <linux/gpio.h>
+#include <linux/delay.h>
struct fixed_voltage_data {
struct regulator_desc desc;
struct regulator_dev *dev;
int microvolts;
int gpio;
- unsigned enable_high:1;
- unsigned is_enabled:1;
+ unsigned startup_delay;
+ bool enable_high;
+ bool is_enabled;
};
static int fixed_voltage_is_enabled(struct regulator_dev *dev)
@@ -47,7 +49,7 @@ static int fixed_voltage_enable(struct regulator_dev *dev)
if (gpio_is_valid(data->gpio)) {
gpio_set_value_cansleep(data->gpio, data->enable_high);
- data->is_enabled = 1;
+ data->is_enabled = true;
}
return 0;
@@ -59,12 +61,19 @@ static int fixed_voltage_disable(struct regulator_dev *dev)
if (gpio_is_valid(data->gpio)) {
gpio_set_value_cansleep(data->gpio, !data->enable_high);
- data->is_enabled = 0;
+ data->is_enabled = false;
}
return 0;
}
+static int fixed_voltage_enable_time(struct regulator_dev *dev)
+{
+ struct fixed_voltage_data *data = rdev_get_drvdata(dev);
+
+ return data->startup_delay;
+}
+
static int fixed_voltage_get_voltage(struct regulator_dev *dev)
{
struct fixed_voltage_data *data = rdev_get_drvdata(dev);
@@ -87,11 +96,12 @@ static struct regulator_ops fixed_voltage_ops = {
.is_enabled = fixed_voltage_is_enabled,
.enable = fixed_voltage_enable,
.disable = fixed_voltage_disable,
+ .enable_time = fixed_voltage_enable_time,
.get_voltage = fixed_voltage_get_voltage,
.list_voltage = fixed_voltage_list_voltage,
};
-static int regulator_fixed_voltage_probe(struct platform_device *pdev)
+static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
{
struct fixed_voltage_config *config = pdev->dev.platform_data;
struct fixed_voltage_data *drvdata;
@@ -117,6 +127,7 @@ static int regulator_fixed_voltage_probe(struct platform_device *pdev)
drvdata->microvolts = config->microvolts;
drvdata->gpio = config->gpio;
+ drvdata->startup_delay = config->startup_delay;
if (gpio_is_valid(config->gpio)) {
drvdata->enable_high = config->enable_high;
@@ -163,7 +174,7 @@ static int regulator_fixed_voltage_probe(struct platform_device *pdev)
/* Regulator without GPIO control is considered
* always enabled
*/
- drvdata->is_enabled = 1;
+ drvdata->is_enabled = true;
}
drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
@@ -191,7 +202,7 @@ err:
return ret;
}
-static int regulator_fixed_voltage_remove(struct platform_device *pdev)
+static int __devexit reg_fixed_voltage_remove(struct platform_device *pdev)
{
struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev);
@@ -205,10 +216,11 @@ static int regulator_fixed_voltage_remove(struct platform_device *pdev)
}
static struct platform_driver regulator_fixed_voltage_driver = {
- .probe = regulator_fixed_voltage_probe,
- .remove = regulator_fixed_voltage_remove,
+ .probe = reg_fixed_voltage_probe,
+ .remove = __devexit_p(reg_fixed_voltage_remove),
.driver = {
.name = "reg-fixed-voltage",
+ .owner = THIS_MODULE,
},
};
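
The new startup_delay field is reported through the enable_time()
operation added above, so the core waits for the rail to ramp after the
enable GPIO is asserted. A hedged board-file sketch (the GPIO number,
voltage, and names are hypothetical):

#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>

/* Sketch: a GPIO-gated 1.8 V rail that needs 500 us to come up.
 * All values below are hypothetical. */
static struct fixed_voltage_config example_fixed_config = {
	.supply_name	= "vdd_18",
	.microvolts	= 1800000,
	.gpio		= 42,
	.enable_high	= 1,
	.startup_delay	= 500,	/* us, returned by enable_time() */
};

static struct platform_device example_fixed_device = {
	.name	= "reg-fixed-voltage",
	.id	= 0,
	.dev	= {
		.platform_data = &example_fixed_config,
	},
};
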
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 4f33a0f..f5532ed 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -54,7 +54,7 @@ static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val);
#define LP3971_BUCK2_BASE 0x29
#define LP3971_BUCK3_BASE 0x32
-const static int buck_base_addr[] = {
+static const int buck_base_addr[] = {
LP3971_BUCK1_BASE,
LP3971_BUCK2_BASE,
LP3971_BUCK3_BASE,
@@ -63,7 +63,7 @@ const static int buck_base_addr[] = {
#define LP3971_BUCK_TARGET_VOL1_REG(x) (buck_base_addr[x])
#define LP3971_BUCK_TARGET_VOL2_REG(x) (buck_base_addr[x]+1)
-const static int buck_voltage_map[] = {
+static const int buck_voltage_map[] = {
0, 800, 850, 900, 950, 1000, 1050, 1100,
1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500,
1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800,
@@ -96,17 +96,17 @@ const static int buck_voltage_map[] = {
#define LDO_VOL_CONTR_SHIFT(x) ((x & 1) << 2)
#define LDO_VOL_CONTR_MASK 0x0f
-const static int ldo45_voltage_map[] = {
+static const int ldo45_voltage_map[] = {
1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350,
1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300,
};
-const static int ldo123_voltage_map[] = {
+static const int ldo123_voltage_map[] = {
1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500,
2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300,
};
-const static int *ldo_voltage_map[] = {
+static const int *ldo_voltage_map[] = {
ldo123_voltage_map, /* LDO1 */
ldo123_voltage_map, /* LDO2 */
ldo123_voltage_map, /* LDO3 */
@@ -431,20 +431,20 @@ static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val)
return ret;
}
-static int setup_regulators(struct lp3971 *lp3971,
- struct lp3971_platform_data *pdata)
+static int __devinit setup_regulators(struct lp3971 *lp3971,
+ struct lp3971_platform_data *pdata)
{
int i, err;
- int num_regulators = pdata->num_regulators;
- lp3971->num_regulators = num_regulators;
- lp3971->rdev = kzalloc(sizeof(struct regulator_dev *) * num_regulators,
- GFP_KERNEL);
+
+ lp3971->num_regulators = pdata->num_regulators;
+ lp3971->rdev = kcalloc(pdata->num_regulators,
+ sizeof(struct regulator_dev *), GFP_KERNEL);
/* Instantiate the regulators */
- for (i = 0; i < num_regulators; i++) {
- int id = pdata->regulators[i].id;
- lp3971->rdev[i] = regulator_register(&regulators[id],
- lp3971->dev, pdata->regulators[i].initdata, lp3971);
+ for (i = 0; i < pdata->num_regulators; i++) {
+ struct lp3971_regulator_subdev *reg = &pdata->regulators[i];
+ lp3971->rdev[i] = regulator_register(&regulators[reg->id],
+ lp3971->dev, reg->initdata, lp3971);
if (IS_ERR(lp3971->rdev[i])) {
err = PTR_ERR(lp3971->rdev[i]);
@@ -455,10 +455,10 @@ static int setup_regulators(struct lp3971 *lp3971,
}
return 0;
+
error:
- for (i = 0; i < num_regulators; i++)
- if (lp3971->rdev[i])
- regulator_unregister(lp3971->rdev[i]);
+ while (--i >= 0)
+ regulator_unregister(lp3971->rdev[i]);
kfree(lp3971->rdev);
lp3971->rdev = NULL;
return err;
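
The reworked error path above unregisters only the regulators that were
successfully registered, walking the loop index backwards from the
point of failure. The same unwind idiom in isolation, with hypothetical
register_one()/unregister_one() stand-ins:

/* Sketch: unwind exactly the registrations that succeeded. */
static int example_register_all(int count)
{
	int i, err;

	for (i = 0; i < count; i++) {
		err = register_one(i);	/* hypothetical */
		if (err)
			goto error;
	}
	return 0;

error:
	while (--i >= 0)	/* i is the failed index; undo 0..i-1 */
		unregister_one(i);	/* hypothetical */
	return err;
}
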
@@ -472,15 +472,17 @@ static int __devinit lp3971_i2c_probe(struct i2c_client *i2c,
int ret;
u16 val;
- lp3971 = kzalloc(sizeof(struct lp3971), GFP_KERNEL);
- if (lp3971 == NULL) {
- ret = -ENOMEM;
- goto err;
+ if (!pdata) {
+ dev_dbg(&i2c->dev, "No platform init data supplied\n");
+ return -ENODEV;
}
+ lp3971 = kzalloc(sizeof(struct lp3971), GFP_KERNEL);
+ if (lp3971 == NULL)
+ return -ENOMEM;
+
lp3971->i2c = i2c;
lp3971->dev = &i2c->dev;
- i2c_set_clientdata(i2c, lp3971);
mutex_init(&lp3971->io_lock);
@@ -493,19 +495,15 @@ static int __devinit lp3971_i2c_probe(struct i2c_client *i2c,
goto err_detect;
}
- if (pdata) {
- ret = setup_regulators(lp3971, pdata);
- if (ret < 0)
- goto err_detect;
- } else
- dev_warn(lp3971->dev, "No platform init data supplied\n");
+ ret = setup_regulators(lp3971, pdata);
+ if (ret < 0)
+ goto err_detect;
+ i2c_set_clientdata(i2c, lp3971);
return 0;
err_detect:
- i2c_set_clientdata(i2c, NULL);
kfree(lp3971);
-err:
return ret;
}
@@ -513,11 +511,13 @@ static int __devexit lp3971_i2c_remove(struct i2c_client *i2c)
{
struct lp3971 *lp3971 = i2c_get_clientdata(i2c);
int i;
+
+ i2c_set_clientdata(i2c, NULL);
+
for (i = 0; i < lp3971->num_regulators; i++)
- if (lp3971->rdev[i])
- regulator_unregister(lp3971->rdev[i]);
+ regulator_unregister(lp3971->rdev[i]);
+
kfree(lp3971->rdev);
- i2c_set_clientdata(i2c, NULL);
kfree(lp3971);
return 0;
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 2c082d3..a49fc95 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -179,8 +179,8 @@ static struct regulator_desc max1586_reg[] = {
},
};
-static int max1586_pmic_probe(struct i2c_client *client,
- const struct i2c_device_id *i2c_id)
+static int __devinit max1586_pmic_probe(struct i2c_client *client,
+ const struct i2c_device_id *i2c_id)
{
struct regulator_dev **rdev;
struct max1586_platform_data *pdata = client->dev.platform_data;
@@ -235,7 +235,7 @@ out:
return ret;
}
-static int max1586_pmic_remove(struct i2c_client *client)
+static int __devexit max1586_pmic_remove(struct i2c_client *client)
{
struct regulator_dev **rdev = i2c_get_clientdata(client);
int i;
@@ -257,9 +257,10 @@ MODULE_DEVICE_TABLE(i2c, max1586_id);
static struct i2c_driver max1586_pmic_driver = {
.probe = max1586_pmic_probe,
- .remove = max1586_pmic_remove,
+ .remove = __devexit_p(max1586_pmic_remove),
.driver = {
.name = "max1586",
+ .owner = THIS_MODULE,
},
.id_table = max1586_id,
};
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
new file mode 100644
index 0000000..3ebdf69
--- /dev/null
+++ b/drivers/regulator/max8649.c
@@ -0,0 +1,408 @@
+/*
+ * Regulators driver for Maxim max8649
+ *
+ * Copyright (C) 2009-2010 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/max8649.h>
+
+#define MAX8649_DCDC_VMIN 750000 /* uV */
+#define MAX8649_DCDC_VMAX 1380000 /* uV */
+#define MAX8649_DCDC_STEP 10000 /* uV */
+#define MAX8649_VOL_MASK 0x3f
+
+/* Registers */
+#define MAX8649_MODE0 0x00
+#define MAX8649_MODE1 0x01
+#define MAX8649_MODE2 0x02
+#define MAX8649_MODE3 0x03
+#define MAX8649_CONTROL 0x04
+#define MAX8649_SYNC 0x05
+#define MAX8649_RAMP 0x06
+#define MAX8649_CHIP_ID1 0x08
+#define MAX8649_CHIP_ID2 0x09
+
+/* Bits */
+#define MAX8649_EN_PD (1 << 7)
+#define MAX8649_VID0_PD (1 << 6)
+#define MAX8649_VID1_PD (1 << 5)
+#define MAX8649_VID_MASK (3 << 5)
+
+#define MAX8649_FORCE_PWM (1 << 7)
+#define MAX8649_SYNC_EXTCLK (1 << 6)
+
+#define MAX8649_EXT_MASK (3 << 6)
+
+#define MAX8649_RAMP_MASK (7 << 5)
+#define MAX8649_RAMP_DOWN (1 << 1)
+
+struct max8649_regulator_info {
+ struct regulator_dev *regulator;
+ struct i2c_client *i2c;
+ struct device *dev;
+ struct mutex io_lock;
+
+ int vol_reg;
+ unsigned mode:2; /* bit[1:0] = VID1, VID0 */
+ unsigned extclk_freq:2;
+ unsigned extclk:1;
+ unsigned ramp_timing:3;
+ unsigned ramp_down:1;
+};
+
+/* I2C operations */
+
+static inline int max8649_read_device(struct i2c_client *i2c,
+ int reg, int bytes, void *dest)
+{
+ unsigned char data;
+ int ret;
+
+ data = (unsigned char)reg;
+ ret = i2c_master_send(i2c, &data, 1);
+ if (ret < 0)
+ return ret;
+ ret = i2c_master_recv(i2c, dest, bytes);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static inline int max8649_write_device(struct i2c_client *i2c,
+ int reg, int bytes, void *src)
+{
+ unsigned char buf[bytes + 1];
+ int ret;
+
+ buf[0] = (unsigned char)reg;
+ memcpy(&buf[1], src, bytes);
+
+ ret = i2c_master_send(i2c, buf, bytes + 1);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int max8649_reg_read(struct i2c_client *i2c, int reg)
+{
+ struct max8649_regulator_info *info = i2c_get_clientdata(i2c);
+ unsigned char data;
+ int ret;
+
+ mutex_lock(&info->io_lock);
+ ret = max8649_read_device(i2c, reg, 1, &data);
+ mutex_unlock(&info->io_lock);
+
+ if (ret < 0)
+ return ret;
+ return (int)data;
+}
+
+static int max8649_set_bits(struct i2c_client *i2c, int reg,
+ unsigned char mask, unsigned char data)
+{
+ struct max8649_regulator_info *info = i2c_get_clientdata(i2c);
+ unsigned char value;
+ int ret;
+
+ mutex_lock(&info->io_lock);
+ ret = max8649_read_device(i2c, reg, 1, &value);
+ if (ret < 0)
+ goto out;
+ value &= ~mask;
+ value |= data;
+ ret = max8649_write_device(i2c, reg, 1, &value);
+out:
+ mutex_unlock(&info->io_lock);
+ return ret;
+}
+
+static inline int check_range(int min_uV, int max_uV)
+{
+ if ((min_uV < MAX8649_DCDC_VMIN) || (max_uV > MAX8649_DCDC_VMAX)
+ || (min_uV > max_uV))
+ return -EINVAL;
+ return 0;
+}
+
+static int max8649_list_voltage(struct regulator_dev *rdev, unsigned index)
+{
+ return (MAX8649_DCDC_VMIN + index * MAX8649_DCDC_STEP);
+}
+
+static int max8649_get_voltage(struct regulator_dev *rdev)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned char data;
+ int ret;
+
+ ret = max8649_reg_read(info->i2c, info->vol_reg);
+ if (ret < 0)
+ return ret;
+ data = (unsigned char)ret & MAX8649_VOL_MASK;
+ return max8649_list_voltage(rdev, data);
+}
+
+static int max8649_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned char data, mask;
+
+ if (check_range(min_uV, max_uV)) {
+ dev_err(info->dev, "invalid voltage range (%d, %d) uV\n",
+ min_uV, max_uV);
+ return -EINVAL;
+ }
+ data = (min_uV - MAX8649_DCDC_VMIN + MAX8649_DCDC_STEP - 1)
+ / MAX8649_DCDC_STEP;
+ mask = MAX8649_VOL_MASK;
+
+ return max8649_set_bits(info->i2c, info->vol_reg, mask, data);
+}
+
+/* EN_PD means pulldown on EN input */
+static int max8649_enable(struct regulator_dev *rdev)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD, 0);
+}
+
+/*
+ * Apply the internal pulldown resistor on the EN input pin.
+ * An external pulldown on the EN pin would be preferable.
+ */
+static int max8649_disable(struct regulator_dev *rdev)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD,
+ MAX8649_EN_PD);
+}
+
+static int max8649_is_enabled(struct regulator_dev *rdev)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ ret = max8649_reg_read(info->i2c, MAX8649_CONTROL);
+ if (ret < 0)
+ return ret;
+ return !((unsigned char)ret & MAX8649_EN_PD);
+}
+
+static int max8649_enable_time(struct regulator_dev *rdev)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ int voltage, rate, ret;
+
+ /* get voltage */
+ ret = max8649_reg_read(info->i2c, info->vol_reg);
+ if (ret < 0)
+ return ret;
+ ret &= MAX8649_VOL_MASK;
+ voltage = max8649_list_voltage(rdev, (unsigned char)ret); /* uV */
+
+ /* get rate */
+ ret = max8649_reg_read(info->i2c, MAX8649_RAMP);
+ if (ret < 0)
+ return ret;
+ ret = (ret & MAX8649_RAMP_MASK) >> 5;
+	rate = (32 * 1000) >> ret;	/* uV/us */
+
+ return (voltage / rate);
+}
+
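
enable_time() above converts the programmed voltage and the RAMP[7:5]
setting into a worst-case ramp delay in microseconds. A worked example
of the arithmetic, with hypothetical register values:

/* Sketch: MODEx[5:0] = 0x28 selects 750000 + 40 * 10000 = 1150000 uV;
 * RAMP[7:5] = 2 gives a ramp rate of (32 * 1000) >> 2 = 8000 uV/us;
 * the resulting delay is 1150000 / 8000 = ~143 us. Values are
 * hypothetical. */
static int example_enable_time(void)
{
	int voltage = 750000 + 0x28 * 10000;	/* uV */
	int rate = (32 * 1000) >> 2;		/* uV/us */

	return voltage / rate;			/* ~143 us */
}
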
+static int max8649_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ max8649_set_bits(info->i2c, info->vol_reg, MAX8649_FORCE_PWM,
+ MAX8649_FORCE_PWM);
+ break;
+ case REGULATOR_MODE_NORMAL:
+ max8649_set_bits(info->i2c, info->vol_reg,
+ MAX8649_FORCE_PWM, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static unsigned int max8649_get_mode(struct regulator_dev *rdev)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ ret = max8649_reg_read(info->i2c, info->vol_reg);
+ if (ret & MAX8649_FORCE_PWM)
+ return REGULATOR_MODE_FAST;
+ return REGULATOR_MODE_NORMAL;
+}
+
+static struct regulator_ops max8649_dcdc_ops = {
+ .set_voltage = max8649_set_voltage,
+ .get_voltage = max8649_get_voltage,
+ .list_voltage = max8649_list_voltage,
+ .enable = max8649_enable,
+ .disable = max8649_disable,
+ .is_enabled = max8649_is_enabled,
+ .enable_time = max8649_enable_time,
+ .set_mode = max8649_set_mode,
+ .get_mode = max8649_get_mode,
+
+};
+
+static struct regulator_desc dcdc_desc = {
+ .name = "max8649",
+ .ops = &max8649_dcdc_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 1 << 6,
+ .owner = THIS_MODULE,
+};
+
+static int __devinit max8649_regulator_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max8649_platform_data *pdata = client->dev.platform_data;
+ struct max8649_regulator_info *info = NULL;
+ unsigned char data;
+ int ret;
+
+ info = kzalloc(sizeof(struct max8649_regulator_info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&client->dev, "No enough memory\n");
+ return -ENOMEM;
+ }
+
+ info->i2c = client;
+ info->dev = &client->dev;
+ mutex_init(&info->io_lock);
+ i2c_set_clientdata(client, info);
+
+ info->mode = pdata->mode;
+ switch (info->mode) {
+ case 0:
+ info->vol_reg = MAX8649_MODE0;
+ break;
+ case 1:
+ info->vol_reg = MAX8649_MODE1;
+ break;
+ case 2:
+ info->vol_reg = MAX8649_MODE2;
+ break;
+ case 3:
+ info->vol_reg = MAX8649_MODE3;
+ break;
+ default:
+ break;
+ }
+
+ ret = max8649_reg_read(info->i2c, MAX8649_CHIP_ID1);
+ if (ret < 0) {
+ dev_err(info->dev, "Failed to detect ID of MAX8649:%d\n",
+ ret);
+ goto out;
+ }
+ dev_info(info->dev, "Detected MAX8649 (ID:%x)\n", ret);
+
+ /* enable VID0 & VID1 */
+ max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_VID_MASK, 0);
+
+ /* enable/disable external clock synchronization */
+ info->extclk = pdata->extclk;
+ data = (info->extclk) ? MAX8649_SYNC_EXTCLK : 0;
+ max8649_set_bits(info->i2c, info->vol_reg, MAX8649_SYNC_EXTCLK, data);
+ if (info->extclk) {
+ /* set external clock frequency */
+ info->extclk_freq = pdata->extclk_freq;
+ max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK,
+ info->extclk_freq);
+ }
+
+ if (pdata->ramp_timing) {
+ info->ramp_timing = pdata->ramp_timing;
+ max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_MASK,
+ info->ramp_timing << 5);
+ }
+
+ info->ramp_down = pdata->ramp_down;
+ if (info->ramp_down) {
+ max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_DOWN,
+ MAX8649_RAMP_DOWN);
+ }
+
+ info->regulator = regulator_register(&dcdc_desc, &client->dev,
+ pdata->regulator, info);
+ if (IS_ERR(info->regulator)) {
+ dev_err(info->dev, "failed to register regulator %s\n",
+ dcdc_desc.name);
+ ret = PTR_ERR(info->regulator);
+ goto out;
+ }
+
+ dev_info(info->dev, "Max8649 regulator device is detected.\n");
+ return 0;
+out:
+ kfree(info);
+ return ret;
+}
+
+static int __devexit max8649_regulator_remove(struct i2c_client *client)
+{
+ struct max8649_regulator_info *info = i2c_get_clientdata(client);
+
+ if (info) {
+ if (info->regulator)
+ regulator_unregister(info->regulator);
+ kfree(info);
+ }
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+static const struct i2c_device_id max8649_id[] = {
+ { "max8649", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max8649_id);
+
+static struct i2c_driver max8649_driver = {
+ .probe = max8649_regulator_probe,
+ .remove = __devexit_p(max8649_regulator_remove),
+ .driver = {
+ .name = "max8649",
+ },
+ .id_table = max8649_id,
+};
+
+static int __init max8649_init(void)
+{
+ return i2c_add_driver(&max8649_driver);
+}
+subsys_initcall(max8649_init);
+
+static void __exit max8649_exit(void)
+{
+ i2c_del_driver(&max8649_driver);
+}
+module_exit(max8649_exit);
+
+/* Module information */
+MODULE_DESCRIPTION("MAXIM 8649 voltage regulator driver");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index acc2fb7..f12f1bb 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -345,8 +345,8 @@ static struct regulator_desc max8660_reg[] = {
},
};
-static int max8660_probe(struct i2c_client *client,
- const struct i2c_device_id *i2c_id)
+static int __devinit max8660_probe(struct i2c_client *client,
+ const struct i2c_device_id *i2c_id)
{
struct regulator_dev **rdev;
struct max8660_platform_data *pdata = client->dev.platform_data;
@@ -354,7 +354,7 @@ static int max8660_probe(struct i2c_client *client,
int boot_on, i, id, ret = -EINVAL;
if (pdata->num_subdevs > MAX8660_V_END) {
- dev_err(&client->dev, "Too much regulators found!\n");
+ dev_err(&client->dev, "Too many regulators found!\n");
goto out;
}
@@ -462,7 +462,7 @@ out:
return ret;
}
-static int max8660_remove(struct i2c_client *client)
+static int __devexit max8660_remove(struct i2c_client *client)
{
struct regulator_dev **rdev = i2c_get_clientdata(client);
int i;
@@ -485,9 +485,10 @@ MODULE_DEVICE_TABLE(i2c, max8660_id);
static struct i2c_driver max8660_driver = {
.probe = max8660_probe,
- .remove = max8660_remove,
+ .remove = __devexit_p(max8660_remove),
.driver = {
.name = "max8660",
+ .owner = THIS_MODULE,
},
.id_table = max8660_id,
};
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 39c4953..f7b8184 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -2,6 +2,7 @@
* Regulator Driver for Freescale MC13783 PMIC
*
* Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -16,11 +17,44 @@
#include <linux/init.h>
#include <linux/err.h>
-#define MC13783_REG_SWITCHERS4 28
-#define MC13783_REG_SWITCHERS4_PLLEN (1 << 18)
-
#define MC13783_REG_SWITCHERS5 29
#define MC13783_REG_SWITCHERS5_SW3EN (1 << 20)
+#define MC13783_REG_SWITCHERS5_SW3VSEL 18
+#define MC13783_REG_SWITCHERS5_SW3VSEL_M (3 << 18)
+
+#define MC13783_REG_REGULATORSETTING0 30
+#define MC13783_REG_REGULATORSETTING0_VIOLOVSEL 2
+#define MC13783_REG_REGULATORSETTING0_VDIGVSEL 4
+#define MC13783_REG_REGULATORSETTING0_VGENVSEL 6
+#define MC13783_REG_REGULATORSETTING0_VRFDIGVSEL 9
+#define MC13783_REG_REGULATORSETTING0_VRFREFVSEL 11
+#define MC13783_REG_REGULATORSETTING0_VRFCPVSEL 13
+#define MC13783_REG_REGULATORSETTING0_VSIMVSEL 14
+#define MC13783_REG_REGULATORSETTING0_VESIMVSEL 15
+#define MC13783_REG_REGULATORSETTING0_VCAMVSEL 16
+
+#define MC13783_REG_REGULATORSETTING0_VIOLOVSEL_M (3 << 2)
+#define MC13783_REG_REGULATORSETTING0_VDIGVSEL_M (3 << 4)
+#define MC13783_REG_REGULATORSETTING0_VGENVSEL_M (7 << 6)
+#define MC13783_REG_REGULATORSETTING0_VRFDIGVSEL_M (3 << 9)
+#define MC13783_REG_REGULATORSETTING0_VRFREFVSEL_M (3 << 11)
+#define MC13783_REG_REGULATORSETTING0_VRFCPVSEL_M (1 << 13)
+#define MC13783_REG_REGULATORSETTING0_VSIMVSEL_M (1 << 14)
+#define MC13783_REG_REGULATORSETTING0_VESIMVSEL_M (1 << 15)
+#define MC13783_REG_REGULATORSETTING0_VCAMVSEL_M (7 << 16)
+
+#define MC13783_REG_REGULATORSETTING1 31
+#define MC13783_REG_REGULATORSETTING1_VVIBVSEL 0
+#define MC13783_REG_REGULATORSETTING1_VRF1VSEL 2
+#define MC13783_REG_REGULATORSETTING1_VRF2VSEL 4
+#define MC13783_REG_REGULATORSETTING1_VMMC1VSEL 6
+#define MC13783_REG_REGULATORSETTING1_VMMC2VSEL 9
+
+#define MC13783_REG_REGULATORSETTING1_VVIBVSEL_M (3 << 0)
+#define MC13783_REG_REGULATORSETTING1_VRF1VSEL_M (3 << 2)
+#define MC13783_REG_REGULATORSETTING1_VRF2VSEL_M (3 << 4)
+#define MC13783_REG_REGULATORSETTING1_VMMC1VSEL_M (7 << 6)
+#define MC13783_REG_REGULATORSETTING1_VMMC2VSEL_M (7 << 9)
#define MC13783_REG_REGULATORMODE0 32
#define MC13783_REG_REGULATORMODE0_VAUDIOEN (1 << 0)
@@ -48,19 +82,107 @@
#define MC13783_REG_POWERMISC_GPO2EN (1 << 8)
#define MC13783_REG_POWERMISC_GPO3EN (1 << 10)
#define MC13783_REG_POWERMISC_GPO4EN (1 << 12)
+#define MC13783_REG_POWERMISC_PWGT1SPIEN (1 << 15)
+#define MC13783_REG_POWERMISC_PWGT2SPIEN (1 << 16)
+
+#define MC13783_REG_POWERMISC_PWGTSPI_M (3 << 15)
+
struct mc13783_regulator {
struct regulator_desc desc;
int reg;
int enable_bit;
+ int vsel_reg;
+ int vsel_shift;
+ int vsel_mask;
+ int const *voltages;
+};
+
+/* Voltage Values */
+static const int mc13783_sw3_val[] = {
+ 5000000, 5000000, 5000000, 5500000,
+};
+
+static const int mc13783_vaudio_val[] = {
+ 2775000,
+};
+
+static const int mc13783_viohi_val[] = {
+ 2775000,
+};
+
+static const int mc13783_violo_val[] = {
+ 1200000, 1300000, 1500000, 1800000,
+};
+
+static const int mc13783_vdig_val[] = {
+ 1200000, 1300000, 1500000, 1800000,
+};
+
+static const int mc13783_vgen_val[] = {
+ 1200000, 1300000, 1500000, 1800000,
+ 1100000, 2000000, 2775000, 2400000,
+};
+
+static const int mc13783_vrfdig_val[] = {
+ 1200000, 1500000, 1800000, 1875000,
+};
+
+static const int mc13783_vrfref_val[] = {
+ 2475000, 2600000, 2700000, 2775000,
+};
+
+static const int mc13783_vrfcp_val[] = {
+ 2700000, 2775000,
+};
+
+static const int mc13783_vsim_val[] = {
+ 1800000, 2900000, 3000000,
+};
+
+static const int mc13783_vesim_val[] = {
+ 1800000, 2900000,
+};
+
+static const int mc13783_vcam_val[] = {
+ 1500000, 1800000, 2500000, 2550000,
+ 2600000, 2750000, 2800000, 3000000,
+};
+
+static const int mc13783_vrfbg_val[] = {
+ 1250000,
+};
+
+static const int mc13783_vvib_val[] = {
+ 1300000, 1800000, 2000000, 3000000,
+};
+
+static const int mc13783_vmmc_val[] = {
+ 1600000, 1800000, 2000000, 2600000,
+ 2700000, 2800000, 2900000, 3000000,
+};
+
+static const int mc13783_vrf_val[] = {
+ 1500000, 1875000, 2700000, 2775000,
+};
+
+static const int mc13783_gpo_val[] = {
+ 3100000,
+};
+
+static const int mc13783_pwgtdrv_val[] = {
+ 5500000,
+};
static struct regulator_ops mc13783_regulator_ops;
+static struct regulator_ops mc13783_fixed_regulator_ops;
+static struct regulator_ops mc13783_gpo_regulator_ops;
-#define MC13783_DEFINE(prefix, _name, _reg) \
+#define MC13783_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages) \
[MC13783_ ## prefix ## _ ## _name] = { \
.desc = { \
.name = #prefix "_" #_name, \
+ .n_voltages = ARRAY_SIZE(_voltages), \
.ops = &mc13783_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = MC13783_ ## prefix ## _ ## _name, \
@@ -68,40 +190,92 @@ static struct regulator_ops mc13783_regulator_ops;
}, \
.reg = MC13783_REG_ ## _reg, \
.enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
+ .vsel_reg = MC13783_REG_ ## _vsel_reg, \
+ .vsel_shift = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL,\
+ .vsel_mask = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL_M,\
+ .voltages = _voltages, \
+ }
+
+#define MC13783_FIXED_DEFINE(prefix, _name, _reg, _voltages) \
+ [MC13783_ ## prefix ## _ ## _name] = { \
+ .desc = { \
+ .name = #prefix "_" #_name, \
+ .n_voltages = ARRAY_SIZE(_voltages), \
+ .ops = &mc13783_fixed_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MC13783_ ## prefix ## _ ## _name, \
+ .owner = THIS_MODULE, \
+ }, \
+ .reg = MC13783_REG_ ## _reg, \
+ .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
+ .voltages = _voltages, \
}
-#define MC13783_DEFINE_SW(_name, _reg) MC13783_DEFINE(SW, _name, _reg)
-#define MC13783_DEFINE_REGU(_name, _reg) MC13783_DEFINE(REGU, _name, _reg)
+#define MC13783_GPO_DEFINE(prefix, _name, _reg, _voltages) \
+ [MC13783_ ## prefix ## _ ## _name] = { \
+ .desc = { \
+ .name = #prefix "_" #_name, \
+ .n_voltages = ARRAY_SIZE(_voltages), \
+ .ops = &mc13783_gpo_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MC13783_ ## prefix ## _ ## _name, \
+ .owner = THIS_MODULE, \
+ }, \
+ .reg = MC13783_REG_ ## _reg, \
+ .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
+ .voltages = _voltages, \
+ }
+
+#define MC13783_DEFINE_SW(_name, _reg, _vsel_reg, _voltages) \
+ MC13783_DEFINE(SW, _name, _reg, _vsel_reg, _voltages)
+#define MC13783_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages) \
+ MC13783_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages)
static struct mc13783_regulator mc13783_regulators[] = {
- MC13783_DEFINE_SW(SW3, SWITCHERS5),
- MC13783_DEFINE_SW(PLL, SWITCHERS4),
-
- MC13783_DEFINE_REGU(VAUDIO, REGULATORMODE0),
- MC13783_DEFINE_REGU(VIOHI, REGULATORMODE0),
- MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0),
- MC13783_DEFINE_REGU(VDIG, REGULATORMODE0),
- MC13783_DEFINE_REGU(VGEN, REGULATORMODE0),
- MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0),
- MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0),
- MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0),
- MC13783_DEFINE_REGU(VSIM, REGULATORMODE1),
- MC13783_DEFINE_REGU(VESIM, REGULATORMODE1),
- MC13783_DEFINE_REGU(VCAM, REGULATORMODE1),
- MC13783_DEFINE_REGU(VRFBG, REGULATORMODE1),
- MC13783_DEFINE_REGU(VVIB, REGULATORMODE1),
- MC13783_DEFINE_REGU(VRF1, REGULATORMODE1),
- MC13783_DEFINE_REGU(VRF2, REGULATORMODE1),
- MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1),
- MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1),
- MC13783_DEFINE_REGU(GPO1, POWERMISC),
- MC13783_DEFINE_REGU(GPO2, POWERMISC),
- MC13783_DEFINE_REGU(GPO3, POWERMISC),
- MC13783_DEFINE_REGU(GPO4, POWERMISC),
+ MC13783_DEFINE_SW(SW3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val),
+
+ MC13783_FIXED_DEFINE(REGU, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
+ MC13783_FIXED_DEFINE(REGU, VIOHI, REGULATORMODE0, mc13783_viohi_val),
+ MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_violo_val),
+ MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_vdig_val),
+ MC13783_DEFINE_REGU(VGEN, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_vgen_val),
+ MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_vrfdig_val),
+ MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_vrfref_val),
+ MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_vrfcp_val),
+ MC13783_DEFINE_REGU(VSIM, REGULATORMODE1, REGULATORSETTING0, \
+ mc13783_vsim_val),
+ MC13783_DEFINE_REGU(VESIM, REGULATORMODE1, REGULATORSETTING0, \
+ mc13783_vesim_val),
+ MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \
+ mc13783_vcam_val),
+ MC13783_FIXED_DEFINE(REGU, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
+ MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1, \
+ mc13783_vvib_val),
+ MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1, \
+ mc13783_vrf_val),
+ MC13783_DEFINE_REGU(VRF2, REGULATORMODE1, REGULATORSETTING1, \
+ mc13783_vrf_val),
+ MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1, REGULATORSETTING1, \
+ mc13783_vmmc_val),
+ MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1, \
+ mc13783_vmmc_val),
+ MC13783_GPO_DEFINE(REGU, GPO1, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REGU, GPO2, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REGU, GPO3, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REGU, GPO4, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REGU, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val),
+ MC13783_GPO_DEFINE(REGU, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val),
};
struct mc13783_regulator_priv {
struct mc13783 *mc13783;
+ u32 powermisc_pwgt_state;
struct regulator_dev *regulators[];
};
@@ -154,10 +328,241 @@ static int mc13783_regulator_is_enabled(struct regulator_dev *rdev)
return (val & mc13783_regulators[id].enable_bit) != 0;
}
+static int mc13783_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ int id = rdev_get_id(rdev);
+
+ if (selector >= mc13783_regulators[id].desc.n_voltages)
+ return -EINVAL;
+
+ return mc13783_regulators[id].voltages[selector];
+}
+
+static int mc13783_get_best_voltage_index(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int reg_id = rdev_get_id(rdev);
+ int i;
+ int bestmatch;
+ int bestindex;
+
+ /*
+ * Locate the minimum voltage fitting the criteria on
+ * this regulator. The switchable voltages are not
+ * in strict falling order so we need to check them
+ * all for the best match.
+ */
+ bestmatch = INT_MAX;
+ bestindex = -1;
+ for (i = 0; i < mc13783_regulators[reg_id].desc.n_voltages; i++) {
+ if (mc13783_regulators[reg_id].voltages[i] >= min_uV &&
+ mc13783_regulators[reg_id].voltages[i] < bestmatch) {
+ bestmatch = mc13783_regulators[reg_id].voltages[i];
+ bestindex = i;
+ }
+ }
+
+ if (bestindex < 0 || bestmatch > max_uV) {
+ dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n",
+ min_uV, max_uV);
+ return -EINVAL;
+ }
+ return bestindex;
+}
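
As the comment in mc13783_get_best_voltage_index() notes, the selector tables are not monotonic (in mc13783_vgen_val above, 1100000 follows 1800000), so the whole table must be scanned. A standalone sketch of the same scan, reusing the VGEN values from this patch:

/* Linear best-match scan over a non-monotonic selector table. */
#include <stdio.h>
#include <limits.h>

static const int vgen_val[] = {
        1200000, 1300000, 1500000, 1800000,
        1100000, 2000000, 2775000, 2400000,
};

int main(void)
{
        int min_uV = 1900000, max_uV = 2500000;
        int bestmatch = INT_MAX, bestindex = -1;
        unsigned int i;

        for (i = 0; i < sizeof(vgen_val) / sizeof(vgen_val[0]); i++) {
                if (vgen_val[i] >= min_uV && vgen_val[i] < bestmatch) {
                        bestmatch = vgen_val[i];
                        bestindex = i;
                }
        }

        /* index 5 (2000000 uV) wins, not index 7 (2400000 uV) */
        if (bestindex >= 0 && bestmatch <= max_uV)
                printf("selector %d -> %d uV\n", bestindex, bestmatch);
        return 0;
}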
+
+static int mc13783_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int value, id = rdev_get_id(rdev);
+ int ret;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+ __func__, id, min_uV, max_uV);
+
+ /* Find the best index */
+ value = mc13783_get_best_voltage_index(rdev, min_uV, max_uV);
+ dev_dbg(rdev_get_dev(rdev), "%s best value: %d \n", __func__, value);
+ if (value < 0)
+ return value;
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].vsel_reg,
+ mc13783_regulators[id].vsel_mask,
+ value << mc13783_regulators[id].vsel_shift);
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static int mc13783_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int ret, id = rdev_get_id(rdev);
+ unsigned int val;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_reg_read(priv->mc13783,
+ mc13783_regulators[id].vsel_reg, &val);
+ mc13783_unlock(priv->mc13783);
+
+ if (ret)
+ return ret;
+
+ val = (val & mc13783_regulators[id].vsel_mask)
+ >> mc13783_regulators[id].vsel_shift;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
+
+ BUG_ON(val >= mc13783_regulators[id].desc.n_voltages);
+
+ return mc13783_regulators[id].voltages[val];
+}
+
static struct regulator_ops mc13783_regulator_ops = {
.enable = mc13783_regulator_enable,
.disable = mc13783_regulator_disable,
.is_enabled = mc13783_regulator_is_enabled,
+ .list_voltage = mc13783_regulator_list_voltage,
+ .set_voltage = mc13783_regulator_set_voltage,
+ .get_voltage = mc13783_regulator_get_voltage,
+};
+
+static int mc13783_fixed_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int id = rdev_get_id(rdev);
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+ __func__, id, min_uV, max_uV);
+
+ if (min_uV <= mc13783_regulators[id].voltages[0] &&
+ max_uV >= mc13783_regulators[id].voltages[0])
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int mc13783_fixed_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ int id = rdev_get_id(rdev);
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ return mc13783_regulators[id].voltages[0];
+}
+
+static struct regulator_ops mc13783_fixed_regulator_ops = {
+ .enable = mc13783_regulator_enable,
+ .disable = mc13783_regulator_disable,
+ .is_enabled = mc13783_regulator_is_enabled,
+ .list_voltage = mc13783_regulator_list_voltage,
+ .set_voltage = mc13783_fixed_regulator_set_voltage,
+ .get_voltage = mc13783_fixed_regulator_get_voltage,
+};
+
+static int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask,
+ u32 val)
+{
+ struct mc13783 *mc13783 = priv->mc13783;
+ int ret;
+ u32 valread;
+
+ BUG_ON(val & ~mask);
+
+ ret = mc13783_reg_read(mc13783, MC13783_REG_POWERMISC, &valread);
+ if (ret)
+ return ret;
+
+ /* Update the stored state for Power Gates. */
+ priv->powermisc_pwgt_state =
+ (priv->powermisc_pwgt_state & ~mask) | val;
+ priv->powermisc_pwgt_state &= MC13783_REG_POWERMISC_PWGTSPI_M;
+
+ /* Construct the new register value */
+ valread = (valread & ~mask) | val;
+ /* Overwrite the PWGTxEN with the stored version */
+ valread = (valread & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
+ priv->powermisc_pwgt_state;
+
+ return mc13783_reg_write(mc13783, MC13783_REG_POWERMISC, valread);
+}
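
Because the PWGT1SPIEN/PWGT2SPIEN bits do not read back with the meaning that was written, mc13783_powermisc_rmw() above keeps a shadow copy in powermisc_pwgt_state and forces those two bits on every POWERMISC write. A bit-level sketch of that masking, with a hypothetical readback value:

/* Shadow-state read-modify-write, as in mc13783_powermisc_rmw(). */
#include <stdio.h>
#include <stdint.h>

#define PWGT1SPIEN      (1u << 15)
#define PWGTSPI_M       (3u << 15)

int main(void)
{
        uint32_t hw = PWGTSPI_M;        /* readback: PWGT bits untrusted */
        uint32_t shadow = 0;            /* driver's stored PWGT state */
        uint32_t mask = PWGT1SPIEN;     /* enable PWGT1: write a 0 bit */
        uint32_t val = 0;
        uint32_t out;

        shadow = ((shadow & ~mask) | val) & PWGTSPI_M;

        out = (hw & ~mask) | val;
        out = (out & ~PWGTSPI_M) | shadow;      /* shadow overrides readback */

        printf("write 0x%05x\n", out);          /* 0x00000 */
        return 0;
}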
+
+static int mc13783_gpo_regulator_enable(struct regulator_dev *rdev)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ int ret;
+ u32 en_val = mc13783_regulators[id].enable_bit;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ /* Power Gate enable value is 0 */
+ if (id == MC13783_REGU_PWGT1SPI ||
+ id == MC13783_REGU_PWGT2SPI)
+ en_val = 0;
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit,
+ en_val);
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static int mc13783_gpo_regulator_disable(struct regulator_dev *rdev)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ int ret;
+ u32 dis_val = 0;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ /* Power Gate disable value is 1 */
+ if (id == MC13783_REGU_PWGT1SPI ||
+ id == MC13783_REGU_PWGT2SPI)
+ dis_val = mc13783_regulators[id].enable_bit;
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit,
+ dis_val);
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int ret, id = rdev_get_id(rdev);
+ unsigned int val;
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
+ mc13783_unlock(priv->mc13783);
+
+ if (ret)
+ return ret;
+
+ /* The power-gate state is stored in powermisc_pwgt_state,
+ * where the meaning of the bits is inverted. */
+ val = (val & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
+ (priv->powermisc_pwgt_state ^ MC13783_REG_POWERMISC_PWGTSPI_M);
+
+ return (val & mc13783_regulators[id].enable_bit) != 0;
+}
+
+static struct regulator_ops mc13783_gpo_regulator_ops = {
+ .enable = mc13783_gpo_regulator_enable,
+ .disable = mc13783_gpo_regulator_disable,
+ .is_enabled = mc13783_gpo_regulator_is_enabled,
+ .list_voltage = mc13783_regulator_list_voltage,
+ .set_voltage = mc13783_fixed_regulator_set_voltage,
+ .get_voltage = mc13783_fixed_regulator_get_voltage,
};
static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 33d7d89..29d0566 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -288,16 +288,18 @@ static int __devexit pcap_regulator_remove(struct platform_device *pdev)
struct regulator_dev *rdev = platform_get_drvdata(pdev);
regulator_unregister(rdev);
+ platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver pcap_regulator_driver = {
.driver = {
- .name = "pcap-regulator",
+ .name = "pcap-regulator",
+ .owner = THIS_MODULE,
},
- .probe = pcap_regulator_probe,
- .remove = __devexit_p(pcap_regulator_remove),
+ .probe = pcap_regulator_probe,
+ .remove = __devexit_p(pcap_regulator_remove),
};
static int __init pcap_regulator_init(void)
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 07fda0a..1f18354 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -457,8 +457,8 @@ static struct regulator_ops tps65023_ldo_ops = {
.list_voltage = tps65023_ldo_list_voltage,
};
-static
-int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int __devinit tps_65023_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
static int desc_id;
const struct tps_info *info = (void *)id->driver_data;
@@ -466,6 +466,7 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct regulator_dev *rdev;
struct tps_pmic *tps;
int i;
+ int error;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
@@ -475,7 +476,6 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id)
* coming from the board-evm file.
*/
init_data = client->dev.platform_data;
-
if (!init_data)
return -EIO;
@@ -502,21 +502,12 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Register the regulators */
rdev = regulator_register(&tps->desc[i], &client->dev,
- init_data, tps);
+ init_data, tps);
if (IS_ERR(rdev)) {
dev_err(&client->dev, "failed to register %s\n",
id->name);
-
- /* Unregister */
- while (i)
- regulator_unregister(tps->rdev[--i]);
-
- tps->client = NULL;
-
- /* clear the client data in i2c */
- i2c_set_clientdata(client, NULL);
- kfree(tps);
- return PTR_ERR(rdev);
+ error = PTR_ERR(rdev);
+ goto fail;
}
/* Save regulator for cleanup */
@@ -526,6 +517,13 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id)
i2c_set_clientdata(client, tps);
return 0;
+
+ fail:
+ while (--i >= 0)
+ regulator_unregister(tps->rdev[i]);
+
+ kfree(tps);
+ return error;
}
/**
@@ -539,13 +537,12 @@ static int __devexit tps_65023_remove(struct i2c_client *client)
struct tps_pmic *tps = i2c_get_clientdata(client);
int i;
+ /* clear the client data in i2c */
+ i2c_set_clientdata(client, NULL);
+
for (i = 0; i < TPS65023_NUM_REGULATOR; i++)
regulator_unregister(tps->rdev[i]);
- tps->client = NULL;
-
- /* clear the client data in i2c */
- i2c_set_clientdata(client, NULL);
kfree(tps);
return 0;
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index f8a6dfb..c2a9539 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -538,8 +538,8 @@ static struct regulator_ops tps6507x_ldo_ops = {
.list_voltage = tps6507x_ldo_list_voltage,
};
-static
-int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int __devinit tps_6507x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
static int desc_id;
const struct tps_info *info = (void *)id->driver_data;
@@ -547,6 +547,7 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct regulator_dev *rdev;
struct tps_pmic *tps;
int i;
+ int error;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA))
@@ -557,7 +558,6 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id)
* coming from the board-evm file.
*/
init_data = client->dev.platform_data;
-
if (!init_data)
return -EIO;
@@ -586,18 +586,8 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (IS_ERR(rdev)) {
dev_err(&client->dev, "failed to register %s\n",
id->name);
-
- /* Unregister */
- while (i)
- regulator_unregister(tps->rdev[--i]);
-
- tps->client = NULL;
-
- /* clear the client data in i2c */
- i2c_set_clientdata(client, NULL);
-
- kfree(tps);
- return PTR_ERR(rdev);
+ error = PTR_ERR(rdev);
+ goto fail;
}
/* Save regulator for cleanup */
@@ -607,6 +597,13 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id)
i2c_set_clientdata(client, tps);
return 0;
+
+fail:
+ while (--i >= 0)
+ regulator_unregister(tps->rdev[i]);
+
+ kfree(tps);
+ return error;
}
/**
@@ -620,13 +617,12 @@ static int __devexit tps_6507x_remove(struct i2c_client *client)
struct tps_pmic *tps = i2c_get_clientdata(client);
int i;
+ /* clear the client data in i2c */
+ i2c_set_clientdata(client, NULL);
+
for (i = 0; i < TPS6507X_NUM_REGULATOR; i++)
regulator_unregister(tps->rdev[i]);
- tps->client = NULL;
-
- /* clear the client data in i2c */
- i2c_set_clientdata(client, NULL);
kfree(tps);
return 0;
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 7e67485..9729d76 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -519,19 +519,19 @@ static struct twlreg_info twl_regs[] = {
/* 6030 REG with base as PMC Slave Misc : 0x0030 */
/* Turnon-delay and remap configuration values for 6030 are not
verified since the specification is not public */
- TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1, 0, 0x08),
- TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2, 0, 0x08),
- TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3, 0, 0x08),
- TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4, 0, 0x08),
- TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5, 0, 0x08),
- TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7, 0, 0x08),
- TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x08),
- TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x08),
- TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x08),
- TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x08)
+ TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1, 0, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2, 0, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3, 0, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4, 0, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5, 0, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7, 0, 0x21),
+ TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21),
+ TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21),
+ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21),
+ TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21)
};
-static int twlreg_probe(struct platform_device *pdev)
+static int __devinit twlreg_probe(struct platform_device *pdev)
{
int i;
struct twlreg_info *info;
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index addc032..d96ceca 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -19,7 +19,7 @@
struct virtual_consumer_data {
struct mutex lock;
struct regulator *regulator;
- int enabled;
+ bool enabled;
int min_uV;
int max_uV;
int min_uA;
@@ -49,7 +49,7 @@ static void update_voltage_constraints(struct device *dev,
dev_dbg(dev, "Enabling regulator\n");
ret = regulator_enable(data->regulator);
if (ret == 0)
- data->enabled = 1;
+ data->enabled = true;
else
dev_err(dev, "regulator_enable() failed: %d\n",
ret);
@@ -59,7 +59,7 @@ static void update_voltage_constraints(struct device *dev,
dev_dbg(dev, "Disabling regulator\n");
ret = regulator_disable(data->regulator);
if (ret == 0)
- data->enabled = 0;
+ data->enabled = false;
else
dev_err(dev, "regulator_disable() failed: %d\n",
ret);
@@ -89,7 +89,7 @@ static void update_current_limit_constraints(struct device *dev,
dev_dbg(dev, "Enabling regulator\n");
ret = regulator_enable(data->regulator);
if (ret == 0)
- data->enabled = 1;
+ data->enabled = true;
else
dev_err(dev, "regulator_enable() failed: %d\n",
ret);
@@ -99,7 +99,7 @@ static void update_current_limit_constraints(struct device *dev,
dev_dbg(dev, "Disabling regulator\n");
ret = regulator_disable(data->regulator);
if (ret == 0)
- data->enabled = 0;
+ data->enabled = false;
else
dev_err(dev, "regulator_disable() failed: %d\n",
ret);
@@ -270,24 +270,28 @@ static DEVICE_ATTR(min_microamps, 0666, show_min_uA, set_min_uA);
static DEVICE_ATTR(max_microamps, 0666, show_max_uA, set_max_uA);
static DEVICE_ATTR(mode, 0666, show_mode, set_mode);
-static struct device_attribute *attributes[] = {
- &dev_attr_min_microvolts,
- &dev_attr_max_microvolts,
- &dev_attr_min_microamps,
- &dev_attr_max_microamps,
- &dev_attr_mode,
+static struct attribute *regulator_virtual_attributes[] = {
+ &dev_attr_min_microvolts.attr,
+ &dev_attr_max_microvolts.attr,
+ &dev_attr_min_microamps.attr,
+ &dev_attr_max_microamps.attr,
+ &dev_attr_mode.attr,
+ NULL
};
-static int regulator_virtual_consumer_probe(struct platform_device *pdev)
+static const struct attribute_group regulator_virtual_attr_group = {
+ .attrs = regulator_virtual_attributes,
+};
+
+static int __devinit regulator_virtual_probe(struct platform_device *pdev)
{
char *reg_id = pdev->dev.platform_data;
struct virtual_consumer_data *drvdata;
- int ret, i;
+ int ret;
drvdata = kzalloc(sizeof(struct virtual_consumer_data), GFP_KERNEL);
- if (drvdata == NULL) {
+ if (drvdata == NULL)
return -ENOMEM;
- }
mutex_init(&drvdata->lock);
@@ -299,13 +303,12 @@ static int regulator_virtual_consumer_probe(struct platform_device *pdev)
goto err;
}
- for (i = 0; i < ARRAY_SIZE(attributes); i++) {
- ret = device_create_file(&pdev->dev, attributes[i]);
- if (ret != 0) {
- dev_err(&pdev->dev, "Failed to create attr %d: %d\n",
- i, ret);
- goto err_regulator;
- }
+ ret = sysfs_create_group(&pdev->dev.kobj,
+ &regulator_virtual_attr_group);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create attribute group: %d\n", ret);
+ goto err_regulator;
}
drvdata->mode = regulator_get_mode(drvdata->regulator);
@@ -317,37 +320,36 @@ static int regulator_virtual_consumer_probe(struct platform_device *pdev)
err_regulator:
regulator_put(drvdata->regulator);
err:
- for (i = 0; i < ARRAY_SIZE(attributes); i++)
- device_remove_file(&pdev->dev, attributes[i]);
kfree(drvdata);
return ret;
}
-static int regulator_virtual_consumer_remove(struct platform_device *pdev)
+static int __devexit regulator_virtual_remove(struct platform_device *pdev)
{
struct virtual_consumer_data *drvdata = platform_get_drvdata(pdev);
- int i;
- for (i = 0; i < ARRAY_SIZE(attributes); i++)
- device_remove_file(&pdev->dev, attributes[i]);
+ sysfs_remove_group(&pdev->dev.kobj, &regulator_virtual_attr_group);
+
if (drvdata->enabled)
regulator_disable(drvdata->regulator);
regulator_put(drvdata->regulator);
kfree(drvdata);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
static struct platform_driver regulator_virtual_consumer_driver = {
- .probe = regulator_virtual_consumer_probe,
- .remove = regulator_virtual_consumer_remove,
+ .probe = regulator_virtual_probe,
+ .remove = __devexit_p(regulator_virtual_remove),
.driver = {
.name = "reg-virt-consumer",
+ .owner = THIS_MODULE,
},
};
-
static int __init regulator_virtual_consumer_init(void)
{
return platform_driver_register(&regulator_virtual_consumer_driver);
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 0a65775..6e18e56 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -600,6 +600,8 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
struct wm831x *wm831x = dcdc->wm831x;
+ platform_set_drvdata(pdev, NULL);
+
wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "HC"), dcdc);
wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
regulator_unregister(dcdc->regulator);
@@ -615,6 +617,7 @@ static struct platform_driver wm831x_buckv_driver = {
.remove = __devexit_p(wm831x_buckv_remove),
.driver = {
.name = "wm831x-buckv",
+ .owner = THIS_MODULE,
},
};
@@ -769,6 +772,8 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
struct wm831x *wm831x = dcdc->wm831x;
+ platform_set_drvdata(pdev, NULL);
+
wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
regulator_unregister(dcdc->regulator);
kfree(dcdc);
@@ -781,6 +786,7 @@ static struct platform_driver wm831x_buckp_driver = {
.remove = __devexit_p(wm831x_buckp_remove),
.driver = {
.name = "wm831x-buckp",
+ .owner = THIS_MODULE,
},
};
@@ -895,6 +901,8 @@ static __devexit int wm831x_boostp_remove(struct platform_device *pdev)
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
struct wm831x *wm831x = dcdc->wm831x;
+ platform_set_drvdata(pdev, NULL);
+
wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
regulator_unregister(dcdc->regulator);
kfree(dcdc);
@@ -907,6 +915,7 @@ static struct platform_driver wm831x_boostp_driver = {
.remove = __devexit_p(wm831x_boostp_remove),
.driver = {
.name = "wm831x-boostp",
+ .owner = THIS_MODULE,
},
};
@@ -979,6 +988,8 @@ static __devexit int wm831x_epe_remove(struct platform_device *pdev)
{
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
+
regulator_unregister(dcdc->regulator);
kfree(dcdc);
@@ -990,6 +1001,7 @@ static struct platform_driver wm831x_epe_driver = {
.remove = __devexit_p(wm831x_epe_remove),
.driver = {
.name = "wm831x-epe",
+ .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 4885700..ca0f6b6 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -222,6 +222,8 @@ static __devexit int wm831x_isink_remove(struct platform_device *pdev)
struct wm831x_isink *isink = platform_get_drvdata(pdev);
struct wm831x *wm831x = isink->wm831x;
+ platform_set_drvdata(pdev, NULL);
+
wm831x_free_irq(wm831x, platform_get_irq(pdev, 0), isink);
regulator_unregister(isink->regulator);
@@ -235,6 +237,7 @@ static struct platform_driver wm831x_isink_driver = {
.remove = __devexit_p(wm831x_isink_remove),
.driver = {
.name = "wm831x-isink",
+ .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 61e02ac..d2406c1 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -371,6 +371,8 @@ static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
struct wm831x *wm831x = ldo->wm831x;
+ platform_set_drvdata(pdev, NULL);
+
wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), ldo);
regulator_unregister(ldo->regulator);
kfree(ldo);
@@ -383,6 +385,7 @@ static struct platform_driver wm831x_gp_ldo_driver = {
.remove = __devexit_p(wm831x_gp_ldo_remove),
.driver = {
.name = "wm831x-ldo",
+ .owner = THIS_MODULE,
},
};
@@ -640,6 +643,7 @@ static struct platform_driver wm831x_aldo_driver = {
.remove = __devexit_p(wm831x_aldo_remove),
.driver = {
.name = "wm831x-aldo",
+ .owner = THIS_MODULE,
},
};
@@ -811,6 +815,7 @@ static struct platform_driver wm831x_alive_ldo_driver = {
.remove = __devexit_p(wm831x_alive_ldo_remove),
.driver = {
.name = "wm831x-alive-ldo",
+ .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index e7b89e7..94227dd 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -290,6 +290,51 @@ static int wm8350_isink_is_enabled(struct regulator_dev *rdev)
return -EINVAL;
}
+static int wm8350_isink_enable_time(struct regulator_dev *rdev)
+{
+ struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
+ int isink = rdev_get_id(rdev);
+ int reg;
+
+ switch (isink) {
+ case WM8350_ISINK_A:
+ reg = wm8350_reg_read(wm8350, WM8350_CSA_FLASH_CONTROL);
+ break;
+ case WM8350_ISINK_B:
+ reg = wm8350_reg_read(wm8350, WM8350_CSB_FLASH_CONTROL);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (reg & WM8350_CS1_FLASH_MODE) {
+ switch (reg & WM8350_CS1_ON_RAMP_MASK) {
+ case 0:
+ return 0;
+ case 1:
+ return 1950;
+ case 2:
+ return 3910;
+ case 3:
+ return 7800;
+ }
+ } else {
+ switch (reg & WM8350_CS1_ON_RAMP_MASK) {
+ case 0:
+ return 0;
+ case 1:
+ return 250000;
+ case 2:
+ return 500000;
+ case 3:
+ return 1000000;
+ }
+ }
+
+ return -EINVAL;
+}
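
The function above selects one of two delay tables from the 2-bit on-ramp field, depending on flash mode. A decode sketch; EX_FLASH_MODE and EX_RAMP_MASK are assumed stand-ins, since the real WM8350_CS1_* bit definitions are not part of this hunk:

/* Decode sketch for the ISINK on-ramp field. */
#include <stdio.h>

#define EX_FLASH_MODE   (1 << 8)        /* assumed bit position */
#define EX_RAMP_MASK    0x3             /* assumed field mask */

static const int flash_us[]  = { 0, 1950, 3910, 7800 };
static const int normal_us[] = { 0, 250000, 500000, 1000000 };

int main(void)
{
        int reg = EX_FLASH_MODE | 2;    /* hypothetical readback */
        const int *tbl = (reg & EX_FLASH_MODE) ? flash_us : normal_us;

        printf("enable time = %d us\n", tbl[reg & EX_RAMP_MASK]); /* 3910 */
        return 0;
}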
+
+
int wm8350_isink_set_flash(struct wm8350 *wm8350, int isink, u16 mode,
u16 trigger, u16 duration, u16 on_ramp, u16 off_ramp,
u16 drive)
@@ -1221,6 +1266,7 @@ static struct regulator_ops wm8350_isink_ops = {
.enable = wm8350_isink_enable,
.disable = wm8350_isink_disable,
.is_enabled = wm8350_isink_is_enabled,
+ .enable_time = wm8350_isink_enable_time,
};
static struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index d9a2c98..924c7eb 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -317,14 +317,17 @@ static struct regulator_desc regulators[] = {
static int __devinit wm8400_regulator_probe(struct platform_device *pdev)
{
+ struct wm8400 *wm8400 = container_of(pdev, struct wm8400, regulators[pdev->id]);
struct regulator_dev *rdev;
rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
- pdev->dev.platform_data, dev_get_drvdata(&pdev->dev));
+ pdev->dev.platform_data, wm8400);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
+ platform_set_drvdata(pdev, rdev);
+
return 0;
}
@@ -332,6 +335,7 @@ static int __devexit wm8400_regulator_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
regulator_unregister(rdev);
return 0;
@@ -370,7 +374,6 @@ int wm8400_register_regulator(struct device *dev, int reg,
wm8400->regulators[reg].id = reg;
wm8400->regulators[reg].dev.parent = dev;
wm8400->regulators[reg].dev.platform_data = initdata;
- dev_set_drvdata(&wm8400->regulators[reg].dev, wm8400);
return platform_device_register(&wm8400->regulators[reg]);
}
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
new file mode 100644
index 0000000..95454a4
--- /dev/null
+++ b/drivers/regulator/wm8994-regulator.c
@@ -0,0 +1,307 @@
+/*
+ * wm8994-regulator.c -- Regulator driver for the WM8994
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/gpio.h>
+
+#include <linux/mfd/wm8994/core.h>
+#include <linux/mfd/wm8994/registers.h>
+#include <linux/mfd/wm8994/pdata.h>
+
+struct wm8994_ldo {
+ int enable;
+ bool is_enabled;
+ struct regulator_dev *regulator;
+ struct wm8994 *wm8994;
+};
+
+#define WM8994_LDO1_MAX_SELECTOR 0x7
+#define WM8994_LDO2_MAX_SELECTOR 0x3
+
+static int wm8994_ldo_enable(struct regulator_dev *rdev)
+{
+ struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+
+ /* If we have no soft control assume that the LDO is always enabled. */
+ if (!ldo->enable)
+ return 0;
+
+ gpio_set_value(ldo->enable, 1);
+ ldo->is_enabled = true;
+
+ return 0;
+}
+
+static int wm8994_ldo_disable(struct regulator_dev *rdev)
+{
+ struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+
+ /* If we have no soft control assume that the LDO is always enabled. */
+ if (!ldo->enable)
+ return -EINVAL;
+
+ gpio_set_value(ldo->enable, 0);
+ ldo->is_enabled = false;
+
+ return 0;
+}
+
+static int wm8994_ldo_is_enabled(struct regulator_dev *rdev)
+{
+ struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+
+ return ldo->is_enabled;
+}
+
+static int wm8994_ldo_enable_time(struct regulator_dev *rdev)
+{
+ /* 3ms is fairly conservative but this shouldn't be too performance
+ * critical; can be tweaked per-system if required. */
+ return 3000;
+}
+
+static int wm8994_ldo1_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (selector > WM8994_LDO1_MAX_SELECTOR)
+ return -EINVAL;
+
+ return (selector * 100000) + 2400000;
+}
+
+static int wm8994_ldo1_get_voltage(struct regulator_dev *rdev)
+{
+ struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+ int val;
+
+ val = wm8994_reg_read(ldo->wm8994, WM8994_LDO_1);
+ if (val < 0)
+ return val;
+
+ val = (val & WM8994_LDO1_VSEL_MASK) >> WM8994_LDO1_VSEL_SHIFT;
+
+ return wm8994_ldo1_list_voltage(rdev, val);
+}
+
+static int wm8994_ldo1_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+ int selector, v;
+
+ selector = DIV_ROUND_UP(min_uV - 2400000, 100000);
+ v = wm8994_ldo1_list_voltage(rdev, selector);
+ if (v < 0 || v > max_uV)
+ return -EINVAL;
+
+ selector <<= WM8994_LDO1_VSEL_SHIFT;
+
+ return wm8994_set_bits(ldo->wm8994, WM8994_LDO_1,
+ WM8994_LDO1_VSEL_MASK, selector);
+}
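
LDO1 steps in 100 mV increments from 2.4 V; set_voltage() above maps the request to a selector and re-checks the result against max_uV via list_voltage(). A quick sketch of that mapping, rounding up so the result never falls below min_uV:

/* Sketch of the LDO1 voltage/selector mapping used above. */
#include <stdio.h>

#define LDO1_MAX_SELECTOR 0x7

static int ldo1_list_voltage(int selector)
{
        if (selector < 0 || selector > LDO1_MAX_SELECTOR)
                return -1;
        return selector * 100000 + 2400000;     /* 2.4 V .. 3.1 V */
}

int main(void)
{
        int min_uV = 2950000, max_uV = 3100000;
        int selector = (min_uV - 2400000 + 99999) / 100000;     /* round up */
        int v = ldo1_list_voltage(selector);

        if (v < 0 || v > max_uV)
                return 1;
        printf("selector=%d -> %d uV\n", selector, v);  /* 6 -> 3000000 */
        return 0;
}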
+
+static struct regulator_ops wm8994_ldo1_ops = {
+ .enable = wm8994_ldo_enable,
+ .disable = wm8994_ldo_disable,
+ .is_enabled = wm8994_ldo_is_enabled,
+ .enable_time = wm8994_ldo_enable_time,
+
+ .list_voltage = wm8994_ldo1_list_voltage,
+ .get_voltage = wm8994_ldo1_get_voltage,
+ .set_voltage = wm8994_ldo1_set_voltage,
+};
+
+static int wm8994_ldo2_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (selector > WM8994_LDO2_MAX_SELECTOR)
+ return -EINVAL;
+
+ return (selector * 100000) + 900000;
+}
+
+static int wm8994_ldo2_get_voltage(struct regulator_dev *rdev)
+{
+ struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+ int val;
+
+ val = wm8994_reg_read(ldo->wm8994, WM8994_LDO_2);
+ if (val < 0)
+ return val;
+
+ val = (val & WM8994_LDO2_VSEL_MASK) >> WM8994_LDO2_VSEL_SHIFT;
+
+ return wm8994_ldo2_list_voltage(rdev, val);
+}
+
+static int wm8994_ldo2_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+ int selector, v;
+
+ selector = DIV_ROUND_UP(min_uV - 900000, 100000);
+ v = wm8994_ldo2_list_voltage(rdev, selector);
+ if (v < 0 || v > max_uV)
+ return -EINVAL;
+
+ selector <<= WM8994_LDO2_VSEL_SHIFT;
+
+ return wm8994_set_bits(ldo->wm8994, WM8994_LDO_2,
+ WM8994_LDO2_VSEL_MASK, selector);
+}
+
+static struct regulator_ops wm8994_ldo2_ops = {
+ .enable = wm8994_ldo_enable,
+ .disable = wm8994_ldo_disable,
+ .is_enabled = wm8994_ldo_is_enabled,
+ .enable_time = wm8994_ldo_enable_time,
+
+ .list_voltage = wm8994_ldo2_list_voltage,
+ .get_voltage = wm8994_ldo2_get_voltage,
+ .set_voltage = wm8994_ldo2_set_voltage,
+};
+
+static struct regulator_desc wm8994_ldo_desc[] = {
+ {
+ .name = "LDO1",
+ .id = 1,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = WM8994_LDO1_MAX_SELECTOR + 1,
+ .ops = &wm8994_ldo1_ops,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO2",
+ .id = 2,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = WM8994_LDO2_MAX_SELECTOR + 1,
+ .ops = &wm8994_ldo2_ops,
+ .owner = THIS_MODULE,
+ },
+};
+
+static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
+{
+ struct wm8994 *wm8994 = dev_get_drvdata(pdev->dev.parent);
+ struct wm8994_pdata *pdata = wm8994->dev->platform_data;
+ int id = pdev->id % ARRAY_SIZE(pdata->ldo);
+ struct wm8994_ldo *ldo;
+ int ret;
+
+ dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
+
+ if (!pdata)
+ return -ENODEV;
+
+ ldo = kzalloc(sizeof(struct wm8994_ldo), GFP_KERNEL);
+ if (ldo == NULL) {
+ dev_err(&pdev->dev, "Unable to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ ldo->wm8994 = wm8994;
+
+ ldo->is_enabled = true;
+
+ if (pdata->ldo[id].enable && gpio_is_valid(pdata->ldo[id].enable)) {
+ ldo->enable = pdata->ldo[id].enable;
+
+ ret = gpio_request(ldo->enable, "WM8994 LDO enable");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get enable GPIO: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = gpio_direction_output(ldo->enable, ldo->is_enabled);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to set GPIO up: %d\n",
+ ret);
+ goto err_gpio;
+ }
+ }
+
+ ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &pdev->dev,
+ pdata->ldo[id].init_data, ldo);
+ if (IS_ERR(ldo->regulator)) {
+ ret = PTR_ERR(ldo->regulator);
+ dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
+ id + 1, ret);
+ goto err_gpio;
+ }
+
+ platform_set_drvdata(pdev, ldo);
+
+ return 0;
+
+err_gpio:
+ if (gpio_is_valid(ldo->enable))
+ gpio_free(ldo->enable);
+err:
+ kfree(ldo);
+ return ret;
+}
+
+static __devexit int wm8994_ldo_remove(struct platform_device *pdev)
+{
+ struct wm8994_ldo *ldo = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ regulator_unregister(ldo->regulator);
+ if (gpio_is_valid(ldo->enable))
+ gpio_free(ldo->enable);
+ kfree(ldo);
+
+ return 0;
+}
+
+static struct platform_driver wm8994_ldo_driver = {
+ .probe = wm8994_ldo_probe,
+ .remove = __devexit_p(wm8994_ldo_remove),
+ .driver = {
+ .name = "wm8994-ldo",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init wm8994_ldo_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&wm8994_ldo_driver);
+ if (ret != 0)
+ pr_err("Failed to register Wm8994 GP LDO driver: %d\n", ret);
+
+ return ret;
+}
+subsys_initcall(wm8994_ldo_init);
+
+static void __exit wm8994_ldo_exit(void)
+{
+ platform_driver_unregister(&wm8994_ldo_driver);
+}
+module_exit(wm8994_ldo_exit);
+
+/* Module information */
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_DESCRIPTION("WM8994 LDO driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm8994-ldo");
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index d935b2d..ae0251e 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -153,8 +153,6 @@ static int baud_table[] = {
0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200, 0 };
-#define BAUD_TABLE_SIZE (sizeof(baud_table)/sizeof(baud_table[0]))
-
/* Sets or clears DTR/RTS on the requested line */
static inline void m68k_rtsdtr(struct m68k_serial *ss, int set)
{
@@ -1406,10 +1404,10 @@ static void m68328_set_baud(void)
USTCNT = ustcnt & ~USTCNT_TXEN;
again:
- for (i = 0; i < sizeof(baud_table) / sizeof(baud_table[0]); i++)
+ for (i = 0; i < ARRAY_SIZE(baud_table); i++)
if (baud_table[i] == m68328_console_baud)
break;
- if (i >= sizeof(baud_table) / sizeof(baud_table[0])) {
+ if (i >= ARRAY_SIZE(baud_table)) {
m68328_console_baud = 9600;
goto again;
}
@@ -1435,7 +1433,7 @@ int m68328_console_setup(struct console *cp, char *arg)
if (arg)
n = simple_strtoul(arg,NULL,0);
- for (i = 0; i < BAUD_TABLE_SIZE; i++)
+ for (i = 0; i < ARRAY_SIZE(baud_table); i++)
if (baud_table[i] == n)
break;
- if (i < BAUD_TABLE_SIZE) {
+ if (i < ARRAY_SIZE(baud_table)) {
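
A minimal illustration of the ARRAY_SIZE() idiom adopted above; the table contents here are illustrative:

/* ARRAY_SIZE() replaces the open-coded sizeof division. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int baud_table[] = { 9600, 19200, 38400, 57600, 115200 };

int main(void)
{
        unsigned int i, n = 38400;

        for (i = 0; i < ARRAY_SIZE(baud_table); i++)
                if (baud_table[i] == n)
                        break;

        if (i < ARRAY_SIZE(baud_table))
                printf("index %u\n", i);        /* index 2 */
        return 0;
}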
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index a81ff7b..7c4ebe6e 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2690,6 +2690,15 @@ static void __init serial8250_isa_init_ports(void)
}
}
+static void
+serial8250_init_fixed_type_port(struct uart_8250_port *up, unsigned int type)
+{
+ up->port.type = type;
+ up->port.fifosize = uart_config[type].fifo_size;
+ up->capabilities = uart_config[type].flags;
+ up->tx_loadsz = uart_config[type].tx_loadsz;
+}
+
static void __init
serial8250_register_ports(struct uart_driver *drv, struct device *dev)
{
@@ -2706,6 +2715,10 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
struct uart_8250_port *up = &serial8250_ports[i];
up->port.dev = dev;
+
+ if (up->port.flags & UPF_FIXED_TYPE)
+ serial8250_init_fixed_type_port(up, up->port.type);
+
uart_add_one_port(drv, &up->port);
}
}
@@ -3118,12 +3131,8 @@ int serial8250_register_port(struct uart_port *port)
if (port->dev)
uart->port.dev = port->dev;
- if (port->flags & UPF_FIXED_TYPE) {
- uart->port.type = port->type;
- uart->port.fifosize = uart_config[port->type].fifo_size;
- uart->capabilities = uart_config[port->type].flags;
- uart->tx_loadsz = uart_config[port->type].tx_loadsz;
- }
+ if (port->flags & UPF_FIXED_TYPE)
+ serial8250_init_fixed_type_port(uart, port->type);
set_io_from_upio(&uart->port);
/* Possibly override default I/O functions. */
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index b28af13..01c012d 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -760,7 +760,8 @@ static int pci_netmos_init(struct pci_dev *dev)
/* subdevice 0x00PS means <P> parallel, <S> serial */
unsigned int num_serial = dev->subsystem_device & 0xf;
- if (dev->device == PCI_DEVICE_ID_NETMOS_9901)
+ if ((dev->device == PCI_DEVICE_ID_NETMOS_9901) ||
+ (dev->device == PCI_DEVICE_ID_NETMOS_9865))
return 0;
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
@@ -1479,6 +1480,7 @@ enum pci_board_num_t {
pbn_b0_bt_1_115200,
pbn_b0_bt_2_115200,
+ pbn_b0_bt_4_115200,
pbn_b0_bt_8_115200,
pbn_b0_bt_1_460800,
@@ -1703,6 +1705,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
.base_baud = 115200,
.uart_offset = 8,
},
+ [pbn_b0_bt_4_115200] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
[pbn_b0_bt_8_115200] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 8,
@@ -3191,6 +3199,15 @@ static struct pci_device_id serial_pci_tbl[] = {
0x1208, 0x0004, 0, 0,
pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2,
+ 0x1204, 0x0004, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2,
+ 0x1208, 0x0004, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF3,
+ 0x1208, 0x0004, 0, 0,
+ pbn_b0_4_921600 },
/*
* Dell Remote Access Card 4 - Tim_T_Murphy@Dell.com
*/
@@ -3649,6 +3666,18 @@ static struct pci_device_id serial_pci_tbl[] = {
0, 0, pbn_b0_1_115200 },
/*
+ * Best Connectivity PCI Multi I/O cards
+ */
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x3004,
+ 0, 0, pbn_b0_bt_4_115200 },
+
+ /*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
*/
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 888a0ce..746e070 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1418,42 +1418,37 @@ config SERIAL_BFIN_SPORT
To compile this driver as a module, choose M here: the
module will be called bfin_sport_uart.
-choice
- prompt "Baud rate for Blackfin SPORT UART"
- depends on SERIAL_BFIN_SPORT
- default SERIAL_SPORT_BAUD_RATE_57600
- help
- Choose a baud rate for the SPORT UART, other uart settings are
- 8 bit, 1 stop bit, no parity, no flow control.
-
-config SERIAL_SPORT_BAUD_RATE_115200
- bool "115200"
-
-config SERIAL_SPORT_BAUD_RATE_57600
- bool "57600"
+config SERIAL_BFIN_SPORT_CONSOLE
+ bool "Console on Blackfin sport emulated uart"
+ depends on SERIAL_BFIN_SPORT=y
+ select SERIAL_CORE_CONSOLE
-config SERIAL_SPORT_BAUD_RATE_38400
- bool "38400"
+config SERIAL_BFIN_SPORT0_UART
+ bool "Enable UART over SPORT0"
+ depends on SERIAL_BFIN_SPORT && !(BF542 || BF542M || BF544 || BF544M)
+ help
+ Enable UART over SPORT0
-config SERIAL_SPORT_BAUD_RATE_19200
- bool "19200"
+config SERIAL_BFIN_SPORT1_UART
+ bool "Enable UART over SPORT1"
+ depends on SERIAL_BFIN_SPORT
+ help
+ Enable UART over SPORT1
-config SERIAL_SPORT_BAUD_RATE_9600
- bool "9600"
-endchoice
+config SERIAL_BFIN_SPORT2_UART
+ bool "Enable UART over SPORT2"
+ depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
+ help
+ Enable UART over SPORT2
-config SPORT_BAUD_RATE
- int
- depends on SERIAL_BFIN_SPORT
- default 115200 if (SERIAL_SPORT_BAUD_RATE_115200)
- default 57600 if (SERIAL_SPORT_BAUD_RATE_57600)
- default 38400 if (SERIAL_SPORT_BAUD_RATE_38400)
- default 19200 if (SERIAL_SPORT_BAUD_RATE_19200)
- default 9600 if (SERIAL_SPORT_BAUD_RATE_9600)
+config SERIAL_BFIN_SPORT3_UART
+ bool "Enable UART over SPORT3"
+ depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
+ help
+ Enable UART over SPORT3
config SERIAL_TIMBERDALE
tristate "Support for timberdale UART"
- depends on MFD_TIMBERDALE
select SERIAL_CORE
---help---
Add support for UART controller on timberdale.
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 9d948bc..2c9bf9b 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -1213,6 +1213,24 @@ static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
return ret;
}
+#ifdef CONFIG_CONSOLE_POLL
+static int atmel_poll_get_char(struct uart_port *port)
+{
+ while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY))
+ cpu_relax();
+
+ return UART_GET_CHAR(port);
+}
+
+static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
+{
+ while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
+ cpu_relax();
+
+ UART_PUT_CHAR(port, ch);
+}
+#endif
+
static struct uart_ops atmel_pops = {
.tx_empty = atmel_tx_empty,
.set_mctrl = atmel_set_mctrl,
@@ -1232,6 +1250,10 @@ static struct uart_ops atmel_pops = {
.config_port = atmel_config_port,
.verify_port = atmel_verify_port,
.pm = atmel_serial_pm,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = atmel_poll_get_char,
+ .poll_put_char = atmel_poll_put_char,
+#endif
};
/*
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/serial/bcm63xx_uart.c
index 37ad0c4..a1a0e55 100644
--- a/drivers/serial/bcm63xx_uart.c
+++ b/drivers/serial/bcm63xx_uart.c
@@ -35,7 +35,7 @@
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
-#define BCM63XX_NR_UARTS 1
+#define BCM63XX_NR_UARTS 2
static struct uart_port ports[BCM63XX_NR_UARTS];
@@ -784,7 +784,7 @@ static struct uart_driver bcm_uart_driver = {
.dev_name = "ttyS",
.major = TTY_MAJOR,
.minor = 64,
- .nr = 1,
+ .nr = BCM63XX_NR_UARTS,
.cons = BCM63XX_CONSOLE,
};
@@ -826,11 +826,12 @@ static int __devinit bcm_uart_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
port->fifosize = 16;
port->uartclk = clk_get_rate(clk) / 2;
+ port->line = pdev->id;
clk_put(clk);
ret = uart_add_one_port(&bcm_uart_driver, port);
if (ret) {
- kfree(port);
+ ports[pdev->id].membase = NULL;
return ret;
}
platform_set_drvdata(pdev, port);
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index 50abb7e..fcf273e 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/ioport.h>
+#include <linux/io.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
@@ -237,7 +238,8 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
- if (kgdb_connected && kgdboc_port_line == uart->port.line)
+ if (kgdb_connected && kgdboc_port_line == uart->port.line
+ && kgdboc_break_enabled)
if (ch == 0x3) {/* Ctrl + C */
kgdb_breakpoint();
return;
@@ -488,6 +490,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
{
int x_pos, pos;
+ dma_disable_irq(uart->tx_dma_channel);
dma_disable_irq(uart->rx_dma_channel);
spin_lock_bh(&uart->port.lock);
@@ -521,6 +524,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
}
spin_unlock_bh(&uart->port.lock);
+ dma_enable_irq(uart->tx_dma_channel);
dma_enable_irq(uart->rx_dma_channel);
mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
@@ -746,15 +750,6 @@ static int bfin_serial_startup(struct uart_port *port)
Status interrupt.\n");
}
- if (uart->cts_pin >= 0) {
- gpio_request(uart->cts_pin, DRIVER_NAME);
- gpio_direction_output(uart->cts_pin, 1);
- }
- if (uart->rts_pin >= 0) {
- gpio_request(uart->rts_pin, DRIVER_NAME);
- gpio_direction_output(uart->rts_pin, 0);
- }
-
/* CTS RTS PINs are negative assertive. */
UART_PUT_MCR(uart, ACTS);
UART_SET_IER(uart, EDSSI);
@@ -801,10 +796,6 @@ static void bfin_serial_shutdown(struct uart_port *port)
gpio_free(uart->rts_pin);
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->cts_pin >= 0)
- gpio_free(uart->cts_pin);
- if (uart->rts_pin >= 0)
- gpio_free(uart->rts_pin);
 if (UART_GET_IER(uart) & EDSSI)
free_irq(uart->status_irq, uart);
#endif
@@ -1409,8 +1400,7 @@ static int bfin_serial_remove(struct platform_device *dev)
continue;
uart_remove_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
bfin_serial_ports[i].port.dev = NULL;
-#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
- defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+#if defined(CONFIG_SERIAL_BFIN_CTSRTS)
gpio_free(bfin_serial_ports[i].cts_pin);
gpio_free(bfin_serial_ports[i].rts_pin);
#endif
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index 088bb35..7c72888 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -1,27 +1,11 @@
/*
- * File: linux/drivers/serial/bfin_sport_uart.c
+ * Blackfin On-Chip Sport Emulated UART Driver
*
- * Based on: drivers/serial/bfin_5xx.c by Aubrey Li.
- * Author: Roy Huang <roy.huang@analog.com>
+ * Copyright 2006-2009 Analog Devices Inc.
*
- * Created: Nov 22, 2006
- * Copyright: (c) 2006-2007 Analog Devices Inc.
- * Description: this driver enable SPORTs on Blackfin emulate UART.
+ * Enter bugs at http://blackfin.uclinux.org/
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Licensed under the GPL-2 or later.
*/
/*
@@ -29,39 +13,18 @@
* http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf
 * This application note describes how to implement a UART on a SHARC DSP,
 * but this driver is implemented on the Blackfin processor.
+ * Transmit Frame Sync is not used by this driver to transfer data out.
*/
-/* After reset, there is a prelude of low level pulse when transmit data first
- * time. No addtional pulse in following transmit.
- * According to document:
- * The SPORTs are ready to start transmitting or receiving data no later than
- * three serial clock cycles after they are enabled in the SPORTx_TCR1 or
- * SPORTx_RCR1 register. No serial clock cycles are lost from this point on.
- * The first internal frame sync will occur one frame sync delay after the
- * SPORTs are ready. External frame syncs can occur as soon as the SPORT is
- * ready.
- */
+/* #define DEBUG */
-/* Thanks to Axel Alatalo <axel@rubico.se> for fixing sport rx bug. Sometimes
- * sport receives data incorrectly. The following is Axel's words.
- * As EE-191, sport rx samples 3 times of the UART baudrate and takes the
- * middle smaple of every 3 samples as the data bit. For a 8-N-1 UART setting,
- * 30 samples will be required for a byte. If transmitter sends a 1/3 bit short
- * byte due to buadrate drift, then the 30th sample of a byte, this sample is
- * also the third sample of the stop bit, will happens on the immediately
- * following start bit which will be thrown away and missed. Thus since parts
- * of the startbit will be missed and the receiver will begin to drift, the
- * effect accumulates over time until synchronization is lost.
- * If only require 2 samples of the stopbit (by sampling in total 29 samples),
- * then a to short byte as in the case above will be tolerated. Then the 1/3
- * early startbit will trigger a framesync since the last read is complete
- * after only 2/3 stopbit and framesync is active during the last 1/3 looking
- * for a possible early startbit. */
-
-//#define DEBUG
+#define DRV_NAME "bfin-sport-uart"
+#define DEVICE_NAME "ttySS"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
#include <linux/module.h>
#include <linux/ioport.h>
+#include <linux/io.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
@@ -75,23 +38,36 @@
#include "bfin_sport_uart.h"
+#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
unsigned short bfin_uart_pin_req_sport0[] =
{P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, \
P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0};
-
+#endif
+#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
unsigned short bfin_uart_pin_req_sport1[] =
{P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, \
P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0};
-
-#define DRV_NAME "bfin-sport-uart"
+#endif
+#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
+unsigned short bfin_uart_pin_req_sport2[] =
+ {P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, \
+ P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0};
+#endif
+#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
+unsigned short bfin_uart_pin_req_sport3[] =
+ {P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, \
+ P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0};
+#endif
struct sport_uart_port {
struct uart_port port;
- char *name;
-
- int tx_irq;
- int rx_irq;
int err_irq;
+ unsigned short csize;
+ unsigned short rxmask;
+ unsigned short txmask1;
+ unsigned short txmask2;
+ unsigned char stopb;
+/* unsigned char parib; */
};
static void sport_uart_tx_chars(struct sport_uart_port *up);
@@ -99,36 +75,42 @@ static void sport_stop_tx(struct uart_port *port);
static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
{
- pr_debug("%s value:%x\n", __func__, value);
- /* Place a Start and Stop bit */
+ pr_debug("%s value:%x, mask1=0x%x, mask2=0x%x\n", __func__, value,
+ up->txmask1, up->txmask2);
+
+ /* Place Start and Stop bits */
__asm__ __volatile__ (
- "R2 = b#01111111100;"
- "R3 = b#10000000001;"
- "%0 <<= 2;"
- "%0 = %0 & R2;"
- "%0 = %0 | R3;"
- : "=d"(value)
- : "d"(value)
- : "ASTAT", "R2", "R3"
+ "%[val] <<= 1;"
+ "%[val] = %[val] & %[mask1];"
+ "%[val] = %[val] | %[mask2];"
+ : [val]"+d"(value)
+ : [mask1]"d"(up->txmask1), [mask2]"d"(up->txmask2)
+ : "ASTAT"
);
pr_debug("%s value:%x\n", __func__, value);
SPORT_PUT_TX(up, value);
}
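
For reference, the framing computed by the inline assembly above can be written in plain C. This sketch assumes the 8-N-1 masks (0x1fe / 0x200) that sport_set_termios() produces later in this file; frame_8n1() is an invented name:

	/* Plain-C sketch of tx_one_byte() framing for 8-N-1:
	 * txmask1 = 0x1fe (keeps the 8 data bits, shifted past the start bit),
	 * txmask2 = 0x200 (stop bit above the data bits).
	 */
	static unsigned int frame_8n1(unsigned int ch)
	{
		unsigned int value = ch << 1;	/* bit 0 becomes the start bit (0) */

		value &= 0x1fe;			/* keep the 8 data bits */
		value |= 0x200;			/* set the stop bit */
		return value;			/* 10-bit frame, sent LSB first */
	}
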
-static inline unsigned int rx_one_byte(struct sport_uart_port *up)
+static inline unsigned char rx_one_byte(struct sport_uart_port *up)
{
- unsigned int value, extract;
+ unsigned int value;
+ unsigned char extract;
u32 tmp_mask1, tmp_mask2, tmp_shift, tmp;
- value = SPORT_GET_RX32(up);
- pr_debug("%s value:%x\n", __func__, value);
+ if ((up->csize + up->stopb) > 7)
+ value = SPORT_GET_RX32(up);
+ else
+ value = SPORT_GET_RX(up);
+
+ pr_debug("%s value:%x, cs=%d, mask=0x%x\n", __func__, value,
+ up->csize, up->rxmask);
- /* Extract 8 bits data */
+ /* Extract data */
__asm__ __volatile__ (
"%[extr] = 0;"
- "%[mask1] = 0x1801(Z);"
- "%[mask2] = 0x0300(Z);"
+ "%[mask1] = %[rxmask];"
+ "%[mask2] = 0x0200(Z);"
"%[shift] = 0;"
"LSETUP(.Lloop_s, .Lloop_e) LC0 = %[lc];"
".Lloop_s:"
@@ -138,9 +120,9 @@ static inline unsigned int rx_one_byte(struct sport_uart_port *up)
"%[mask1] = %[mask1] - %[mask2];"
".Lloop_e:"
"%[shift] += 1;"
- : [val]"=d"(value), [extr]"=d"(extract), [shift]"=d"(tmp_shift), [tmp]"=d"(tmp),
- [mask1]"=d"(tmp_mask1), [mask2]"=d"(tmp_mask2)
- : "d"(value), [lc]"a"(8)
+ : [extr]"=&d"(extract), [shift]"=&d"(tmp_shift), [tmp]"=&d"(tmp),
+ [mask1]"=&d"(tmp_mask1), [mask2]"=&d"(tmp_mask2)
+ : [val]"d"(value), [rxmask]"d"(up->rxmask), [lc]"a"(up->csize)
: "ASTAT", "LB0", "LC0", "LT0"
);
@@ -148,29 +130,28 @@ static inline unsigned int rx_one_byte(struct sport_uart_port *up)
return extract;
}
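
A rough plain-C equivalent of the EXTRACT loop above, for readers not fluent in Blackfin assembly (extract_bits() is invented; the starting sample position mirrors the rxmask value set in sport_set_termios()):

	/* Illustrative only: the SPORT now samples each UART bit twice, so
	 * pick one sample per data bit, walking down from the position
	 * encoded in the high byte of rxmask.
	 */
	static unsigned char extract_bits(unsigned int raw, int csize, int stopb)
	{
		unsigned char out = 0;
		int pos = (csize + stopb) * 2 - 1;	/* from rxmask high byte */
		int i;

		for (i = 0; i < csize; i++) {
			out |= ((raw >> pos) & 1) << i;	/* one sample per bit */
			pos -= 2;			/* skip the duplicate sample */
		}
		return out;
	}
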
-static int sport_uart_setup(struct sport_uart_port *up, int sclk, int baud_rate)
+static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
{
- int tclkdiv, tfsdiv, rclkdiv;
+ int tclkdiv, rclkdiv;
+ unsigned int sclk = get_sclk();
- /* Set TCR1 and TCR2 */
- SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
- SPORT_PUT_TCR2(up, 10);
+ /* Set TCR1 and TCR2, TFSR is not enabled for uart */
+ SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK));
+ SPORT_PUT_TCR2(up, size + 1);
pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
/* Set RCR1 and RCR2 */
SPORT_PUT_RCR1(up, (RCKFE | LARFS | LRFS | RFSR | IRCLK));
- SPORT_PUT_RCR2(up, 28);
+ SPORT_PUT_RCR2(up, (size + 1) * 2 - 1);
pr_debug("%s RCR1:%x, RCR2:%x\n", __func__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up));
- tclkdiv = sclk/(2 * baud_rate) - 1;
- tfsdiv = 12;
- rclkdiv = sclk/(2 * baud_rate * 3) - 1;
+ tclkdiv = sclk / (2 * baud_rate) - 1;
+ rclkdiv = sclk / (2 * baud_rate * 2) - 1;
SPORT_PUT_TCLKDIV(up, tclkdiv);
- SPORT_PUT_TFSDIV(up, tfsdiv);
SPORT_PUT_RCLKDIV(up, rclkdiv);
SSYNC();
- pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, tfsdiv:%d, rclkdiv:%d\n",
- __func__, sclk, baud_rate, tclkdiv, tfsdiv, rclkdiv);
+ pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, rclkdiv:%d\n",
+ __func__, sclk, baud_rate, tclkdiv, rclkdiv);
return 0;
}
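
A worked example of the divider arithmetic above (the 133 MHz system clock is an assumed figure, chosen only for illustration):

	/* For sclk = 133000000 and baud_rate = 57600 (illustrative values):
	 *   tclkdiv = 133000000 / (2 * 57600) - 1     = 1153   (TX at 1x baud)
	 *   rclkdiv = 133000000 / (2 * 57600 * 2) - 1 =  576   (RX at 2x baud)
	 * The serial clock runs at sclk / (2 * (div + 1)), so TX clocks at
	 * the baud rate while RX oversamples each bit twice.
	 */
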
@@ -181,23 +162,29 @@ static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id)
struct tty_struct *tty = up->port.state->port.tty;
unsigned int ch;
- do {
+ spin_lock(&up->port.lock);
+
+ while (SPORT_GET_STAT(up) & RXNE) {
ch = rx_one_byte(up);
up->port.icount.rx++;
- if (uart_handle_sysrq_char(&up->port, ch))
- ;
- else
+ if (!uart_handle_sysrq_char(&up->port, ch))
tty_insert_flip_char(tty, ch, TTY_NORMAL);
- } while (SPORT_GET_STAT(up) & RXNE);
+ }
tty_flip_buffer_push(tty);
+ spin_unlock(&up->port.lock);
+
return IRQ_HANDLED;
}
static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id)
{
- sport_uart_tx_chars(dev_id);
+ struct sport_uart_port *up = dev_id;
+
+ spin_lock(&up->port.lock);
+ sport_uart_tx_chars(up);
+ spin_unlock(&up->port.lock);
return IRQ_HANDLED;
}
@@ -208,6 +195,8 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
struct tty_struct *tty = up->port.state->port.tty;
unsigned int stat = SPORT_GET_STAT(up);
+ spin_lock(&up->port.lock);
+
/* Overflow in RX FIFO */
if (stat & ROVF) {
up->port.icount.overrun++;
@@ -216,15 +205,16 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
}
/* These should not happen */
if (stat & (TOVF | TUVF | RUVF)) {
- printk(KERN_ERR "SPORT Error:%s %s %s\n",
- (stat & TOVF)?"TX overflow":"",
- (stat & TUVF)?"TX underflow":"",
- (stat & RUVF)?"RX underflow":"");
+ pr_err("SPORT Error:%s %s %s\n",
+ (stat & TOVF) ? "TX overflow" : "",
+ (stat & TUVF) ? "TX underflow" : "",
+ (stat & RUVF) ? "RX underflow" : "");
SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
}
SSYNC();
+ spin_unlock(&up->port.lock);
return IRQ_HANDLED;
}
@@ -232,60 +222,37 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
static int sport_startup(struct uart_port *port)
{
struct sport_uart_port *up = (struct sport_uart_port *)port;
- char buffer[20];
- int retval;
+ int ret;
pr_debug("%s enter\n", __func__);
- snprintf(buffer, 20, "%s rx", up->name);
- retval = request_irq(up->rx_irq, sport_uart_rx_irq, IRQF_SAMPLE_RANDOM, buffer, up);
- if (retval) {
- printk(KERN_ERR "Unable to request interrupt %s\n", buffer);
- return retval;
+ ret = request_irq(up->port.irq, sport_uart_rx_irq, 0,
+ "SPORT_UART_RX", up);
+ if (ret) {
+ dev_err(port->dev, "unable to request SPORT RX interrupt\n");
+ return ret;
}
- snprintf(buffer, 20, "%s tx", up->name);
- retval = request_irq(up->tx_irq, sport_uart_tx_irq, IRQF_SAMPLE_RANDOM, buffer, up);
- if (retval) {
- printk(KERN_ERR "Unable to request interrupt %s\n", buffer);
+ ret = request_irq(up->port.irq+1, sport_uart_tx_irq, 0,
+ "SPORT_UART_TX", up);
+ if (ret) {
+ dev_err(port->dev, "unable to request SPORT TX interrupt\n");
goto fail1;
}
- snprintf(buffer, 20, "%s err", up->name);
- retval = request_irq(up->err_irq, sport_uart_err_irq, IRQF_SAMPLE_RANDOM, buffer, up);
- if (retval) {
- printk(KERN_ERR "Unable to request interrupt %s\n", buffer);
+ ret = request_irq(up->err_irq, sport_uart_err_irq, 0,
+ "SPORT_UART_STATUS", up);
+ if (ret) {
+ dev_err(port->dev, "unable to request SPORT status interrupt\n");
goto fail2;
}
- if (port->line) {
- if (peripheral_request_list(bfin_uart_pin_req_sport1, DRV_NAME))
- goto fail3;
- } else {
- if (peripheral_request_list(bfin_uart_pin_req_sport0, DRV_NAME))
- goto fail3;
- }
-
- sport_uart_setup(up, get_sclk(), port->uartclk);
-
- /* Enable receive interrupt */
- SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) | RSPEN));
- SSYNC();
-
return 0;
+ fail2:
+ free_irq(up->port.irq+1, up);
+ fail1:
+ free_irq(up->port.irq, up);
-
-fail3:
- printk(KERN_ERR DRV_NAME
- ": Requesting Peripherals failed\n");
-
- free_irq(up->err_irq, up);
-fail2:
- free_irq(up->tx_irq, up);
-fail1:
- free_irq(up->rx_irq, up);
-
- return retval;
-
+ return ret;
}
static void sport_uart_tx_chars(struct sport_uart_port *up)
@@ -344,20 +311,17 @@ static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void sport_stop_tx(struct uart_port *port)
{
struct sport_uart_port *up = (struct sport_uart_port *)port;
- unsigned int stat;
pr_debug("%s enter\n", __func__);
- stat = SPORT_GET_STAT(up);
- while(!(stat & TXHRE)) {
- udelay(1);
- stat = SPORT_GET_STAT(up);
- }
 /* Although the hold register is empty, the last byte is still in the shift
- * register and not sent out yet. If baud rate is lower than default,
- * delay should be longer. For example, if the baud rate is 9600,
- * the delay must be at least 2ms by experience */
- udelay(500);
+ * register and has not been sent out yet. So write dummy data into the
+ * TX FIFO; the sport TX then stops once the last byte is shifted out
+ * and the dummy data has moved into the shift register.
+ */
+ SPORT_PUT_TX(up, 0xffff);
+ while (!(SPORT_GET_STAT(up) & TXHRE))
+ cpu_relax();
SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
SSYNC();
@@ -370,6 +334,7 @@ static void sport_start_tx(struct uart_port *port)
struct sport_uart_port *up = (struct sport_uart_port *)port;
pr_debug("%s enter\n", __func__);
+
 /* Write data into the SPORT FIFO before enabling the SPORT to transmit */
sport_uart_tx_chars(up);
@@ -403,37 +368,24 @@ static void sport_shutdown(struct uart_port *port)
{
struct sport_uart_port *up = (struct sport_uart_port *)port;
- pr_debug("%s enter\n", __func__);
+ dev_dbg(port->dev, "%s enter\n", __func__);
/* Disable sport */
SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN));
SSYNC();
- if (port->line) {
- peripheral_free_list(bfin_uart_pin_req_sport1);
- } else {
- peripheral_free_list(bfin_uart_pin_req_sport0);
- }
-
- free_irq(up->rx_irq, up);
- free_irq(up->tx_irq, up);
+ free_irq(up->port.irq, up);
+ free_irq(up->port.irq+1, up);
free_irq(up->err_irq, up);
}
-static void sport_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old)
-{
- pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag);
- uart_update_timeout(port, CS8 ,port->uartclk);
-}
-
static const char *sport_type(struct uart_port *port)
{
struct sport_uart_port *up = (struct sport_uart_port *)port;
pr_debug("%s enter\n", __func__);
- return up->name;
+ return up->port.type == PORT_BFIN_SPORT ? "BFIN-SPORT-UART" : NULL;
}
static void sport_release_port(struct uart_port *port)
@@ -461,6 +413,110 @@ static int sport_verify_port(struct uart_port *port, struct serial_struct *ser)
return 0;
}
+static void sport_set_termios(struct uart_port *port,
+ struct ktermios *termios, struct ktermios *old)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+ unsigned long flags;
+ int i;
+
+ pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag);
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS8:
+ up->csize = 8;
+ break;
+ case CS7:
+ up->csize = 7;
+ break;
+ case CS6:
+ up->csize = 6;
+ break;
+ case CS5:
+ up->csize = 5;
+ break;
+ default:
+ pr_warning("requested word length not supported\n");
+ }
+
+ up->stopb = 0;
+ if (termios->c_cflag & CSTOPB)
+ up->stopb = 1;
+ if (termios->c_cflag & PARENB) {
+ pr_warning("parity is not supported yet\n");
+ /* up->parib = 1; */
+ }
+
+ port->read_status_mask = OE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= (FE | PE);
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= BI;
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= FE | PE;
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= OE;
+ }
+
+ /* RX extract mask */
+ up->rxmask = 0x01 | (((up->csize + up->stopb) * 2 - 1) << 0x8);
+ /* TX masks, 8 bit data and 1 bit stop for example:
+ * mask1 = b#0111111110
+ * mask2 = b#1000000000
+ */
+ for (i = 0, up->txmask1 = 0; i < up->csize; i++)
+ up->txmask1 |= (1<<i);
+ up->txmask2 = (1<<i);
+ if (up->stopb) {
+ ++i;
+ up->txmask2 |= (1<<i);
+ }
+ up->txmask1 <<= 1;
+ up->txmask2 <<= 1;
+ /* uart baud rate */
+ port->uartclk = uart_get_baud_rate(port, termios, old, 0, get_sclk()/16);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /* Disable UART */
+ SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
+ SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
+
+ sport_uart_setup(up, up->csize + up->stopb, port->uartclk);
+
+ /* Drive the TX line high after config; one dummy word is
+ * necessary to stop the sport after shifting one byte.
+ */
+ SPORT_PUT_TX(up, 0xffff);
+ SPORT_PUT_TX(up, 0xffff);
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
+ SSYNC();
+ while (!(SPORT_GET_STAT(up) & TXHRE))
+ cpu_relax();
+ SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
+ SSYNC();
+
+ /* Port speed changed, update the per-port timeout. */
+ uart_update_timeout(port, termios->c_cflag, port->uartclk);
+
+ /* Enable sport rx */
+ SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) | RSPEN);
+ SSYNC();
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
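
A worked example of the mask computation in the function above, using CS7 with CSTOPB as an arbitrary illustration:

	/* For termios CS7 | CSTOPB: csize = 7, stopb = 1, so the loop yields
	 *   txmask1 = 0x07f << 1 = 0x0fe   (keeps the 7 data bits)
	 *   txmask2 = 0x180 << 1 = 0x300   (sets both stop bits)
	 * and tx_one_byte() then builds the 10-bit frame
	 *   ((ch << 1) & 0x0fe) | 0x300
	 * i.e. start bit (0), 7 data bits LSB first, 2 stop bits (1).
	 */
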
+
struct uart_ops sport_uart_ops = {
.tx_empty = sport_tx_empty,
.set_mctrl = sport_set_mctrl,
@@ -480,138 +536,319 @@ struct uart_ops sport_uart_ops = {
.verify_port = sport_verify_port,
};
-static struct sport_uart_port sport_uart_ports[] = {
- { /* SPORT 0 */
- .name = "SPORT0",
- .tx_irq = IRQ_SPORT0_TX,
- .rx_irq = IRQ_SPORT0_RX,
- .err_irq= IRQ_SPORT0_ERROR,
- .port = {
- .type = PORT_BFIN_SPORT,
- .iotype = UPIO_MEM,
- .membase = (void __iomem *)SPORT0_TCR1,
- .mapbase = SPORT0_TCR1,
- .irq = IRQ_SPORT0_RX,
- .uartclk = CONFIG_SPORT_BAUD_RATE,
- .fifosize = 8,
- .ops = &sport_uart_ops,
- .line = 0,
- },
- }, { /* SPORT 1 */
- .name = "SPORT1",
- .tx_irq = IRQ_SPORT1_TX,
- .rx_irq = IRQ_SPORT1_RX,
- .err_irq= IRQ_SPORT1_ERROR,
- .port = {
- .type = PORT_BFIN_SPORT,
- .iotype = UPIO_MEM,
- .membase = (void __iomem *)SPORT1_TCR1,
- .mapbase = SPORT1_TCR1,
- .irq = IRQ_SPORT1_RX,
- .uartclk = CONFIG_SPORT_BAUD_RATE,
- .fifosize = 8,
- .ops = &sport_uart_ops,
- .line = 1,
- },
+#define BFIN_SPORT_UART_MAX_PORTS 4
+
+static struct sport_uart_port *bfin_sport_uart_ports[BFIN_SPORT_UART_MAX_PORTS];
+
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+static int __init
+sport_uart_console_setup(struct console *co, char *options)
+{
+ struct sport_uart_port *up;
+ int baud = 57600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /* Check whether an invalid uart number has been specified */
+ if (co->index < 0 || co->index >= BFIN_SPORT_UART_MAX_PORTS)
+ return -ENODEV;
+
+ up = bfin_sport_uart_ports[co->index];
+ if (!up)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&up->port, co, baud, parity, bits, flow);
+}
+
+static void sport_uart_console_putchar(struct uart_port *port, int ch)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+
+ while (SPORT_GET_STAT(up) & TXF)
+ barrier();
+
+ tx_one_byte(up, ch);
+}
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+sport_uart_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct sport_uart_port *up = bfin_sport_uart_ports[co->index];
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ if (SPORT_GET_TCR1(up) & TSPEN)
+ uart_console_write(&up->port, s, count, sport_uart_console_putchar);
+ else {
+ /* dummy data to start sport */
+ while (SPORT_GET_STAT(up) & TXF)
+ barrier();
+ SPORT_PUT_TX(up, 0xffff);
+ /* Enable transmit; an interrupt will then be generated */
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
+ SSYNC();
+
+ uart_console_write(&up->port, s, count, sport_uart_console_putchar);
+
+ /* Although the hold register is empty, the last byte is still in
+ * the shift register and has not been sent out yet. So write dummy
+ * data into the TX FIFO; the sport TX then stops once the last byte
+ * is shifted out and the dummy data has moved into the shift register.
+ */
+ while (SPORT_GET_STAT(up) & TXF)
+ barrier();
+ SPORT_PUT_TX(up, 0xffff);
+ while (!(SPORT_GET_STAT(up) & TXHRE))
+ barrier();
+
+ /* Stop sport tx transfer */
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
+ SSYNC();
}
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static struct uart_driver sport_uart_reg;
+
+static struct console sport_uart_console = {
+ .name = DEVICE_NAME,
+ .write = sport_uart_console_write,
+ .device = uart_console_device,
+ .setup = sport_uart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sport_uart_reg,
};
+#define SPORT_UART_CONSOLE (&sport_uart_console)
+#else
+#define SPORT_UART_CONSOLE NULL
+#endif /* CONFIG_SERIAL_BFIN_SPORT_CONSOLE */
+
+
static struct uart_driver sport_uart_reg = {
.owner = THIS_MODULE,
- .driver_name = "SPORT-UART",
- .dev_name = "ttySS",
+ .driver_name = DRV_NAME,
+ .dev_name = DEVICE_NAME,
.major = 204,
.minor = 84,
- .nr = ARRAY_SIZE(sport_uart_ports),
- .cons = NULL,
+ .nr = BFIN_SPORT_UART_MAX_PORTS,
+ .cons = SPORT_UART_CONSOLE,
};
-static int sport_uart_suspend(struct platform_device *dev, pm_message_t state)
+#ifdef CONFIG_PM
+static int sport_uart_suspend(struct device *dev)
{
- struct sport_uart_port *sport = platform_get_drvdata(dev);
+ struct sport_uart_port *sport = dev_get_drvdata(dev);
- pr_debug("%s enter\n", __func__);
+ dev_dbg(dev, "%s enter\n", __func__);
if (sport)
uart_suspend_port(&sport_uart_reg, &sport->port);
return 0;
}
-static int sport_uart_resume(struct platform_device *dev)
+static int sport_uart_resume(struct device *dev)
{
- struct sport_uart_port *sport = platform_get_drvdata(dev);
+ struct sport_uart_port *sport = dev_get_drvdata(dev);
- pr_debug("%s enter\n", __func__);
+ dev_dbg(dev, "%s enter\n", __func__);
if (sport)
uart_resume_port(&sport_uart_reg, &sport->port);
return 0;
}
-static int sport_uart_probe(struct platform_device *dev)
+static struct dev_pm_ops bfin_sport_uart_dev_pm_ops = {
+ .suspend = sport_uart_suspend,
+ .resume = sport_uart_resume,
+};
+#endif
+
+static int __devinit sport_uart_probe(struct platform_device *pdev)
{
- pr_debug("%s enter\n", __func__);
- sport_uart_ports[dev->id].port.dev = &dev->dev;
- uart_add_one_port(&sport_uart_reg, &sport_uart_ports[dev->id].port);
- platform_set_drvdata(dev, &sport_uart_ports[dev->id]);
+ struct resource *res;
+ struct sport_uart_port *sport;
+ int ret = 0;
- return 0;
+ dev_dbg(&pdev->dev, "%s enter\n", __func__);
+
+ if (pdev->id < 0 || pdev->id >= BFIN_SPORT_UART_MAX_PORTS) {
+ dev_err(&pdev->dev, "Wrong sport uart platform device id.\n");
+ return -ENOENT;
+ }
+
+ if (bfin_sport_uart_ports[pdev->id] == NULL) {
+ bfin_sport_uart_ports[pdev->id] =
+ kzalloc(sizeof(struct sport_uart_port), GFP_KERNEL);
+ sport = bfin_sport_uart_ports[pdev->id];
+ if (!sport) {
+ dev_err(&pdev->dev,
+ "Failed to allocate sport_uart_port\n");
+ return -ENOMEM;
+ }
+
+ ret = peripheral_request_list(
+ (unsigned short *)pdev->dev.platform_data, DRV_NAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to request SPORT peripherals\n");
+ goto out_error_free_mem;
+ }
+
+ spin_lock_init(&sport->port.lock);
+ sport->port.fifosize = SPORT_TX_FIFO_SIZE;
+ sport->port.ops = &sport_uart_ops;
+ sport->port.line = pdev->id;
+ sport->port.iotype = UPIO_MEM;
+ sport->port.flags = UPF_BOOT_AUTOCONF;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ ret = -ENOENT;
+ goto out_error_free_peripherals;
+ }
+
+ sport->port.membase = ioremap(res->start,
+ resource_size(res));
+ if (!sport->port.membase) {
+ dev_err(&pdev->dev, "Cannot map sport IO\n");
+ ret = -ENXIO;
+ goto out_error_free_peripherals;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "No sport RX/TX IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+ sport->port.irq = ret;
+
+ sport->err_irq = platform_get_irq(pdev, 1);
+ if (sport->err_irq < 0) {
+ dev_err(&pdev->dev, "No sport status IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+ }
+
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+ if (!is_early_platform_device(pdev)) {
+#endif
+ sport = bfin_sport_uart_ports[pdev->id];
+ sport->port.dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, sport);
+ ret = uart_add_one_port(&sport_uart_reg, &sport->port);
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+ }
+#endif
+ if (!ret)
+ return 0;
+
+ if (sport) {
+out_error_unmap:
+ iounmap(sport->port.membase);
+out_error_free_peripherals:
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+out_error_free_mem:
+ kfree(sport);
+ bfin_sport_uart_ports[pdev->id] = NULL;
+ }
+
+ return ret;
}
-static int sport_uart_remove(struct platform_device *dev)
+static int __devexit sport_uart_remove(struct platform_device *pdev)
{
- struct sport_uart_port *sport = platform_get_drvdata(dev);
+ struct sport_uart_port *sport = platform_get_drvdata(pdev);
- pr_debug("%s enter\n", __func__);
- platform_set_drvdata(dev, NULL);
+ dev_dbg(&pdev->dev, "%s enter\n", __func__);
+ dev_set_drvdata(&pdev->dev, NULL);
- if (sport)
+ if (sport) {
uart_remove_one_port(&sport_uart_reg, &sport->port);
+ iounmap(sport->port.membase);
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+ kfree(sport);
+ bfin_sport_uart_ports[pdev->id] = NULL;
+ }
return 0;
}
static struct platform_driver sport_uart_driver = {
.probe = sport_uart_probe,
- .remove = sport_uart_remove,
- .suspend = sport_uart_suspend,
- .resume = sport_uart_resume,
+ .remove = __devexit_p(sport_uart_remove),
.driver = {
.name = DRV_NAME,
+#ifdef CONFIG_PM
+ .pm = &bfin_sport_uart_dev_pm_ops,
+#endif
},
};
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+static __initdata struct early_platform_driver early_sport_uart_driver = {
+ .class_str = DRV_NAME,
+ .pdrv = &sport_uart_driver,
+ .requested_id = EARLY_PLATFORM_ID_UNSET,
+};
+
+static int __init sport_uart_rs_console_init(void)
+{
+ early_platform_driver_register(&early_sport_uart_driver, DRV_NAME);
+
+ early_platform_driver_probe(DRV_NAME, BFIN_SPORT_UART_MAX_PORTS, 0);
+
+ register_console(&sport_uart_console);
+
+ return 0;
+}
+console_initcall(sport_uart_rs_console_init);
+#endif
+
static int __init sport_uart_init(void)
{
int ret;
- pr_debug("%s enter\n", __func__);
+ pr_info("Serial: Blackfin uart over sport driver\n");
+
ret = uart_register_driver(&sport_uart_reg);
- if (ret != 0) {
- printk(KERN_ERR "Failed to register %s:%d\n",
+ if (ret) {
+ pr_err("failed to register %s:%d\n",
sport_uart_reg.driver_name, ret);
return ret;
}
ret = platform_driver_register(&sport_uart_driver);
- if (ret != 0) {
- printk(KERN_ERR "Failed to register sport uart driver:%d\n", ret);
+ if (ret) {
+ pr_err("failed to register sport uart driver:%d\n", ret);
uart_unregister_driver(&sport_uart_reg);
}
-
- pr_debug("%s exit\n", __func__);
return ret;
}
+module_init(sport_uart_init);
static void __exit sport_uart_exit(void)
{
- pr_debug("%s enter\n", __func__);
platform_driver_unregister(&sport_uart_driver);
uart_unregister_driver(&sport_uart_reg);
}
-
-module_init(sport_uart_init);
module_exit(sport_uart_exit);
+MODULE_AUTHOR("Sonic Zhang, Roy Huang");
+MODULE_DESCRIPTION("Blackfin serial over SPORT driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/serial/bfin_sport_uart.h
index 671d41c..abe0361 100644
--- a/drivers/serial/bfin_sport_uart.h
+++ b/drivers/serial/bfin_sport_uart.h
@@ -1,29 +1,23 @@
/*
- * File: linux/drivers/serial/bfin_sport_uart.h
+ * Blackfin On-Chip Sport Emulated UART Driver
*
- * Based on: include/asm-blackfin/mach-533/bfin_serial_5xx.h
- * Author: Roy Huang <roy.huang>analog.com>
+ * Copyright 2006-2008 Analog Devices Inc.
*
- * Created: Nov 22, 2006
- * Copyright: (C) Analog Device Inc.
- * Description: this driver enable SPORTs on Blackfin emulate UART.
+ * Enter bugs at http://blackfin.uclinux.org/
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Licensed under the GPL-2 or later.
*/
+/*
+ * This driver and the supported hardware are based on EE-191 from ADI.
+ * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf
+ * This application note describes how to implement a UART on a SHARC DSP,
+ * but this driver is implemented on the Blackfin processor.
+ * Transmit Frame Sync is not used by this driver to transfer data out.
+ */
+
+#ifndef _BFIN_SPORT_UART_H
+#define _BFIN_SPORT_UART_H
#define OFFSET_TCR1 0x00 /* Transmit Configuration 1 Register */
#define OFFSET_TCR2 0x04 /* Transmit Configuration 2 Register */
@@ -61,3 +55,7 @@
#define SPORT_PUT_RCLKDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCLKDIV), v)
#define SPORT_PUT_RFSDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RFSDIV), v)
#define SPORT_PUT_STAT(sport, v) bfin_write16(((sport)->port.membase + OFFSET_STAT), v)
+
+#define SPORT_TX_FIFO_SIZE 8
+
+#endif /* _BFIN_SPORT_UART_H */
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 0028b6f..53a4682 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -751,7 +751,6 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
trace(icom_port, "FID_STATUS", status);
count = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].leLength);
- count = tty_buffer_request_room(tty, count);
trace(icom_port, "RCV_COUNT", count);
trace(icom_port, "REAL_COUNT", count);
@@ -1654,4 +1653,6 @@ MODULE_DESCRIPTION("IBM iSeries Serial IOA driver");
MODULE_SUPPORTED_DEVICE
("IBM iSeries 2745, 2771, 2772, 2742, 2793 and 2805 Communications adapters");
MODULE_LICENSE("GPL");
-
+MODULE_FIRMWARE("icom_call_setup.bin");
+MODULE_FIRMWARE("icom_res_dce.bin");
+MODULE_FIRMWARE("icom_asc.bin");
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 60d665a..d00fcf8 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -1279,7 +1279,7 @@ static int serial_imx_probe(struct platform_device *pdev)
sport->use_irda = 1;
#endif
- if (pdata->init) {
+ if (pdata && pdata->init) {
ret = pdata->init(pdev);
if (ret)
goto clkput;
@@ -1292,7 +1292,7 @@ static int serial_imx_probe(struct platform_device *pdev)
return 0;
deinit:
- if (pdata->exit)
+ if (pdata && pdata->exit)
pdata->exit(pdev);
clkput:
clk_put(sport->clk);
@@ -1321,7 +1321,7 @@ static int serial_imx_remove(struct platform_device *pdev)
clk_disable(sport->clk);
- if (pdata->exit)
+ if (pdata && pdata->exit)
pdata->exit(pdev);
iounmap(sport->port.membase);
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index 85dc041..23ba6b4 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -1411,8 +1411,7 @@ static int receive_chars(struct uart_port *the_port)
read_count = do_read(the_port, ch, MAX_CHARS);
if (read_count > 0) {
flip = 1;
- read_room = tty_buffer_request_room(tty, read_count);
- tty_insert_flip_string(tty, ch, read_room);
+ read_room = tty_insert_flip_string(tty, ch, read_count);
the_port->icount.rx += read_count;
}
spin_unlock_irqrestore(&the_port->lock, pflags);
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c
index 108c3e0..12cb5e4 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/serial/jsm/jsm_driver.c
@@ -179,6 +179,7 @@ static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device
return 0;
out_free_irq:
+ jsm_remove_uart_port(brd);
free_irq(brd->irq, brd);
out_iounmap:
iounmap(brd->re_map_membase);
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index cd95e21..5673ca9 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -432,7 +432,7 @@ int __devinit jsm_tty_init(struct jsm_board *brd)
int jsm_uart_port_init(struct jsm_board *brd)
{
- int i;
+ int i, rc;
unsigned int line;
struct jsm_channel *ch;
@@ -467,8 +467,11 @@ int jsm_uart_port_init(struct jsm_board *brd)
} else
set_bit(line, linemap);
brd->channels[i]->uart_port.line = line;
- if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port))
- printk(KERN_INFO "jsm: add device failed\n");
+ rc = uart_add_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
+ if (rc) {
+ printk(KERN_INFO "jsm: Port %d failed. Aborting...\n", i);
+ return rc;
+ }
else
printk(KERN_INFO "jsm: Port %d added\n", i);
}
diff --git a/drivers/serial/msm_serial.c b/drivers/serial/msm_serial.c
index b05c5aa..ecdc0fa 100644
--- a/drivers/serial/msm_serial.c
+++ b/drivers/serial/msm_serial.c
@@ -691,6 +691,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
struct msm_port *msm_port;
struct resource *resource;
struct uart_port *port;
+ int irq;
if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
return -ENXIO;
@@ -711,9 +712,10 @@ static int __init msm_serial_probe(struct platform_device *pdev)
return -ENXIO;
port->mapbase = resource->start;
- port->irq = platform_get_irq(pdev, 0);
- if (unlikely(port->irq < 0))
+ irq = platform_get_irq(pdev, 0);
+ if (unlikely(irq < 0))
return -ENXIO;
+ port->irq = irq;
platform_set_drvdata(pdev, port);
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
index 34b31da..7bf1026 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/serial/timbuart.c
@@ -421,7 +421,7 @@ static struct uart_driver timbuart_driver = {
static int timbuart_probe(struct platform_device *dev)
{
- int err;
+ int err, irq;
struct timbuart_port *uart;
struct resource *iomem;
@@ -453,11 +453,12 @@ static int timbuart_probe(struct platform_device *dev)
uart->port.mapbase = iomem->start;
uart->port.membase = NULL;
- uart->port.irq = platform_get_irq(dev, 0);
- if (uart->port.irq < 0) {
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
err = -EINVAL;
goto err_register;
}
+ uart->port.irq = irq;
tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index d8992d1..f6e34e0 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -144,7 +144,7 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed)
case USB_SPEED_LOW:
case USB_SPEED_FULL:
case USB_SPEED_HIGH:
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
break;
default:
usbip_uerr("speed %d\n", speed);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 4f5bb56..6a58cb1 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -21,6 +21,7 @@ config USB_ARCH_HAS_HCD
default y if USB_ARCH_HAS_EHCI
default y if PCMCIA && !M32R # sl811_cs
default y if ARM # SL-811
+ default y if BLACKFIN # SL-811
default y if SUPERH # r8a66597-hcd
default PCI
@@ -39,6 +40,7 @@ config USB_ARCH_HAS_OHCI
default y if ARCH_PNX4008 && I2C
default y if MFD_TC6393XB
default y if ARCH_W90X900
+ default y if ARCH_DAVINCI_DA8XX
# PPC:
default y if STB03xxx
default y if PPC_MPC52xx
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index be3c9b8..80b4008 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_USB_U132_HCD) += host/
obj-$(CONFIG_USB_R8A66597_HCD) += host/
obj-$(CONFIG_USB_HWA_HCD) += host/
obj-$(CONFIG_USB_ISP1760_HCD) += host/
+obj-$(CONFIG_USB_IMX21_HCD) += host/
obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 56802d2..c89990f 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -5,6 +5,7 @@
* Copyright (C) 2004 David Woodhouse, Duncan Sands, Roman Kagan
* Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru)
* Copyright (C) 2007 Simon Arlott
+ * Copyright (C) 2009 Simon Arlott
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -43,7 +44,7 @@
#include "usbatm.h"
#define DRIVER_AUTHOR "Roman Kagan, David Woodhouse, Duncan Sands, Simon Arlott"
-#define DRIVER_VERSION "0.3"
+#define DRIVER_VERSION "0.4"
#define DRIVER_DESC "Conexant AccessRunner ADSL USB modem driver"
static const char cxacru_driver_name[] = "cxacru";
@@ -52,6 +53,7 @@ static const char cxacru_driver_name[] = "cxacru";
#define CXACRU_EP_DATA 0x02 /* Bulk in/out */
#define CMD_PACKET_SIZE 64 /* Should be maxpacket(ep)? */
+#define CMD_MAX_CONFIG ((CMD_PACKET_SIZE / 4 - 1) / 2)
/* Addresses */
#define PLLFCLK_ADDR 0x00350068
@@ -105,6 +107,26 @@ enum cxacru_cm_request {
CM_REQUEST_MAX,
};
+/* commands for interaction with the flash memory
+ *
+ * read: response is the contents of the first 60 bytes of flash memory
+ * write: request contains the 60 bytes of data to write to flash memory
+ * response is the contents of the first 60 bytes of flash memory
+ *
+ * layout: PP PP VV VV MM MM MM MM MM MM ?? ?? SS SS SS SS SS SS SS SS
+ * SS SS SS SS SS SS SS SS 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ *
+ * P: le16 USB Product ID
+ * V: le16 USB Vendor ID
+ * M: be48 MAC Address
+ * S: le16 ASCII Serial Number
+ */
+enum cxacru_cm_flash {
+ CM_FLASH_READ = 0xa1,
+ CM_FLASH_WRITE = 0xa2
+};
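
A hypothetical C rendering of the 60-byte flash layout described above (field names are invented for illustration; the driver documents only the byte layout):

	/* Invented struct view of the flash image; not part of the driver. */
	struct cxacru_flash_layout {
		__le16 usb_product_id;		/* PP PP */
		__le16 usb_vendor_id;		/* VV VV */
		u8     mac_address[6];		/* MM x6, big-endian */
		u8     unknown[2];		/* ?? ?? */
		__le16 serial[8];		/* SS x16, ASCII serial number */
		u8     reserved[32];		/* observed as zero */
	} __packed;				/* 60 bytes total */
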
+
/* reply codes to the commands above */
enum cxacru_cm_status {
CM_STATUS_UNDEFINED,
@@ -196,23 +218,32 @@ static DEVICE_ATTR(_name, S_IRUGO, cxacru_sysfs_show_##_name, NULL)
static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, \
cxacru_sysfs_show_##_name, cxacru_sysfs_store_##_name)
+#define CXACRU_SET_INIT(_name) \
+static DEVICE_ATTR(_name, S_IWUSR, \
+ NULL, cxacru_sysfs_store_##_name)
+
#define CXACRU_ATTR_INIT(_value, _type, _name) \
static ssize_t cxacru_sysfs_show_##_name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
- struct usb_interface *intf = to_usb_interface(dev); \
- struct usbatm_data *usbatm_instance = usb_get_intfdata(intf); \
- struct cxacru_data *instance = usbatm_instance->driver_data; \
+ struct cxacru_data *instance = to_usbatm_driver_data(\
+ to_usb_interface(dev)); \
+\
+ if (instance == NULL) \
+ return -ENODEV; \
+\
return cxacru_sysfs_showattr_##_type(instance->card_info[_value], buf); \
} \
CXACRU__ATTR_INIT(_name)
#define CXACRU_ATTR_CREATE(_v, _t, _name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU_CMD_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
+#define CXACRU_SET_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU__ATTR_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU_ATTR_REMOVE(_v, _t, _name) CXACRU_DEVICE_REMOVE_FILE(_name)
#define CXACRU_CMD_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
+#define CXACRU_SET_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
#define CXACRU__ATTR_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
static ssize_t cxacru_sysfs_showattr_u32(u32 value, char *buf)
@@ -267,12 +298,12 @@ static ssize_t cxacru_sysfs_showattr_LINE(u32 value, char *buf)
static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
{
static char *str[] = {
- NULL,
+ "",
"ANSI T1.413",
"ITU-T G.992.1 (G.DMT)",
"ITU-T G.992.2 (G.LITE)"
};
- if (unlikely(value >= ARRAY_SIZE(str) || str[value] == NULL))
+ if (unlikely(value >= ARRAY_SIZE(str)))
return snprintf(buf, PAGE_SIZE, "%u\n", value);
return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
}
@@ -288,22 +319,28 @@ static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
static ssize_t cxacru_sysfs_show_mac_address(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbatm_data *usbatm_instance = usb_get_intfdata(intf);
- struct atm_dev *atm_dev = usbatm_instance->atm_dev;
+ struct cxacru_data *instance = to_usbatm_driver_data(
+ to_usb_interface(dev));
- return snprintf(buf, PAGE_SIZE, "%pM\n", atm_dev->esi);
+ if (instance == NULL || instance->usbatm->atm_dev == NULL)
+ return -ENODEV;
+
+ return snprintf(buf, PAGE_SIZE, "%pM\n",
+ instance->usbatm->atm_dev->esi);
}
static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbatm_data *usbatm_instance = usb_get_intfdata(intf);
- struct cxacru_data *instance = usbatm_instance->driver_data;
- u32 value = instance->card_info[CXINF_LINE_STARTABLE];
-
static char *str[] = { "running", "stopped" };
+ struct cxacru_data *instance = to_usbatm_driver_data(
+ to_usb_interface(dev));
+ u32 value;
+
+ if (instance == NULL)
+ return -ENODEV;
+
+ value = instance->card_info[CXINF_LINE_STARTABLE];
if (unlikely(value >= ARRAY_SIZE(str)))
return snprintf(buf, PAGE_SIZE, "%u\n", value);
return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
@@ -312,9 +349,8 @@ static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev,
static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbatm_data *usbatm_instance = usb_get_intfdata(intf);
- struct cxacru_data *instance = usbatm_instance->driver_data;
+ struct cxacru_data *instance = to_usbatm_driver_data(
+ to_usb_interface(dev));
int ret;
int poll = -1;
char str_cmd[8];
@@ -328,13 +364,16 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
return -EINVAL;
ret = 0;
+ if (instance == NULL)
+ return -ENODEV;
+
if (mutex_lock_interruptible(&instance->adsl_state_serialize))
return -ERESTARTSYS;
if (!strcmp(str_cmd, "stop") || !strcmp(str_cmd, "restart")) {
ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_STOP, NULL, 0, NULL, 0);
if (ret < 0) {
- atm_err(usbatm_instance, "change adsl state:"
+ atm_err(instance->usbatm, "change adsl state:"
" CHIP_ADSL_LINE_STOP returned %d\n", ret);
ret = -EIO;
@@ -354,7 +393,7 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
if (!strcmp(str_cmd, "start") || !strcmp(str_cmd, "restart")) {
ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0);
if (ret < 0) {
- atm_err(usbatm_instance, "change adsl state:"
+ atm_err(instance->usbatm, "change adsl state:"
" CHIP_ADSL_LINE_START returned %d\n", ret);
ret = -EIO;
@@ -407,6 +446,72 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
return ret;
}
+/* CM_REQUEST_CARD_DATA_GET times out, so no show attribute */
+
+static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct cxacru_data *instance = to_usbatm_driver_data(
+ to_usb_interface(dev));
+ int len = strlen(buf);
+ int ret, pos, num;
+ __le32 data[CMD_PACKET_SIZE / 4];
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EACCES;
+
+ if (instance == NULL)
+ return -ENODEV;
+
+ pos = 0;
+ num = 0;
+ while (pos < len) {
+ int tmp;
+ u32 index;
+ u32 value;
+
+ ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
+ if (ret < 2)
+ return -EINVAL;
+ if (index > 0x7f)
+ return -EINVAL;
+ pos += tmp;
+
+ /* skip trailing newline */
+ if (buf[pos] == '\n' && pos == len-1)
+ pos++;
+
+ data[num * 2 + 1] = cpu_to_le32(index);
+ data[num * 2 + 2] = cpu_to_le32(value);
+ num++;
+
+ /* send the config values when the data buffer is full
+ * or no more input remains
+ */
+ if (pos >= len || num >= CMD_MAX_CONFIG) {
+ char log[CMD_MAX_CONFIG * 12 + 1]; /* %02x=%08x */
+
+ data[0] = cpu_to_le32(num);
+ ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET,
+ (u8 *) data, 4 + num * 8, NULL, 0);
+ if (ret < 0) {
+ atm_err(instance->usbatm,
+ "set card data returned %d\n", ret);
+ return -EIO;
+ }
+
+ for (tmp = 0; tmp < num; tmp++)
+ snprintf(log + tmp*12, 13, " %02x=%08x",
+ le32_to_cpu(data[tmp * 2 + 1]),
+ le32_to_cpu(data[tmp * 2 + 2]));
+ atm_info(instance->usbatm, "config%s\n", log);
+ num = 0;
+ }
+ }
+
+ return len;
+}
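
The payload this builds can be sketched for a hypothetical sysfs write of "02=1 40=a" (index/value pairs invented for illustration):

	/* CM_REQUEST_CARD_DATA_SET payload for the made-up write "02=1 40=a":
	 *   data[0] = cpu_to_le32(2);      - number of index/value pairs
	 *   data[1] = cpu_to_le32(0x02);   data[2] = cpu_to_le32(0x1);
	 *   data[3] = cpu_to_le32(0x40);   data[4] = cpu_to_le32(0xa);
	 * sent with length 4 + num * 8 = 20 bytes.
	 */
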
+
/*
* All device attributes are included in CXACRU_ALL_FILES
* so that the same list can be used multiple times:
@@ -442,7 +547,8 @@ CXACRU_ATTR_##_action(CXINF_MODULATION, MODU, modulation); \
CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND, u32, adsl_headend); \
CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND_ENVIRONMENT, u32, adsl_headend_environment); \
CXACRU_ATTR_##_action(CXINF_CONTROLLER_VERSION, u32, adsl_controller_version); \
-CXACRU_CMD_##_action( adsl_state);
+CXACRU_CMD_##_action( adsl_state); \
+CXACRU_SET_##_action( adsl_config);
CXACRU_ALL_FILES(INIT);
@@ -596,7 +702,7 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
len = ret / 4;
for (offb = 0; offb < len; ) {
int l = le32_to_cpu(buf[offb++]);
- if (l > stride || l > (len - offb) / 2) {
+ if (l < 0 || l > stride || l > (len - offb) / 2) {
if (printk_ratelimit())
usb_err(instance->usbatm, "invalid data length from cm %#x: %d\n",
cm, l);
@@ -649,9 +755,6 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
{
struct cxacru_data *instance = usbatm_instance->driver_data;
struct usb_interface *intf = usbatm_instance->usb_intf;
- /*
- struct atm_dev *atm_dev = usbatm_instance->atm_dev;
- */
int ret;
int start_polling = 1;
@@ -697,6 +800,9 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
mutex_unlock(&instance->poll_state_serialize);
mutex_unlock(&instance->adsl_state_serialize);
+ printk(KERN_INFO "%s%d: %s %pM\n", atm_dev->type, atm_dev->number,
+ usbatm_instance->description, atm_dev->esi);
+
if (start_polling)
cxacru_poll_status(&instance->poll_work.work);
return 0;
@@ -873,11 +979,9 @@ cleanup:
static void cxacru_upload_firmware(struct cxacru_data *instance,
const struct firmware *fw,
- const struct firmware *bp,
- const struct firmware *cf)
+ const struct firmware *bp)
{
int ret;
- int off;
struct usbatm_data *usbatm = instance->usbatm;
struct usb_device *usb_dev = usbatm->usb_dev;
__le16 signature[] = { usb_dev->descriptor.idVendor,
@@ -911,6 +1015,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
}
/* Firmware */
+ usb_info(usbatm, "loading firmware\n");
ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, FW_ADDR, fw->data, fw->size);
if (ret) {
usb_err(usbatm, "Firmware upload failed: %d\n", ret);
@@ -919,6 +1024,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
/* Boot ROM patch */
if (instance->modem_type->boot_rom_patch) {
+ usb_info(usbatm, "loading boot ROM patch\n");
ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_ADDR, bp->data, bp->size);
if (ret) {
usb_err(usbatm, "Boot ROM patching failed: %d\n", ret);
@@ -933,6 +1039,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
return;
}
+ usb_info(usbatm, "starting device\n");
if (instance->modem_type->boot_rom_patch) {
val = cpu_to_le32(BR_ADDR);
ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_STACK_ADDR, (u8 *) &val, 4);
@@ -958,26 +1065,6 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
usb_err(usbatm, "modem failed to initialize: %d\n", ret);
return;
}
-
- /* Load config data (le32), doing one packet at a time */
- if (cf)
- for (off = 0; off < cf->size / 4; ) {
- __le32 buf[CMD_PACKET_SIZE / 4 - 1];
- int i, len = min_t(int, cf->size / 4 - off, CMD_PACKET_SIZE / 4 / 2 - 1);
- buf[0] = cpu_to_le32(len);
- for (i = 0; i < len; i++, off++) {
- buf[i * 2 + 1] = cpu_to_le32(off);
- memcpy(buf + i * 2 + 2, cf->data + off * 4, 4);
- }
- ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET,
- (u8 *) buf, len, NULL, 0);
- if (ret < 0) {
- usb_err(usbatm, "load config data failed: %d\n", ret);
- return;
- }
- }
-
- msleep_interruptible(4000);
}
static int cxacru_find_firmware(struct cxacru_data *instance,
@@ -1003,7 +1090,7 @@ static int cxacru_find_firmware(struct cxacru_data *instance,
static int cxacru_heavy_init(struct usbatm_data *usbatm_instance,
struct usb_interface *usb_intf)
{
- const struct firmware *fw, *bp, *cf;
+ const struct firmware *fw, *bp;
struct cxacru_data *instance = usbatm_instance->driver_data;
int ret = cxacru_find_firmware(instance, "fw", &fw);
@@ -1021,13 +1108,8 @@ static int cxacru_heavy_init(struct usbatm_data *usbatm_instance,
}
}
- if (cxacru_find_firmware(instance, "cf", &cf)) /* optional */
- cf = NULL;
-
- cxacru_upload_firmware(instance, fw, bp, cf);
+ cxacru_upload_firmware(instance, fw, bp);
- if (cf)
- release_firmware(cf);
if (instance->modem_type->boot_rom_patch)
release_firmware(bp);
release_firmware(fw);
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index fbea856..9b53e8d 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1333,6 +1333,7 @@ void usbatm_usb_disconnect(struct usb_interface *intf)
if (instance->atm_dev) {
sysfs_remove_link(&instance->atm_dev->class_dev.kobj, "device");
atm_dev_deregister(instance->atm_dev);
+ instance->atm_dev = NULL;
}
usbatm_put_instance(instance); /* taken in usbatm_usb_probe */
@@ -1348,7 +1349,7 @@ static int __init usbatm_usb_init(void)
{
dbg("%s: driver version %s", __func__, DRIVER_VERSION);
- if (sizeof(struct usbatm_control) > sizeof(((struct sk_buff *) 0)->cb)) {
+ if (sizeof(struct usbatm_control) > FIELD_SIZEOF(struct sk_buff, cb)) {
printk(KERN_ERR "%s unusable with this kernel!\n", usbatm_driver_name);
return -EIO;
}
diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h
index f6f4508..0863f85 100644
--- a/drivers/usb/atm/usbatm.h
+++ b/drivers/usb/atm/usbatm.h
@@ -204,4 +204,19 @@ struct usbatm_data {
struct urb *urbs[0];
};
+static inline void *to_usbatm_driver_data(struct usb_interface *intf)
+{
+ struct usbatm_data *usbatm_instance;
+
+ if (intf == NULL)
+ return NULL;
+
+ usbatm_instance = usb_get_intfdata(intf);
+
+ if (usbatm_instance == NULL) /* set NULL before unbind() */
+ return NULL;
+
+ return usbatm_instance->driver_data; /* set NULL after unbind() */
+}
+
#endif /* _USBATM_H_ */
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index 5633bc5..029ee4a 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -137,13 +137,13 @@ static int __devinit c67x00_drv_probe(struct platform_device *pdev)
if (!c67x00)
return -ENOMEM;
- if (!request_mem_region(res->start, res->end - res->start + 1,
+ if (!request_mem_region(res->start, resource_size(res),
pdev->name)) {
dev_err(&pdev->dev, "Memory region busy\n");
ret = -EBUSY;
goto request_mem_failed;
}
- c67x00->hpi.base = ioremap(res->start, res->end - res->start + 1);
+ c67x00->hpi.base = ioremap(res->start, resource_size(res));
if (!c67x00->hpi.base) {
dev_err(&pdev->dev, "Unable to map HPI registers\n");
ret = -EIO;
@@ -182,7 +182,7 @@ static int __devinit c67x00_drv_probe(struct platform_device *pdev)
request_irq_failed:
iounmap(c67x00->hpi.base);
map_failed:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
request_mem_failed:
kfree(c67x00);
@@ -208,7 +208,7 @@ static int __devexit c67x00_drv_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res)
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
kfree(c67x00);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 34d4eb9..975d556 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -170,6 +170,7 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb)
{
wb->use = 0;
acm->transmitting--;
+ usb_autopm_put_interface_async(acm->control);
}
/*
@@ -211,9 +212,12 @@ static int acm_write_start(struct acm *acm, int wbn)
}
dbg("%s susp_count: %d", __func__, acm->susp_count);
+ usb_autopm_get_interface_async(acm->control);
if (acm->susp_count) {
- acm->delayed_wb = wb;
- schedule_work(&acm->waker);
+ if (!acm->delayed_wb)
+ acm->delayed_wb = wb;
+ else
+ usb_autopm_put_interface_async(acm->control);
spin_unlock_irqrestore(&acm->write_lock, flags);
return 0; /* A white lie */
}
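
The autopm reference counting this introduces balances as follows (a lifetime sketch, not driver code):

	/* Sketch of the reference flow around acm_write_start():
	 *   usb_autopm_get_interface_async()                          +1
	 *    |- device active:  URB submitted, acm_write_done() puts  -1
	 *    |- suspended, first write:  wb parked in acm->delayed_wb,
	 *    |     ref held until acm_resume() submits it and the
	 *    |     completion path runs acm_write_done()               -1
	 *    '- suspended, later writes: extra ref dropped at once     -1
	 */
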
@@ -424,7 +428,6 @@ next_buffer:
throttled = acm->throttle;
spin_unlock_irqrestore(&acm->throttle_lock, flags);
if (!throttled) {
- tty_buffer_request_room(tty, buf->size);
tty_insert_flip_string(tty, buf->base, buf->size);
tty_flip_buffer_push(tty);
} else {
@@ -534,23 +537,6 @@ static void acm_softint(struct work_struct *work)
tty_kref_put(tty);
}
-static void acm_waker(struct work_struct *waker)
-{
- struct acm *acm = container_of(waker, struct acm, waker);
- int rv;
-
- rv = usb_autopm_get_interface(acm->control);
- if (rv < 0) {
- dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__);
- return;
- }
- if (acm->delayed_wb) {
- acm_start_wb(acm, acm->delayed_wb);
- acm->delayed_wb = NULL;
- }
- usb_autopm_put_interface(acm->control);
-}
-
/*
* TTY handlers
*/
@@ -566,7 +552,7 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
acm = acm_table[tty->index];
if (!acm || !acm->dev)
- goto err_out;
+ goto out;
else
rv = 0;
@@ -582,8 +568,9 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
mutex_lock(&acm->mutex);
if (acm->port.count++) {
+ mutex_unlock(&acm->mutex);
usb_autopm_put_interface(acm->control);
- goto done;
+ goto out;
}
acm->ctrlurb->dev = acm->dev;
@@ -612,18 +599,18 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
rv = tty_port_block_til_ready(&acm->port, tty, filp);
tasklet_schedule(&acm->urb_task);
-done:
+
mutex_unlock(&acm->mutex);
-err_out:
+out:
mutex_unlock(&open_mutex);
return rv;
full_bailout:
usb_kill_urb(acm->ctrlurb);
bail_out:
- usb_autopm_put_interface(acm->control);
acm->port.count--;
mutex_unlock(&acm->mutex);
+ usb_autopm_put_interface(acm->control);
early_bail:
mutex_unlock(&open_mutex);
tty_port_tty_set(&acm->port, NULL);
@@ -1023,7 +1010,7 @@ static int acm_probe(struct usb_interface *intf,
case USB_CDC_CALL_MANAGEMENT_TYPE:
call_management_function = buffer[3];
call_interface_num = buffer[4];
- if ((call_management_function & 3) != 3)
+ if ((quirks & NOT_A_MODEM) == 0 && (call_management_function & 3) != 3)
dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n");
break;
default:
@@ -1178,7 +1165,6 @@ made_compressed_probe:
acm->urb_task.func = acm_rx_tasklet;
acm->urb_task.data = (unsigned long) acm;
INIT_WORK(&acm->work, acm_softint);
- INIT_WORK(&acm->waker, acm_waker);
init_waitqueue_head(&acm->drain_wait);
spin_lock_init(&acm->throttle_lock);
spin_lock_init(&acm->write_lock);
@@ -1343,7 +1329,6 @@ static void stop_data_traffic(struct acm *acm)
tasklet_enable(&acm->urb_task);
cancel_work_sync(&acm->work);
- cancel_work_sync(&acm->waker);
}
static void acm_disconnect(struct usb_interface *intf)
@@ -1435,6 +1420,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
static int acm_resume(struct usb_interface *intf)
{
struct acm *acm = usb_get_intfdata(intf);
+ struct acm_wb *wb;
int rv = 0;
int cnt;
@@ -1449,6 +1435,21 @@ static int acm_resume(struct usb_interface *intf)
mutex_lock(&acm->mutex);
if (acm->port.count) {
rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
+
+ spin_lock_irq(&acm->write_lock);
+ if (acm->delayed_wb) {
+ wb = acm->delayed_wb;
+ acm->delayed_wb = NULL;
+ spin_unlock_irq(&acm->write_lock);
+ acm_start_wb(acm, wb);
+ } else {
+ spin_unlock_irq(&acm->write_lock);
+ }
+
+ /*
+ * Error checking is deliberately postponed: the
+ * delayed write must be started no matter what.
+ */
if (rv < 0)
goto err_out;
@@ -1460,6 +1461,23 @@ err_out:
return rv;
}
+static int acm_reset_resume(struct usb_interface *intf)
+{
+ struct acm *acm = usb_get_intfdata(intf);
+ struct tty_struct *tty;
+
+ mutex_lock(&acm->mutex);
+ if (acm->port.count) {
+ tty = tty_port_tty_get(&acm->port);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
+ }
+ mutex_unlock(&acm->mutex);
+ return acm_resume(intf);
+}
+
#endif /* CONFIG_PM */
#define NOKIA_PCSUITE_ACM_INFO(x) \
@@ -1471,7 +1489,7 @@ err_out:
* USB driver structure.
*/
-static struct usb_device_id acm_ids[] = {
+static const struct usb_device_id acm_ids[] = {
/* quirky and broken devices */
{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
@@ -1576,6 +1594,11 @@ static struct usb_device_id acm_ids[] = {
/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
+ /* Support Lego NXT using pbLua firmware */
+ { USB_DEVICE(0x0694, 0xff00),
+ .driver_info = NOT_A_MODEM,
+ },
+
/* control interfaces with various AT-command sets */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_V25TER) },
@@ -1602,6 +1625,7 @@ static struct usb_driver acm_driver = {
#ifdef CONFIG_PM
.suspend = acm_suspend,
.resume = acm_resume,
+ .reset_resume = acm_reset_resume,
#endif
.id_table = acm_ids,
#ifdef CONFIG_PM
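The cdc-acm changes above replace the waker work item with strict reference counting: every write attempt takes one asynchronous autopm reference, and exactly one put matches it, whether the write is submitted immediately, parked in delayed_wb until resume, or dropped because another write is already parked. A reduced sketch of that accounting, with hypothetical names (my_dev, my_submit) standing in for the driver specifics:

#include <linux/spinlock.h>
#include <linux/usb.h>

struct my_dev {				/* hypothetical driver state */
	struct usb_interface *intf;
	spinlock_t lock;
	int suspended;
	int parked;
};

static int my_submit(struct my_dev *d)
{
	/* submit the URB; its completion handler calls
	 * usb_autopm_put_interface_async() to drop the reference */
	return 0;
}

static int my_write_start(struct my_dev *d)
{
	unsigned long flags;

	usb_autopm_get_interface_async(d->intf);	/* +1 per attempt */
	spin_lock_irqsave(&d->lock, flags);
	if (d->suspended) {
		if (!d->parked)
			d->parked = 1;	/* keep the ref; the parked write's
					 * completion drops it after resume */
		else
			usb_autopm_put_interface_async(d->intf); /* -1 now */
		spin_unlock_irqrestore(&d->lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	return my_submit(d);
}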
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index c4a0ee8..4a8e87e 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -112,7 +112,6 @@ struct acm {
struct mutex mutex;
struct usb_cdc_line_coding line; /* bits, stop, parity */
struct work_struct work; /* work queue entry for line discipline waking up */
- struct work_struct waker;
wait_queue_head_t drain_wait; /* close processing */
struct tasklet_struct urb_task; /* rx processing */
spinlock_t throttle_lock; /* synchronize throtteling and read callback */
@@ -137,3 +136,4 @@ struct acm {
#define NO_UNION_NORMAL 1
#define SINGLE_RX_URB 2
#define NO_CAP_LINE 4
+#define NOT_A_MODEM 8
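NOT_A_MODEM joins the existing power-of-two quirk flags, so several quirks can be OR'd into one driver_info word per device id. A small sketch of how such flags are declared and tested; the device id and probe body here are hypothetical:

#include <linux/usb.h>

#define NO_UNION_NORMAL 1		/* values from cdc-acm.h above */
#define NOT_A_MODEM 8

static const struct usb_device_id my_ids[] = {
	{ USB_DEVICE(0x1234, 0x5678),	/* hypothetical device */
	  .driver_info = NO_UNION_NORMAL | NOT_A_MODEM },
	{ }
};

static int my_probe_check(const struct usb_device_id *id)
{
	unsigned long quirks = id->driver_info;

	if (quirks & NOT_A_MODEM)
		return 0;	/* skip the call-management sanity check */
	/* ... otherwise warn if the device cannot do calls ... */
	return 0;
}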
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 3e564bf..18aafcb 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -31,7 +31,7 @@
#define DRIVER_AUTHOR "Oliver Neukum"
#define DRIVER_DESC "USB Abstract Control Model driver for USB WCM Device Management"
-static struct usb_device_id wdm_ids[] = {
+static const struct usb_device_id wdm_ids[] = {
{
.match_flags = USB_DEVICE_ID_MATCH_INT_CLASS |
USB_DEVICE_ID_MATCH_INT_SUBCLASS,
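Several id tables in this series gain a const qualifier; that is safe because usb_driver.id_table is already a pointer to const, and it lets the tables live in read-only data. The usual shape, sketched with a hypothetical table name:

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>

static const struct usb_device_id my_ids[] = {
	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
			     USB_CDC_ACM_PROTO_AT_V25TER) },
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, my_ids);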
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 9bc112e..93b5f85 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -163,7 +163,6 @@ struct usblp {
unsigned char used; /* True if open */
unsigned char present; /* True if not disconnected */
unsigned char bidir; /* interface is bidirectional */
- unsigned char sleeping; /* interface is suspended */
unsigned char no_paper; /* Paper Out happened */
unsigned char *device_id_string; /* IEEE 1284 DEVICE ID string (ptr) */
/* first 2 bytes are (big-endian) length */
@@ -191,7 +190,6 @@ static void usblp_dump(struct usblp *usblp) {
dbg("quirks=%d", usblp->quirks);
dbg("used=%d", usblp->used);
dbg("bidir=%d", usblp->bidir);
- dbg("sleeping=%d", usblp->sleeping);
dbg("device_id_string=\"%s\"",
usblp->device_id_string ?
usblp->device_id_string + 2 :
@@ -376,7 +374,7 @@ static int usblp_check_status(struct usblp *usblp, int err)
static int handle_bidir (struct usblp *usblp)
{
- if (usblp->bidir && usblp->used && !usblp->sleeping) {
+ if (usblp->bidir && usblp->used) {
if (usblp_submit_read(usblp) < 0)
return -EIO;
}
@@ -503,11 +501,6 @@ static long usblp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
goto done;
}
- if (usblp->sleeping) {
- retval = -ENODEV;
- goto done;
- }
-
dbg("usblp_ioctl: cmd=0x%x (%c nr=%d len=%d dir=%d)", cmd, _IOC_TYPE(cmd),
_IOC_NR(cmd), _IOC_SIZE(cmd), _IOC_DIR(cmd) );
@@ -914,8 +907,6 @@ static int usblp_wtest(struct usblp *usblp, int nonblock)
return 0;
}
spin_unlock_irqrestore(&usblp->lock, flags);
- if (usblp->sleeping)
- return -ENODEV;
if (nonblock)
return -EAGAIN;
return 1;
@@ -968,8 +959,6 @@ static int usblp_rtest(struct usblp *usblp, int nonblock)
return 0;
}
spin_unlock_irqrestore(&usblp->lock, flags);
- if (usblp->sleeping)
- return -ENODEV;
if (nonblock)
return -EAGAIN;
return 1;
@@ -1377,12 +1366,10 @@ static void usblp_disconnect(struct usb_interface *intf)
mutex_unlock (&usblp_mutex);
}
-static int usblp_suspend (struct usb_interface *intf, pm_message_t message)
+static int usblp_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usblp *usblp = usb_get_intfdata (intf);
- /* we take no more IO */
- usblp->sleeping = 1;
usblp_unlink_urbs(usblp);
#if 0 /* XXX Do we want this? What if someone is reading, should we fail? */
/* not strictly necessary, but just in case */
@@ -1393,18 +1380,17 @@ static int usblp_suspend (struct usb_interface *intf, pm_message_t message)
return 0;
}
-static int usblp_resume (struct usb_interface *intf)
+static int usblp_resume(struct usb_interface *intf)
{
struct usblp *usblp = usb_get_intfdata (intf);
int r;
- usblp->sleeping = 0;
r = handle_bidir (usblp);
return r;
}
-static struct usb_device_id usblp_ids [] = {
+static const struct usb_device_id usblp_ids[] = {
{ USB_DEVICE_INFO(7, 1, 1) },
{ USB_DEVICE_INFO(7, 1, 2) },
{ USB_DEVICE_INFO(7, 1, 3) },
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 7c5f4e3..8588c09 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -48,7 +48,7 @@
*/
#define USBTMC_MAX_READS_TO_CLEAR_BULK_IN 100
-static struct usb_device_id usbtmc_devices[] = {
+static const struct usb_device_id usbtmc_devices[] = {
{ USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), },
{ USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 1), },
{ 0, } /* terminating entry */
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index ad92594..97a819c 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -91,8 +91,8 @@ config USB_DYNAMIC_MINORS
If you are unsure about this, say N here.
config USB_SUSPEND
- bool "USB selective suspend/resume and wakeup"
- depends on USB && PM
+ bool "USB runtime power management (suspend/resume and wakeup)"
+ depends on USB && PM_RUNTIME
help
If you say Y here, you can use driver calls or the sysfs
"power/level" file to suspend or resume individual USB
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 355dffc..c83c975 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -118,6 +118,7 @@ static const char *format_endpt =
*/
static DECLARE_WAIT_QUEUE_HEAD(deviceconndiscwq);
+/* guarded by usbfs_mutex */
static unsigned int conndiscevcnt;
/* this struct stores the poll state for <mountpoint>/devices pollers */
@@ -156,7 +157,9 @@ static const struct class_info clas_info[] =
void usbfs_conn_disc_event(void)
{
+ mutex_lock(&usbfs_mutex);
conndiscevcnt++;
+ mutex_unlock(&usbfs_mutex);
wake_up(&deviceconndiscwq);
}
@@ -629,42 +632,29 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
static unsigned int usb_device_poll(struct file *file,
struct poll_table_struct *wait)
{
- struct usb_device_status *st = file->private_data;
+ struct usb_device_status *st;
unsigned int mask = 0;
- lock_kernel();
+ mutex_lock(&usbfs_mutex);
+ st = file->private_data;
if (!st) {
st = kmalloc(sizeof(struct usb_device_status), GFP_KERNEL);
-
- /* we may have dropped BKL -
- * need to check for having lost the race */
- if (file->private_data) {
- kfree(st);
- st = file->private_data;
- goto lost_race;
- }
- /* we haven't lost - check for allocation failure now */
if (!st) {
- unlock_kernel();
+ mutex_unlock(&usbfs_mutex);
return POLLIN;
}
- /*
- * need to prevent the module from being unloaded, since
- * proc_unregister does not call the release method and
- * we would have a memory leak
- */
st->lastev = conndiscevcnt;
file->private_data = st;
mask = POLLIN;
}
-lost_race:
+
if (file->f_mode & FMODE_READ)
poll_wait(file, &deviceconndiscwq, wait);
if (st->lastev != conndiscevcnt)
mask |= POLLIN;
st->lastev = conndiscevcnt;
- unlock_kernel();
+ mutex_unlock(&usbfs_mutex);
return mask;
}
@@ -685,7 +675,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig)
{
loff_t ret;
- lock_kernel();
+ mutex_lock(&file->f_dentry->d_inode->i_mutex);
switch (orig) {
case 0:
@@ -701,7 +691,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig)
ret = -EINVAL;
}
- unlock_kernel();
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return ret;
}
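Both lseek conversions in this file trade lock_kernel() for the inode mutex, which suffices because only the f_pos update needs serializing. A minimal sketch of the pattern (my_lseek is illustrative; SEEK_SET and SEEK_CUR are the named equivalents of the 0 and 1 cases above):

#include <linux/fs.h>
#include <linux/mutex.h>

static loff_t my_lseek(struct file *file, loff_t offset, int orig)
{
	struct inode *inode = file->f_dentry->d_inode;
	loff_t ret;

	mutex_lock(&inode->i_mutex);
	switch (orig) {
	case SEEK_SET:
		file->f_pos = offset;
		ret = file->f_pos;
		break;
	case SEEK_CUR:
		file->f_pos += offset;
		ret = file->f_pos;
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&inode->i_mutex);
	return ret;
}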
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index a678186..e909ff7 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -122,7 +122,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
{
loff_t ret;
- lock_kernel();
+ mutex_lock(&file->f_dentry->d_inode->i_mutex);
switch (orig) {
case 0:
@@ -138,7 +138,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
ret = -EINVAL;
}
- unlock_kernel();
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return ret;
}
@@ -310,7 +310,8 @@ static struct async *async_getpending(struct dev_state *ps,
static void snoop_urb(struct usb_device *udev,
void __user *userurb, int pipe, unsigned length,
- int timeout_or_status, enum snoop_when when)
+ int timeout_or_status, enum snoop_when when,
+ unsigned char *data, unsigned data_len)
{
static const char *types[] = {"isoc", "int", "ctrl", "bulk"};
static const char *dirs[] = {"out", "in"};
@@ -344,6 +345,11 @@ static void snoop_urb(struct usb_device *udev,
"status %d\n",
ep, t, d, length, timeout_or_status);
}
+
+ if (data && data_len > 0) {
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1,
+ data, data_len, 1);
+ }
}
#define AS_CONTINUATION 1
@@ -410,7 +416,9 @@ static void async_completed(struct urb *urb)
}
snoop(&urb->dev->dev, "urb complete\n");
snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
- as->status, COMPLETE);
+ as->status, COMPLETE,
+ ((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_OUT) ?
+ NULL : urb->transfer_buffer, urb->actual_length);
if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
as->status != -ENOENT)
cancel_bulk_urbs(ps, as->bulk_addr);
@@ -653,20 +661,20 @@ static int usbdev_open(struct inode *inode, struct file *file)
const struct cred *cred = current_cred();
int ret;
- lock_kernel();
- /* Protect against simultaneous removal or release */
- mutex_lock(&usbfs_mutex);
-
ret = -ENOMEM;
ps = kmalloc(sizeof(struct dev_state), GFP_KERNEL);
if (!ps)
- goto out;
+ goto out_free_ps;
ret = -ENODEV;
+ /* Protect against simultaneous removal or release */
+ mutex_lock(&usbfs_mutex);
+
/* usbdev device-node */
if (imajor(inode) == USB_DEVICE_MAJOR)
dev = usbdev_lookup_by_devt(inode->i_rdev);
+
#ifdef CONFIG_USB_DEVICEFS
/* procfs file */
if (!dev) {
@@ -678,13 +686,19 @@ static int usbdev_open(struct inode *inode, struct file *file)
dev = NULL;
}
#endif
- if (!dev || dev->state == USB_STATE_NOTATTACHED)
- goto out;
+ mutex_unlock(&usbfs_mutex);
+
+ if (!dev)
+ goto out_free_ps;
+
+ usb_lock_device(dev);
+ if (dev->state == USB_STATE_NOTATTACHED)
+ goto out_unlock_device;
+
ret = usb_autoresume_device(dev);
if (ret)
- goto out;
+ goto out_unlock_device;
- ret = 0;
ps->dev = dev;
ps->file = file;
spin_lock_init(&ps->lock);
@@ -702,15 +716,16 @@ static int usbdev_open(struct inode *inode, struct file *file)
smp_wmb();
list_add_tail(&ps->list, &dev->filelist);
file->private_data = ps;
+ usb_unlock_device(dev);
snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current),
current->comm);
- out:
- if (ret) {
- kfree(ps);
- usb_put_dev(dev);
- }
- mutex_unlock(&usbfs_mutex);
- unlock_kernel();
+ return ret;
+
+ out_unlock_device:
+ usb_unlock_device(dev);
+ usb_put_dev(dev);
+ out_free_ps:
+ kfree(ps);
return ret;
}
@@ -724,10 +739,7 @@ static int usbdev_release(struct inode *inode, struct file *file)
usb_lock_device(dev);
usb_hub_release_all_ports(dev, ps);
- /* Protect against simultaneous open */
- mutex_lock(&usbfs_mutex);
list_del_init(&ps->list);
- mutex_unlock(&usbfs_mutex);
for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed);
ifnum++) {
@@ -770,6 +782,13 @@ static int proc_control(struct dev_state *ps, void __user *arg)
if (!tbuf)
return -ENOMEM;
tmo = ctrl.timeout;
+ snoop(&dev->dev, "control urb: bRequestType=%02x "
+ "bRequest=%02x wValue=%04x "
+ "wIndex=%04x wLength=%04x\n",
+ ctrl.bRequestType, ctrl.bRequest,
+ __le16_to_cpup(&ctrl.wValue),
+ __le16_to_cpup(&ctrl.wIndex),
+ __le16_to_cpup(&ctrl.wLength));
if (ctrl.bRequestType & 0x80) {
if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data,
ctrl.wLength)) {
@@ -777,15 +796,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
return -EINVAL;
}
pipe = usb_rcvctrlpipe(dev, 0);
- snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT);
+ snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, NULL, 0);
usb_unlock_device(dev);
i = usb_control_msg(dev, pipe, ctrl.bRequest,
ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
tbuf, ctrl.wLength, tmo);
usb_lock_device(dev);
- snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE);
-
+ snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE,
+ tbuf, i);
if ((i > 0) && ctrl.wLength) {
if (copy_to_user(ctrl.data, tbuf, i)) {
free_page((unsigned long)tbuf);
@@ -800,14 +819,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
}
}
pipe = usb_sndctrlpipe(dev, 0);
- snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT);
+ snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT,
+ tbuf, ctrl.wLength);
usb_unlock_device(dev);
i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest,
ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
tbuf, ctrl.wLength, tmo);
usb_lock_device(dev);
- snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE);
+ snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, NULL, 0);
}
free_page((unsigned long)tbuf);
if (i < 0 && i != -EPIPE) {
@@ -853,12 +873,12 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
kfree(tbuf);
return -EINVAL;
}
- snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT);
+ snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0);
usb_unlock_device(dev);
i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
usb_lock_device(dev);
- snoop_urb(dev, NULL, pipe, len2, i, COMPLETE);
+ snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, tbuf, len2);
if (!i && len2) {
if (copy_to_user(bulk.data, tbuf, len2)) {
@@ -873,12 +893,12 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
return -EFAULT;
}
}
- snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT);
+ snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1);
usb_unlock_device(dev);
i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
usb_lock_device(dev);
- snoop_urb(dev, NULL, pipe, len2, i, COMPLETE);
+ snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0);
}
kfree(tbuf);
if (i < 0)
@@ -1097,6 +1117,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
is_in = 0;
uurb->endpoint &= ~USB_DIR_IN;
}
+ snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
+ "bRequest=%02x wValue=%04x "
+ "wIndex=%04x wLength=%04x\n",
+ dr->bRequestType, dr->bRequest,
+ __le16_to_cpup(&dr->wValue),
+ __le16_to_cpup(&dr->wIndex),
+ __le16_to_cpup(&dr->wLength));
break;
case USBDEVFS_URB_TYPE_BULK:
@@ -1104,13 +1131,25 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_ISOC:
return -EINVAL;
- /* allow single-shot interrupt transfers, at bogus rates */
+ case USB_ENDPOINT_XFER_INT:
+ /* allow single-shot interrupt transfers */
+ uurb->type = USBDEVFS_URB_TYPE_INTERRUPT;
+ goto interrupt_urb;
}
uurb->number_of_packets = 0;
if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
return -EINVAL;
break;
+ case USBDEVFS_URB_TYPE_INTERRUPT:
+ if (!usb_endpoint_xfer_int(&ep->desc))
+ return -EINVAL;
+ interrupt_urb:
+ uurb->number_of_packets = 0;
+ if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
+ return -EINVAL;
+ break;
+
case USBDEVFS_URB_TYPE_ISO:
/* arbitrary limit */
if (uurb->number_of_packets < 1 ||
@@ -1143,14 +1182,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
uurb->buffer_length = totlen;
break;
- case USBDEVFS_URB_TYPE_INTERRUPT:
- uurb->number_of_packets = 0;
- if (!usb_endpoint_xfer_int(&ep->desc))
- return -EINVAL;
- if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
- return -EINVAL;
- break;
-
default:
return -EINVAL;
}
@@ -1236,7 +1267,9 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
}
}
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
- as->urb->transfer_buffer_length, 0, SUBMIT);
+ as->urb->transfer_buffer_length, 0, SUBMIT,
+ is_in ? NULL : as->urb->transfer_buffer,
+ uurb->buffer_length);
async_newpending(as);
if (usb_endpoint_xfer_bulk(&ep->desc)) {
@@ -1274,7 +1307,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
dev_printk(KERN_DEBUG, &ps->dev->dev,
"usbfs: usb_submit_urb returned %d\n", ret);
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
- 0, ret, COMPLETE);
+ 0, ret, COMPLETE, NULL, 0);
async_removepending(as);
free_async(as);
return ret;
@@ -1628,7 +1661,10 @@ static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl)
if (driver == NULL || driver->ioctl == NULL) {
retval = -ENOTTY;
} else {
+ /* keep API that guarantees BKL */
+ lock_kernel();
retval = driver->ioctl(intf, ctl->ioctl_code, buf);
+ unlock_kernel();
if (retval == -ENOIOCTLCMD)
retval = -ENOTTY;
}
@@ -1711,6 +1747,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
if (!(file->f_mode & FMODE_WRITE))
return -EPERM;
+
usb_lock_device(dev);
if (!connected(ps)) {
usb_unlock_device(dev);
@@ -1877,9 +1914,7 @@ static long usbdev_ioctl(struct file *file, unsigned int cmd,
{
int ret;
- lock_kernel();
ret = usbdev_do_ioctl(file, cmd, (void __user *)arg);
- unlock_kernel();
return ret;
}
@@ -1890,9 +1925,7 @@ static long usbdev_compat_ioctl(struct file *file, unsigned int cmd,
{
int ret;
- lock_kernel();
ret = usbdev_do_ioctl(file, cmd, compat_ptr(arg));
- unlock_kernel();
return ret;
}
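snoop_urb() now receives the payload and dumps it with print_hex_dump(); OUT data is available at submit time and IN data only at completion, so the other direction passes NULL. The direction logic in isolation, as a sketch (snoop_data is a made-up helper, not from this patch):

#include <linux/kernel.h>

static void snoop_data(int is_in, int is_complete,
		       const void *buf, unsigned int len)
{
	/* dump IN payloads on completion, OUT payloads on submission */
	if (is_in != is_complete)
		buf = NULL;
	if (buf && len > 0)
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
			       32, 1, buf, len, 1);
}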
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index f2f055e..a7037bf 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -25,7 +25,7 @@
#include <linux/device.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>
-#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
#include "hcd.h"
#include "usb.h"
@@ -221,7 +221,7 @@ static int usb_probe_device(struct device *dev)
{
struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
struct usb_device *udev = to_usb_device(dev);
- int error = -ENODEV;
+ int error = 0;
dev_dbg(dev, "%s\n", __func__);
@@ -230,18 +230,23 @@ static int usb_probe_device(struct device *dev)
/* The device should always appear to be in use
* unless the driver supports autosuspend.
*/
- udev->pm_usage_cnt = !(udriver->supports_autosuspend);
+ if (!udriver->supports_autosuspend)
+ error = usb_autoresume_device(udev);
- error = udriver->probe(udev);
+ if (!error)
+ error = udriver->probe(udev);
return error;
}
/* called from driver core with dev locked */
static int usb_unbind_device(struct device *dev)
{
+ struct usb_device *udev = to_usb_device(dev);
struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
- udriver->disconnect(to_usb_device(dev));
+ udriver->disconnect(udev);
+ if (!udriver->supports_autosuspend)
+ usb_autosuspend_device(udev);
return 0;
}
@@ -274,60 +279,62 @@ static int usb_probe_interface(struct device *dev)
intf->needs_binding = 0;
if (usb_device_is_owned(udev))
- return -ENODEV;
+ return error;
if (udev->authorized == 0) {
dev_err(&intf->dev, "Device is not authorized for usage\n");
- return -ENODEV;
+ return error;
}
id = usb_match_id(intf, driver->id_table);
if (!id)
id = usb_match_dynamic_id(intf, driver);
- if (id) {
- dev_dbg(dev, "%s - got id\n", __func__);
-
- error = usb_autoresume_device(udev);
- if (error)
- return error;
+ if (!id)
+ return error;
- /* Interface "power state" doesn't correspond to any hardware
- * state whatsoever. We use it to record when it's bound to
- * a driver that may start I/0: it's not frozen/quiesced.
- */
- mark_active(intf);
- intf->condition = USB_INTERFACE_BINDING;
+ dev_dbg(dev, "%s - got id\n", __func__);
- /* The interface should always appear to be in use
- * unless the driver suports autosuspend.
- */
- atomic_set(&intf->pm_usage_cnt, !driver->supports_autosuspend);
-
- /* Carry out a deferred switch to altsetting 0 */
- if (intf->needs_altsetting0) {
- error = usb_set_interface(udev, intf->altsetting[0].
- desc.bInterfaceNumber, 0);
- if (error < 0)
- goto err;
+ error = usb_autoresume_device(udev);
+ if (error)
+ return error;
- intf->needs_altsetting0 = 0;
- }
+ intf->condition = USB_INTERFACE_BINDING;
- error = driver->probe(intf, id);
- if (error)
+ /* Bound interfaces are initially active. They are
+ * runtime-PM-enabled only if the driver has autosuspend support.
+ * They are sensitive to their children's power states.
+ */
+ pm_runtime_set_active(dev);
+ pm_suspend_ignore_children(dev, false);
+ if (driver->supports_autosuspend)
+ pm_runtime_enable(dev);
+
+ /* Carry out a deferred switch to altsetting 0 */
+ if (intf->needs_altsetting0) {
+ error = usb_set_interface(udev, intf->altsetting[0].
+ desc.bInterfaceNumber, 0);
+ if (error < 0)
goto err;
-
- intf->condition = USB_INTERFACE_BOUND;
- usb_autosuspend_device(udev);
+ intf->needs_altsetting0 = 0;
}
+ error = driver->probe(intf, id);
+ if (error)
+ goto err;
+
+ intf->condition = USB_INTERFACE_BOUND;
+ usb_autosuspend_device(udev);
return error;
-err:
- mark_quiesced(intf);
+ err:
intf->needs_remote_wakeup = 0;
intf->condition = USB_INTERFACE_UNBOUND;
usb_cancel_queued_reset(intf);
+
+ /* Unbound interfaces are always runtime-PM-disabled and -suspended */
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+
usb_autosuspend_device(udev);
return error;
}
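The probe path now sets up runtime PM explicitly instead of abusing pm_usage_cnt: a freshly bound interface is marked active, and runtime PM is enabled only for drivers that declare autosuspend support, leaving all others permanently powered. The setup and its unbind counterpart, reduced to a sketch (the helper names are illustrative):

#include <linux/pm_runtime.h>

static void bind_runtime_pm(struct device *dev, int supports_autosuspend)
{
	/* bound interfaces start active and track their children */
	pm_runtime_set_active(dev);
	pm_suspend_ignore_children(dev, false);
	if (supports_autosuspend)
		pm_runtime_enable(dev);
}

static void unbind_runtime_pm(struct device *dev)
{
	/* unbound interfaces are runtime-PM-disabled and -suspended */
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
}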
@@ -377,9 +384,17 @@ static int usb_unbind_interface(struct device *dev)
usb_set_intfdata(intf, NULL);
intf->condition = USB_INTERFACE_UNBOUND;
- mark_quiesced(intf);
intf->needs_remote_wakeup = 0;
+ /* Unbound interfaces are always runtime-PM-disabled and -suspended */
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+
+ /* Undo any residual pm_autopm_get_interface_* calls */
+ for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r)
+ usb_autopm_put_interface_no_suspend(intf);
+ atomic_set(&intf->pm_usage_cnt, 0);
+
if (!error)
usb_autosuspend_device(udev);
@@ -410,7 +425,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
struct usb_interface *iface, void *priv)
{
struct device *dev = &iface->dev;
- struct usb_device *udev = interface_to_usbdev(iface);
int retval = 0;
if (dev->driver)
@@ -420,11 +434,16 @@ int usb_driver_claim_interface(struct usb_driver *driver,
usb_set_intfdata(iface, priv);
iface->needs_binding = 0;
- usb_pm_lock(udev);
iface->condition = USB_INTERFACE_BOUND;
- mark_active(iface);
- atomic_set(&iface->pm_usage_cnt, !driver->supports_autosuspend);
- usb_pm_unlock(udev);
+
+ /* Bound interfaces are initially active. They are
+ * runtime-PM-enabled only if the driver has autosuspend support.
+ * They are sensitive to their children's power states.
+ */
+ pm_runtime_set_active(dev);
+ pm_suspend_ignore_children(dev, false);
+ if (driver->supports_autosuspend)
+ pm_runtime_enable(dev);
/* if interface was already added, bind now; else let
* the future device_add() bind it, bypassing probe()
@@ -691,9 +710,6 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct usb_device *usb_dev;
- /* driver is often null here; dev_dbg() would oops */
- pr_debug("usb %s: uevent\n", dev_name(dev));
-
if (is_usb_device(dev)) {
usb_dev = to_usb_device(dev);
} else if (is_usb_interface(dev)) {
@@ -705,6 +721,7 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
}
if (usb_dev->devnum < 0) {
+ /* driver is often null here; dev_dbg() would oops */
pr_debug("usb %s: already deleted?\n", dev_name(dev));
return -ENODEV;
}
@@ -983,7 +1000,6 @@ static void do_unbind_rebind(struct usb_device *udev, int action)
}
}
-/* Caller has locked udev's pm_mutex */
static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
{
struct usb_device_driver *udriver;
@@ -1007,7 +1023,6 @@ static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
return status;
}
-/* Caller has locked udev's pm_mutex */
static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
{
struct usb_device_driver *udriver;
@@ -1041,27 +1056,20 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
return status;
}
-/* Caller has locked intf's usb_device's pm mutex */
static int usb_suspend_interface(struct usb_device *udev,
struct usb_interface *intf, pm_message_t msg)
{
struct usb_driver *driver;
int status = 0;
- /* with no hardware, USB interfaces only use FREEZE and ON states */
- if (udev->state == USB_STATE_NOTATTACHED || !is_active(intf))
- goto done;
-
- /* This can happen; see usb_driver_release_interface() */
- if (intf->condition == USB_INTERFACE_UNBOUND)
+ if (udev->state == USB_STATE_NOTATTACHED ||
+ intf->condition == USB_INTERFACE_UNBOUND)
goto done;
driver = to_usb_driver(intf->dev.driver);
if (driver->suspend) {
status = driver->suspend(intf, msg);
- if (status == 0)
- mark_quiesced(intf);
- else if (!(msg.event & PM_EVENT_AUTO))
+ if (status && !(msg.event & PM_EVENT_AUTO))
dev_err(&intf->dev, "%s error %d\n",
"suspend", status);
} else {
@@ -1069,7 +1077,6 @@ static int usb_suspend_interface(struct usb_device *udev,
intf->needs_binding = 1;
dev_warn(&intf->dev, "no %s for driver %s?\n",
"suspend", driver->name);
- mark_quiesced(intf);
}
done:
@@ -1077,14 +1084,13 @@ static int usb_suspend_interface(struct usb_device *udev,
return status;
}
-/* Caller has locked intf's usb_device's pm_mutex */
static int usb_resume_interface(struct usb_device *udev,
struct usb_interface *intf, pm_message_t msg, int reset_resume)
{
struct usb_driver *driver;
int status = 0;
- if (udev->state == USB_STATE_NOTATTACHED || is_active(intf))
+ if (udev->state == USB_STATE_NOTATTACHED)
goto done;
/* Don't let autoresume interfere with unbinding */
@@ -1135,90 +1141,11 @@ static int usb_resume_interface(struct usb_device *udev,
done:
dev_vdbg(&intf->dev, "%s: status %d\n", __func__, status);
- if (status == 0 && intf->condition == USB_INTERFACE_BOUND)
- mark_active(intf);
/* Later we will unbind the driver and/or reprobe, if necessary */
return status;
}
-#ifdef CONFIG_USB_SUSPEND
-
-/* Internal routine to check whether we may autosuspend a device. */
-static int autosuspend_check(struct usb_device *udev, int reschedule)
-{
- int i;
- struct usb_interface *intf;
- unsigned long suspend_time, j;
-
- /* For autosuspend, fail fast if anything is in use or autosuspend
- * is disabled. Also fail if any interfaces require remote wakeup
- * but it isn't available.
- */
- if (udev->pm_usage_cnt > 0)
- return -EBUSY;
- if (udev->autosuspend_delay < 0 || udev->autosuspend_disabled)
- return -EPERM;
-
- suspend_time = udev->last_busy + udev->autosuspend_delay;
- if (udev->actconfig) {
- for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
- intf = udev->actconfig->interface[i];
- if (!is_active(intf))
- continue;
- if (atomic_read(&intf->pm_usage_cnt) > 0)
- return -EBUSY;
- if (intf->needs_remote_wakeup &&
- !udev->do_remote_wakeup) {
- dev_dbg(&udev->dev, "remote wakeup needed "
- "for autosuspend\n");
- return -EOPNOTSUPP;
- }
-
- /* Don't allow autosuspend if the device will need
- * a reset-resume and any of its interface drivers
- * doesn't include support.
- */
- if (udev->quirks & USB_QUIRK_RESET_RESUME) {
- struct usb_driver *driver;
-
- driver = to_usb_driver(intf->dev.driver);
- if (!driver->reset_resume ||
- intf->needs_remote_wakeup)
- return -EOPNOTSUPP;
- }
- }
- }
-
- /* If everything is okay but the device hasn't been idle for long
- * enough, queue a delayed autosuspend request. If the device
- * _has_ been idle for long enough and the reschedule flag is set,
- * likewise queue a delayed (1 second) autosuspend request.
- */
- j = jiffies;
- if (time_before(j, suspend_time))
- reschedule = 1;
- else
- suspend_time = j + HZ;
- if (reschedule) {
- if (!timer_pending(&udev->autosuspend.timer)) {
- queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
- round_jiffies_up_relative(suspend_time - j));
- }
- return -EAGAIN;
- }
- return 0;
-}
-
-#else
-
-static inline int autosuspend_check(struct usb_device *udev, int reschedule)
-{
- return 0;
-}
-
-#endif /* CONFIG_USB_SUSPEND */
-
/**
* usb_suspend_both - suspend a USB device and its interfaces
* @udev: the usb_device to suspend
@@ -1230,27 +1157,12 @@ static inline int autosuspend_check(struct usb_device *udev, int reschedule)
* all the interfaces which were suspended are resumed so that they remain
* in the same state as the device.
*
- * If an autosuspend is in progress the routine checks first to make sure
- * that neither the device itself or any of its active interfaces is in use
- * (pm_usage_cnt is greater than 0). If they are, the autosuspend fails.
- *
- * If the suspend succeeds, the routine recursively queues an autosuspend
- * request for @udev's parent device, thereby propagating the change up
- * the device tree. If all of the parent's children are now suspended,
- * the parent will autosuspend in turn.
- *
- * The suspend method calls are subject to mutual exclusion under control
- * of @udev's pm_mutex. Many of these calls are also under the protection
- * of @udev's device lock (including all requests originating outside the
- * USB subsystem), but autosuspend requests generated by a child device or
- * interface driver may not be. Usbcore will insure that the method calls
- * do not arrive during bind, unbind, or reset operations. However, drivers
- * must be prepared to handle suspend calls arriving at unpredictable times.
- * The only way to block such calls is to do an autoresume (preventing
- * autosuspends) while holding @udev's device lock (preventing outside
- * suspends).
- *
- * The caller must hold @udev->pm_mutex.
+ * Autosuspend requests originating from a child device or an interface
+ * driver may be made without the protection of @udev's device lock, but
+ * all other suspend calls will hold the lock. Usbcore will ensure that
+ * method calls do not arrive during bind, unbind, or reset operations.
+ * However, drivers must be prepared to handle suspend calls arriving at
+ * unpredictable times.
*
* This routine can run only in process context.
*/
@@ -1259,20 +1171,11 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
int status = 0;
int i = 0;
struct usb_interface *intf;
- struct usb_device *parent = udev->parent;
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED)
goto done;
- udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
-
- if (msg.event & PM_EVENT_AUTO) {
- status = autosuspend_check(udev, 0);
- if (status < 0)
- goto done;
- }
-
/* Suspend all the interfaces and then udev itself */
if (udev->actconfig) {
for (; i < udev->actconfig->desc.bNumInterfaces; i++) {
@@ -1287,35 +1190,21 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
/* If the suspend failed, resume interfaces that did get suspended */
if (status != 0) {
- pm_message_t msg2;
-
- msg2.event = msg.event ^ (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
+ msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
while (--i >= 0) {
intf = udev->actconfig->interface[i];
- usb_resume_interface(udev, intf, msg2, 0);
+ usb_resume_interface(udev, intf, msg, 0);
}
- /* Try another autosuspend when the interfaces aren't busy */
- if (msg.event & PM_EVENT_AUTO)
- autosuspend_check(udev, status == -EBUSY);
-
- /* If the suspend succeeded then prevent any more URB submissions,
- * flush any outstanding URBs, and propagate the suspend up the tree.
+ /* If the suspend succeeded then prevent any more URB submissions
+ * and flush any outstanding URBs.
*/
} else {
- cancel_delayed_work(&udev->autosuspend);
udev->can_submit = 0;
for (i = 0; i < 16; ++i) {
usb_hcd_flush_endpoint(udev, udev->ep_out[i]);
usb_hcd_flush_endpoint(udev, udev->ep_in[i]);
}
-
- /* If this is just a FREEZE or a PRETHAW, udev might
- * not really be suspended. Only true suspends get
- * propagated up the device tree.
- */
- if (parent && udev->state == USB_STATE_SUSPENDED)
- usb_autosuspend_device(parent);
}
done:
@@ -1332,23 +1221,12 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
* the resume method for @udev and then calls the resume methods for all
* the interface drivers in @udev.
*
- * Before starting the resume, the routine calls itself recursively for
- * the parent device of @udev, thereby propagating the change up the device
- * tree and assuring that @udev will be able to resume. If the parent is
- * unable to resume successfully, the routine fails.
- *
- * The resume method calls are subject to mutual exclusion under control
- * of @udev's pm_mutex. Many of these calls are also under the protection
- * of @udev's device lock (including all requests originating outside the
- * USB subsystem), but autoresume requests generated by a child device or
- * interface driver may not be. Usbcore will insure that the method calls
- * do not arrive during bind, unbind, or reset operations. However, drivers
- * must be prepared to handle resume calls arriving at unpredictable times.
- * The only way to block such calls is to do an autoresume (preventing
- * other autoresumes) while holding @udev's device lock (preventing outside
- * resumes).
- *
- * The caller must hold @udev->pm_mutex.
+ * Autoresume requests originating from a child device or an interface
+ * driver may be made without the protection of @udev's device lock, but
+ * all other resume calls will hold the lock. Usbcore will ensure that
+ * method calls do not arrive during bind, unbind, or reset operations.
+ * However, drivers must be prepared to handle resume calls arriving at
+ * unpredictable times.
*
* This routine can run only in process context.
*/
@@ -1357,48 +1235,18 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
int status = 0;
int i;
struct usb_interface *intf;
- struct usb_device *parent = udev->parent;
- cancel_delayed_work(&udev->autosuspend);
if (udev->state == USB_STATE_NOTATTACHED) {
status = -ENODEV;
goto done;
}
udev->can_submit = 1;
- /* Propagate the resume up the tree, if necessary */
- if (udev->state == USB_STATE_SUSPENDED) {
- if (parent) {
- status = usb_autoresume_device(parent);
- if (status == 0) {
- status = usb_resume_device(udev, msg);
- if (status || udev->state ==
- USB_STATE_NOTATTACHED) {
- usb_autosuspend_device(parent);
-
- /* It's possible usb_resume_device()
- * failed after the port was
- * unsuspended, causing udev to be
- * logically disconnected. We don't
- * want usb_disconnect() to autosuspend
- * the parent again, so tell it that
- * udev disconnected while still
- * suspended. */
- if (udev->state ==
- USB_STATE_NOTATTACHED)
- udev->discon_suspended = 1;
- }
- }
- } else {
-
- /* We can't progagate beyond the USB subsystem,
- * so if a root hub's controller is suspended
- * then we're stuck. */
- status = usb_resume_device(udev, msg);
- }
- } else if (udev->reset_resume)
+ /* Resume the device */
+ if (udev->state == USB_STATE_SUSPENDED || udev->reset_resume)
status = usb_resume_device(udev, msg);
+ /* Resume the interfaces */
if (status == 0 && udev->actconfig) {
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
intf = udev->actconfig->interface[i];
@@ -1414,55 +1262,94 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
return status;
}
-#ifdef CONFIG_USB_SUSPEND
+/* The device lock is held by the PM core */
+int usb_suspend(struct device *dev, pm_message_t msg)
+{
+ struct usb_device *udev = to_usb_device(dev);
-/* Internal routine to adjust a device's usage counter and change
- * its autosuspend state.
- */
-static int usb_autopm_do_device(struct usb_device *udev, int inc_usage_cnt)
+ do_unbind_rebind(udev, DO_UNBIND);
+ udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+ return usb_suspend_both(udev, msg);
+}
+
+/* The device lock is held by the PM core */
+int usb_resume(struct device *dev, pm_message_t msg)
{
- int status = 0;
+ struct usb_device *udev = to_usb_device(dev);
+ int status;
- usb_pm_lock(udev);
- udev->pm_usage_cnt += inc_usage_cnt;
- WARN_ON(udev->pm_usage_cnt < 0);
- if (inc_usage_cnt)
- udev->last_busy = jiffies;
- if (inc_usage_cnt >= 0 && udev->pm_usage_cnt > 0) {
- if (udev->state == USB_STATE_SUSPENDED)
- status = usb_resume_both(udev, PMSG_AUTO_RESUME);
- if (status != 0)
- udev->pm_usage_cnt -= inc_usage_cnt;
- else if (inc_usage_cnt)
+ /* For PM complete calls, all we do is rebind interfaces */
+ if (msg.event == PM_EVENT_ON) {
+ if (udev->state != USB_STATE_NOTATTACHED)
+ do_unbind_rebind(udev, DO_REBIND);
+ status = 0;
+
+ /* For all other calls, take the device back to full power and
+ * tell the PM core in case it was autosuspended previously.
+ */
+ } else {
+ status = usb_resume_both(udev, msg);
+ if (status == 0) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
udev->last_busy = jiffies;
- } else if (inc_usage_cnt <= 0 && udev->pm_usage_cnt <= 0) {
- status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
+ }
}
- usb_pm_unlock(udev);
+
+ /* Avoid PM error messages for devices disconnected while suspended
+ * as we'll display regular disconnect messages just a bit later.
+ */
+ if (status == -ENODEV)
+ status = 0;
return status;
}
-/* usb_autosuspend_work - callback routine to autosuspend a USB device */
-void usb_autosuspend_work(struct work_struct *work)
-{
- struct usb_device *udev =
- container_of(work, struct usb_device, autosuspend.work);
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_USB_SUSPEND
- usb_autopm_do_device(udev, 0);
+/**
+ * usb_enable_autosuspend - allow a USB device to be autosuspended
+ * @udev: the USB device which may be autosuspended
+ *
+ * This routine allows @udev to be autosuspended. An autosuspend won't
+ * take place until the autosuspend_delay has elapsed and all the other
+ * necessary conditions are satisfied.
+ *
+ * The caller must hold @udev's device lock.
+ */
+int usb_enable_autosuspend(struct usb_device *udev)
+{
+ if (udev->autosuspend_disabled) {
+ udev->autosuspend_disabled = 0;
+ usb_autosuspend_device(udev);
+ }
+ return 0;
}
+EXPORT_SYMBOL_GPL(usb_enable_autosuspend);
-/* usb_autoresume_work - callback routine to autoresume a USB device */
-void usb_autoresume_work(struct work_struct *work)
+/**
+ * usb_disable_autosuspend - prevent a USB device from being autosuspended
+ * @udev: the USB device which may not be autosuspended
+ *
+ * This routine prevents @udev from being autosuspended and wakes it up
+ * if it is already autosuspended.
+ *
+ * The caller must hold @udev's device lock.
+ */
+int usb_disable_autosuspend(struct usb_device *udev)
{
- struct usb_device *udev =
- container_of(work, struct usb_device, autoresume);
+ int rc = 0;
- /* Wake it up, let the drivers do their thing, and then put it
- * back to sleep.
- */
- if (usb_autopm_do_device(udev, 1) == 0)
- usb_autopm_do_device(udev, -1);
+ if (!udev->autosuspend_disabled) {
+ rc = usb_autoresume_device(udev);
+ if (rc == 0)
+ udev->autosuspend_disabled = 1;
+ }
+ return rc;
}
+EXPORT_SYMBOL_GPL(usb_disable_autosuspend);
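With these two helpers exported, a driver can opt a well-behaved device into autosuspend from its own code. A hypothetical probe doing so (the device lock is held across probe, as the kerneldoc requires):

#include <linux/usb.h>

static int my_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);

	usb_enable_autosuspend(udev);
	return 0;
}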
/**
* usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces
@@ -1472,15 +1359,11 @@ void usb_autoresume_work(struct work_struct *work)
* @udev and wants to allow it to autosuspend. Examples would be when
* @udev's device file in usbfs is closed or after a configuration change.
*
- * @udev's usage counter is decremented. If it or any of the usage counters
- * for an active interface is greater than 0, no autosuspend request will be
- * queued. (If an interface driver does not support autosuspend then its
- * usage counter is permanently positive.) Furthermore, if an interface
- * driver requires remote-wakeup capability during autosuspend but remote
- * wakeup is disabled, the autosuspend will fail.
+ * @udev's usage counter is decremented; if it drops to 0 and all the
+ * interfaces are inactive then a delayed autosuspend will be attempted.
+ * The attempt may fail (see autosuspend_check()).
*
- * Often the caller will hold @udev's device lock, but this is not
- * necessary.
+ * The caller must hold @udev's device lock.
*
* This routine can run only in process context.
*/
@@ -1488,9 +1371,11 @@ void usb_autosuspend_device(struct usb_device *udev)
{
int status;
- status = usb_autopm_do_device(udev, -1);
- dev_vdbg(&udev->dev, "%s: cnt %d\n",
- __func__, udev->pm_usage_cnt);
+ udev->last_busy = jiffies;
+ status = pm_runtime_put_sync(&udev->dev);
+ dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&udev->dev.power.usage_count),
+ status);
}
/**
@@ -1500,17 +1385,22 @@ void usb_autosuspend_device(struct usb_device *udev)
* This routine should be called when a core subsystem thinks @udev may
* be ready to autosuspend.
*
- * @udev's usage counter left unchanged. If it or any of the usage counters
- * for an active interface is greater than 0, or autosuspend is not allowed
- * for any other reason, no autosuspend request will be queued.
+ * @udev's usage counter is left unchanged. If it is 0 and all the interfaces
+ * are inactive then an autosuspend will be attempted. The attempt may
+ * fail or be delayed.
+ *
+ * The caller must hold @udev's device lock.
*
* This routine can run only in process context.
*/
void usb_try_autosuspend_device(struct usb_device *udev)
{
- usb_autopm_do_device(udev, 0);
- dev_vdbg(&udev->dev, "%s: cnt %d\n",
- __func__, udev->pm_usage_cnt);
+ int status;
+
+ status = pm_runtime_idle(&udev->dev);
+ dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&udev->dev.power.usage_count),
+ status);
}
/**
@@ -1519,16 +1409,15 @@ void usb_try_autosuspend_device(struct usb_device *udev)
*
* This routine should be called when a core subsystem wants to use @udev
* and needs to guarantee that it is not suspended. No autosuspend will
- * occur until usb_autosuspend_device is called. (Note that this will not
- * prevent suspend events originating in the PM core.) Examples would be
- * when @udev's device file in usbfs is opened or when a remote-wakeup
+ * occur until usb_autosuspend_device() is called. (Note that this will
+ * not prevent suspend events originating in the PM core.) Examples would
+ * be when @udev's device file in usbfs is opened or when a remote-wakeup
* request is received.
*
* @udev's usage counter is incremented to prevent subsequent autosuspends.
* However if the autoresume fails then the usage counter is re-decremented.
*
- * Often the caller will hold @udev's device lock, but this is not
- * necessary (and attempting it might cause deadlock).
+ * The caller must hold @udev's device lock.
*
* This routine can run only in process context.
*/
@@ -1536,42 +1425,14 @@ int usb_autoresume_device(struct usb_device *udev)
{
int status;
- status = usb_autopm_do_device(udev, 1);
- dev_vdbg(&udev->dev, "%s: status %d cnt %d\n",
- __func__, status, udev->pm_usage_cnt);
- return status;
-}
-
-/* Internal routine to adjust an interface's usage counter and change
- * its device's autosuspend state.
- */
-static int usb_autopm_do_interface(struct usb_interface *intf,
- int inc_usage_cnt)
-{
- struct usb_device *udev = interface_to_usbdev(intf);
- int status = 0;
-
- usb_pm_lock(udev);
- if (intf->condition == USB_INTERFACE_UNBOUND)
- status = -ENODEV;
- else {
- atomic_add(inc_usage_cnt, &intf->pm_usage_cnt);
- udev->last_busy = jiffies;
- if (inc_usage_cnt >= 0 &&
- atomic_read(&intf->pm_usage_cnt) > 0) {
- if (udev->state == USB_STATE_SUSPENDED)
- status = usb_resume_both(udev,
- PMSG_AUTO_RESUME);
- if (status != 0)
- atomic_sub(inc_usage_cnt, &intf->pm_usage_cnt);
- else
- udev->last_busy = jiffies;
- } else if (inc_usage_cnt <= 0 &&
- atomic_read(&intf->pm_usage_cnt) <= 0) {
- status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
- }
- }
- usb_pm_unlock(udev);
+ status = pm_runtime_get_sync(&udev->dev);
+ if (status < 0)
+ pm_runtime_put_sync(&udev->dev);
+ dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&udev->dev.power.usage_count),
+ status);
+ if (status > 0)
+ status = 0;
return status;
}
@@ -1585,34 +1446,25 @@ static int usb_autopm_do_interface(struct usb_interface *intf,
* closed.
*
* The routine decrements @intf's usage counter. When the counter reaches
- * 0, a delayed autosuspend request for @intf's device is queued. When
- * the delay expires, if @intf->pm_usage_cnt is still <= 0 along with all
- * the other usage counters for the sibling interfaces and @intf's
- * usb_device, the device and all its interfaces will be autosuspended.
- *
- * Note that @intf->pm_usage_cnt is owned by the interface driver. The
- * core will not change its value other than the increment and decrement
- * in usb_autopm_get_interface and usb_autopm_put_interface. The driver
- * may use this simple counter-oriented discipline or may set the value
- * any way it likes.
+ * 0, a delayed autosuspend request for @intf's device is attempted. The
+ * attempt may fail (see autosuspend_check()).
*
* If the driver has set @intf->needs_remote_wakeup then autosuspend will
* take place only if the device's remote-wakeup facility is enabled.
*
- * Suspend method calls queued by this routine can arrive at any time
- * while @intf is resumed and its usage counter is equal to 0. They are
- * not protected by the usb_device's lock but only by its pm_mutex.
- * Drivers must provide their own synchronization.
- *
* This routine can run only in process context.
*/
void usb_autopm_put_interface(struct usb_interface *intf)
{
- int status;
+ struct usb_device *udev = interface_to_usbdev(intf);
+ int status;
- status = usb_autopm_do_interface(intf, -1);
- dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, atomic_read(&intf->pm_usage_cnt));
+ udev->last_busy = jiffies;
+ atomic_dec(&intf->pm_usage_cnt);
+ status = pm_runtime_put_sync(&intf->dev);
+ dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&intf->dev.power.usage_count),
+ status);
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
@@ -1620,11 +1472,11 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
* usb_autopm_put_interface_async - decrement a USB interface's PM-usage counter
* @intf: the usb_interface whose counter should be decremented
*
- * This routine does essentially the same thing as
- * usb_autopm_put_interface(): it decrements @intf's usage counter and
- * queues a delayed autosuspend request if the counter is <= 0. The
- * difference is that it does not acquire the device's pm_mutex;
- * callers must handle all synchronization issues themselves.
+ * This routine does much the same thing as usb_autopm_put_interface():
+ * It decrements @intf's usage counter and schedules a delayed
+ * autosuspend request if the counter is <= 0. The difference is that it
+ * does not perform any synchronization; callers should hold a private
+ * lock and handle all synchronization issues themselves.
*
* Typically a driver would call this routine during an URB's completion
* handler, if no more URBs were pending.
@@ -1634,28 +1486,58 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
void usb_autopm_put_interface_async(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long last_busy;
int status = 0;
- if (intf->condition == USB_INTERFACE_UNBOUND) {
- status = -ENODEV;
- } else {
- udev->last_busy = jiffies;
- atomic_dec(&intf->pm_usage_cnt);
- if (udev->autosuspend_disabled || udev->autosuspend_delay < 0)
- status = -EPERM;
- else if (atomic_read(&intf->pm_usage_cnt) <= 0 &&
- !timer_pending(&udev->autosuspend.timer)) {
- queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
+ last_busy = udev->last_busy;
+ udev->last_busy = jiffies;
+ atomic_dec(&intf->pm_usage_cnt);
+ pm_runtime_put_noidle(&intf->dev);
+
+ if (!udev->autosuspend_disabled) {
+ /* Optimization: Don't schedule a delayed autosuspend if
+ * the timer is already running and the expiration time
+ * wouldn't change.
+ *
+ * We have to use the interface's timer. Attempts to
+ * schedule a suspend for the device would fail because
+ * the interface is still active.
+ */
+ if (intf->dev.power.timer_expires == 0 ||
+ round_jiffies_up(last_busy) !=
+ round_jiffies_up(jiffies)) {
+ status = pm_schedule_suspend(&intf->dev,
+ jiffies_to_msecs(
round_jiffies_up_relative(
- udev->autosuspend_delay));
+ udev->autosuspend_delay)));
}
}
- dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, atomic_read(&intf->pm_usage_cnt));
+ dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&intf->dev.power.usage_count),
+ status);
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
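The typical caller of usb_autopm_put_interface_async() is an URB completion handler running in atomic context, balancing the reference taken when the URB was submitted. A hypothetical completion handler:

#include <linux/usb.h>

struct my_dev {				/* hypothetical driver state */
	struct usb_interface *intf;
};

static void my_write_complete(struct urb *urb)
{
	struct my_dev *d = urb->context;

	/* balances the usb_autopm_get_interface_async() at submit time */
	usb_autopm_put_interface_async(d->intf);
}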
/**
+ * usb_autopm_put_interface_no_suspend - decrement a USB interface's PM-usage counter
+ * @intf: the usb_interface whose counter should be decremented
+ *
+ * This routine decrements @intf's usage counter but does not carry out an
+ * autosuspend.
+ *
+ * This routine can run in atomic context.
+ */
+void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+
+ udev->last_busy = jiffies;
+ atomic_dec(&intf->pm_usage_cnt);
+ pm_runtime_put_noidle(&intf->dev);
+}
+EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
+
+/**
* usb_autopm_get_interface - increment a USB interface's PM-usage counter
* @intf: the usb_interface whose counter should be incremented
*
@@ -1667,25 +1549,8 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
* or @intf is unbound. A typical example would be a character-device
* driver when its device file is opened.
*
- *
- * The routine increments @intf's usage counter. (However if the
- * autoresume fails then the counter is re-decremented.) So long as the
- * counter is greater than 0, autosuspend will not be allowed for @intf
- * or its usb_device. When the driver is finished using @intf it should
- * call usb_autopm_put_interface() to decrement the usage counter and
- * queue a delayed autosuspend request (if the counter is <= 0).
- *
- *
- * Note that @intf->pm_usage_cnt is owned by the interface driver. The
- * core will not change its value other than the increment and decrement
- * in usb_autopm_get_interface and usb_autopm_put_interface. The driver
- * may use this simple counter-oriented discipline or may set the value
- * any way it likes.
- *
- * Resume method calls generated by this routine can arrive at any time
- * while @intf is suspended. They are not protected by the usb_device's
- * lock but only by its pm_mutex. Drivers must provide their own
- * synchronization.
+ * @intf's usage counter is incremented to prevent subsequent autosuspends.
+ * However if the autoresume fails then the counter is re-decremented.
*
* This routine can run only in process context.
*/
@@ -1693,9 +1558,16 @@ int usb_autopm_get_interface(struct usb_interface *intf)
{
int status;
- status = usb_autopm_do_interface(intf, 1);
- dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, atomic_read(&intf->pm_usage_cnt));
+ status = pm_runtime_get_sync(&intf->dev);
+ if (status < 0)
+ pm_runtime_put_sync(&intf->dev);
+ else
+ atomic_inc(&intf->pm_usage_cnt);
+ dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&intf->dev.power.usage_count),
+ status);
+ if (status > 0)
+ status = 0;
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
@@ -1705,149 +1577,207 @@ EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
* @intf: the usb_interface whose counter should be incremented
*
* This routine does much the same thing as
- * usb_autopm_get_interface(): it increments @intf's usage counter and
- * queues an autoresume request if the result is > 0. The differences
- * are that it does not acquire the device's pm_mutex (callers must
- * handle all synchronization issues themselves), and it does not
- * autoresume the device directly (it only queues a request). After a
- * successful call, the device will generally not yet be resumed.
+ * usb_autopm_get_interface(): It increments @intf's usage counter and
+ * queues an autoresume request if the device is suspended. The
+ * differences are that it does not perform any synchronization (callers
+ * should hold a private lock and handle all synchronization issues
+ * themselves), and it does not autoresume the device directly (it only
+ * queues a request). After a successful call, the device may not yet be
+ * resumed.
*
* This routine can run in atomic context.
*/
int usb_autopm_get_interface_async(struct usb_interface *intf)
{
- struct usb_device *udev = interface_to_usbdev(intf);
- int status = 0;
+ int status = 0;
+ enum rpm_status s;
- if (intf->condition == USB_INTERFACE_UNBOUND)
- status = -ENODEV;
- else {
+ /* Don't request a resume unless the interface is already suspending
+ * or suspended. Doing so would force a running suspend timer to be
+ * cancelled.
+ */
+ pm_runtime_get_noresume(&intf->dev);
+ s = ACCESS_ONCE(intf->dev.power.runtime_status);
+ if (s == RPM_SUSPENDING || s == RPM_SUSPENDED)
+ status = pm_request_resume(&intf->dev);
+
+ if (status < 0 && status != -EINPROGRESS)
+ pm_runtime_put_noidle(&intf->dev);
+ else
atomic_inc(&intf->pm_usage_cnt);
- if (atomic_read(&intf->pm_usage_cnt) > 0 &&
- udev->state == USB_STATE_SUSPENDED)
- queue_work(ksuspend_usb_wq, &udev->autoresume);
- }
- dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, atomic_read(&intf->pm_usage_cnt));
+ dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&intf->dev.power.usage_count),
+ status);
+ if (status > 0)
+ status = 0;
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async);
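A caller of the async get typically wants to start I/O from atomic context: it records the pending work and relies on the driver's resume handler to submit it once the device is back up. A sketch under those assumptions (my_dev and io_wanted are illustrative):

#include <linux/usb.h>

struct my_dev {				/* hypothetical driver state */
	struct usb_interface *intf;
	int io_wanted;
};

static int my_start_io(struct my_dev *d)
{
	int rv;

	rv = usb_autopm_get_interface_async(d->intf);
	if (rv < 0)
		return rv;	/* the counter was already dropped */
	d->io_wanted = 1;	/* the resume handler submits the URB */
	return 0;
}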
-#else
-
-void usb_autosuspend_work(struct work_struct *work)
-{}
-
-void usb_autoresume_work(struct work_struct *work)
-{}
-
-#endif /* CONFIG_USB_SUSPEND */
-
/**
- * usb_external_suspend_device - external suspend of a USB device and its interfaces
- * @udev: the usb_device to suspend
- * @msg: Power Management message describing this state transition
+ * usb_autopm_get_interface_no_resume - increment a USB interface's PM-usage counter
+ * @intf: the usb_interface whose counter should be incremented
*
- * This routine handles external suspend requests: ones not generated
- * internally by a USB driver (autosuspend) but rather coming from the user
- * (via sysfs) or the PM core (system sleep). The suspend will be carried
- * out regardless of @udev's usage counter or those of its interfaces,
- * and regardless of whether or not remote wakeup is enabled. Of course,
- * interface drivers still have the option of failing the suspend (if
- * there are unsuspended children, for example).
+ * This routine increments @intf's usage counter but does not carry out an
+ * autoresume.
*
- * The caller must hold @udev's device lock.
+ * This routine can run in atomic context.
*/
-int usb_external_suspend_device(struct usb_device *udev, pm_message_t msg)
+void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
{
- int status;
+ struct usb_device *udev = interface_to_usbdev(intf);
- do_unbind_rebind(udev, DO_UNBIND);
- usb_pm_lock(udev);
- status = usb_suspend_both(udev, msg);
- usb_pm_unlock(udev);
- return status;
+ udev->last_busy = jiffies;
+ atomic_inc(&intf->pm_usage_cnt);
+ pm_runtime_get_noresume(&intf->dev);
}
+EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
-/**
- * usb_external_resume_device - external resume of a USB device and its interfaces
- * @udev: the usb_device to resume
- * @msg: Power Management message describing this state transition
- *
- * This routine handles external resume requests: ones not generated
- * internally by a USB driver (autoresume) but rather coming from the user
- * (via sysfs), the PM core (system resume), or the device itself (remote
- * wakeup). @udev's usage counter is unaffected.
- *
- * The caller must hold @udev's device lock.
- */
-int usb_external_resume_device(struct usb_device *udev, pm_message_t msg)
+/* Internal routine to check whether we may autosuspend a device. */
+static int autosuspend_check(struct usb_device *udev)
{
- int status;
+ int i;
+ struct usb_interface *intf;
+ unsigned long suspend_time, j;
- usb_pm_lock(udev);
- status = usb_resume_both(udev, msg);
- udev->last_busy = jiffies;
- usb_pm_unlock(udev);
- if (status == 0)
- do_unbind_rebind(udev, DO_REBIND);
+ /* Fail if autosuspend is disabled, or any interfaces are in use, or
+ * any interface drivers require remote wakeup but it isn't available.
+ */
+ udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+ if (udev->actconfig) {
+ for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
+ intf = udev->actconfig->interface[i];
- /* Now that the device is awake, we can start trying to autosuspend
- * it again. */
- if (status == 0)
- usb_try_autosuspend_device(udev);
- return status;
+ /* We don't need to check interfaces that are
+ * disabled for runtime PM. Either they are unbound
+ * or else their drivers don't support autosuspend
+ * and so they are permanently active.
+ */
+ if (intf->dev.power.disable_depth)
+ continue;
+ if (atomic_read(&intf->dev.power.usage_count) > 0)
+ return -EBUSY;
+ if (intf->needs_remote_wakeup &&
+ !udev->do_remote_wakeup) {
+ dev_dbg(&udev->dev, "remote wakeup needed "
+ "for autosuspend\n");
+ return -EOPNOTSUPP;
+ }
+
+			/* Don't allow autosuspend if the device will need
+			 * a reset-resume and any of its interface drivers
+			 * lacks reset-resume support or needs remote wakeup.
+			 */
+ if (udev->quirks & USB_QUIRK_RESET_RESUME) {
+ struct usb_driver *driver;
+
+ driver = to_usb_driver(intf->dev.driver);
+ if (!driver->reset_resume ||
+ intf->needs_remote_wakeup)
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+
+ /* If everything is okay but the device hasn't been idle for long
+ * enough, queue a delayed autosuspend request.
+ */
+ j = ACCESS_ONCE(jiffies);
+ suspend_time = udev->last_busy + udev->autosuspend_delay;
+ if (time_before(j, suspend_time)) {
+ pm_schedule_suspend(&udev->dev, jiffies_to_msecs(
+ round_jiffies_up_relative(suspend_time - j)));
+ return -EAGAIN;
+ }
+ return 0;
}
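The decision above keys off udev->last_busy, so code that wants to postpone an otherwise-permitted autosuspend simply refreshes that timestamp. A minimal sketch using the existing usb_mark_last_busy() helper (the function name here is hypothetical):

#include <linux/usb.h>

static void example_note_activity(struct usb_device *udev)
{
	/* Sets udev->last_busy = jiffies, so the next call to
	 * autosuspend_check() re-arms the delayed suspend instead
	 * of allowing an immediate one.
	 */
	usb_mark_last_busy(udev);
}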
-int usb_suspend(struct device *dev, pm_message_t msg)
+static int usb_runtime_suspend(struct device *dev)
{
- struct usb_device *udev;
-
- udev = to_usb_device(dev);
+ int status = 0;
- /* If udev is already suspended, we can skip this suspend and
- * we should also skip the upcoming system resume. High-speed
- * root hubs are an exception; they need to resume whenever the
- * system wakes up in order for USB-PERSIST port handover to work
- * properly.
+ /* A USB device can be suspended if it passes the various autosuspend
+ * checks. Runtime suspend for a USB device means suspending all the
+ * interfaces and then the device itself.
*/
- if (udev->state == USB_STATE_SUSPENDED) {
- if (udev->parent || udev->speed != USB_SPEED_HIGH)
- udev->skip_sys_resume = 1;
- return 0;
+ if (is_usb_device(dev)) {
+ struct usb_device *udev = to_usb_device(dev);
+
+ if (autosuspend_check(udev) != 0)
+ return -EAGAIN;
+
+ status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
+
+ /* If an interface fails the suspend, adjust the last_busy
+ * time so that we don't get another suspend attempt right
+ * away.
+ */
+ if (status) {
+ udev->last_busy = jiffies +
+ (udev->autosuspend_delay == 0 ?
+ HZ/2 : 0);
+ }
+
+ /* Prevent the parent from suspending immediately after */
+ else if (udev->parent) {
+ udev->parent->last_busy = jiffies;
+ }
}
- udev->skip_sys_resume = 0;
- return usb_external_suspend_device(udev, msg);
+ /* Runtime suspend for a USB interface doesn't mean anything. */
+ return status;
}
-int usb_resume(struct device *dev, pm_message_t msg)
+static int usb_runtime_resume(struct device *dev)
{
- struct usb_device *udev;
- int status;
+ /* Runtime resume for a USB device means resuming both the device
+ * and all its interfaces.
+ */
+ if (is_usb_device(dev)) {
+ struct usb_device *udev = to_usb_device(dev);
+ int status;
- udev = to_usb_device(dev);
+ status = usb_resume_both(udev, PMSG_AUTO_RESUME);
+ udev->last_busy = jiffies;
+ return status;
+ }
- /* If udev->skip_sys_resume is set then udev was already suspended
- * when the system sleep started, so we don't want to resume it
- * during this system wakeup.
- */
- if (udev->skip_sys_resume)
- return 0;
- status = usb_external_resume_device(udev, msg);
+ /* Runtime resume for a USB interface doesn't mean anything. */
+ return 0;
+}
- /* Avoid PM error messages for devices disconnected while suspended
- * as we'll display regular disconnect messages just a bit later.
+static int usb_runtime_idle(struct device *dev)
+{
+ /* An idle USB device can be suspended if it passes the various
+ * autosuspend checks. An idle interface can be suspended at
+ * any time.
*/
- if (status == -ENODEV)
- return 0;
- return status;
+ if (is_usb_device(dev)) {
+ struct usb_device *udev = to_usb_device(dev);
+
+ if (autosuspend_check(udev) != 0)
+ return 0;
+ }
+
+ pm_runtime_suspend(dev);
+ return 0;
}
-#endif /* CONFIG_PM */
+static struct dev_pm_ops usb_bus_pm_ops = {
+ .runtime_suspend = usb_runtime_suspend,
+ .runtime_resume = usb_runtime_resume,
+ .runtime_idle = usb_runtime_idle,
+};
+
+#else
+
+#define usb_bus_pm_ops (*(struct dev_pm_ops *) NULL)
+
+#endif /* CONFIG_USB_SUSPEND */
struct bus_type usb_bus_type = {
.name = "usb",
.match = usb_device_match,
.uevent = usb_uevent,
+ .pm = &usb_bus_pm_ops,
};
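With runtime-PM callbacks registered at the bus level, the PM core now drives autosuspend instead of a private workqueue. A sketch of the driver-visible side (hypothetical probe code, mirroring what hub_probe() does for hubs):

#include <linux/usb.h>
#include <linux/pm_runtime.h>

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);

	/* Opt the device in; the PM core will later invoke
	 * usb_runtime_idle()/usb_runtime_suspend() through
	 * usb_bus_pm_ops when the device goes idle.
	 */
	usb_enable_autosuspend(udev);
	return 0;
}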
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index bfc6c2e..c3536f1 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -34,7 +34,6 @@ static int usb_open(struct inode * inode, struct file * file)
int err = -ENODEV;
const struct file_operations *old_fops, *new_fops = NULL;
- lock_kernel();
down_read(&minor_rwsem);
c = usb_minors[minor];
@@ -53,7 +52,6 @@ static int usb_open(struct inode * inode, struct file * file)
fops_put(old_fops);
done:
up_read(&minor_rwsem);
- unlock_kernel();
return err;
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 80995ef..2f8cedd 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -39,6 +39,7 @@
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
#include <linux/usb.h>
@@ -141,7 +142,7 @@ static const u8 usb3_rh_dev_descriptor[18] = {
0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */
0x6b, 0x1d, /* __le16 idVendor; Linux Foundation */
- 0x02, 0x00, /* __le16 idProduct; device 0x0002 */
+ 0x03, 0x00, /* __le16 idProduct; device 0x0003 */
KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */
0x03, /* __u8 iManufacturer; */
@@ -1670,11 +1671,16 @@ int usb_hcd_alloc_bandwidth(struct usb_device *udev,
}
}
for (i = 0; i < num_intfs; ++i) {
+ struct usb_host_interface *first_alt;
+ int iface_num;
+
+ first_alt = &new_config->intf_cache[i]->altsetting[0];
+ iface_num = first_alt->desc.bInterfaceNumber;
/* Set up endpoints for alternate interface setting 0 */
- alt = usb_find_alt_setting(new_config, i, 0);
+ alt = usb_find_alt_setting(new_config, iface_num, 0);
if (!alt)
/* No alt setting 0? Pick the first setting. */
- alt = &new_config->intf_cache[i]->altsetting[0];
+ alt = first_alt;
for (j = 0; j < alt->desc.bNumEndpoints; j++) {
ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]);
@@ -1853,6 +1859,10 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
return status;
}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_USB_SUSPEND
+
/* Workqueue routine for root-hub remote wakeup */
static void hcd_resume_work(struct work_struct *work)
{
@@ -1860,8 +1870,7 @@ static void hcd_resume_work(struct work_struct *work)
struct usb_device *udev = hcd->self.root_hub;
usb_lock_device(udev);
- usb_mark_last_busy(udev);
- usb_external_resume_device(udev, PMSG_REMOTE_RESUME);
+ usb_remote_wakeup(udev);
usb_unlock_device(udev);
}
@@ -1880,12 +1889,12 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
spin_lock_irqsave (&hcd_root_hub_lock, flags);
if (hcd->rh_registered)
- queue_work(ksuspend_usb_wq, &hcd->wakeup_work);
+ queue_work(pm_wq, &hcd->wakeup_work);
spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
}
EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub);
-#endif
+#endif /* CONFIG_USB_SUSPEND */
/*-------------------------------------------------------------------------*/
@@ -2030,7 +2039,7 @@ struct usb_hcd *usb_create_hcd (const struct hc_driver *driver,
init_timer(&hcd->rh_timer);
hcd->rh_timer.function = rh_timer_func;
hcd->rh_timer.data = (unsigned long) hcd;
-#ifdef CONFIG_PM
+#ifdef CONFIG_USB_SUSPEND
INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
#endif
mutex_init(&hcd->bandwidth_mutex);
@@ -2230,7 +2239,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
hcd->rh_registered = 0;
spin_unlock_irq (&hcd_root_hub_lock);
-#ifdef CONFIG_PM
+#ifdef CONFIG_USB_SUSPEND
cancel_work_sync(&hcd->wakeup_work);
#endif
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index bbe2b92..a3cdb09 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -80,7 +80,7 @@ struct usb_hcd {
struct timer_list rh_timer; /* drives root-hub polling */
struct urb *status_urb; /* the current status urb */
-#ifdef CONFIG_PM
+#ifdef CONFIG_USB_SUSPEND
struct work_struct wakeup_work; /* for remote wakeup */
#endif
@@ -248,7 +248,7 @@ struct hc_driver {
/* xHCI specific functions */
/* Called by usb_alloc_dev to alloc HC device structures */
int (*alloc_dev)(struct usb_hcd *, struct usb_device *);
- /* Called by usb_release_dev to free HC device structures */
+ /* Called by usb_disconnect to free HC device structures */
void (*free_dev)(struct usb_hcd *, struct usb_device *);
/* Bandwidth computation functions */
@@ -286,6 +286,7 @@ struct hc_driver {
*/
int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
+ int (*reset_device)(struct usb_hcd *, struct usb_device *);
};
extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
@@ -463,16 +464,20 @@ extern int usb_find_interface_driver(struct usb_device *dev,
#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN))
#ifdef CONFIG_PM
-extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
extern void usb_root_hub_lost_power(struct usb_device *rhdev);
extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg);
extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg);
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_USB_SUSPEND
+extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
#else
static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd)
{
return;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_USB_SUSPEND */
+
/*
* USB device fs stuff
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 20ecb4c..0940ccd 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -22,6 +22,7 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
+#include <linux/pm_runtime.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -71,7 +72,6 @@ struct usb_hub {
unsigned mA_per_port; /* current for each child */
- unsigned init_done:1;
unsigned limited_power:1;
unsigned quiescing:1;
unsigned disconnected:1;
@@ -820,7 +820,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
}
init3:
hub->quiescing = 0;
- hub->init_done = 1;
status = usb_submit_urb(hub->urb, GFP_NOIO);
if (status < 0)
@@ -861,11 +860,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
int i;
cancel_delayed_work_sync(&hub->init_work);
- if (!hub->init_done) {
- hub->init_done = 1;
- usb_autopm_put_interface_no_suspend(
- to_usb_interface(hub->intfdev));
- }
/* khubd and related activity won't re-trigger */
hub->quiescing = 1;
@@ -1224,6 +1218,9 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
desc = intf->cur_altsetting;
hdev = interface_to_usbdev(intf);
+ /* Hubs have proper suspend/resume support */
+ usb_enable_autosuspend(hdev);
+
if (hdev->level == MAX_TOPO_LEVEL) {
dev_err(&intf->dev,
"Unsupported bus topology: hub nested too deep\n");
@@ -1402,10 +1399,8 @@ static void recursively_mark_NOTATTACHED(struct usb_device *udev)
if (udev->children[i])
recursively_mark_NOTATTACHED(udev->children[i]);
}
- if (udev->state == USB_STATE_SUSPENDED) {
- udev->discon_suspended = 1;
+ if (udev->state == USB_STATE_SUSPENDED)
udev->active_duration -= jiffies;
- }
udev->state = USB_STATE_NOTATTACHED;
}
@@ -1448,11 +1443,11 @@ void usb_set_device_state(struct usb_device *udev,
|| new_state == USB_STATE_SUSPENDED)
; /* No change to wakeup settings */
else if (new_state == USB_STATE_CONFIGURED)
- device_init_wakeup(&udev->dev,
+ device_set_wakeup_capable(&udev->dev,
(udev->actconfig->desc.bmAttributes
& USB_CONFIG_ATT_WAKEUP));
else
- device_init_wakeup(&udev->dev, 0);
+ device_set_wakeup_capable(&udev->dev, 0);
}
if (udev->state == USB_STATE_SUSPENDED &&
new_state != USB_STATE_SUSPENDED)
@@ -1529,31 +1524,15 @@ static void update_address(struct usb_device *udev, int devnum)
udev->devnum = devnum;
}
-#ifdef CONFIG_USB_SUSPEND
-
-static void usb_stop_pm(struct usb_device *udev)
+static void hub_free_dev(struct usb_device *udev)
{
- /* Synchronize with the ksuspend thread to prevent any more
- * autosuspend requests from being submitted, and decrement
- * the parent's count of unsuspended children.
- */
- usb_pm_lock(udev);
- if (udev->parent && !udev->discon_suspended)
- usb_autosuspend_device(udev->parent);
- usb_pm_unlock(udev);
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
- /* Stop any autosuspend or autoresume requests already submitted */
- cancel_delayed_work_sync(&udev->autosuspend);
- cancel_work_sync(&udev->autoresume);
+ /* Root hubs aren't real devices, so don't free HCD resources */
+ if (hcd->driver->free_dev && udev->parent)
+ hcd->driver->free_dev(hcd, udev);
}
-#else
-
-static inline void usb_stop_pm(struct usb_device *udev)
-{ }
-
-#endif
-
/**
* usb_disconnect - disconnect a device (usbcore-internal)
* @pdev: pointer to device being disconnected
@@ -1622,7 +1601,7 @@ void usb_disconnect(struct usb_device **pdev)
*pdev = NULL;
spin_unlock_irq(&device_state_lock);
- usb_stop_pm(udev);
+ hub_free_dev(udev);
put_device(&udev->dev);
}
@@ -1799,9 +1778,18 @@ int usb_new_device(struct usb_device *udev)
{
int err;
- /* Increment the parent's count of unsuspended children */
- if (udev->parent)
- usb_autoresume_device(udev->parent);
+ if (udev->parent) {
+		/* Initialize non-root-hub device wakeup to disabled;
+		 * device (un)configuration controls wakeup capable,
+		 * sysfs power/wakeup controls wakeup enabled/disabled.
+		 */
+ device_init_wakeup(&udev->dev, 0);
+ device_set_wakeup_enable(&udev->dev, 1);
+ }
+
+ /* Tell the runtime-PM framework the device is active */
+ pm_runtime_set_active(&udev->dev);
+ pm_runtime_enable(&udev->dev);
usb_detect_quirks(udev);
err = usb_enumerate_device(udev); /* Read descriptors */
@@ -1833,7 +1821,8 @@ int usb_new_device(struct usb_device *udev)
fail:
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
- usb_stop_pm(udev);
+ pm_runtime_disable(&udev->dev);
+ pm_runtime_set_suspended(&udev->dev);
return err;
}
@@ -1982,7 +1971,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (!(portstatus & USB_PORT_STAT_RESET) &&
(portstatus & USB_PORT_STAT_ENABLE)) {
if (hub_is_wusb(hub))
- udev->speed = USB_SPEED_VARIABLE;
+ udev->speed = USB_SPEED_WIRELESS;
else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
udev->speed = USB_SPEED_HIGH;
else if (portstatus & USB_PORT_STAT_LOW_SPEED)
@@ -2008,7 +1997,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, unsigned int delay)
{
int i, status;
+ struct usb_hcd *hcd;
+ hcd = bus_to_hcd(udev->bus);
/* Block EHCI CF initialization during the port reset.
* Some companion controllers don't like it when they mix.
*/
@@ -2036,6 +2027,14 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
/* TRSTRCY = 10 ms; plus some extra */
msleep(10 + 40);
update_address(udev, 0);
+ if (hcd->driver->reset_device) {
+ status = hcd->driver->reset_device(hcd, udev);
+ if (status < 0) {
+ dev_err(&udev->dev, "Cannot reset "
+ "HCD device state\n");
+ break;
+ }
+ }
/* FALL THROUGH */
case -ENOTCONN:
case -ENODEV:
@@ -2381,14 +2380,17 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
}
/* caller has locked udev */
-static int remote_wakeup(struct usb_device *udev)
+int usb_remote_wakeup(struct usb_device *udev)
{
int status = 0;
if (udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
- usb_mark_last_busy(udev);
- status = usb_external_resume_device(udev, PMSG_REMOTE_RESUME);
+ status = usb_autoresume_device(udev);
+ if (status == 0) {
+			/* Let the interface drivers handle the wakeup event,
+			 * then drop the usage reference taken by the
+			 * autoresume so the device can suspend again. */
+ usb_autosuspend_device(udev);
+ }
}
return status;
}
@@ -2425,11 +2427,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
return status;
}
-static inline int remote_wakeup(struct usb_device *udev)
-{
- return 0;
-}
-
#endif
static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
@@ -2496,11 +2493,6 @@ EXPORT_SYMBOL_GPL(usb_root_hub_lost_power);
#else /* CONFIG_PM */
-static inline int remote_wakeup(struct usb_device *udev)
-{
- return 0;
-}
-
#define hub_suspend NULL
#define hub_resume NULL
#define hub_reset_resume NULL
@@ -2645,14 +2637,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
mutex_lock(&usb_address0_mutex);
- if ((hcd->driver->flags & HCD_USB3) && udev->config) {
- /* FIXME this will need special handling by the xHCI driver. */
- dev_dbg(&udev->dev,
- "xHCI reset of configured device "
- "not supported yet.\n");
- retval = -EINVAL;
- goto fail;
- } else if (!udev->config && oldspeed == USB_SPEED_SUPER) {
+ if (!udev->config && oldspeed == USB_SPEED_SUPER) {
/* Don't reset USB 3.0 devices during an initial setup */
usb_set_device_state(udev, USB_STATE_DEFAULT);
} else {
@@ -2678,7 +2663,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
*/
switch (udev->speed) {
case USB_SPEED_SUPER:
- case USB_SPEED_VARIABLE: /* fixed at 512 */
+ case USB_SPEED_WIRELESS: /* fixed at 512 */
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
break;
case USB_SPEED_HIGH: /* fixed at 64 */
@@ -2706,7 +2691,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
case USB_SPEED_SUPER:
speed = "super";
break;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
speed = "variable";
type = "Wireless ";
break;
@@ -3006,7 +2991,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
/* For a suspended device, treat this as a
* remote wakeup event.
*/
- status = remote_wakeup(udev);
+ status = usb_remote_wakeup(udev);
#endif
} else {
@@ -3192,6 +3177,7 @@ loop_disable:
loop:
usb_ep0_reinit(udev);
release_address(udev);
+ hub_free_dev(udev);
usb_put_dev(udev);
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break;
@@ -3259,7 +3245,7 @@ static void hub_events(void)
* disconnected while waiting for the lock to succeed. */
usb_lock_device(hdev);
if (unlikely(hub->disconnected))
- goto loop2;
+ goto loop_disconnected;
/* If the hub has died, clean up after it */
if (hdev->state == USB_STATE_NOTATTACHED) {
@@ -3352,7 +3338,7 @@ static void hub_events(void)
msleep(10);
usb_lock_device(udev);
- ret = remote_wakeup(hdev->
+ ret = usb_remote_wakeup(hdev->
children[i-1]);
usb_unlock_device(udev);
if (ret < 0)
@@ -3419,7 +3405,7 @@ static void hub_events(void)
* kick_khubd() and allow autosuspend.
*/
usb_autopm_put_interface(intf);
- loop2:
+ loop_disconnected:
usb_unlock_device(hdev);
kref_put(&hub->kref, hub_release);
@@ -3446,7 +3432,7 @@ static int hub_thread(void *__unused)
return 0;
}
-static struct usb_device_id hub_id_table [] = {
+static const struct usb_device_id hub_id_table[] = {
{ .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
.bDeviceClass = USB_CLASS_HUB},
{ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index df73574..cd22027 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1316,7 +1316,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
alt = usb_altnum_to_altsetting(iface, alternate);
if (!alt) {
- dev_warn(&dev->dev, "selecting invalid altsetting %d",
+ dev_warn(&dev->dev, "selecting invalid altsetting %d\n",
alternate);
return -EINVAL;
}
@@ -1471,7 +1471,7 @@ int usb_reset_configuration(struct usb_device *dev)
/* If not, reinstate the old alternate settings */
if (retval < 0) {
reset_old_alts:
- for (; i >= 0; i--) {
+ for (i--; i >= 0; i--) {
struct usb_interface *intf = config->interface[i];
struct usb_host_interface *alt;
@@ -1843,7 +1843,6 @@ free_interfaces:
intf->dev.dma_mask = dev->dev.dma_mask;
INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
device_initialize(&intf->dev);
- mark_quiesced(intf);
dev_set_name(&intf->dev, "%d-%s:%d.%d",
dev->bus->busnum, dev->devpath,
configuration, alt->desc.bInterfaceNumber);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index ab93918..f073c5c 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -103,10 +103,19 @@ void usb_detect_quirks(struct usb_device *udev)
dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
udev->quirks);
- /* By default, disable autosuspend for all non-hubs */
#ifdef CONFIG_USB_SUSPEND
- if (udev->descriptor.bDeviceClass != USB_CLASS_HUB)
- udev->autosuspend_disabled = 1;
+
+ /* By default, disable autosuspend for all devices. The hub driver
+ * will enable it for hubs.
+ */
+ usb_disable_autosuspend(udev);
+
+ /* Autosuspend can also be disabled if the initial autosuspend_delay
+ * is negative.
+ */
+ if (udev->autosuspend_delay < 0)
+ usb_autoresume_device(udev);
+
#endif
/* For the present, all devices default to USB-PERSIST enabled */
@@ -120,6 +129,7 @@ void usb_detect_quirks(struct usb_device *udev)
* for all devices. It will affect things like hub resets
* and EMF-related port disables.
*/
- udev->persist_enabled = 1;
+ if (!(udev->quirks & USB_QUIRK_RESET_MORPHS))
+ udev->persist_enabled = 1;
#endif /* CONFIG_PM */
}
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 5f3908f..43c002e 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -115,7 +115,7 @@ show_speed(struct device *dev, struct device_attribute *attr, char *buf)
case USB_SPEED_HIGH:
speed = "480";
break;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
speed = "480";
break;
case USB_SPEED_SUPER:
@@ -191,6 +191,36 @@ show_quirks(struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR(quirks, S_IRUGO, show_quirks, NULL);
static ssize_t
+show_avoid_reset_quirk(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct usb_device *udev;
+
+ udev = to_usb_device(dev);
+ return sprintf(buf, "%d\n", !!(udev->quirks & USB_QUIRK_RESET_MORPHS));
+}
+
+static ssize_t
+set_avoid_reset_quirk(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_device *udev = to_usb_device(dev);
+ int config;
+
+ if (sscanf(buf, "%d", &config) != 1 || config < 0 || config > 1)
+ return -EINVAL;
+ usb_lock_device(udev);
+ if (config)
+ udev->quirks |= USB_QUIRK_RESET_MORPHS;
+ else
+ udev->quirks &= ~USB_QUIRK_RESET_MORPHS;
+ usb_unlock_device(udev);
+ return count;
+}
+
+static DEVICE_ATTR(avoid_reset_quirk, S_IRUGO | S_IWUSR,
+ show_avoid_reset_quirk, set_avoid_reset_quirk);
+
+static ssize_t
show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_device *udev;
@@ -226,9 +256,10 @@ set_persist(struct device *dev, struct device_attribute *attr,
if (sscanf(buf, "%d", &value) != 1)
return -EINVAL;
- usb_pm_lock(udev);
+
+ usb_lock_device(udev);
udev->persist_enabled = !!value;
- usb_pm_unlock(udev);
+ usb_unlock_device(udev);
return count;
}
@@ -315,20 +346,34 @@ set_autosuspend(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
- int value;
+ int value, old_delay;
+ int rc;
if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/HZ ||
value <= - INT_MAX/HZ)
return -EINVAL;
value *= HZ;
+ usb_lock_device(udev);
+ old_delay = udev->autosuspend_delay;
udev->autosuspend_delay = value;
- if (value >= 0)
- usb_try_autosuspend_device(udev);
- else {
- if (usb_autoresume_device(udev) == 0)
+
+ if (old_delay < 0) { /* Autosuspend wasn't allowed */
+ if (value >= 0)
usb_autosuspend_device(udev);
+ } else { /* Autosuspend was allowed */
+ if (value < 0) {
+ rc = usb_autoresume_device(udev);
+ if (rc < 0) {
+ count = rc;
+ udev->autosuspend_delay = old_delay;
+ }
+ } else {
+ usb_try_autosuspend_device(udev);
+ }
}
+
+ usb_unlock_device(udev);
return count;
}
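A hedged restatement of the transition logic above: a negative delay is backed by an extra usage reference (taken via usb_autoresume_device()) that pins the device resumed, so only sign changes of the delay may touch that reference. The helper name is hypothetical and error handling is omitted:

static void example_delay_transition(struct usb_device *udev,
				     int old_delay, int new_delay)
{
	if (old_delay < 0) {			/* was pinned resumed */
		if (new_delay >= 0)
			usb_autosuspend_device(udev);	/* drop the pin */
	} else {				/* autosuspend was allowed */
		if (new_delay < 0)
			usb_autoresume_device(udev);	/* take the pin */
		else
			usb_try_autosuspend_device(udev); /* re-evaluate */
	}
}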
@@ -356,34 +401,25 @@ set_level(struct device *dev, struct device_attribute *attr,
struct usb_device *udev = to_usb_device(dev);
int len = count;
char *cp;
- int rc = 0;
- int old_autosuspend_disabled;
+ int rc;
cp = memchr(buf, '\n', count);
if (cp)
len = cp - buf;
usb_lock_device(udev);
- old_autosuspend_disabled = udev->autosuspend_disabled;
- /* Setting the flags without calling usb_pm_lock is a subject to
- * races, but who cares...
- */
if (len == sizeof on_string - 1 &&
- strncmp(buf, on_string, len) == 0) {
- udev->autosuspend_disabled = 1;
- rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
+ strncmp(buf, on_string, len) == 0)
+ rc = usb_disable_autosuspend(udev);
- } else if (len == sizeof auto_string - 1 &&
- strncmp(buf, auto_string, len) == 0) {
- udev->autosuspend_disabled = 0;
- rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
+ else if (len == sizeof auto_string - 1 &&
+ strncmp(buf, auto_string, len) == 0)
+ rc = usb_enable_autosuspend(udev);
- } else
+ else
rc = -EINVAL;
- if (rc)
- udev->autosuspend_disabled = old_autosuspend_disabled;
usb_unlock_device(udev);
return (rc < 0 ? rc : count);
}
@@ -558,6 +594,7 @@ static struct attribute *dev_attrs[] = {
&dev_attr_version.attr,
&dev_attr_maxchild.attr,
&dev_attr_quirks.attr,
+ &dev_attr_avoid_reset_quirk.attr,
&dev_attr_authorized.attr,
&dev_attr_remove.attr,
NULL,
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index e7cae13..2708056 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -387,6 +387,13 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
unsigned int orig_flags = urb->transfer_flags;
unsigned int allowed;
+ static int pipetypes[4] = {
+ PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
+ };
+
+ /* Check that the pipe's type matches the endpoint's type */
+ if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
+ return -EPIPE; /* The most suitable error code :-) */
/* enforce simple/standard policy */
allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
@@ -430,7 +437,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
case USB_ENDPOINT_XFER_INT:
/* too small? */
switch (dev->speed) {
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
if (urb->interval < 6)
return -EINVAL;
break;
@@ -446,7 +453,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
if (urb->interval > (1 << 15))
return -EINVAL;
max = 1 << 15;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
if (urb->interval > 16)
return -EINVAL;
break;
@@ -473,7 +480,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
default:
return -EINVAL;
}
- if (dev->speed != USB_SPEED_VARIABLE) {
+ if (dev->speed != USB_SPEED_WIRELESS) {
/* Round down to a power of 2, no more than max */
urb->interval = min(max, 1 << ilog2(urb->interval));
}
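An illustration of the new early check (hypothetical driver code): a URB whose pipe was built with a bulk macro must target a bulk endpoint, otherwise usb_submit_urb() now fails with -EPIPE before any further validation:

#include <linux/usb.h>

static int example_submit_bulk(struct usb_device *udev, void *buf, int len,
			       usb_complete_t done, void *ctx)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);

	if (!urb)
		return -ENOMEM;
	/* usb_pipetype() of this pipe is PIPE_BULK; if endpoint 1 OUT
	 * were actually interrupt or isochronous, the check above
	 * would reject the submission.
	 */
	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 1),
			  buf, len, done, ctx);
	return usb_submit_urb(urb, GFP_KERNEL);
}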
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 0daff0d..1297e9b 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -49,9 +49,6 @@ const char *usbcore_name = "usbcore";
static int nousb; /* Disable USB when built into kernel image */
-/* Workqueue for autosuspend and for remote wakeup of root hubs */
-struct workqueue_struct *ksuspend_usb_wq;
-
#ifdef CONFIG_USB_SUSPEND
static int usb_autosuspend_delay = 2; /* Default delay value,
* in seconds */
@@ -228,9 +225,6 @@ static void usb_release_dev(struct device *dev)
hcd = bus_to_hcd(udev->bus);
usb_destroy_configuration(udev);
- /* Root hubs aren't real devices, so don't free HCD resources */
- if (hcd->driver->free_dev && udev->parent)
- hcd->driver->free_dev(hcd, udev);
usb_put_hcd(hcd);
kfree(udev->product);
kfree(udev->manufacturer);
@@ -264,23 +258,6 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
#ifdef CONFIG_PM
-static int ksuspend_usb_init(void)
-{
- /* This workqueue is supposed to be both freezable and
- * singlethreaded. Its job doesn't justify running on more
- * than one CPU.
- */
- ksuspend_usb_wq = create_freezeable_workqueue("ksuspend_usbd");
- if (!ksuspend_usb_wq)
- return -ENOMEM;
- return 0;
-}
-
-static void ksuspend_usb_cleanup(void)
-{
- destroy_workqueue(ksuspend_usb_wq);
-}
-
/* USB device Power-Management thunks.
* There's no need to distinguish here between quiescing a USB device
* and powering it down; the generic_suspend() routine takes care of
@@ -296,7 +273,7 @@ static int usb_dev_prepare(struct device *dev)
static void usb_dev_complete(struct device *dev)
{
/* Currently used only for rebinding interfaces */
- usb_resume(dev, PMSG_RESUME); /* Message event is meaningless */
+ usb_resume(dev, PMSG_ON); /* FIXME: change to PMSG_COMPLETE */
}
static int usb_dev_suspend(struct device *dev)
@@ -342,9 +319,7 @@ static const struct dev_pm_ops usb_device_pm_ops = {
#else
-#define ksuspend_usb_init() 0
-#define ksuspend_usb_cleanup() do {} while (0)
-#define usb_device_pm_ops (*(struct dev_pm_ops *)0)
+#define usb_device_pm_ops (*(struct dev_pm_ops *) NULL)
#endif /* CONFIG_PM */
@@ -472,9 +447,6 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
INIT_LIST_HEAD(&dev->filelist);
#ifdef CONFIG_PM
- mutex_init(&dev->pm_mutex);
- INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
- INIT_WORK(&dev->autoresume, usb_autoresume_work);
dev->autosuspend_delay = usb_autosuspend_delay * HZ;
dev->connect_time = jiffies;
dev->active_duration = -jiffies;
@@ -1117,9 +1089,6 @@ static int __init usb_init(void)
if (retval)
goto out;
- retval = ksuspend_usb_init();
- if (retval)
- goto out;
retval = bus_register(&usb_bus_type);
if (retval)
goto bus_register_failed;
@@ -1159,7 +1128,7 @@ major_init_failed:
bus_notifier_failed:
bus_unregister(&usb_bus_type);
bus_register_failed:
- ksuspend_usb_cleanup();
+ usb_debugfs_cleanup();
out:
return retval;
}
@@ -1181,7 +1150,6 @@ static void __exit usb_exit(void)
usb_hub_cleanup();
bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
bus_unregister(&usb_bus_type);
- ksuspend_usb_cleanup();
usb_debugfs_cleanup();
}
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 4c36c7f..cd88220 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -55,24 +55,8 @@ extern void usb_major_cleanup(void);
extern int usb_suspend(struct device *dev, pm_message_t msg);
extern int usb_resume(struct device *dev, pm_message_t msg);
-extern void usb_autosuspend_work(struct work_struct *work);
-extern void usb_autoresume_work(struct work_struct *work);
extern int usb_port_suspend(struct usb_device *dev, pm_message_t msg);
extern int usb_port_resume(struct usb_device *dev, pm_message_t msg);
-extern int usb_external_suspend_device(struct usb_device *udev,
- pm_message_t msg);
-extern int usb_external_resume_device(struct usb_device *udev,
- pm_message_t msg);
-
-static inline void usb_pm_lock(struct usb_device *udev)
-{
- mutex_lock_nested(&udev->pm_mutex, udev->level);
-}
-
-static inline void usb_pm_unlock(struct usb_device *udev)
-{
- mutex_unlock(&udev->pm_mutex);
-}
#else
@@ -86,9 +70,6 @@ static inline int usb_port_resume(struct usb_device *udev, pm_message_t msg)
return 0;
}
-static inline void usb_pm_lock(struct usb_device *udev) {}
-static inline void usb_pm_unlock(struct usb_device *udev) {}
-
#endif
#ifdef CONFIG_USB_SUSPEND
@@ -96,6 +77,7 @@ static inline void usb_pm_unlock(struct usb_device *udev) {}
extern void usb_autosuspend_device(struct usb_device *udev);
extern void usb_try_autosuspend_device(struct usb_device *udev);
extern int usb_autoresume_device(struct usb_device *udev);
+extern int usb_remote_wakeup(struct usb_device *dev);
#else
@@ -106,9 +88,13 @@ static inline int usb_autoresume_device(struct usb_device *udev)
return 0;
}
+static inline int usb_remote_wakeup(struct usb_device *udev)
+{
+ return 0;
+}
+
#endif
-extern struct workqueue_struct *ksuspend_usb_wq;
extern struct bus_type usb_bus_type;
extern struct device_type usb_device_type;
extern struct device_type usb_if_device_type;
@@ -138,23 +124,6 @@ static inline int is_usb_device_driver(struct device_driver *drv)
for_devices;
}
-/* Interfaces and their "power state" are owned by usbcore */
-
-static inline void mark_active(struct usb_interface *f)
-{
- f->is_active = 1;
-}
-
-static inline void mark_quiesced(struct usb_interface *f)
-{
- f->is_active = 0;
-}
-
-static inline int is_active(const struct usb_interface *f)
-{
- return f->is_active;
-}
-
/* for labeling diagnostics */
extern const char *usbcore_name;
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 2958a12..6e98a36 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -66,8 +66,6 @@ static struct ehci_dev ehci_dev;
#define USB_DEBUG_DEVNUM 127
-#define DBGP_DATA_TOGGLE 0x8800
-
#ifdef DBGP_DEBUG
#define dbgp_printk printk
static void dbgp_ehci_status(char *str)
@@ -88,11 +86,6 @@ static inline void dbgp_ehci_status(char *str) { }
static inline void dbgp_printk(const char *fmt, ...) { }
#endif
-static inline u32 dbgp_pid_update(u32 x, u32 tok)
-{
- return ((x ^ DBGP_DATA_TOGGLE) & 0xffff00) | (tok & 0xff);
-}
-
static inline u32 dbgp_len_update(u32 x, u32 len)
{
return (x & ~0x0f) | (len & 0x0f);
@@ -136,6 +129,19 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
#define DBGP_MAX_PACKET 8
#define DBGP_TIMEOUT (250 * 1000)
+#define DBGP_LOOPS 1000
+
+static inline u32 dbgp_pid_write_update(u32 x, u32 tok)
+{
+ static int data0 = USB_PID_DATA1;
+ data0 ^= USB_PID_DATA_TOGGLE;
+ return (x & 0xffff0000) | (data0 << 8) | (tok & 0xff);
+}
+
+static inline u32 dbgp_pid_read_update(u32 x, u32 tok)
+{
+ return (x & 0xffff0000) | (USB_PID_DATA0 << 8) | (tok & 0xff);
+}
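The practical effect of the split helpers (illustrative only, not executable as-is): successive writes alternate the data PID starting with DATA0, while reads always prime the register with DATA0 and rely on the retry loop to resynchronize:

	u32 pids = readl(&ehci_debug->pids);

	pids = dbgp_pid_write_update(pids, USB_PID_OUT);	/* DATA0 */
	pids = dbgp_pid_write_update(pids, USB_PID_OUT);	/* DATA1 */
	pids = dbgp_pid_read_update(pids, USB_PID_IN);		/* always DATA0 */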
static int dbgp_wait_until_complete(void)
{
@@ -180,7 +186,7 @@ static int dbgp_wait_until_done(unsigned ctrl)
{
u32 pids, lpid;
int ret;
- int loop = 3;
+ int loop = DBGP_LOOPS;
retry:
writel(ctrl | DBGP_GO, &ehci_debug->control);
@@ -197,6 +203,8 @@ retry:
*/
if (ret == -DBGP_TIMEOUT && !dbgp_not_safe)
dbgp_not_safe = 1;
+ if (ret == -DBGP_ERR_BAD && --loop > 0)
+ goto retry;
return ret;
}
@@ -245,12 +253,20 @@ static inline void dbgp_get_data(void *buf, int size)
bytes[i] = (hi >> (8*(i - 4))) & 0xff;
}
-static int dbgp_out(u32 addr, const char *bytes, int size)
+static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
+ const char *bytes, int size)
{
+ int ret;
+ u32 addr;
u32 pids, ctrl;
+ if (size > DBGP_MAX_PACKET)
+ return -1;
+
+ addr = DBGP_EPADDR(devnum, endpoint);
+
pids = readl(&ehci_debug->pids);
- pids = dbgp_pid_update(pids, USB_PID_OUT);
+ pids = dbgp_pid_write_update(pids, USB_PID_OUT);
ctrl = readl(&ehci_debug->control);
ctrl = dbgp_len_update(ctrl, size);
@@ -260,34 +276,7 @@ static int dbgp_out(u32 addr, const char *bytes, int size)
dbgp_set_data(bytes, size);
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
- return dbgp_wait_until_done(ctrl);
-}
-
-static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
- const char *bytes, int size)
-{
- int ret;
- int loops = 5;
- u32 addr;
- if (size > DBGP_MAX_PACKET)
- return -1;
-
- addr = DBGP_EPADDR(devnum, endpoint);
-try_again:
- if (loops--) {
- ret = dbgp_out(addr, bytes, size);
- if (ret == -DBGP_ERR_BAD) {
- int try_loops = 3;
- do {
- /* Emit a dummy packet to re-sync communication
- * with the debug device */
- if (dbgp_out(addr, "12345678", 8) >= 0) {
- udelay(2);
- goto try_again;
- }
- } while (try_loops--);
- }
- }
+ ret = dbgp_wait_until_done(ctrl);
return ret;
}
@@ -304,7 +293,7 @@ static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
addr = DBGP_EPADDR(devnum, endpoint);
pids = readl(&ehci_debug->pids);
- pids = dbgp_pid_update(pids, USB_PID_IN);
+ pids = dbgp_pid_read_update(pids, USB_PID_IN);
ctrl = readl(&ehci_debug->control);
ctrl = dbgp_len_update(ctrl, size);
@@ -362,7 +351,6 @@ static int dbgp_control_msg(unsigned devnum, int requesttype,
return dbgp_bulk_read(devnum, 0, data, size);
}
-
/* Find a PCI capability */
static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap)
{
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index ee41120..7460cd7 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -812,6 +812,16 @@ config USB_CDC_COMPOSITE
Say "y" to link the driver statically, or "m" to build a
dynamically linked module.
+config USB_G_NOKIA
+ tristate "Nokia composite gadget"
+ depends on PHONET
+ help
+	  The Nokia composite gadget provides support for ACM, OBEX
+	  and Phonet in a single composite gadget driver.
+
+ It's only really useful for N900 hardware. If you're building
+ a kernel for N900, say Y or M here. If unsure, say N.
+
config USB_G_MULTI
tristate "Multifunction Composite Gadget (EXPERIMENTAL)"
depends on BLOCK && NET
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 2e2c047..43b51da 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -43,6 +43,7 @@ g_mass_storage-objs := mass_storage.o
g_printer-objs := printer.o
g_cdc-objs := cdc2.o
g_multi-objs := multi.o
+g_nokia-objs := nokia.o
obj-$(CONFIG_USB_ZERO) += g_zero.o
obj-$(CONFIG_USB_AUDIO) += g_audio.o
@@ -55,4 +56,5 @@ obj-$(CONFIG_USB_G_PRINTER) += g_printer.o
obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o
obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o
obj-$(CONFIG_USB_G_MULTI) += g_multi.o
+obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 043e04d..12ac9cd 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1656,9 +1656,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
if (!res)
return -ENXIO;
- if (!request_mem_region(res->start,
- res->end - res->start + 1,
- driver_name)) {
+ if (!request_mem_region(res->start, resource_size(res), driver_name)) {
DBG("someone's using UDC memory\n");
return -EBUSY;
}
@@ -1699,7 +1697,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
udc->ep[3].maxpacket = 64;
}
- udc->udp_baseaddr = ioremap(res->start, res->end - res->start + 1);
+ udc->udp_baseaddr = ioremap(res->start, resource_size(res));
if (!udc->udp_baseaddr) {
retval = -ENOMEM;
goto fail0a;
@@ -1781,7 +1779,7 @@ fail0a:
if (cpu_is_at91rm9200())
gpio_free(udc->board.pullup_pin);
fail0:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
DBG("%s probe failed, %d\n", driver_name, retval);
return retval;
}
@@ -1813,7 +1811,7 @@ static int __exit at91udc_remove(struct platform_device *pdev)
gpio_free(udc->board.pullup_pin);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
clk_put(udc->iclk);
clk_put(udc->fclk);
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 4e970cf..f79bdfe 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -320,7 +320,7 @@ static inline void usba_cleanup_debugfs(struct usba_udc *udc)
static int vbus_is_present(struct usba_udc *udc)
{
if (gpio_is_valid(udc->vbus_pin))
- return gpio_get_value(udc->vbus_pin);
+ return gpio_get_value(udc->vbus_pin) ^ udc->vbus_pin_inverted;
/* No Vbus detection: Assume always present */
return 1;
@@ -1763,7 +1763,7 @@ static irqreturn_t usba_vbus_irq(int irq, void *devid)
if (!udc->driver)
goto out;
- vbus = gpio_get_value(udc->vbus_pin);
+ vbus = vbus_is_present(udc);
if (vbus != udc->vbus_prev) {
if (vbus) {
toggle_bias(1);
@@ -1914,14 +1914,14 @@ static int __init usba_udc_probe(struct platform_device *pdev)
udc->vbus_pin = -ENODEV;
ret = -ENOMEM;
- udc->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ udc->regs = ioremap(regs->start, resource_size(regs));
if (!udc->regs) {
dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n");
goto err_map_regs;
}
dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n",
(unsigned long)regs->start, udc->regs);
- udc->fifo = ioremap(fifo->start, fifo->end - fifo->start + 1);
+ udc->fifo = ioremap(fifo->start, resource_size(fifo));
if (!udc->fifo) {
dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n");
goto err_map_fifo;
@@ -2000,6 +2000,7 @@ static int __init usba_udc_probe(struct platform_device *pdev)
if (gpio_is_valid(pdata->vbus_pin)) {
if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) {
udc->vbus_pin = pdata->vbus_pin;
+ udc->vbus_pin_inverted = pdata->vbus_pin_inverted;
ret = request_irq(gpio_to_irq(udc->vbus_pin),
usba_vbus_irq, 0,
diff --git a/drivers/usb/gadget/atmel_usba_udc.h b/drivers/usb/gadget/atmel_usba_udc.h
index f7baea3..88a2e07 100644
--- a/drivers/usb/gadget/atmel_usba_udc.h
+++ b/drivers/usb/gadget/atmel_usba_udc.h
@@ -323,6 +323,7 @@ struct usba_udc {
struct platform_device *pdev;
int irq;
int vbus_pin;
+ int vbus_pin_inverted;
struct clk *pclk;
struct clk *hclk;
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index cd0914e..65a5f94 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -265,16 +265,24 @@ struct usb_ep * __init usb_ep_autoconfig (
return ep;
}
- } else if (gadget_is_sh (gadget) && USB_ENDPOINT_XFER_INT == type) {
- /* single buffering is enough; maybe 8 byte fifo is too */
- ep = find_ep (gadget, "ep3in-bulk");
- if (ep && ep_matches (gadget, ep, desc))
- return ep;
-
- } else if (gadget_is_mq11xx (gadget) && USB_ENDPOINT_XFER_INT == type) {
- ep = find_ep (gadget, "ep1-bulk");
+#ifdef CONFIG_BLACKFIN
+ } else if (gadget_is_musbhsfc(gadget) || gadget_is_musbhdrc(gadget)) {
+ if ((USB_ENDPOINT_XFER_BULK == type) ||
+ (USB_ENDPOINT_XFER_ISOC == type)) {
+ if (USB_DIR_IN & desc->bEndpointAddress)
+ ep = find_ep (gadget, "ep5in");
+ else
+ ep = find_ep (gadget, "ep6out");
+ } else if (USB_ENDPOINT_XFER_INT == type) {
+ if (USB_DIR_IN & desc->bEndpointAddress)
+ ep = find_ep(gadget, "ep1in");
+ else
+ ep = find_ep(gadget, "ep2out");
+ } else
+ ep = NULL;
if (ep && ep_matches (gadget, ep, desc))
return ep;
+#endif
}
/* Second, look at endpoints until an unclaimed one looks usable */
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 141372b..400f8037 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -259,7 +259,7 @@ static struct usb_configuration rndis_config_driver = {
/*-------------------------------------------------------------------------*/
-#ifdef USB_ETH_EEM
+#ifdef CONFIG_USB_ETH_EEM
static int use_eem = 1;
#else
static int use_eem;
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index d10353d..e49c732 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -702,14 +702,6 @@ acm_unbind(struct usb_configuration *c, struct usb_function *f)
/* Some controllers can't support CDC ACM ... */
static inline bool can_support_cdc(struct usb_configuration *c)
{
- /* SH3 doesn't support multiple interfaces */
- if (gadget_is_sh(c->cdev->gadget))
- return false;
-
- /* sa1100 doesn't have a third interrupt endpoint */
- if (gadget_is_sa1100(c->cdev->gadget))
- return false;
-
/* everything else is *probably* fine ... */
return true;
}
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index ecf5bdd..2fff530 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -497,12 +497,9 @@ static int ecm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
struct net_device *net;
/* Enable zlps by default for ECM conformance;
- * override for musb_hdrc (avoids txdma ovhead)
- * and sa1100 (can't).
+ * override for musb_hdrc (avoids txdma ovhead).
*/
- ecm->port.is_zlp_ok = !(
- gadget_is_sa1100(cdev->gadget)
- || gadget_is_musbhdrc(cdev->gadget)
+ ecm->port.is_zlp_ok = !(gadget_is_musbhdrc(cdev->gadget)
);
ecm->port.cdc_filter = DEFAULT_FILTER;
DBG(cdev, "activate ecm\n");
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index a37640e..5a3cdd0 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -368,7 +368,7 @@ struct fsg_common {
struct task_struct *thread_task;
/* Callback function to call when thread exits. */
- void (*thread_exits)(struct fsg_common *common);
+ int (*thread_exits)(struct fsg_common *common);
/* Gadget's private data. */
void *private_data;
@@ -392,8 +392,12 @@ struct fsg_config {
const char *lun_name_format;
const char *thread_name;
- /* Callback function to call when thread exits. */
- void (*thread_exits)(struct fsg_common *common);
+	/* Callback function to call when thread exits.  If no
+	 * callback is set or it returns a value lower than zero, MSF
+	 * will force-eject all LUNs it operates on (including those
+	 * marked as non-removable or with prevent_medium_removal flag
+	 * set). */
+ int (*thread_exits)(struct fsg_common *common);
/* Gadget's private data. */
void *private_data;
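A minimal sketch of a callback honoring the new contract (hypothetical; compare msg_thread_exits() added to mass_storage.c at the end of this patch, which returns 0 after its own cleanup):

static int example_thread_exits(struct fsg_common *common)
{
	/* Returning a negative value asks MSF to force-eject every
	 * LUN it manages; returning 0 leaves the media alone.
	 */
	return -1;
}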
@@ -614,7 +618,12 @@ static int fsg_setup(struct usb_function *f,
return -EDOM;
VDBG(fsg, "get max LUN\n");
*(u8 *) req->buf = fsg->common->nluns - 1;
- return 1;
+
+ /* Respond with data/status */
+ req->length = min((u16)1, w_length);
+ fsg->common->ep0req_name =
+ ctrl->bRequestType & USB_DIR_IN ? "ep0-in" : "ep0-out";
+ return ep0_queue(fsg->common);
}
VDBG(fsg,
@@ -1041,7 +1050,7 @@ static void invalidate_sub(struct fsg_lun *curlun)
unsigned long rc;
rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
- VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
+ VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
}
static int do_verify(struct fsg_common *common)
@@ -2524,14 +2533,6 @@ static void handle_exception(struct fsg_common *common)
case FSG_STATE_CONFIG_CHANGE:
rc = do_set_config(common, new_config);
- if (common->ep0_req_tag != exception_req_tag)
- break;
- if (rc != 0) { /* STALL on errors */
- DBG(common, "ep0 set halt\n");
- usb_ep_set_halt(common->ep0);
- } else { /* Complete the status stage */
- ep0_queue(common);
- }
break;
case FSG_STATE_EXIT:
@@ -2615,8 +2616,20 @@ static int fsg_main_thread(void *common_)
common->thread_task = NULL;
spin_unlock_irq(&common->lock);
- if (common->thread_exits)
- common->thread_exits(common);
+ if (!common->thread_exits || common->thread_exits(common) < 0) {
+ struct fsg_lun *curlun = common->luns;
+ unsigned i = common->nluns;
+
+ down_write(&common->filesem);
+ for (; i--; ++curlun) {
+ if (!fsg_lun_is_open(curlun))
+ continue;
+
+ fsg_lun_close(curlun);
+ curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+ }
+ up_write(&common->filesem);
+ }
/* Let the unbind and cleanup routines know the thread has exited */
complete_and_exit(&common->thread_notifier, 0);
@@ -2763,10 +2776,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
if (cfg->release != 0xffff) {
i = cfg->release;
} else {
- /* The sa1100 controller is not supported */
- i = gadget_is_sa1100(gadget)
- ? -1
- : usb_gadget_controller_number(gadget);
+ i = usb_gadget_controller_number(gadget);
if (i >= 0) {
i = 0x0300 + i;
} else {
@@ -2791,8 +2801,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
* disable stalls.
*/
common->can_stall = cfg->can_stall &&
- !(gadget_is_sh(common->gadget) ||
- gadget_is_at91(common->gadget));
+ !(gadget_is_at91(common->gadget));
spin_lock_init(&common->lock);
@@ -2852,7 +2861,6 @@ error_release:
/* Call fsg_common_release() directly, ref might be not
* initialised */
fsg_common_release(&common->ref);
- complete(&common->thread_notifier);
return ERR_PTR(rc);
}
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 95dae4c..a30e60c 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -769,10 +769,6 @@ rndis_unbind(struct usb_configuration *c, struct usb_function *f)
/* Some controllers can't support RNDIS ... */
static inline bool can_support_rndis(struct usb_configuration *c)
{
- /* only two endpoints on sa1100 */
- if (gadget_is_sa1100(c->cdev->gadget))
- return false;
-
/* everything else is *presumably* fine */
return true;
}
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 29dfb02..b49d86e 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -1448,7 +1448,7 @@ static void invalidate_sub(struct fsg_lun *curlun)
unsigned long rc;
rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
- VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
+ VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
}
static int do_verify(struct fsg_dev *fsg)
@@ -3208,15 +3208,11 @@ static int __init check_parameters(struct fsg_dev *fsg)
* halt bulk endpoints correctly. If one of them is present,
* disable stalls.
*/
- if (gadget_is_sh(fsg->gadget) || gadget_is_at91(fsg->gadget))
+ if (gadget_is_at91(fsg->gadget))
mod_data.can_stall = 0;
if (mod_data.release == 0xffff) { // Parameter wasn't set
- /* The sa1100 controller is not supported */
- if (gadget_is_sa1100(fsg->gadget))
- gcnum = -1;
- else
- gcnum = usb_gadget_controller_number(fsg->gadget);
+ gcnum = usb_gadget_controller_number(fsg->gadget);
if (gcnum >= 0)
mod_data.release = 0x0300 + gcnum;
else {
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 7881f12..3537d51 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2749,7 +2749,7 @@ static int __devexit qe_udc_remove(struct of_device *ofdev)
}
/*-------------------------------------------------------------------------*/
-static struct of_device_id __devinitdata qe_udc_match[] = {
+static const struct of_device_id qe_udc_match[] __devinitconst = {
{
.compatible = "fsl,mpc8323-qe-usb",
.data = (void *)PORT_QE,
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index f2d270b..1edbc12 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -45,46 +45,18 @@
#define gadget_is_goku(g) 0
#endif
-/* SH3 UDC -- not yet ported 2.4 --> 2.6 */
-#ifdef CONFIG_USB_GADGET_SUPERH
-#define gadget_is_sh(g) !strcmp("sh_udc", (g)->name)
-#else
-#define gadget_is_sh(g) 0
-#endif
-
-/* not yet stable on 2.6 (would help "original Zaurus") */
-#ifdef CONFIG_USB_GADGET_SA1100
-#define gadget_is_sa1100(g) !strcmp("sa1100_udc", (g)->name)
-#else
-#define gadget_is_sa1100(g) 0
-#endif
-
#ifdef CONFIG_USB_GADGET_LH7A40X
#define gadget_is_lh7a40x(g) !strcmp("lh7a40x_udc", (g)->name)
#else
#define gadget_is_lh7a40x(g) 0
#endif
-/* handhelds.org tree (?) */
-#ifdef CONFIG_USB_GADGET_MQ11XX
-#define gadget_is_mq11xx(g) !strcmp("mq11xx_udc", (g)->name)
-#else
-#define gadget_is_mq11xx(g) 0
-#endif
-
#ifdef CONFIG_USB_GADGET_OMAP
#define gadget_is_omap(g) !strcmp("omap_udc", (g)->name)
#else
#define gadget_is_omap(g) 0
#endif
-/* not yet ported 2.4 --> 2.6 */
-#ifdef CONFIG_USB_GADGET_N9604
-#define gadget_is_n9604(g) !strcmp("n9604_udc", (g)->name)
-#else
-#define gadget_is_n9604(g) 0
-#endif
-
/* various unstable versions available */
#ifdef CONFIG_USB_GADGET_PXA27X
#define gadget_is_pxa27x(g) !strcmp("pxa27x_udc", (g)->name)
@@ -122,14 +94,6 @@
#define gadget_is_fsl_usb2(g) 0
#endif
-/* Mentor high speed function controller */
-/* from Montavista kernel (?) */
-#ifdef CONFIG_USB_GADGET_MUSBHSFC
-#define gadget_is_musbhsfc(g) !strcmp("musbhsfc_udc", (g)->name)
-#else
-#define gadget_is_musbhsfc(g) 0
-#endif
-
/* Mentor high speed "dual role" controller, in peripheral role */
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
#define gadget_is_musbhdrc(g) !strcmp("musb_hdrc", (g)->name)
@@ -143,13 +107,6 @@
#define gadget_is_langwell(g) 0
#endif
-/* from Montavista kernel (?) */
-#ifdef CONFIG_USB_GADGET_MPC8272
-#define gadget_is_mpc8272(g) !strcmp("mpc8272_udc", (g)->name)
-#else
-#define gadget_is_mpc8272(g) 0
-#endif
-
#ifdef CONFIG_USB_GADGET_M66592
#define gadget_is_m66592(g) !strcmp("m66592_udc", (g)->name)
#else
@@ -203,20 +160,12 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
return 0x02;
else if (gadget_is_pxa(gadget))
return 0x03;
- else if (gadget_is_sh(gadget))
- return 0x04;
- else if (gadget_is_sa1100(gadget))
- return 0x05;
else if (gadget_is_goku(gadget))
return 0x06;
- else if (gadget_is_mq11xx(gadget))
- return 0x07;
else if (gadget_is_omap(gadget))
return 0x08;
else if (gadget_is_lh7a40x(gadget))
return 0x09;
- else if (gadget_is_n9604(gadget))
- return 0x10;
else if (gadget_is_pxa27x(gadget))
return 0x11;
else if (gadget_is_s3c2410(gadget))
@@ -225,12 +174,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
return 0x13;
else if (gadget_is_imx(gadget))
return 0x14;
- else if (gadget_is_musbhsfc(gadget))
- return 0x15;
else if (gadget_is_musbhdrc(gadget))
return 0x16;
- else if (gadget_is_mpc8272(gadget))
- return 0x17;
else if (gadget_is_atmel_usba(gadget))
return 0x18;
else if (gadget_is_fsl_usb2(gadget))
@@ -265,10 +210,6 @@ static inline bool gadget_supports_altsettings(struct usb_gadget *gadget)
if (gadget_is_pxa27x(gadget))
return false;
- /* SH3 hardware just doesn't do altsettings */
- if (gadget_is_sh(gadget))
- return false;
-
/* Everything else is *presumably* fine ... */
return true;
}
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 5f6a2e0..04f6224 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -618,11 +618,6 @@ gmidi_set_config(struct gmidi_device *dev, unsigned number, gfp_t gfp_flags)
}
#endif
- if (gadget_is_sa1100(gadget) && dev->config) {
- /* tx fifo is full, but we can't clear it...*/
- ERROR(dev, "can't change configurations\n");
- return -ESPIPE;
- }
gmidi_reset_config(dev);
switch (number) {
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 112bb40..e8edc64 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1859,7 +1859,7 @@ done:
/*-------------------------------------------------------------------------*/
-static struct pci_device_id pci_ids [] = { {
+static const struct pci_device_id pci_ids[] = { {
.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
.class_mask = ~0,
.vendor = 0x102f, /* Toshiba */
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index bf0f652..de8a838 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -194,7 +194,7 @@ enum ep_state {
};
struct ep_data {
- struct semaphore lock;
+ struct mutex lock;
enum ep_state state;
atomic_t count;
struct dev_data *dev;
@@ -298,10 +298,10 @@ get_ready_ep (unsigned f_flags, struct ep_data *epdata)
int val;
if (f_flags & O_NONBLOCK) {
- if (down_trylock (&epdata->lock) != 0)
+ if (!mutex_trylock(&epdata->lock))
goto nonblock;
if (epdata->state != STATE_EP_ENABLED) {
- up (&epdata->lock);
+ mutex_unlock(&epdata->lock);
nonblock:
val = -EAGAIN;
} else
@@ -309,7 +309,8 @@ nonblock:
return val;
}
- if ((val = down_interruptible (&epdata->lock)) < 0)
+ val = mutex_lock_interruptible(&epdata->lock);
+ if (val < 0)
return val;
switch (epdata->state) {
@@ -323,7 +324,7 @@ nonblock:
// FALLTHROUGH
case STATE_EP_UNBOUND: /* clean disconnect */
val = -ENODEV;
- up (&epdata->lock);
+ mutex_unlock(&epdata->lock);
}
return val;
}
@@ -393,7 +394,7 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
if (likely (data->ep != NULL))
usb_ep_set_halt (data->ep);
spin_unlock_irq (&data->dev->lock);
- up (&data->lock);
+ mutex_unlock(&data->lock);
return -EBADMSG;
}
@@ -411,7 +412,7 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
value = -EFAULT;
free1:
- up (&data->lock);
+ mutex_unlock(&data->lock);
kfree (kbuf);
return value;
}
@@ -436,7 +437,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
if (likely (data->ep != NULL))
usb_ep_set_halt (data->ep);
spin_unlock_irq (&data->dev->lock);
- up (&data->lock);
+ mutex_unlock(&data->lock);
return -EBADMSG;
}
@@ -455,7 +456,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
VDEBUG (data->dev, "%s write %zu IN, status %d\n",
data->name, len, (int) value);
free1:
- up (&data->lock);
+ mutex_unlock(&data->lock);
kfree (kbuf);
return value;
}
@@ -466,7 +467,8 @@ ep_release (struct inode *inode, struct file *fd)
struct ep_data *data = fd->private_data;
int value;
- if ((value = down_interruptible(&data->lock)) < 0)
+ value = mutex_lock_interruptible(&data->lock);
+ if (value < 0)
return value;
/* clean up if this can be reopened */
@@ -476,7 +478,7 @@ ep_release (struct inode *inode, struct file *fd)
data->hs_desc.bDescriptorType = 0;
usb_ep_disable(data->ep);
}
- up (&data->lock);
+ mutex_unlock(&data->lock);
put_ep (data);
return 0;
}
@@ -507,7 +509,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
} else
status = -ENODEV;
spin_unlock_irq (&data->dev->lock);
- up (&data->lock);
+ mutex_unlock(&data->lock);
return status;
}
@@ -673,7 +675,7 @@ fail:
value = -ENODEV;
spin_unlock_irq(&epdata->dev->lock);
- up(&epdata->lock);
+ mutex_unlock(&epdata->lock);
if (unlikely(value)) {
kfree(priv);
@@ -765,7 +767,8 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
u32 tag;
int value, length = len;
- if ((value = down_interruptible (&data->lock)) < 0)
+ value = mutex_lock_interruptible(&data->lock);
+ if (value < 0)
return value;
if (data->state != STATE_EP_READY) {
@@ -854,7 +857,7 @@ fail:
data->desc.bDescriptorType = 0;
data->hs_desc.bDescriptorType = 0;
}
- up (&data->lock);
+ mutex_unlock(&data->lock);
return value;
fail0:
value = -EINVAL;
@@ -870,7 +873,7 @@ ep_open (struct inode *inode, struct file *fd)
struct ep_data *data = inode->i_private;
int value = -EBUSY;
- if (down_interruptible (&data->lock) != 0)
+ if (mutex_lock_interruptible(&data->lock) != 0)
return -EINTR;
spin_lock_irq (&data->dev->lock);
if (data->dev->state == STATE_DEV_UNBOUND)
@@ -885,7 +888,7 @@ ep_open (struct inode *inode, struct file *fd)
DBG (data->dev, "%s state %d\n",
data->name, data->state);
spin_unlock_irq (&data->dev->lock);
- up (&data->lock);
+ mutex_unlock(&data->lock);
return value;
}
@@ -1631,7 +1634,7 @@ static int activate_ep_files (struct dev_data *dev)
if (!data)
goto enomem0;
data->state = STATE_EP_DISABLED;
- init_MUTEX (&data->lock);
+ mutex_init(&data->lock);
init_waitqueue_head (&data->wait);
strncpy (data->name, ep->name, sizeof (data->name) - 1);
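The inode.c changes are a mechanical semaphore-to-mutex conversion; condensed, the mapping applied throughout (with data being a struct ep_data pointer, as above) is:

    mutex_init(&data->lock);                    /* was init_MUTEX(&data->lock) */

    if (mutex_lock_interruptible(&data->lock))  /* was down_interruptible() */
            return -EINTR;
    /* ... critical section ... */
    mutex_unlock(&data->lock);                  /* was up(&data->lock) */

    if (!mutex_trylock(&data->lock))            /* was down_trylock() != 0 */
            return -EAGAIN;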
diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c
index 19619fb..705cc1f 100644
--- a/drivers/usb/gadget/mass_storage.c
+++ b/drivers/usb/gadget/mass_storage.c
@@ -135,6 +135,12 @@ FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
static unsigned long msg_registered = 0;
static void msg_cleanup(void);
+static int msg_thread_exits(struct fsg_common *common)
+{
+ msg_cleanup();
+ return 0;
+}
+
static int __init msg_do_config(struct usb_configuration *c)
{
struct fsg_common *common;
@@ -147,7 +153,7 @@ static int __init msg_do_config(struct usb_configuration *c)
}
fsg_config_from_params(&config, &mod_data);
- config.thread_exits = (void(*)(struct fsg_common*))&msg_cleanup;
+ config.thread_exits = msg_thread_exits;
common = fsg_common_init(0, c->cdev, &config);
if (IS_ERR(common))
return PTR_ERR(common);
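The new msg_thread_exits() wrapper replaces a function-pointer cast: msg_cleanup() takes no arguments and returns void, and calling it through a pointer of the callback's type (taking a struct fsg_common pointer and returning int, as the wrapper shows) is undefined behaviour in C. Illustratively, with the hook typedef name being an assumption:

    typedef int (*thread_exits_fn)(struct fsg_common *common);

    /* wrong: hook = (thread_exits_fn)&msg_cleanup;  -- UB when invoked */

    static int msg_thread_exits(struct fsg_common *common)
    {
            msg_cleanup();  /* do the real work */
            return 0;       /* satisfy the expected signature */
    }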
diff --git a/drivers/usb/gadget/nokia.c b/drivers/usb/gadget/nokia.c
new file mode 100644
index 0000000..7d6b66a
--- /dev/null
+++ b/drivers/usb/gadget/nokia.c
@@ -0,0 +1,259 @@
+/*
+ * nokia.c -- Nokia Composite Gadget Driver
+ *
+ * Copyright (C) 2008-2010 Nokia Corporation
+ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * This gadget driver borrows from serial.c which is:
+ *
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 by David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * version 2 of that License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+
+#include "u_serial.h"
+#include "u_ether.h"
+#include "u_phonet.h"
+#include "gadget_chips.h"
+
+/* Defines */
+
+#define NOKIA_VERSION_NUM 0x0211
+#define NOKIA_LONG_NAME "N900 (PC-Suite Mode)"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "u_serial.c"
+#include "f_acm.c"
+#include "f_ecm.c"
+#include "f_obex.c"
+#include "f_serial.c"
+#include "f_phonet.c"
+#include "u_ether.c"
+
+/*-------------------------------------------------------------------------*/
+
+#define NOKIA_VENDOR_ID 0x0421 /* Nokia */
+#define NOKIA_PRODUCT_ID 0x01c8 /* Nokia Gadget */
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+#define STRING_DESCRIPTION_IDX 2
+
+static char manufacturer_nokia[] = "Nokia";
+static const char product_nokia[] = NOKIA_LONG_NAME;
+static const char description_nokia[] = "PC-Suite Configuration";
+
+static struct usb_string strings_dev[] = {
+ [STRING_MANUFACTURER_IDX].s = manufacturer_nokia,
+	[STRING_PRODUCT_IDX].s = product_nokia,
+ [STRING_DESCRIPTION_IDX].s = description_nokia,
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+ &stringtab_dev,
+ NULL,
+};
+
+static struct usb_device_descriptor device_desc = {
+ .bLength = USB_DT_DEVICE_SIZE,
+ .bDescriptorType = USB_DT_DEVICE,
+ .bcdUSB = __constant_cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_COMM,
+ .idVendor = __constant_cpu_to_le16(NOKIA_VENDOR_ID),
+ .idProduct = __constant_cpu_to_le16(NOKIA_PRODUCT_ID),
+ /* .iManufacturer = DYNAMIC */
+ /* .iProduct = DYNAMIC */
+ .bNumConfigurations = 1,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* Module */
+MODULE_DESCRIPTION("Nokia composite gadget driver for N900");
+MODULE_AUTHOR("Felipe Balbi");
+MODULE_LICENSE("GPL");
+
+/*-------------------------------------------------------------------------*/
+
+static u8 hostaddr[ETH_ALEN];
+
+static int __init nokia_bind_config(struct usb_configuration *c)
+{
+ int status = 0;
+
+ status = phonet_bind_config(c);
+ if (status)
+ printk(KERN_DEBUG "could not bind phonet config\n");
+
+ status = obex_bind_config(c, 0);
+ if (status)
+ printk(KERN_DEBUG "could not bind obex config %d\n", 0);
+
+ status = obex_bind_config(c, 1);
+ if (status)
+		printk(KERN_DEBUG "could not bind obex config %d\n", 1);
+
+ status = acm_bind_config(c, 2);
+ if (status)
+ printk(KERN_DEBUG "could not bind acm config\n");
+
+ status = ecm_bind_config(c, hostaddr);
+ if (status)
+ printk(KERN_DEBUG "could not bind ecm config\n");
+
+ return status;
+}
+
+static struct usb_configuration nokia_config_500ma_driver = {
+ .label = "Bus Powered",
+ .bind = nokia_bind_config,
+ .bConfigurationValue = 1,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_ONE,
+ .bMaxPower = 250, /* 500mA */
+};
+
+static struct usb_configuration nokia_config_100ma_driver = {
+ .label = "Self Powered",
+ .bind = nokia_bind_config,
+ .bConfigurationValue = 2,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+ .bMaxPower = 50, /* 100 mA */
+};
+
+static int __init nokia_bind(struct usb_composite_dev *cdev)
+{
+ int gcnum;
+ struct usb_gadget *gadget = cdev->gadget;
+ int status;
+
+ status = gphonet_setup(cdev->gadget);
+ if (status < 0)
+ goto err_phonet;
+
+ status = gserial_setup(cdev->gadget, 3);
+ if (status < 0)
+ goto err_serial;
+
+ status = gether_setup(cdev->gadget, hostaddr);
+ if (status < 0)
+ goto err_ether;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto err_usb;
+ strings_dev[STRING_MANUFACTURER_IDX].id = status;
+
+ device_desc.iManufacturer = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto err_usb;
+ strings_dev[STRING_PRODUCT_IDX].id = status;
+
+ device_desc.iProduct = status;
+
+ /* config description */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto err_usb;
+ strings_dev[STRING_DESCRIPTION_IDX].id = status;
+
+ nokia_config_500ma_driver.iConfiguration = status;
+ nokia_config_100ma_driver.iConfiguration = status;
+
+ /* set up other descriptors */
+ gcnum = usb_gadget_controller_number(gadget);
+ if (gcnum >= 0)
+ device_desc.bcdDevice = cpu_to_le16(NOKIA_VERSION_NUM);
+ else {
+ /* this should only work with hw that supports altsettings
+		 * and several endpoints; anything else must bail out.
+ */
+ pr_err("nokia_bind: controller '%s' not recognized\n",
+ gadget->name);
+ goto err_usb;
+ }
+
+	/* finally, register the configurations */
+ status = usb_add_config(cdev, &nokia_config_500ma_driver);
+ if (status < 0)
+ goto err_usb;
+
+ status = usb_add_config(cdev, &nokia_config_100ma_driver);
+ if (status < 0)
+ goto err_usb;
+
+ dev_info(&gadget->dev, "%s\n", NOKIA_LONG_NAME);
+
+ return 0;
+
+err_usb:
+ gether_cleanup();
+err_ether:
+ gserial_cleanup();
+err_serial:
+ gphonet_cleanup();
+err_phonet:
+ return status;
+}
+
+static int __exit nokia_unbind(struct usb_composite_dev *cdev)
+{
+ gphonet_cleanup();
+ gserial_cleanup();
+ gether_cleanup();
+
+ return 0;
+}
+
+static struct usb_composite_driver nokia_driver = {
+ .name = "g_nokia",
+ .dev = &device_desc,
+ .strings = dev_strings,
+ .bind = nokia_bind,
+ .unbind = __exit_p(nokia_unbind),
+};
+
+static int __init nokia_init(void)
+{
+ return usb_composite_register(&nokia_driver);
+}
+module_init(nokia_init);
+
+static void __exit nokia_cleanup(void)
+{
+ usb_composite_unregister(&nokia_driver);
+}
+module_exit(nokia_cleanup);
+
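In the two configurations above, bMaxPower is expressed in the 2 mA units used by USB configuration descriptors, which is where the inline comments come from:

    /* bMaxPower unit is 2 mA; a hypothetical helper makes this explicit */
    #define MA_TO_BMAXPOWER(mA)     ((mA) / 2)

    /* bus powered:  .bMaxPower = 250  ==  MA_TO_BMAXPOWER(500)  (500 mA) */
    /* self powered: .bMaxPower = 50   ==  MA_TO_BMAXPOWER(100)  (100 mA) */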
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 2d867fd..6b8bf8c 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -949,12 +949,6 @@ printer_set_config(struct printer_dev *dev, unsigned number)
int result = 0;
struct usb_gadget *gadget = dev->gadget;
- if (gadget_is_sa1100(gadget) && dev->config) {
- /* tx fifo is full, but we can't clear it...*/
- INFO(dev, "can't change configurations\n");
- return -ESPIPE;
- }
-
switch (number) {
case DEV_CONFIG_VALUE:
result = 0;
@@ -1033,12 +1027,6 @@ set_interface(struct printer_dev *dev, unsigned number)
{
int result = 0;
- if (gadget_is_sa1100(dev->gadget) && dev->interface < 0) {
- /* tx fifo is full, but we can't clear it...*/
- INFO(dev, "can't change interfaces\n");
- return -ESPIPE;
- }
-
/* Free the current interface */
switch (dev->interface) {
case PRINTER_INTERFACE:
@@ -1392,12 +1380,6 @@ printer_bind(struct usb_gadget *gadget)
goto fail;
}
- if (gadget_is_sa1100(gadget)) {
- /* hardware can't write zero length packets. */
- ERROR(dev, "SA1100 controller is unsupport by this driver\n");
- goto fail;
- }
-
gcnum = usb_gadget_controller_number(gadget);
if (gcnum >= 0) {
device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index adda120..05b892c 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -742,13 +742,17 @@ static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req)
* @ep: pxa physical endpoint
* @req: pxa request
* @status: usb request status sent to gadget API
+ * @pflags: flags from the previous spin_lock_irqsave() or NULL if no lock held
*
- * Context: ep->lock held
+ * Context: ep->lock held if pflags is not NULL, else ep->lock released
*
* Retire a pxa27x usb request. Endpoint must be locked.
*/
-static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status)
+static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status,
+ unsigned long *pflags)
{
+ unsigned long flags;
+
ep_del_request(ep, req);
if (likely(req->req.status == -EINPROGRESS))
req->req.status = status;
@@ -760,38 +764,48 @@ static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status)
&req->req, status,
req->req.actual, req->req.length);
+ if (pflags)
+ spin_unlock_irqrestore(&ep->lock, *pflags);
+ local_irq_save(flags);
req->req.complete(&req->udc_usb_ep->usb_ep, &req->req);
+ local_irq_restore(flags);
+ if (pflags)
+ spin_lock_irqsave(&ep->lock, *pflags);
}
/**
* ep_end_out_req - Ends endpoint OUT request
* @ep: physical endpoint
* @req: pxa request
+ * @pflags: flags from the previous spin_lock_irqsave() or NULL if no lock held
*
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
*
* Ends endpoint OUT request (completes usb request).
*/
-static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
{
inc_ep_stats_reqs(ep, !USB_DIR_IN);
- req_done(ep, req, 0);
+ req_done(ep, req, 0, pflags);
}
/**
* ep0_end_out_req - Ends control endpoint OUT request (ends data stage)
* @ep: physical endpoint
* @req: pxa request
+ * @pflags: flags from the previous spin_lock_irqsave() or NULL if no lock held
*
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
*
* Ends control endpoint OUT request (completes usb request), and puts
* control endpoint into idle state
*/
-static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
{
set_ep0state(ep->dev, OUT_STATUS_STAGE);
- ep_end_out_req(ep, req);
+ ep_end_out_req(ep, req, pflags);
ep0_idle(ep->dev);
}
@@ -799,31 +813,35 @@ static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req)
* ep_end_in_req - Ends endpoint IN request
* @ep: physical endpoint
* @req: pxa request
+ * @pflags: flags from the previous spin_lock_irqsave() or NULL if no lock held
*
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
*
* Ends endpoint IN request (completes usb request).
*/
-static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
{
inc_ep_stats_reqs(ep, USB_DIR_IN);
- req_done(ep, req, 0);
+ req_done(ep, req, 0, pflags);
}
/**
* ep0_end_in_req - Ends control endpoint IN request (ends data stage)
* @ep: physical endpoint
* @req: pxa request
+ * @pflags: flags from the previous spin_lock_irqsave() or NULL if no lock held
*
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
*
* Ends control endpoint IN request (completes usb request), and puts
* control endpoint into status state
*/
-static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
{
set_ep0state(ep->dev, IN_STATUS_STAGE);
- ep_end_in_req(ep, req);
+ ep_end_in_req(ep, req, pflags);
}
/**
@@ -831,19 +849,22 @@ static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req)
* @ep: pxa endpoint
* @status: usb request status
*
- * Context: ep->lock held
+ * Context: ep->lock released
*
* Dequeues all requests on an endpoint. As a side effect, interrupts will be
* disabled on that endpoint (because no more requests).
*/
static void nuke(struct pxa_ep *ep, int status)
{
- struct pxa27x_request *req;
+ struct pxa27x_request *req;
+ unsigned long flags;
+ spin_lock_irqsave(&ep->lock, flags);
while (!list_empty(&ep->queue)) {
req = list_entry(ep->queue.next, struct pxa27x_request, queue);
- req_done(ep, req, status);
+ req_done(ep, req, status, &flags);
}
+ spin_unlock_irqrestore(&ep->lock, flags);
}
/**
@@ -1123,6 +1144,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
int rc = 0;
int is_first_req;
unsigned length;
+ int recursion_detected;
req = container_of(_req, struct pxa27x_request, req);
udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
@@ -1152,6 +1174,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
return -EMSGSIZE;
spin_lock_irqsave(&ep->lock, flags);
+ recursion_detected = ep->in_handle_ep;
is_first_req = list_empty(&ep->queue);
ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
@@ -1161,12 +1184,12 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
if (!ep->enabled) {
_req->status = -ESHUTDOWN;
rc = -ESHUTDOWN;
- goto out;
+ goto out_locked;
}
if (req->in_use) {
ep_err(ep, "refusing to queue req %p (already queued)\n", req);
- goto out;
+ goto out_locked;
}
length = _req->length;
@@ -1174,12 +1197,13 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
_req->actual = 0;
ep_add_request(ep, req);
+ spin_unlock_irqrestore(&ep->lock, flags);
if (is_ep0(ep)) {
switch (dev->ep0state) {
case WAIT_ACK_SET_CONF_INTERF:
if (length == 0) {
- ep_end_in_req(ep, req);
+ ep_end_in_req(ep, req, NULL);
} else {
ep_err(ep, "got a request of %d bytes while"
"in state WAIT_ACK_SET_CONF_INTERF\n",
@@ -1192,12 +1216,12 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
case IN_DATA_STAGE:
if (!ep_is_full(ep))
if (write_ep0_fifo(ep, req))
- ep0_end_in_req(ep, req);
+ ep0_end_in_req(ep, req, NULL);
break;
case OUT_DATA_STAGE:
if ((length == 0) || !epout_has_pkt(ep))
if (read_ep0_fifo(ep, req))
- ep0_end_out_req(ep, req);
+ ep0_end_out_req(ep, req, NULL);
break;
default:
ep_err(ep, "odd state %s to send me a request\n",
@@ -1207,12 +1231,15 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
break;
}
} else {
- handle_ep(ep);
+ if (!recursion_detected)
+ handle_ep(ep);
}
out:
- spin_unlock_irqrestore(&ep->lock, flags);
return rc;
+out_locked:
+ spin_unlock_irqrestore(&ep->lock, flags);
+ goto out;
}
/**
@@ -1242,13 +1269,14 @@ static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
/* make sure it's actually queued on this endpoint */
list_for_each_entry(req, &ep->queue, queue) {
if (&req->req == _req) {
- req_done(ep, req, -ECONNRESET);
rc = 0;
break;
}
}
spin_unlock_irqrestore(&ep->lock, flags);
+ if (!rc)
+ req_done(ep, req, -ECONNRESET, NULL);
return rc;
}
@@ -1445,7 +1473,6 @@ static int pxa_ep_disable(struct usb_ep *_ep)
{
struct pxa_ep *ep;
struct udc_usb_ep *udc_usb_ep;
- unsigned long flags;
if (!_ep)
return -EINVAL;
@@ -1455,10 +1482,8 @@ static int pxa_ep_disable(struct usb_ep *_ep)
if (!ep || is_ep0(ep) || !list_empty(&ep->queue))
return -EINVAL;
- spin_lock_irqsave(&ep->lock, flags);
ep->enabled = 0;
nuke(ep, -ESHUTDOWN);
- spin_unlock_irqrestore(&ep->lock, flags);
pxa_ep_fifo_flush(_ep);
udc_usb_ep->pxa_ep = NULL;
@@ -1907,8 +1932,10 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
} u;
int i;
int have_extrabytes = 0;
+ unsigned long flags;
nuke(ep, -EPROTO);
+ spin_lock_irqsave(&ep->lock, flags);
/*
* In the PXA320 manual, in the section about Back-to-Back setup
@@ -1947,10 +1974,13 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
/* Tell UDC to enter Data Stage */
ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
+ spin_unlock_irqrestore(&ep->lock, flags);
i = udc->driver->setup(&udc->gadget, &u.r);
+ spin_lock_irqsave(&ep->lock, flags);
if (i < 0)
goto stall;
out:
+ spin_unlock_irqrestore(&ep->lock, flags);
return;
stall:
ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
@@ -2055,13 +2085,13 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
if (req && !ep_is_full(ep))
completed = write_ep0_fifo(ep, req);
if (completed)
- ep0_end_in_req(ep, req);
+ ep0_end_in_req(ep, req, NULL);
break;
case OUT_DATA_STAGE: /* SET_DESCRIPTOR */
if (epout_has_pkt(ep) && req)
completed = read_ep0_fifo(ep, req);
if (completed)
- ep0_end_out_req(ep, req);
+ ep0_end_out_req(ep, req, NULL);
break;
case STALL:
ep_write_UDCCSR(ep, UDCCSR0_FST);
@@ -2091,7 +2121,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
* Tries to transfer all pending request data into the endpoint and/or
* transfer all pending data in the endpoint into usb requests.
*
- * Is always called when in_interrupt() or with ep->lock held.
+ * Is always called when in_interrupt() and with ep->lock released.
*/
static void handle_ep(struct pxa_ep *ep)
{
@@ -2100,10 +2130,17 @@ static void handle_ep(struct pxa_ep *ep)
u32 udccsr;
int is_in = ep->dir_in;
int loop = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ if (ep->in_handle_ep)
+ goto recursion_detected;
+ ep->in_handle_ep = 1;
do {
completed = 0;
udccsr = udc_ep_readl(ep, UDCCSR);
+
if (likely(!list_empty(&ep->queue)))
req = list_entry(ep->queue.next,
struct pxa27x_request, queue);
@@ -2122,15 +2159,22 @@ static void handle_ep(struct pxa_ep *ep)
if (unlikely(is_in)) {
if (likely(!ep_is_full(ep)))
completed = write_fifo(ep, req);
- if (completed)
- ep_end_in_req(ep, req);
} else {
if (likely(epout_has_pkt(ep)))
completed = read_fifo(ep, req);
- if (completed)
- ep_end_out_req(ep, req);
+ }
+
+ if (completed) {
+ if (is_in)
+ ep_end_in_req(ep, req, &flags);
+ else
+ ep_end_out_req(ep, req, &flags);
}
} while (completed);
+
+ ep->in_handle_ep = 0;
+recursion_detected:
+ spin_unlock_irqrestore(&ep->lock, flags);
}
/**
@@ -2218,9 +2262,13 @@ static void irq_handle_data(int irq, struct pxa_udc *udc)
continue;
udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK));
- ep = &udc->pxa_ep[i];
- ep->stats.irqs++;
- handle_ep(ep);
+
+ WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
+ if (i < ARRAY_SIZE(udc->pxa_ep)) {
+ ep = &udc->pxa_ep[i];
+ ep->stats.irqs++;
+ handle_ep(ep);
+ }
}
for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) {
@@ -2228,9 +2276,12 @@ static void irq_handle_data(int irq, struct pxa_udc *udc)
if (!(udcisr1 & UDCISR_INT_MASK))
continue;
- ep = &udc->pxa_ep[i];
- ep->stats.irqs++;
- handle_ep(ep);
+ WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
+ if (i < ARRAY_SIZE(udc->pxa_ep)) {
+ ep = &udc->pxa_ep[i];
+ ep->stats.irqs++;
+ handle_ep(ep);
+ }
}
}
@@ -2439,7 +2490,7 @@ static int __init pxa_udc_probe(struct platform_device *pdev)
}
retval = -ENOMEM;
- udc->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ udc->regs = ioremap(regs->start, resource_size(regs));
if (!udc->regs) {
dev_err(&pdev->dev, "Unable to map UDC I/O memory\n");
goto err_map;
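The heart of the pxa27x_udc rework is the pflags hand-off in req_done(): callers that hold ep->lock pass a pointer to their saved flags so the lock can be dropped around the gadget's completion callback (which may legally re-enter the driver through pxa_ep_queue()) and retaken afterwards; callers that never took the lock pass NULL. Schematically:

    /* caller did spin_lock_irqsave(&ep->lock, flags) and passed &flags */
    if (pflags)
            spin_unlock_irqrestore(&ep->lock, *pflags);

    req->req.complete(&req->udc_usb_ep->usb_ep, &req->req);
                                    /* may call back into pxa_ep_queue() */
    if (pflags)
            spin_lock_irqsave(&ep->lock, *pflags);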
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index e25225e..ff61e48 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -318,6 +318,11 @@ struct udc_usb_ep {
* @queue: requests queue
* @lock: lock to pxa_ep data (queues and stats)
* @enabled: true when endpoint enabled (not stopped by gadget layer)
+ * @in_handle_ep: flag set while handle_ep() is running
+ *	Prevents deadlocks and infinite recursion of the forms:
+ * irq->handle_ep()->req_done()->req.complete()->pxa_ep_queue()->handle_ep()
+ * or
+ * pxa_ep_queue()->handle_ep()->req_done()->req.complete()->pxa_ep_queue()
* @idx: endpoint index (1 => epA, 2 => epB, ..., 24 => epX)
* @name: endpoint name (for trace/debug purpose)
* @dir_in: 1 if IN endpoint, 0 if OUT endpoint
@@ -346,6 +351,7 @@ struct pxa_ep {
spinlock_t lock; /* Protects this structure */
/* (queues, stats) */
unsigned enabled:1;
+ unsigned in_handle_ep:1;
unsigned idx:5;
char *name;
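Since the lock is now released around completion callbacks, handle_ep() can be re-entered from its own completion path; the in_handle_ep bit closes that loop. As wired up in handle_ep() above, the guard amounts to:

    spin_lock_irqsave(&ep->lock, flags);
    if (ep->in_handle_ep)
            goto recursion_detected;        /* already running: bail out */
    ep->in_handle_ep = 1;

    /* ... fill/drain FIFOs, completing requests (lock dropped inside) ... */

    ep->in_handle_ep = 0;
recursion_detected:
    spin_unlock_irqrestore(&ep->lock, flags);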
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 5fc80a1..7e5bf59 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -317,7 +317,8 @@ static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
*
* Allocate a new USB request structure appropriate for the specified endpoint
*/
-struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep, gfp_t flags)
+static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep,
+ gfp_t flags)
{
struct s3c_hsotg_req *req;
@@ -373,7 +374,7 @@ static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
req->dma = DMA_ADDR_INVALID;
hs_req->mapped = 0;
} else {
- dma_sync_single(hsotg->dev, req->dma, req->length, dir);
+ dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
}
}
@@ -755,7 +756,7 @@ static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
hs_req->mapped = 1;
req->dma = dma;
} else {
- dma_sync_single(hsotg->dev, req->dma, req->length, dir);
+ dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
hs_req->mapped = 0;
}
@@ -1460,7 +1461,7 @@ static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
* as the actual data should be sent to the memory directly and we turn
* on the completion interrupts to get notifications of transfer completion.
*/
-void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
+static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
{
u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP);
u32 epnum, status, size;
@@ -3094,7 +3095,7 @@ static void s3c_hsotg_gate(struct platform_device *pdev, bool on)
local_irq_restore(flags);
}
-struct s3c_hsotg_plat s3c_hsotg_default_pdata;
+static struct s3c_hsotg_plat s3c_hsotg_default_pdata;
static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
{
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 2fc02bd..84ca195 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -746,6 +746,10 @@ static const struct net_device_ops eth_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+static struct device_type gadget_type = {
+ .name = "gadget",
+};
+
/**
* gether_setup - initialize one ethernet-over-usb link
* @g: gadget to associated with these links
@@ -808,6 +812,7 @@ int __init gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
dev->gadget = g;
SET_NETDEV_DEV(net, &g->dev);
+ SET_NETDEV_DEVTYPE(net, &gadget_type);
status = register_netdev(net);
if (status < 0) {
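SET_NETDEV_DEVTYPE() tags the link's net_device with a device_type, so generated uevents carry DEVTYPE=gadget and userspace (udev rules, for instance) can match ethernet-over-USB links; it pairs with the existing SET_NETDEV_DEV() parent assignment:

    static struct device_type gadget_type = {
            .name = "gadget",               /* exported as DEVTYPE=gadget */
    };

    SET_NETDEV_DEV(net, &g->dev);           /* parent device in sysfs */
    SET_NETDEV_DEVTYPE(net, &gadget_type);  /* device type for uevents */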
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index fd55f45..3c8c0c9 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -93,13 +93,6 @@ static inline bool can_support_ecm(struct usb_gadget *gadget)
if (!gadget_supports_altsettings(gadget))
return false;
- /* SA1100 can do ECM, *without* status endpoint ... but we'll
- * only use it in non-ECM mode for backwards compatibility
- * (and since we currently require a status endpoint)
- */
- if (gadget_is_sa1100(gadget))
- return false;
-
/* Everything else is *presumably* fine ... but this is a bit
* chancy, so be **CERTAIN** there are no hardware issues with
* your controller. Add it above if it can't handle CDC.
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 2d77240..fac81ee 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -297,12 +297,10 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
*/
if (loopdefault) {
loopback_add(cdev, autoresume != 0);
- if (!gadget_is_sh(gadget))
- sourcesink_add(cdev, autoresume != 0);
+ sourcesink_add(cdev, autoresume != 0);
} else {
sourcesink_add(cdev, autoresume != 0);
- if (!gadget_is_sh(gadget))
- loopback_add(cdev, autoresume != 0);
+ loopback_add(cdev, autoresume != 0);
}
gcnum = usb_gadget_controller_number(gadget);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2678a16..8d3df03 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -399,3 +399,14 @@ config USB_HWA_HCD
To compile this driver a module, choose M here: the module
will be called "hwa-hc".
+
+config USB_IMX21_HCD
+ tristate "iMX21 HCD support"
+ depends on USB && ARM && MACH_MX21
+ help
+ This driver enables support for the on-chip USB host in the
+ iMX21 processor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called "imx21-hcd".
+
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f58b249..4e0c67f 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -32,3 +32,5 @@ obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
+obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
+
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 87c1b7c..51bd0ed 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -149,7 +149,7 @@ static int __init ehci_atmel_drv_probe(struct platform_device *pdev)
goto fail_request_resource;
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
driver->description)) {
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index dbfb482..e3a74e7 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -121,6 +121,7 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
+ struct resource *res;
int ret;
if (usb_disabled())
@@ -144,8 +145,9 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
if (!hcd)
return -ENOMEM;
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
pr_debug("request_mem_region failed");
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 9911749..0e26aa1 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2005 MontaVista Software
+ * Copyright 2005-2009 MontaVista Software, Inc.
+ * Copyright 2008 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,17 +18,20 @@
*
* Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided
* by Hunter Wu.
+ * Power Management support by Dave Liu <daveliu@freescale.com>,
+ * Jerry Huang <Chang-Ming.Huang@freescale.com> and
+ * Anton Vorontsov <avorontsov@ru.mvista.com>.
*/
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include "ehci-fsl.h"
-/* FIXME: Power Management is un-ported so temporarily disable it */
-#undef CONFIG_PM
-
-
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
@@ -40,8 +44,8 @@
* Allocates basic resources for this USB host controller.
*
*/
-int usb_hcd_fsl_probe(const struct hc_driver *driver,
- struct platform_device *pdev)
+static int usb_hcd_fsl_probe(const struct hc_driver *driver,
+ struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata;
struct usb_hcd *hcd;
@@ -147,7 +151,8 @@ int usb_hcd_fsl_probe(const struct hc_driver *driver,
* Reverses the effect of usb_hcd_fsl_probe().
*
*/
-void usb_hcd_fsl_remove(struct usb_hcd *hcd, struct platform_device *pdev)
+static void usb_hcd_fsl_remove(struct usb_hcd *hcd,
+ struct platform_device *pdev)
{
usb_remove_hcd(hcd);
iounmap(hcd->regs);
@@ -284,10 +289,81 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
return retval;
}
+struct ehci_fsl {
+ struct ehci_hcd ehci;
+
+#ifdef CONFIG_PM
+ /* Saved USB PHY settings, need to restore after deep sleep. */
+ u32 usb_ctrl;
+#endif
+};
+
+#ifdef CONFIG_PM
+
+static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ return container_of(ehci, struct ehci_fsl, ehci);
+}
+
+static int ehci_fsl_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+ void __iomem *non_ehci = hcd->regs;
+
+ if (!fsl_deep_sleep())
+ return 0;
+
+ ehci_fsl->usb_ctrl = in_be32(non_ehci + FSL_SOC_USB_CTRL);
+ return 0;
+}
+
+static int ehci_fsl_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ void __iomem *non_ehci = hcd->regs;
+
+ if (!fsl_deep_sleep())
+ return 0;
+
+ usb_root_hub_lost_power(hcd->self.root_hub);
+
+ /* Restore USB PHY settings and enable the controller. */
+ out_be32(non_ehci + FSL_SOC_USB_CTRL, ehci_fsl->usb_ctrl);
+
+ ehci_reset(ehci);
+ ehci_fsl_reinit(ehci);
+
+ return 0;
+}
+
+static int ehci_fsl_drv_restore(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ usb_root_hub_lost_power(hcd->self.root_hub);
+ return 0;
+}
+
+static struct dev_pm_ops ehci_fsl_pm_ops = {
+ .suspend = ehci_fsl_drv_suspend,
+ .resume = ehci_fsl_drv_resume,
+ .restore = ehci_fsl_drv_restore,
+};
+
+#define EHCI_FSL_PM_OPS (&ehci_fsl_pm_ops)
+#else
+#define EHCI_FSL_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static const struct hc_driver ehci_fsl_hc_driver = {
.description = hcd_name,
.product_desc = "Freescale On-Chip EHCI Host Controller",
- .hcd_priv_size = sizeof(struct ehci_hcd),
+ .hcd_priv_size = sizeof(struct ehci_fsl),
/*
* generic hardware linkage
@@ -354,6 +430,7 @@ static struct platform_driver ehci_fsl_driver = {
.remove = ehci_fsl_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
- .name = "fsl-ehci",
+ .name = "fsl-ehci",
+ .pm = EHCI_FSL_PM_OPS,
},
};
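The suspend/resume support above relies on the usual private-data extension trick: struct ehci_fsl embeds struct ehci_hcd as its first member, .hcd_priv_size is bumped to cover the whole wrapper, and hcd_to_ehci_fsl() recovers the wrapper from a generic usb_hcd via container_of():

    struct ehci_hcd *ehci = hcd_to_ehci(hcd);   /* points into hcd->hcd_priv */
    struct ehci_fsl *ehci_fsl = container_of(ehci, struct ehci_fsl, ehci);

    ehci_fsl->usb_ctrl = in_be32(non_ehci + FSL_SOC_USB_CTRL); /* save PHY state */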
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index 35c56f4..23cd917 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -162,6 +162,17 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
goto err_ioremap;
}
+ /* call platform specific init function */
+ if (pdata->init) {
+ ret = pdata->init(pdev);
+ if (ret) {
+ dev_err(dev, "platform init failed\n");
+ goto err_init;
+ }
+ /* platforms need some time to settle changed IO settings */
+ mdelay(10);
+ }
+
/* enable clocks */
priv->usbclk = clk_get(dev, "usb");
if (IS_ERR(priv->usbclk)) {
@@ -192,18 +203,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
if (ret < 0)
goto err_init;
- /* call platform specific init function */
- if (pdata->init) {
- ret = pdata->init(pdev);
- if (ret) {
- dev_err(dev, "platform init failed\n");
- goto err_init;
- }
- }
-
- /* most platforms need some time to settle changed IO settings */
- mdelay(10);
-
/* Initialize the transceiver */
if (pdata->otg) {
pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 74d07f4..f0282d6 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -26,10 +26,9 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
- * TODO (last updated Feb 23rd, 2009):
+ * TODO (last updated Feb 12, 2010):
* - add kernel-doc
* - enable AUTOIDLE
- * - move DPLL5 programming to clock fw
* - add suspend/resume
* - move workarounds to board-files
*/
@@ -37,6 +36,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
#include <plat/usb.h>
/*
@@ -178,6 +178,11 @@ struct ehci_hcd_omap {
void __iomem *uhh_base;
void __iomem *tll_base;
void __iomem *ehci_base;
+
+ /* Regulators for USB PHYs.
+	 * Each PHY can have a separate regulator.
+ */
+ struct regulator *regulator[OMAP3_HS_USB_PORTS];
};
/*-------------------------------------------------------------------------*/
@@ -546,6 +551,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
int irq = platform_get_irq(pdev, 0);
int ret = -ENODEV;
+ int i;
+ char supply[7];
if (!pdata) {
dev_dbg(&pdev->dev, "missing platform_data\n");
@@ -613,6 +620,21 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
goto err_tll_ioremap;
}
+ /* get ehci regulator and enable */
+ for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
+ if (omap->port_mode[i] != EHCI_HCD_OMAP_MODE_PHY) {
+ omap->regulator[i] = NULL;
+ continue;
+ }
+ snprintf(supply, sizeof(supply), "hsusb%d", i);
+ omap->regulator[i] = regulator_get(omap->dev, supply);
+ if (IS_ERR(omap->regulator[i]))
+ dev_dbg(&pdev->dev,
+ "failed to get ehci port%d regulator\n", i);
+ else
+ regulator_enable(omap->regulator[i]);
+ }
+
ret = omap_start_ehc(omap, hcd);
if (ret) {
dev_dbg(&pdev->dev, "failed to start ehci\n");
@@ -622,13 +644,12 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
omap->ehci->regs = hcd->regs
+ HC_LENGTH(readl(&omap->ehci->caps->hc_capbase));
+ dbg_hcs_params(omap->ehci, "reset");
+ dbg_hcc_params(omap->ehci, "reset");
+
/* cache this readonly data; minimize chip reads */
omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params);
- /* SET 1 micro-frame Interrupt interval */
- writel(readl(&omap->ehci->regs->command) | (1 << 16),
- &omap->ehci->regs->command);
-
ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
if (ret) {
dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
@@ -641,6 +662,12 @@ err_add_hcd:
omap_stop_ehc(omap, hcd);
err_start:
+ for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
+ if (omap->regulator[i]) {
+ regulator_disable(omap->regulator[i]);
+ regulator_put(omap->regulator[i]);
+ }
+ }
iounmap(omap->tll_base);
err_tll_ioremap:
@@ -674,13 +701,21 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
{
struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
+ int i;
usb_remove_hcd(hcd);
omap_stop_ehc(omap, hcd);
iounmap(hcd->regs);
+ for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
+ if (omap->regulator[i]) {
+ regulator_disable(omap->regulator[i]);
+ regulator_put(omap->regulator[i]);
+ }
+ }
iounmap(omap->tll_base);
iounmap(omap->uhh_base);
usb_put_hcd(hcd);
+ kfree(omap);
return 0;
}
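The PHY regulator handling added to ehci-omap follows the standard get/enable then disable/put pairing, keyed by a per-port supply name; condensed, the lifecycle used in probe and remove is:

    char supply[7];
    struct regulator *reg;

    snprintf(supply, sizeof(supply), "hsusb%d", i);     /* "hsusb0" .. "hsusb2" */
    reg = regulator_get(omap->dev, supply);
    if (!IS_ERR(reg))
            regulator_enable(reg);

    /* ... and on the error path or at removal ... */
    regulator_disable(reg);
    regulator_put(reg);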
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 1d283e1..0f87dc7 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -222,14 +222,14 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
goto err1;
}
- if (!request_mem_region(res->start, res->end - res->start + 1,
+ if (!request_mem_region(res->start, resource_size(res),
ehci_orion_hc_driver.description)) {
dev_dbg(&pdev->dev, "controller already in use\n");
err = -EBUSY;
goto err1;
}
- regs = ioremap(res->start, res->end - res->start + 1);
+ regs = ioremap(res->start, resource_size(res));
if (regs == NULL) {
dev_dbg(&pdev->dev, "error mapping memory\n");
err = -EFAULT;
@@ -244,7 +244,7 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
hcd->regs = regs;
ehci = hcd_to_ehci(hcd);
@@ -287,7 +287,7 @@ err4:
err3:
iounmap(regs);
err2:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
err1:
dev_err(&pdev->dev, "init %s fail, %d\n",
dev_name(&pdev->dev), err);
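resource_size() is the canonical replacement for the open-coded length computation here and in the other HCD probe functions, since struct resource ranges are end-inclusive:

    /* resource_size(res) == res->end - res->start + 1 */
    hcd->rsrc_len = resource_size(res);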
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 36f96da..8df33b8 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -134,21 +134,21 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
hcd->rsrc_len = res.end - res.start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
+ printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
rv = -EBUSY;
goto err_rmr;
}
irq = irq_of_parse_and_map(dn, 0);
if (irq == NO_IRQ) {
- printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
+ printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
rv = -EBUSY;
goto err_irq;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
- printk(KERN_ERR __FILE__ ": ioremap failed\n");
+ printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
rv = -ENOMEM;
goto err_ioremap;
}
@@ -161,9 +161,9 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
ehci->ohci_hcctrl_reg = ioremap(res.start +
OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN);
else
- pr_debug(__FILE__ ": no ohci offset in fdt\n");
+ pr_debug("%s: no ohci offset in fdt\n", __FILE__);
if (!ehci->ohci_hcctrl_reg) {
- pr_debug(__FILE__ ": ioremap for ohci hcctrl failed\n");
+ pr_debug("%s: ioremap for ohci hcctrl failed\n", __FILE__);
} else {
ehci->has_amcc_usb23 = 1;
}
@@ -241,7 +241,7 @@ static int ehci_hcd_ppc_of_remove(struct of_device *op)
else
release_mem_region(res.start, 0x4);
else
- pr_debug(__FILE__ ": no ohci offset in fdt\n");
+ pr_debug("%s: no ohci offset in fdt\n", __FILE__);
of_node_put(np);
}
@@ -264,7 +264,7 @@ static int ehci_hcd_ppc_of_shutdown(struct of_device *op)
}
-static struct of_device_id ehci_hcd_ppc_of_match[] = {
+static const struct of_device_id ehci_hcd_ppc_of_match[] = {
{
.compatible = "usb-ehci",
},
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 1e391e6..39340ae 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -510,6 +510,8 @@ static int disable_periodic (struct ehci_hcd *ehci)
ehci_writel(ehci, cmd, &ehci->regs->command);
/* posted write ... */
+ free_cached_itd_list(ehci);
+
ehci->next_uframe = -1;
return 0;
}
@@ -2322,9 +2324,13 @@ restart:
* No need to check for activity unless the
* frame is current.
*/
- if (frame == clock_frame && live &&
- (q.sitd->hw_results &
- SITD_ACTIVE(ehci))) {
+ if (((frame == clock_frame) ||
+ (((frame + 1) % ehci->periodic_size)
+ == clock_frame))
+ && live
+ && (q.sitd->hw_results &
+ SITD_ACTIVE(ehci))) {
+
incomplete = true;
q_p = &q.sitd->sitd_next;
hw_p = &q.sitd->hw_next;
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index a5861531..f603bb2 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -177,21 +177,21 @@ ehci_hcd_xilinx_of_probe(struct of_device *op, const struct of_device_id *match)
hcd->rsrc_len = res.end - res.start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
+ printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
rv = -EBUSY;
goto err_rmr;
}
irq = irq_of_parse_and_map(dn, 0);
if (irq == NO_IRQ) {
- printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
+ printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
rv = -EBUSY;
goto err_irq;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
- printk(KERN_ERR __FILE__ ": ioremap failed\n");
+ printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
rv = -ENOMEM;
goto err_ioremap;
}
@@ -281,7 +281,7 @@ static int ehci_hcd_xilinx_of_shutdown(struct of_device *op)
}
-static struct of_device_id ehci_hcd_xilinx_of_match[] = {
+static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
{.compatible = "xlnx,xps-usb-host-1.00.a",},
{},
};
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 78e7c3c..5dcfb3d 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -433,7 +433,7 @@ static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
return -ENOMEM;
/* allocate the private part of the URB */
- urb_priv->tds = kzalloc(size * sizeof(struct td), mem_flags);
+ urb_priv->tds = kcalloc(size, sizeof(*urb_priv->tds), mem_flags);
if (!urb_priv->tds) {
kfree(urb_priv);
return -ENOMEM;
@@ -805,7 +805,7 @@ static int __devexit of_fhci_remove(struct of_device *ofdev)
return fhci_remove(&ofdev->dev);
}
-static struct of_device_id of_fhci_match[] = {
+static const struct of_device_id of_fhci_match[] = {
{ .compatible = "fsl,mpc8323-qe-usb", },
{},
};
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
new file mode 100644
index 0000000..512f647
--- /dev/null
+++ b/drivers/usb/host/imx21-dbg.c
@@ -0,0 +1,527 @@
+/*
+ * Copyright (c) 2009 by Martin Fuzzey
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* this file is part of imx21-hcd.c */
+
+#ifndef DEBUG
+
+static inline void create_debug_files(struct imx21 *imx21) { }
+static inline void remove_debug_files(struct imx21 *imx21) { }
+static inline void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) {}
+static inline void debug_urb_completed(struct imx21 *imx21, struct urb *urb,
+ int status) {}
+static inline void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) {}
+static inline void debug_urb_queued_for_etd(struct imx21 *imx21,
+ struct urb *urb) {}
+static inline void debug_urb_queued_for_dmem(struct imx21 *imx21,
+ struct urb *urb) {}
+static inline void debug_etd_allocated(struct imx21 *imx21) {}
+static inline void debug_etd_freed(struct imx21 *imx21) {}
+static inline void debug_dmem_allocated(struct imx21 *imx21, int size) {}
+static inline void debug_dmem_freed(struct imx21 *imx21, int size) {}
+static inline void debug_isoc_submitted(struct imx21 *imx21,
+ int frame, struct td *td) {}
+static inline void debug_isoc_completed(struct imx21 *imx21,
+ int frame, struct td *td, int cc, int len) {}
+
+#else
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static const char *dir_labels[] = {
+ "TD 0",
+ "OUT",
+ "IN",
+ "TD 1"
+};
+
+static const char *speed_labels[] = {
+ "Full",
+ "Low"
+};
+
+static const char *format_labels[] = {
+ "Control",
+ "ISO",
+ "Bulk",
+ "Interrupt"
+};
+
+static inline struct debug_stats *stats_for_urb(struct imx21 *imx21,
+ struct urb *urb)
+{
+ return usb_pipeisoc(urb->pipe) ?
+ &imx21->isoc_stats : &imx21->nonisoc_stats;
+}
+
+static void debug_urb_submitted(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->submitted++;
+}
+
+static void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int st)
+{
+ if (st)
+ stats_for_urb(imx21, urb)->completed_failed++;
+ else
+ stats_for_urb(imx21, urb)->completed_ok++;
+}
+
+static void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->unlinked++;
+}
+
+static void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->queue_etd++;
+}
+
+static void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->queue_dmem++;
+}
+
+static inline void debug_etd_allocated(struct imx21 *imx21)
+{
+ imx21->etd_usage.maximum = max(
+ ++(imx21->etd_usage.value),
+ imx21->etd_usage.maximum);
+}
+
+static inline void debug_etd_freed(struct imx21 *imx21)
+{
+ imx21->etd_usage.value--;
+}
+
+static inline void debug_dmem_allocated(struct imx21 *imx21, int size)
+{
+ imx21->dmem_usage.value += size;
+ imx21->dmem_usage.maximum = max(
+ imx21->dmem_usage.value,
+ imx21->dmem_usage.maximum);
+}
+
+static inline void debug_dmem_freed(struct imx21 *imx21, int size)
+{
+ imx21->dmem_usage.value -= size;
+}
+
+
+static void debug_isoc_submitted(struct imx21 *imx21,
+ int frame, struct td *td)
+{
+ struct debug_isoc_trace *trace = &imx21->isoc_trace[
+ imx21->isoc_trace_index++];
+
+ imx21->isoc_trace_index %= ARRAY_SIZE(imx21->isoc_trace);
+ trace->schedule_frame = td->frame;
+ trace->submit_frame = frame;
+ trace->request_len = td->len;
+ trace->td = td;
+}
+
+static inline void debug_isoc_completed(struct imx21 *imx21,
+ int frame, struct td *td, int cc, int len)
+{
+ struct debug_isoc_trace *trace, *trace_failed;
+ int i;
+ int found = 0;
+
+ trace = imx21->isoc_trace;
+ for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) {
+ if (trace->td == td) {
+ trace->done_frame = frame;
+ trace->done_len = len;
+ trace->cc = cc;
+ trace->td = NULL;
+ found = 1;
+ break;
+ }
+ }
+
+ if (found && cc) {
+ trace_failed = &imx21->isoc_trace_failed[
+ imx21->isoc_trace_index_failed++];
+
+ imx21->isoc_trace_index_failed %= ARRAY_SIZE(
+ imx21->isoc_trace_failed);
+ *trace_failed = *trace;
+ }
+}
+
+
+static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize)
+{
+ if (ep)
+ snprintf(buf, bufsize, "ep_%02x (type:%02X kaddr:%p)",
+ ep->desc.bEndpointAddress,
+ usb_endpoint_type(&ep->desc),
+ ep);
+ else
+ snprintf(buf, bufsize, "none");
+ return buf;
+}
+
+static char *format_etd_dword0(u32 value, char *buf, int bufsize)
+{
+ snprintf(buf, bufsize,
+ "addr=%d ep=%d dir=%s speed=%s format=%s halted=%d",
+ value & 0x7F,
+ (value >> DW0_ENDPNT) & 0x0F,
+ dir_labels[(value >> DW0_DIRECT) & 0x03],
+ speed_labels[(value >> DW0_SPEED) & 0x01],
+ format_labels[(value >> DW0_FORMAT) & 0x03],
+ (value >> DW0_HALTED) & 0x01);
+ return buf;
+}
+
+static int debug_status_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ int etds_allocated = 0;
+ int etds_sw_busy = 0;
+ int etds_hw_busy = 0;
+ int dmem_blocks = 0;
+ int queued_for_etd = 0;
+ int queued_for_dmem = 0;
+ unsigned int dmem_bytes = 0;
+ int i;
+ struct etd_priv *etd;
+ u32 etd_enable_mask;
+ unsigned long flags;
+ struct imx21_dmem_area *dmem;
+ struct ep_priv *ep_priv;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ etd_enable_mask = readl(imx21->regs + USBH_ETDENSET);
+ for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
+ if (etd->alloc)
+ etds_allocated++;
+ if (etd->urb)
+ etds_sw_busy++;
+ if (etd_enable_mask & (1<<i))
+ etds_hw_busy++;
+ }
+
+ list_for_each_entry(dmem, &imx21->dmem_list, list) {
+ dmem_bytes += dmem->size;
+ dmem_blocks++;
+ }
+
+ list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue)
+ queued_for_etd++;
+
+ list_for_each_entry(etd, &imx21->queue_for_dmem, queue)
+ queued_for_dmem++;
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ seq_printf(s,
+ "Frame: %d\n"
+ "ETDs allocated: %d/%d (max=%d)\n"
+ "ETDs in use sw: %d\n"
+ "ETDs in use hw: %d\n"
+		   "DMEM allocated: %d/%d (max=%d)\n"
+ "DMEM blocks: %d\n"
+ "Queued waiting for ETD: %d\n"
+ "Queued waiting for DMEM: %d\n",
+ readl(imx21->regs + USBH_FRMNUB) & 0xFFFF,
+ etds_allocated, USB_NUM_ETD, imx21->etd_usage.maximum,
+ etds_sw_busy,
+ etds_hw_busy,
+ dmem_bytes, DMEM_SIZE, imx21->dmem_usage.maximum,
+ dmem_blocks,
+ queued_for_etd,
+ queued_for_dmem);
+
+ return 0;
+}
+
+static int debug_dmem_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ struct imx21_dmem_area *dmem;
+ unsigned long flags;
+ char ep_text[40];
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ list_for_each_entry(dmem, &imx21->dmem_list, list)
+ seq_printf(s,
+ "%04X: size=0x%X "
+ "ep=%s\n",
+ dmem->offset, dmem->size,
+ format_ep(dmem->ep, ep_text, sizeof(ep_text)));
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static int debug_etd_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ struct etd_priv *etd;
+ char buf[60];
+ u32 dword;
+ int i, j;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
+ int state = -1;
+ struct urb_priv *urb_priv;
+ if (etd->urb) {
+ urb_priv = etd->urb->hcpriv;
+ if (urb_priv)
+ state = urb_priv->state;
+ }
+
+ seq_printf(s,
+ "etd_num: %d\n"
+ "ep: %s\n"
+ "alloc: %d\n"
+ "len: %d\n"
+ "busy sw: %d\n"
+ "busy hw: %d\n"
+ "urb state: %d\n"
+ "current urb: %p\n",
+
+ i,
+ format_ep(etd->ep, buf, sizeof(buf)),
+ etd->alloc,
+ etd->len,
+ etd->urb != NULL,
+ (readl(imx21->regs + USBH_ETDENSET) & (1 << i)) > 0,
+ state,
+ etd->urb);
+
+ for (j = 0; j < 4; j++) {
+ dword = etd_readl(imx21, i, j);
+ switch (j) {
+ case 0:
+ format_etd_dword0(dword, buf, sizeof(buf));
+ break;
+ case 2:
+ snprintf(buf, sizeof(buf),
+ "cc=0X%02X", dword >> DW2_COMPCODE);
+ break;
+ default:
+ *buf = 0;
+ break;
+ }
+ seq_printf(s,
+ "dword %d: submitted=%08X cur=%08X [%s]\n",
+ j,
+ etd->submitted_dwords[j],
+ dword,
+ buf);
+ }
+ seq_printf(s, "\n");
+ }
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static void debug_statistics_show_one(struct seq_file *s,
+ const char *name, struct debug_stats *stats)
+{
+ seq_printf(s, "%s:\n"
+ "submitted URBs: %lu\n"
+ "completed OK: %lu\n"
+ "completed failed: %lu\n"
+ "unlinked: %lu\n"
+ "queued for ETD: %lu\n"
+ "queued for DMEM: %lu\n\n",
+ name,
+ stats->submitted,
+ stats->completed_ok,
+ stats->completed_failed,
+ stats->unlinked,
+ stats->queue_etd,
+ stats->queue_dmem);
+}
+
+static int debug_statistics_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ debug_statistics_show_one(s, "nonisoc", &imx21->nonisoc_stats);
+ debug_statistics_show_one(s, "isoc", &imx21->isoc_stats);
+ seq_printf(s, "unblock kludge triggers: %lu\n", imx21->debug_unblocks);
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static void debug_isoc_show_one(struct seq_file *s,
+ const char *name, int index, struct debug_isoc_trace *trace)
+{
+ seq_printf(s, "%s %d:\n"
+ "cc=0X%02X\n"
+ "scheduled frame %d (%d)\n"
+		   "submitted frame %d (%d)\n"
+ "completed frame %d (%d)\n"
+ "requested length=%d\n"
+ "completed length=%d\n\n",
+ name, index,
+ trace->cc,
+ trace->schedule_frame, trace->schedule_frame & 0xFFFF,
+ trace->submit_frame, trace->submit_frame & 0xFFFF,
+ trace->done_frame, trace->done_frame & 0xFFFF,
+ trace->request_len,
+ trace->done_len);
+}
+
+static int debug_isoc_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ struct debug_isoc_trace *trace;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ trace = imx21->isoc_trace_failed;
+ for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++, trace++)
+ debug_isoc_show_one(s, "isoc failed", i, trace);
+
+ trace = imx21->isoc_trace;
+ for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++)
+ debug_isoc_show_one(s, "isoc", i, trace);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static int debug_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_status_show, inode->i_private);
+}
+
+static int debug_dmem_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_dmem_show, inode->i_private);
+}
+
+static int debug_etd_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_etd_show, inode->i_private);
+}
+
+static int debug_statistics_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_statistics_show, inode->i_private);
+}
+
+static int debug_isoc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_isoc_show, inode->i_private);
+}
+
+static const struct file_operations debug_status_fops = {
+ .open = debug_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_dmem_fops = {
+ .open = debug_dmem_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_etd_fops = {
+ .open = debug_etd_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_statistics_fops = {
+ .open = debug_statistics_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_isoc_fops = {
+ .open = debug_isoc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void create_debug_files(struct imx21 *imx21)
+{
+ imx21->debug_root = debugfs_create_dir(dev_name(imx21->dev), NULL);
+ if (!imx21->debug_root)
+ goto failed_create_rootdir;
+
+ if (!debugfs_create_file("status", S_IRUGO,
+ imx21->debug_root, imx21, &debug_status_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("dmem", S_IRUGO,
+ imx21->debug_root, imx21, &debug_dmem_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("etd", S_IRUGO,
+ imx21->debug_root, imx21, &debug_etd_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("statistics", S_IRUGO,
+ imx21->debug_root, imx21, &debug_statistics_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("isoc", S_IRUGO,
+ imx21->debug_root, imx21, &debug_isoc_fops))
+ goto failed_create;
+
+ return;
+
+failed_create:
+ debugfs_remove_recursive(imx21->debug_root);
+
+failed_create_rootdir:
+ imx21->debug_root = NULL;
+}
+
+
+static void remove_debug_files(struct imx21 *imx21)
+{
+ if (imx21->debug_root) {
+ debugfs_remove_recursive(imx21->debug_root);
+ imx21->debug_root = NULL;
+ }
+}
+
+#endif
+
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
new file mode 100644
index 0000000..213e270
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.c
@@ -0,0 +1,1789 @@
+/*
+ * USB Host Controller Driver for IMX21
+ *
+ * Copyright (C) 2006 Loping Dog Embedded Systems
+ * Copyright (C) 2009 Martin Fuzzey
+ * Originally written by Jay Monkman <jtm@lopingdog.com>
+ * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+ /*
+ * The i.MX21 USB hardware contains
+ * * 32 transfer descriptors (called ETDs)
+ * * 4KB of data memory
+ *
+ * The data memory is shared between the host and function controllers
+ * (but this driver only supports the host controller)
+ *
+ * So setting up a transfer involves:
+ * * Allocating an ETD
+ * * Filling in the ETD with appropriate information
+ * * Allocating data memory (and putting the offset in the ETD)
+ * * Activating the ETD
+ * * Getting an interrupt when done.
+ *
+ * An ETD is assigned to each active endpoint.
+ *
+ * Low resource (ETD and data memory) situations are handled differently
+ * for isochronous and non-isochronous transactions:
+ *
+ * Non-ISOC transfers are queued if either ETDs or data memory are
+ * unavailable.
+ *
+ * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
+ * They allocate both ETDs and data memory during URB submission
+ * (and fail if either is unavailable).
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+
+#include "../core/hcd.h"
+#include "imx21-hcd.h"
+
+#ifdef DEBUG
+#define DEBUG_LOG_FRAME(imx21, etd, event) \
+ (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
+#else
+#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
+#endif
+
+static const char hcd_name[] = "imx21-hcd";
+
+static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
+{
+ return (struct imx21 *)hcd->hcd_priv;
+}
+
+
+/* =========================================== */
+/* Hardware access helpers */
+/* =========================================== */
+
+static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+ writel(readl(reg) | mask, reg);
+}
+
+static inline void clear_register_bits(struct imx21 *imx21,
+ u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+ writel(readl(reg) & ~mask, reg);
+}
+
+static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+
+ if (readl(reg) & mask)
+ writel(mask, reg);
+}
+
+static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+
+ if (!(readl(reg) & mask))
+ writel(mask, reg);
+}
+
+static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
+{
+ writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
+}
+
+static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
+{
+ return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
+}
+
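+/* The hardware frame counter (USBH_FRMNUB) is 16 bits wide */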
+static inline int wrap_frame(int counter)
+{
+ return counter & 0xFFFF;
+}
+
+static inline int frame_after(int frame, int after)
+{
+ /* handle wrapping like jiffies time_after() */
+ return (s16)((s16)after - (s16)frame) < 0;
+}
+
+static int imx21_hc_get_frame(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+
+ return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
+}
+
+
+#include "imx21-dbg.c"
+
+/* =========================================== */
+/* ETD management */
+/* =========================================== */
+
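+/* Allocate a free ETD, returning its index (-1 if none are free) */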
+static int alloc_etd(struct imx21 *imx21)
+{
+ int i;
+ struct etd_priv *etd = imx21->etd;
+
+ for (i = 0; i < USB_NUM_ETD; i++, etd++) {
+ if (etd->alloc == 0) {
+ memset(etd, 0, sizeof(imx21->etd[0]));
+ etd->alloc = 1;
+ debug_etd_allocated(imx21);
+ return i;
+ }
+ }
+ return -1;
+}
+
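+/* Stop an ETD: disable it and clear its DMA channel and done status */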
+static void disactivate_etd(struct imx21 *imx21, int num)
+{
+ int etd_mask = (1 << num);
+ struct etd_priv *etd = &imx21->etd[num];
+
+ writel(etd_mask, imx21->regs + USBH_ETDENCLR);
+ clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
+ writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
+ clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
+
+ etd->active_count = 0;
+
+ DEBUG_LOG_FRAME(imx21, etd, disactivated);
+}
+
+static void reset_etd(struct imx21 *imx21, int num)
+{
+ struct etd_priv *etd = imx21->etd + num;
+ int i;
+
+ disactivate_etd(imx21, num);
+
+ for (i = 0; i < 4; i++)
+ etd_writel(imx21, num, i, 0);
+ etd->urb = NULL;
+ etd->ep = NULL;
+ etd->td = NULL;;
+}
+
+static void free_etd(struct imx21 *imx21, int num)
+{
+ if (num < 0)
+ return;
+
+ if (num >= USB_NUM_ETD) {
+ dev_err(imx21->dev, "BAD etd=%d!\n", num);
+ return;
+ }
+ if (imx21->etd[num].alloc == 0) {
+ dev_err(imx21->dev, "ETD %d already free!\n", num);
+ return;
+ }
+
+ debug_etd_freed(imx21);
+ reset_etd(imx21, num);
+ memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
+}
+
+
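+/* Fill ETD dword 0: address, endpoint, direction, speed, format, maxpacket */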
+static void setup_etd_dword0(struct imx21 *imx21,
+ int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
+{
+ etd_writel(imx21, etd_num, 0,
+ ((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS |
+ ((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
+ ((u32) dir << DW0_DIRECT) |
+ ((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
+ 1 : 0) << DW0_SPEED) |
+ ((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
+ ((u32) maxpacket << DW0_MAXPKTSIZ));
+}
+
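+/* Set up buffer fill/DMA state for an ETD and enable it to start the transfer */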
+static void activate_etd(struct imx21 *imx21,
+ int etd_num, dma_addr_t dma, u8 dir)
+{
+ u32 etd_mask = 1 << etd_num;
+ struct etd_priv *etd = &imx21->etd[etd_num];
+
+ clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
+ set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
+ clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+ clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+
+ if (dma) {
+ set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
+ clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
+ clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
+ writel(dma, imx21->regs + USB_ETDSMSA(etd_num));
+ set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
+ } else {
+ if (dir != TD_DIR_IN) {
+ /* need to set for ZLP */
+ set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+ set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+ }
+ }
+
+ DEBUG_LOG_FRAME(imx21, etd, activated);
+
+#ifdef DEBUG
+ if (!etd->active_count) {
+ int i;
+ etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
+ etd->disactivated_frame = -1;
+ etd->last_int_frame = -1;
+ etd->last_req_frame = -1;
+
+ for (i = 0; i < 4; i++)
+ etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
+ }
+#endif
+
+ etd->active_count = 1;
+ writel(etd_mask, imx21->regs + USBH_ETDENSET);
+}
+
+/* =========================================== */
+/* Data memory management */
+/* =========================================== */
+
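+/*
+ * First-fit allocator for the 4KB of shared data memory.
+ * The allocation list is kept sorted by offset; returns the
+ * allocated offset or a negative errno on failure.
+ */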
+static int alloc_dmem(struct imx21 *imx21, unsigned int size,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int offset = 0;
+ struct imx21_dmem_area *area;
+ struct imx21_dmem_area *tmp;
+
+ size += (~size + 1) & 0x3; /* Round to 4 byte multiple */
+
+ if (size > DMEM_SIZE) {
+ dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
+ size, DMEM_SIZE);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(tmp, &imx21->dmem_list, list) {
+ if ((size + offset) < offset)
+ goto fail;
+ if ((size + offset) <= tmp->offset)
+ break;
+ offset = tmp->size + tmp->offset;
+ if ((offset + size) > DMEM_SIZE)
+ goto fail;
+ }
+
+ area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
+ if (area == NULL)
+ return -ENOMEM;
+
+ area->ep = ep;
+ area->offset = offset;
+ area->size = size;
+ list_add_tail(&area->list, &tmp->list);
+ debug_dmem_allocated(imx21, size);
+ return offset;
+
+fail:
+ return -ENOMEM;
+}
+
+/* Memory now available for a queued ETD - activate it */
+static void activate_queued_etd(struct imx21 *imx21,
+ struct etd_priv *etd, u32 dmem_offset)
+{
+ struct urb_priv *urb_priv = etd->urb->hcpriv;
+ int etd_num = etd - &imx21->etd[0];
+ u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
+ u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;
+
+ dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
+ etd_num);
+ etd_writel(imx21, etd_num, 1,
+ ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);
+
+ urb_priv->active = 1;
+ activate_etd(imx21, etd_num, etd->dma_handle, dir);
+}
+
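+/* Free a DMEM area, then retry allocation for ETDs queued waiting for DMEM */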
+static void free_dmem(struct imx21 *imx21, int offset)
+{
+ struct imx21_dmem_area *area;
+ struct etd_priv *etd, *tmp;
+ int found = 0;
+
+ list_for_each_entry(area, &imx21->dmem_list, list) {
+ if (area->offset == offset) {
+ debug_dmem_freed(imx21, area->size);
+ list_del(&area->list);
+ kfree(area);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_err(imx21->dev,
+ "Trying to free unallocated DMEM %d\n", offset);
+ return;
+ }
+
+ /* Try again to allocate memory for anything we've queued */
+ list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
+ offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
+ if (offset >= 0) {
+ list_del(&etd->queue);
+ activate_queued_etd(imx21, etd, (u32)offset);
+ }
+ }
+}
+
+static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
+{
+ struct imx21_dmem_area *area, *tmp;
+
+ list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
+ if (area->ep == ep) {
+ dev_err(imx21->dev,
+ "Active DMEM %d for disabled ep=%p\n",
+ area->offset, ep);
+ list_del(&area->list);
+ kfree(area);
+ }
+ }
+}
+
+
+/* =========================================== */
+/* End handling */
+/* =========================================== */
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
+
+/* Endpoint now idle - release its ETD(s) or assign to a queued request */
+static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
+{
+ int etd_num;
+ int i;
+
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ etd_num = ep_priv->etd[i];
+ if (etd_num < 0)
+ continue;
+
+ ep_priv->etd[i] = -1;
+ if (list_empty(&imx21->queue_for_etd)) {
+ free_etd(imx21, etd_num);
+ continue;
+ }
+
+ dev_dbg(imx21->dev,
+ "assigning idle etd %d for queued request\n", etd_num);
+ ep_priv = list_first_entry(&imx21->queue_for_etd,
+ struct ep_priv, queue);
+ list_del(&ep_priv->queue);
+ reset_etd(imx21, etd_num);
+ ep_priv->waiting_etd = 0;
+ ep_priv->etd[i] = etd_num;
+
+ if (list_empty(&ep_priv->ep->urb_list)) {
+ dev_err(imx21->dev, "No urb for queued ep!\n");
+ continue;
+ }
+ schedule_nonisoc_etd(imx21, list_first_entry(
+ &ep_priv->ep->urb_list, struct urb, urb_list));
+ }
+}
+
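+/* Give an URB back to the core; imx21->lock is dropped around the callback */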
+static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
+__releases(imx21->lock)
+__acquires(imx21->lock)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct ep_priv *ep_priv = urb->ep->hcpriv;
+ struct urb_priv *urb_priv = urb->hcpriv;
+
+ debug_urb_completed(imx21, urb, status);
+ dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);
+
+ kfree(urb_priv->isoc_td);
+ kfree(urb->hcpriv);
+ urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock(&imx21->lock);
+ usb_hcd_giveback_urb(hcd, urb, status);
+ spin_lock(&imx21->lock);
+ if (list_empty(&ep_priv->ep->urb_list))
+ ep_idle(imx21, ep_priv);
+}
+
+/* =========================================== */
+/* ISOC Handling ... */
+/* =========================================== */
+
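+/* Assign pending isoc TDs to this endpoint's free ETDs and activate them */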
+static void schedule_isoc_etds(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct ep_priv *ep_priv = ep->hcpriv;
+ struct etd_priv *etd;
+ struct urb_priv *urb_priv;
+ struct td *td;
+ int etd_num;
+ int i;
+ int cur_frame;
+ u8 dir;
+
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+too_late:
+ if (list_empty(&ep_priv->td_list))
+ break;
+
+ etd_num = ep_priv->etd[i];
+ if (etd_num < 0)
+ break;
+
+ etd = &imx21->etd[etd_num];
+ if (etd->urb)
+ continue;
+
+ td = list_entry(ep_priv->td_list.next, struct td, list);
+ list_del(&td->list);
+ urb_priv = td->urb->hcpriv;
+
+ cur_frame = imx21_hc_get_frame(hcd);
+ if (frame_after(cur_frame, td->frame)) {
+ dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
+ cur_frame, td->frame);
+ urb_priv->isoc_status = -EXDEV;
+ td->urb->iso_frame_desc[
+ td->isoc_index].actual_length = 0;
+ td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
+ if (--urb_priv->isoc_remaining == 0)
+ urb_done(hcd, td->urb, urb_priv->isoc_status);
+ goto too_late;
+ }
+
+ urb_priv->active = 1;
+ etd->td = td;
+ etd->ep = td->ep;
+ etd->urb = td->urb;
+ etd->len = td->len;
+
+ debug_isoc_submitted(imx21, cur_frame, td);
+
+ dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
+ setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
+ etd_writel(imx21, etd_num, 1, etd->dmem_offset);
+ etd_writel(imx21, etd_num, 2,
+ (TD_NOTACCESSED << DW2_COMPCODE) |
+ ((td->frame & 0xFFFF) << DW2_STARTFRM));
+ etd_writel(imx21, etd_num, 3,
+ (TD_NOTACCESSED << DW3_COMPCODE0) |
+ (td->len << DW3_PKTLEN0));
+
+ activate_etd(imx21, etd_num, td->data, dir);
+ }
+}
+
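+/* Complete one isoc packet; the URB is given back once all packets are done */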
+static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ int etd_mask = 1 << etd_num;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ struct etd_priv *etd = imx21->etd + etd_num;
+ struct td *td = etd->td;
+ struct usb_host_endpoint *ep = etd->ep;
+ int isoc_index = td->isoc_index;
+ unsigned int pipe = urb->pipe;
+ int dir_in = usb_pipein(pipe);
+ int cc;
+ int bytes_xfrd;
+
+ disactivate_etd(imx21, etd_num);
+
+ cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
+ bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;
+
+ /* Input doesn't always fill the buffer, so don't generate
+ * an error when this happens.
+ */
+ if (dir_in && (cc == TD_DATAUNDERRUN))
+ cc = TD_CC_NOERROR;
+
+ if (cc == TD_NOTACCESSED)
+ bytes_xfrd = 0;
+
+ debug_isoc_completed(imx21,
+ imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
+ if (cc) {
+ urb_priv->isoc_status = -EXDEV;
+ dev_dbg(imx21->dev,
+ "bad iso cc=0x%X frame=%d sched frame=%d "
+ "cnt=%d len=%d urb=%p etd=%d index=%d\n",
+ cc, imx21_hc_get_frame(hcd), td->frame,
+ bytes_xfrd, td->len, urb, etd_num, isoc_index);
+ }
+
+ if (dir_in)
+ clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+
+ urb->actual_length += bytes_xfrd;
+ urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
+ urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];
+
+ etd->td = NULL;
+ etd->urb = NULL;
+ etd->ep = NULL;
+
+ if (--urb_priv->isoc_remaining == 0)
+ urb_done(hcd, urb, urb_priv->isoc_status);
+
+ schedule_isoc_etds(hcd, ep);
+}
+
+static struct ep_priv *alloc_isoc_ep(
+ struct imx21 *imx21, struct usb_host_endpoint *ep)
+{
+ struct ep_priv *ep_priv;
+ int i;
+
+ ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
+ if (ep_priv == NULL)
+ return NULL;
+
+ /* Allocate the ETDs */
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ ep_priv->etd[i] = alloc_etd(imx21);
+ if (ep_priv->etd[i] < 0) {
+ int j;
+ dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
+ for (j = 0; j < i; j++)
+ free_etd(imx21, ep_priv->etd[j]);
+ goto alloc_etd_failed;
+ }
+ imx21->etd[ep_priv->etd[i]].ep = ep;
+ }
+
+ INIT_LIST_HEAD(&ep_priv->td_list);
+ ep_priv->ep = ep;
+ ep->hcpriv = ep_priv;
+ return ep_priv;
+
+alloc_etd_failed:
+ kfree(ep_priv);
+ return NULL;
+}
+
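+/* Isoc submission: reserve DMEM for each ETD, pick a start frame, queue TDs */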
+static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep,
+ struct urb *urb, gfp_t mem_flags)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct urb_priv *urb_priv;
+ unsigned long flags;
+ struct ep_priv *ep_priv;
+ struct td *td = NULL;
+ int i;
+ int ret;
+ int cur_frame;
+ u16 maxpacket;
+
+ urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
+ if (urb_priv == NULL)
+ return -ENOMEM;
+
+ urb_priv->isoc_td = kzalloc(
+ sizeof(struct td) * urb->number_of_packets, mem_flags);
+ if (urb_priv->isoc_td == NULL) {
+ ret = -ENOMEM;
+ goto alloc_td_failed;
+ }
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ if (ep->hcpriv == NULL) {
+ ep_priv = alloc_isoc_ep(imx21, ep);
+ if (ep_priv == NULL) {
+ ret = -ENOMEM;
+ goto alloc_ep_failed;
+ }
+ } else {
+ ep_priv = ep->hcpriv;
+ }
+
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret)
+ goto link_failed;
+
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->error_count = 0;
+ urb->hcpriv = urb_priv;
+ urb_priv->ep = ep;
+
+ /* allocate data memory for largest packets if not already done */
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];
+
+ if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
+ /* not sure if this can really occur... */
+ dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
+ etd->dmem_size, maxpacket);
+ ret = -EMSGSIZE;
+ goto alloc_dmem_failed;
+ }
+
+ if (etd->dmem_size == 0) {
+ etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
+ if (etd->dmem_offset < 0) {
+ dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
+ ret = -EAGAIN;
+ goto alloc_dmem_failed;
+ }
+ etd->dmem_size = maxpacket;
+ }
+ }
+
+ /* calculate frame */
+ cur_frame = imx21_hc_get_frame(hcd);
+ if (urb->transfer_flags & URB_ISO_ASAP) {
+ if (list_empty(&ep_priv->td_list))
+ urb->start_frame = cur_frame + 5;
+ else
+ urb->start_frame = list_entry(
+ ep_priv->td_list.prev,
+ struct td, list)->frame + urb->interval;
+ }
+ urb->start_frame = wrap_frame(urb->start_frame);
+ if (frame_after(cur_frame, urb->start_frame)) {
+ dev_dbg(imx21->dev,
+ "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
+ urb->start_frame, cur_frame,
+ (urb->transfer_flags & URB_ISO_ASAP) != 0);
+ urb->start_frame = wrap_frame(cur_frame + 1);
+ }
+
+ /* set up transfers */
+ td = urb_priv->isoc_td;
+ for (i = 0; i < urb->number_of_packets; i++, td++) {
+ td->ep = ep;
+ td->urb = urb;
+ td->len = urb->iso_frame_desc[i].length;
+ td->isoc_index = i;
+ td->frame = wrap_frame(urb->start_frame + urb->interval * i);
+ td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset;
+ list_add_tail(&td->list, &ep_priv->td_list);
+ }
+
+ urb_priv->isoc_remaining = urb->number_of_packets;
+ dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
+ urb->number_of_packets, urb->start_frame, td->frame);
+
+ debug_urb_submitted(imx21, urb);
+ schedule_isoc_etds(hcd, ep);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+
+alloc_dmem_failed:
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+
+link_failed:
+alloc_ep_failed:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ kfree(urb_priv->isoc_td);
+
+alloc_td_failed:
+ kfree(urb_priv);
+ return ret;
+}
+
+static void dequeue_isoc_urb(struct imx21 *imx21,
+ struct urb *urb, struct ep_priv *ep_priv)
+{
+ struct urb_priv *urb_priv = urb->hcpriv;
+ struct td *td, *tmp;
+ int i;
+
+ if (urb_priv->active) {
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ int etd_num = ep_priv->etd[i];
+ if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
+ struct etd_priv *etd = imx21->etd + etd_num;
+
+ reset_etd(imx21, etd_num);
+ if (etd->dmem_size)
+ free_dmem(imx21, etd->dmem_offset);
+ etd->dmem_size = 0;
+ }
+ }
+ }
+
+ list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
+ if (td->urb == urb) {
+ dev_vdbg(imx21->dev, "removing td %p\n", td);
+ list_del(&td->list);
+ }
+ }
+}
+
+/* =========================================== */
+/* NON ISOC Handling ... */
+/* =========================================== */
+
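+/* Program the endpoint's ETD for the next stage of a non-isoc URB */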
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
+{
+ unsigned int pipe = urb->pipe;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
+ int state = urb_priv->state;
+ int etd_num = ep_priv->etd[0];
+ struct etd_priv *etd;
+ int dmem_offset;
+ u32 count;
+ u16 etd_buf_size;
+ u16 maxpacket;
+ u8 dir;
+ u8 bufround;
+ u8 datatoggle;
+ u8 interval = 0;
+ u8 relpolpos = 0;
+
+ if (etd_num < 0) {
+ dev_err(imx21->dev, "No valid ETD\n");
+ return;
+ }
+ if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
+ dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);
+
+ etd = &imx21->etd[etd_num];
+ maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
+ if (!maxpacket)
+ maxpacket = 8;
+
+ if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
+ if (state == US_CTRL_SETUP) {
+ dir = TD_DIR_SETUP;
+ etd->dma_handle = urb->setup_dma;
+ bufround = 0;
+ count = 8;
+ datatoggle = TD_TOGGLE_DATA0;
+ } else { /* US_CTRL_ACK */
+ dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
+ etd->dma_handle = urb->transfer_dma;
+ bufround = 0;
+ count = 0;
+ datatoggle = TD_TOGGLE_DATA1;
+ }
+ } else {
+ dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
+ bufround = (dir == TD_DIR_IN) ? 1 : 0;
+ etd->dma_handle = urb->transfer_dma;
+ if (usb_pipebulk(pipe) && (state == US_BULK0))
+ count = 0;
+ else
+ count = urb->transfer_buffer_length;
+
+ if (usb_pipecontrol(pipe)) {
+ datatoggle = TD_TOGGLE_DATA1;
+ } else {
+ if (usb_gettoggle(
+ urb->dev,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe)))
+ datatoggle = TD_TOGGLE_DATA1;
+ else
+ datatoggle = TD_TOGGLE_DATA0;
+ }
+ }
+
+ etd->urb = urb;
+ etd->ep = urb_priv->ep;
+ etd->len = count;
+
+ if (usb_pipeint(pipe)) {
+ interval = urb->interval;
+ relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
+ }
+
+ /* Write ETD to device memory */
+ setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
+
+ etd_writel(imx21, etd_num, 2,
+ (u32) interval << DW2_POLINTERV |
+ ((u32) relpolpos << DW2_RELPOLPOS) |
+ ((u32) dir << DW2_DIRPID) |
+ ((u32) bufround << DW2_BUFROUND) |
+ ((u32) datatoggle << DW2_DATATOG) |
+ ((u32) TD_NOTACCESSED << DW2_COMPCODE));
+
+ /*
+ * DMA will always transfer the buffer size even if TOBYCNT in
+ * DWORD3 is smaller. Make sure we don't overrun the buffer!
+ */
+ if (count && count < maxpacket)
+ etd_buf_size = count;
+ else
+ etd_buf_size = maxpacket;
+
+ etd_writel(imx21, etd_num, 3,
+ ((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);
+
+ if (!count)
+ etd->dma_handle = 0;
+
+ /* allocate x and y buffer space at once */
+ etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
+ dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
+ if (dmem_offset < 0) {
+ /* Setup everything we can in HW and update when we get DMEM */
+ etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
+
+ dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
+ debug_urb_queued_for_dmem(imx21, urb);
+ list_add_tail(&etd->queue, &imx21->queue_for_dmem);
+ return;
+ }
+
+ etd_writel(imx21, etd_num, 1,
+ (((u32) dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
+ (u32) dmem_offset);
+
+ urb_priv->active = 1;
+
+ /* enable the ETD to kick off transfer */
+ dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
+ etd_num, count, dir != TD_DIR_IN ? "out" : "in");
+ activate_etd(imx21, etd_num, etd->dma_handle, dir);
+}
+
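+/* Non-isoc ETD completion: advance the transfer state or finish the URB */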
+static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct etd_priv *etd = &imx21->etd[etd_num];
+ u32 etd_mask = 1 << etd_num;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ int dir;
+ u16 xbufaddr;
+ int cc;
+ u32 bytes_xfrd;
+ int etd_done;
+
+ disactivate_etd(imx21, etd_num);
+
+ dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
+ xbufaddr = etd_readl(imx21, etd_num, 1) & 0xffff;
+ cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
+ bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
+
+ /* save toggle carry */
+ usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe),
+ (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);
+
+ if (dir == TD_DIR_IN) {
+ clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+ clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+ }
+ free_dmem(imx21, xbufaddr);
+
+ urb->error_count = 0;
+ if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
+ && (cc == TD_DATAUNDERRUN))
+ cc = TD_CC_NOERROR;
+
+ if (cc != 0)
+ dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);
+
+ etd_done = (cc_to_error[cc] != 0); /* stop if error */
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ switch (urb_priv->state) {
+ case US_CTRL_SETUP:
+ if (urb->transfer_buffer_length > 0)
+ urb_priv->state = US_CTRL_DATA;
+ else
+ urb_priv->state = US_CTRL_ACK;
+ break;
+ case US_CTRL_DATA:
+ urb->actual_length += bytes_xfrd;
+ urb_priv->state = US_CTRL_ACK;
+ break;
+ case US_CTRL_ACK:
+ etd_done = 1;
+ break;
+ default:
+ dev_err(imx21->dev,
+ "Invalid pipe state %d\n", urb_priv->state);
+ etd_done = 1;
+ break;
+ }
+ break;
+
+ case PIPE_BULK:
+ urb->actual_length += bytes_xfrd;
+ if ((urb_priv->state == US_BULK)
+ && (urb->transfer_flags & URB_ZERO_PACKET)
+ && urb->transfer_buffer_length > 0
+ && ((urb->transfer_buffer_length %
+ usb_maxpacket(urb->dev, urb->pipe,
+ usb_pipeout(urb->pipe))) == 0)) {
+ /* need a 0-packet */
+ urb_priv->state = US_BULK0;
+ } else {
+ etd_done = 1;
+ }
+ break;
+
+ case PIPE_INTERRUPT:
+ urb->actual_length += bytes_xfrd;
+ etd_done = 1;
+ break;
+ }
+
+ if (!etd_done) {
+ dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
+ schedule_nonisoc_etd(imx21, urb);
+ } else {
+ struct usb_host_endpoint *ep = urb->ep;
+
+ urb_done(hcd, urb, cc_to_error[cc]);
+ etd->urb = NULL;
+
+ if (!list_empty(&ep->urb_list)) {
+ urb = list_first_entry(&ep->urb_list,
+ struct urb, urb_list);
+ dev_vdbg(imx21->dev, "next URB %p\n", urb);
+ schedule_nonisoc_etd(imx21, urb);
+ }
+ }
+}
+
+static struct ep_priv *alloc_ep(void)
+{
+ int i;
+ struct ep_priv *ep_priv;
+
+ ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
+ if (!ep_priv)
+ return NULL;
+
+ for (i = 0; i < NUM_ISO_ETDS; ++i)
+ ep_priv->etd[i] = -1;
+
+ return ep_priv;
+}
+
+static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
+ struct urb *urb, gfp_t mem_flags)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct usb_host_endpoint *ep = urb->ep;
+ struct urb_priv *urb_priv;
+ struct ep_priv *ep_priv;
+ struct etd_priv *etd;
+ int ret;
+ unsigned long flags;
+ int new_ep = 0;
+
+ dev_vdbg(imx21->dev,
+ "enqueue urb=%p ep=%p len=%d "
+ "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
+ urb, ep,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma,
+ urb->setup_packet, urb->setup_dma);
+
+ if (usb_pipeisoc(urb->pipe))
+ return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);
+
+ urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
+ if (!urb_priv)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ ep_priv = ep->hcpriv;
+ if (ep_priv == NULL) {
+ ep_priv = alloc_ep();
+ if (!ep_priv) {
+ ret = -ENOMEM;
+ goto failed_alloc_ep;
+ }
+ ep->hcpriv = ep_priv;
+ ep_priv->ep = ep;
+ new_ep = 1;
+ }
+
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret)
+ goto failed_link;
+
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->error_count = 0;
+ urb->hcpriv = urb_priv;
+ urb_priv->ep = ep;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ urb_priv->state = US_CTRL_SETUP;
+ break;
+ case PIPE_BULK:
+ urb_priv->state = US_BULK;
+ break;
+ }
+
+ debug_urb_submitted(imx21, urb);
+ if (ep_priv->etd[0] < 0) {
+ if (ep_priv->waiting_etd) {
+ dev_dbg(imx21->dev,
+ "no ETD available already queued %p\n",
+ ep_priv);
+ debug_urb_queued_for_etd(imx21, urb);
+ goto out;
+ }
+ ep_priv->etd[0] = alloc_etd(imx21);
+ if (ep_priv->etd[0] < 0) {
+ dev_dbg(imx21->dev,
+ "no ETD available queueing %p\n", ep_priv);
+ debug_urb_queued_for_etd(imx21, urb);
+ list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
+ ep_priv->waiting_etd = 1;
+ goto out;
+ }
+ }
+
+ /* Schedule if no URB already active for this endpoint */
+ etd = &imx21->etd[ep_priv->etd[0]];
+ if (etd->urb == NULL) {
+ DEBUG_LOG_FRAME(imx21, etd, last_req);
+ schedule_nonisoc_etd(imx21, urb);
+ }
+
+out:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+
+failed_link:
+failed_alloc_ep:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ kfree(urb_priv);
+ return ret;
+}
+
+static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
+ int status)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+ struct usb_host_endpoint *ep;
+ struct ep_priv *ep_priv;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ int ret = -EINVAL;
+
+ dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
+ urb, usb_pipeisoc(urb->pipe), status);
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (ret)
+ goto fail;
+ ep = urb_priv->ep;
+ ep_priv = ep->hcpriv;
+
+ debug_urb_unlinked(imx21, urb);
+
+ if (usb_pipeisoc(urb->pipe)) {
+ dequeue_isoc_urb(imx21, urb, ep_priv);
+ schedule_isoc_etds(hcd, ep);
+ } else if (urb_priv->active) {
+ int etd_num = ep_priv->etd[0];
+ if (etd_num != -1) {
+ disactivate_etd(imx21, etd_num);
+ free_dmem(imx21, etd_readl(imx21, etd_num, 1) & 0xffff);
+ imx21->etd[etd_num].urb = NULL;
+ }
+ }
+
+ urb_done(hcd, urb, status);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+
+fail:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return ret;
+}
+
+/* =========================================== */
+/* Interrupt dispatch */
+/* =========================================== */
+
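+/* Process done ETDs and, on SOF, unblock stalled ones (see kludge below) */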
+static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
+{
+ int etd_num;
+ int enable_sof_int = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
+ u32 etd_mask = 1 << etd_num;
+ u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
+ u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
+ struct etd_priv *etd = &imx21->etd[etd_num];
+
+
+ if (done) {
+ DEBUG_LOG_FRAME(imx21, etd, last_int);
+ } else {
+/*
+ * Kludge warning!
+ *
+ * When multiple transfers are using the bus we sometimes get into a state
+ * where the transfer has completed (the CC field of the ETD is != 0x0F),
+ * the ETD has self disabled but the ETDDONESTAT flag is not set
+ * (and hence no interrupt occurs).
+ * This causes the transfer in question to hang.
+ * The kludge below checks for this condition at each SOF and processes any
+ * blocked ETDs (after an arbitrary 10 frame wait)
+ *
+ * With a single active transfer the usbtest test suite will run for days
+ * without the kludge.
+ * With other bus activity (e.g. mass storage) even just test1 will hang without
+ * the kludge.
+ */
+ u32 dword0;
+ int cc;
+
+ if (etd->active_count && !enabled) /* suspicious... */
+ enable_sof_int = 1;
+
+ if (!sof || enabled || !etd->active_count)
+ continue;
+
+ cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
+ if (cc == TD_NOTACCESSED)
+ continue;
+
+ if (++etd->active_count < 10)
+ continue;
+
+ dword0 = etd_readl(imx21, etd_num, 0);
+ dev_dbg(imx21->dev,
+ "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
+ etd_num, dword0 & 0x7F,
+ (dword0 >> DW0_ENDPNT) & 0x0F,
+ cc);
+
+#ifdef DEBUG
+ dev_dbg(imx21->dev,
+ "frame: act=%d disact=%d"
+ " int=%d req=%d cur=%d\n",
+ etd->activated_frame,
+ etd->disactivated_frame,
+ etd->last_int_frame,
+ etd->last_req_frame,
+ readl(imx21->regs + USBH_FRMNUB));
+ imx21->debug_unblocks++;
+#endif
+ etd->active_count = 0;
+/* End of kludge */
+ }
+
+ if (etd->ep == NULL || etd->urb == NULL) {
+ dev_dbg(imx21->dev,
+ "Interrupt for unexpected etd %d"
+ " ep=%p urb=%p\n",
+ etd_num, etd->ep, etd->urb);
+ disactivate_etd(imx21, etd_num);
+ continue;
+ }
+
+ if (usb_pipeisoc(etd->urb->pipe))
+ isoc_etd_done(hcd, etd->urb, etd_num);
+ else
+ nonisoc_etd_done(hcd, etd->urb, etd_num);
+ }
+
+ /* only enable SOF interrupt if it may be needed for the kludge */
+ if (enable_sof_int)
+ set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
+ else
+ clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
+
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+static irqreturn_t imx21_irq(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ u32 ints = readl(imx21->regs + USBH_SYSISR);
+
+ if (ints & USBH_SYSIEN_HERRINT)
+ dev_dbg(imx21->dev, "Scheduling error\n");
+
+ if (ints & USBH_SYSIEN_SORINT)
+ dev_dbg(imx21->dev, "Scheduling overrun\n");
+
+ if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
+ process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);
+
+ writel(ints, imx21->regs + USBH_SYSISR);
+ return IRQ_HANDLED;
+}
+
+static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+ struct ep_priv *ep_priv;
+ int i;
+
+ if (ep == NULL)
+ return;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+ ep_priv = ep->hcpriv;
+ dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);
+
+ if (!list_empty(&ep->urb_list))
+ dev_dbg(imx21->dev, "ep's URB list is not empty\n");
+
+ if (ep_priv != NULL) {
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ if (ep_priv->etd[i] > -1)
+ dev_dbg(imx21->dev, "free etd %d for disable\n",
+ ep_priv->etd[i]);
+
+ free_etd(imx21, ep_priv->etd[i]);
+ }
+ kfree(ep_priv);
+ ep->hcpriv = NULL;
+ }
+
+ for (i = 0; i < USB_NUM_ETD; i++) {
+ if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
+ dev_err(imx21->dev,
+ "Active etd %d for disabled ep=%p!\n", i, ep);
+ free_etd(imx21, i);
+ }
+ }
+ free_epdmem(imx21, ep);
+ spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+/* =========================================== */
+/* Hub handling */
+/* =========================================== */
+
+static int get_hub_descriptor(struct usb_hcd *hcd,
+ struct usb_hub_descriptor *desc)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ desc->bDescriptorType = 0x29; /* HUB descriptor */
+ desc->bHubContrCurrent = 0;
+
+ desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
+ & USBH_ROOTHUBA_NDNSTMPRT_MASK;
+ desc->bDescLength = 9;
+ desc->bPwrOn2PwrGood = 0;
+ desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
+ 0x0002 | /* No power switching */
+ 0x0010 | /* No over current protection */
+ 0);
+
+ desc->bitmap[0] = 1 << 1;
+ desc->bitmap[1] = ~0;
+ return 0;
+}
+
+static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ int ports;
+ int changed = 0;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+ ports = readl(imx21->regs + USBH_ROOTHUBA)
+ & USBH_ROOTHUBA_NDNSTMPRT_MASK;
+ if (ports > 7) {
+ dev_err(imx21->dev, "ports %d > 7\n", ports);
+ ports = 7;
+ }
+ for (i = 0; i < ports; i++) {
+ if (readl(imx21->regs + USBH_PORTSTAT(i)) &
+ (USBH_PORTSTAT_CONNECTSC |
+ USBH_PORTSTAT_PRTENBLSC |
+ USBH_PORTSTAT_PRTSTATSC |
+ USBH_PORTSTAT_OVRCURIC |
+ USBH_PORTSTAT_PRTRSTSC)) {
+
+ changed = 1;
+ buf[0] |= 1 << (i + 1);
+ }
+ }
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ if (changed)
+ dev_info(imx21->dev, "Hub status changed\n");
+ return changed;
+}
+
+static int imx21_hc_hub_control(struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue, u16 wIndex, char *buf, u16 wLength)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ int rc = 0;
+ u32 status_write = 0;
+
+ switch (typeReq) {
+ case ClearHubFeature:
+ dev_dbg(imx21->dev, "ClearHubFeature\n");
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ dev_dbg(imx21->dev, " OVER_CURRENT\n");
+ break;
+ case C_HUB_LOCAL_POWER:
+ dev_dbg(imx21->dev, " LOCAL_POWER\n");
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+ break;
+
+ case ClearPortFeature:
+ dev_dbg(imx21->dev, "ClearPortFeature\n");
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ dev_dbg(imx21->dev, " ENABLE\n");
+ status_write = USBH_PORTSTAT_CURCONST;
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(imx21->dev, " SUSPEND\n");
+ status_write = USBH_PORTSTAT_PRTOVRCURI;
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(imx21->dev, " POWER\n");
+ status_write = USBH_PORTSTAT_LSDEVCON;
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ dev_dbg(imx21->dev, " C_ENABLE\n");
+ status_write = USBH_PORTSTAT_PRTENBLSC;
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ dev_dbg(imx21->dev, " C_SUSPEND\n");
+ status_write = USBH_PORTSTAT_PRTSTATSC;
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ dev_dbg(imx21->dev, " C_CONNECTION\n");
+ status_write = USBH_PORTSTAT_CONNECTSC;
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ dev_dbg(imx21->dev, " C_OVER_CURRENT\n");
+ status_write = USBH_PORTSTAT_OVRCURIC;
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ dev_dbg(imx21->dev, " C_RESET\n");
+ status_write = USBH_PORTSTAT_PRTRSTSC;
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ break;
+
+ case GetHubDescriptor:
+ dev_dbg(imx21->dev, "GetHubDescriptor\n");
+ rc = get_hub_descriptor(hcd, (void *)buf);
+ break;
+
+ case GetHubStatus:
+ dev_dbg(imx21->dev, " GetHubStatus\n");
+ *(__le32 *) buf = 0;
+ break;
+
+ case GetPortStatus:
+ dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
+ wIndex, USBH_PORTSTAT(wIndex - 1));
+ *(__le32 *) buf = readl(imx21->regs +
+ USBH_PORTSTAT(wIndex - 1));
+ break;
+
+ case SetHubFeature:
+ dev_dbg(imx21->dev, "SetHubFeature\n");
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ dev_dbg(imx21->dev, " OVER_CURRENT\n");
+ break;
+
+ case C_HUB_LOCAL_POWER:
+ dev_dbg(imx21->dev, " LOCAL_POWER\n");
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ break;
+
+ case SetPortFeature:
+ dev_dbg(imx21->dev, "SetPortFeature\n");
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(imx21->dev, " SUSPEND\n");
+ status_write = USBH_PORTSTAT_PRTSUSPST;
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(imx21->dev, " POWER\n");
+ status_write = USBH_PORTSTAT_PRTPWRST;
+ break;
+ case USB_PORT_FEAT_RESET:
+ dev_dbg(imx21->dev, " RESET\n");
+ status_write = USBH_PORTSTAT_PRTRSTST;
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+ break;
+
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ if (status_write)
+ writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
+ return rc;
+}
+
+/* =========================================== */
+/* Host controller management */
+/* =========================================== */
+
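+/* Reset the host controller modules, waiting up to 1s for completion */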
+static int imx21_hc_reset(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long timeout;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ /* Reset the host controller modules */
+ writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
+ USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
+ imx21->regs + USBOTG_RST_CTRL);
+
+ /* Wait for reset to finish */
+ timeout = jiffies + HZ;
+ while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
+ if (time_after(jiffies, timeout)) {
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ dev_err(imx21->dev, "timeout waiting for reset\n");
+ return -ETIMEDOUT;
+ }
+ spin_unlock_irq(&imx21->lock);
+ schedule_timeout(1);
+ spin_lock_irq(&imx21->lock);
+ }
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+}
+
+static int __devinit imx21_hc_start(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+ int i, j;
+ u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
+ u32 usb_control = 0;
+
+ hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
+ USBOTG_HWMODE_HOSTXCVR_MASK);
+ hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
+ USBOTG_HWMODE_OTGXCVR_MASK);
+
+ if (imx21->pdata->host1_txenoe)
+ usb_control |= USBCTRL_HOST1_TXEN_OE;
+
+ if (!imx21->pdata->host1_xcverless)
+ usb_control |= USBCTRL_HOST1_BYP_TLL;
+
+ if (imx21->pdata->otg_ext_xcvr)
+ usb_control |= USBCTRL_OTC_RCV_RXDP;
+
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
+ imx21->regs + USBOTG_CLK_CTRL);
+ writel(hw_mode, imx21->regs + USBOTG_HWMODE);
+ writel(usb_control, imx21->regs + USBCTRL);
+ writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
+ imx21->regs + USB_MISCCONTROL);
+
+ /* Clear the ETDs */
+ for (i = 0; i < USB_NUM_ETD; i++)
+ for (j = 0; j < 4; j++)
+ etd_writel(imx21, i, j, 0);
+
+ /* Take the HC out of reset */
+ writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
+ imx21->regs + USBH_HOST_CTRL);
+
+ /* Enable ports */
+ if (imx21->pdata->enable_otg_host)
+ writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+ imx21->regs + USBH_PORTSTAT(0));
+
+ if (imx21->pdata->enable_host1)
+ writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+ imx21->regs + USBH_PORTSTAT(1));
+
+ if (imx21->pdata->enable_host2)
+ writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+ imx21->regs + USBH_PORTSTAT(2));
+
+
+ hcd->state = HC_STATE_RUNNING;
+
+ /* Enable host controller interrupts */
+ set_register_bits(imx21, USBH_SYSIEN,
+ USBH_SYSIEN_HERRINT |
+ USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
+ set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static void imx21_hc_stop(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ writel(0, imx21->regs + USBH_SYSIEN);
+ clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
+ clear_register_bits(imx21, USBOTG_CLK_CTRL,
+ USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
+ spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+/* =========================================== */
+/* Driver glue */
+/* =========================================== */
+
+static struct hc_driver imx21_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "IMX21 USB Host Controller",
+ .hcd_priv_size = sizeof(struct imx21),
+
+ .flags = HCD_USB11,
+ .irq = imx21_irq,
+
+ .reset = imx21_hc_reset,
+ .start = imx21_hc_start,
+ .stop = imx21_hc_stop,
+
+ /* I/O requests */
+ .urb_enqueue = imx21_hc_urb_enqueue,
+ .urb_dequeue = imx21_hc_urb_dequeue,
+ .endpoint_disable = imx21_hc_endpoint_disable,
+
+ /* scheduling support */
+ .get_frame_number = imx21_hc_get_frame,
+
+ /* Root hub support */
+ .hub_status_data = imx21_hc_hub_status_data,
+ .hub_control = imx21_hc_hub_control,
+
+};
+
+static struct mx21_usbh_platform_data default_pdata = {
+ .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
+ .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
+ .enable_host1 = 1,
+ .enable_host2 = 1,
+ .enable_otg_host = 1,
+
+};
+
+static int imx21_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ remove_debug_files(imx21);
+ usb_remove_hcd(hcd);
+
+ if (res != NULL) {
+ clk_disable(imx21->clk);
+ clk_put(imx21->clk);
+ iounmap(imx21->regs);
+ release_mem_region(res->start, resource_size(res));
+ }
+
+ usb_put_hcd(hcd);
+ return 0;
+}
+
+
+static int imx21_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct imx21 *imx21;
+ struct resource *res;
+ int ret;
+ int irq;
+
+ printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ hcd = usb_create_hcd(&imx21_hc_driver,
+ &pdev->dev, dev_name(&pdev->dev));
+ if (hcd == NULL) {
+ dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
+ dev_name(&pdev->dev));
+ return -ENOMEM;
+ }
+
+ imx21 = hcd_to_imx21(hcd);
+ imx21->dev = &pdev->dev;
+ imx21->pdata = pdev->dev.platform_data;
+ if (!imx21->pdata)
+ imx21->pdata = &default_pdata;
+
+ spin_lock_init(&imx21->lock);
+ INIT_LIST_HEAD(&imx21->dmem_list);
+ INIT_LIST_HEAD(&imx21->queue_for_etd);
+ INIT_LIST_HEAD(&imx21->queue_for_dmem);
+ create_debug_files(imx21);
+
+ res = request_mem_region(res->start, resource_size(res), hcd_name);
+ if (!res) {
+ ret = -EBUSY;
+ goto failed_request_mem;
+ }
+
+ imx21->regs = ioremap(res->start, resource_size(res));
+ if (imx21->regs == NULL) {
+ dev_err(imx21->dev, "Cannot map registers\n");
+ ret = -ENOMEM;
+ goto failed_ioremap;
+ }
+
+ /* Enable clock source */
+ imx21->clk = clk_get(imx21->dev, NULL);
+ if (IS_ERR(imx21->clk)) {
+ dev_err(imx21->dev, "no clock found\n");
+ ret = PTR_ERR(imx21->clk);
+ goto failed_clock_get;
+ }
+
+ ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
+ if (ret)
+ goto failed_clock_set;
+ ret = clk_enable(imx21->clk);
+ if (ret)
+ goto failed_clock_enable;
+
+ dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
+ (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
+
+ ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ if (ret != 0) {
+ dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
+ goto failed_add_hcd;
+ }
+
+ return 0;
+
+failed_add_hcd:
+ clk_disable(imx21->clk);
+failed_clock_enable:
+failed_clock_set:
+ clk_put(imx21->clk);
+failed_clock_get:
+ iounmap(imx21->regs);
+failed_ioremap:
+ release_mem_region(res->start, res->end - res->start);
+failed_request_mem:
+ remove_debug_files(imx21);
+ usb_put_hcd(hcd);
+ return ret;
+}
+
+static struct platform_driver imx21_hcd_driver = {
+ .driver = {
+ .name = (char *)hcd_name,
+ },
+ .probe = imx21_probe,
+ .remove = imx21_remove,
+ .suspend = NULL,
+ .resume = NULL,
+};
+
+static int __init imx21_hcd_init(void)
+{
+ return platform_driver_register(&imx21_hcd_driver);
+}
+
+static void __exit imx21_hcd_cleanup(void)
+{
+ platform_driver_unregister(&imx21_hcd_driver);
+}
+
+module_init(imx21_hcd_init);
+module_exit(imx21_hcd_cleanup);
+
+MODULE_DESCRIPTION("i.MX21 USB Host controller");
+MODULE_AUTHOR("Martin Fuzzey");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx21-hcd");
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
new file mode 100644
index 0000000..1b0d913
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.h
@@ -0,0 +1,436 @@
+/*
+ * Macros and prototypes for i.MX21
+ *
+ * Copyright (C) 2006 Loping Dog Embedded Systems
+ * Copyright (C) 2009 Martin Fuzzey
+ * Originally written by Jay Monkman <jtm@lopingdog.com>
+ * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_IMX21_HCD_H__
+#define __LINUX_IMX21_HCD_H__
+
+#include <mach/mx21-usbhost.h>
+
+#define NUM_ISO_ETDS 2
+#define USB_NUM_ETD 32
+#define DMEM_SIZE 4096
+
+/* Register definitions */
+#define USBOTG_HWMODE 0x00
+#define USBOTG_HWMODE_ANASDBEN (1 << 14)
+#define USBOTG_HWMODE_OTGXCVR_SHIFT 6
+#define USBOTG_HWMODE_OTGXCVR_MASK (3 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TD_RD (0 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TS_RD (2 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TD_RS (1 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TS_RS (3 << 6)
+#define USBOTG_HWMODE_HOSTXCVR_SHIFT 4
+#define USBOTG_HWMODE_HOSTXCVR_MASK (3 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TD_RD (0 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TS_RD (2 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TD_RS (1 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TS_RS (3 << 4)
+#define USBOTG_HWMODE_CRECFG_MASK (3 << 0)
+#define USBOTG_HWMODE_CRECFG_HOST (1 << 0)
+#define USBOTG_HWMODE_CRECFG_FUNC (2 << 0)
+#define USBOTG_HWMODE_CRECFG_HNP (3 << 0)
+
+#define USBOTG_CINT_STAT 0x04
+#define USBOTG_CINT_STEN 0x08
+#define USBOTG_ASHNPINT (1 << 5)
+#define USBOTG_ASFCINT (1 << 4)
+#define USBOTG_ASHCINT (1 << 3)
+#define USBOTG_SHNPINT (1 << 2)
+#define USBOTG_FCINT (1 << 1)
+#define USBOTG_HCINT (1 << 0)
+
+#define USBOTG_CLK_CTRL 0x0c
+#define USBOTG_CLK_CTRL_FUNC (1 << 2)
+#define USBOTG_CLK_CTRL_HST (1 << 1)
+#define USBOTG_CLK_CTRL_MAIN (1 << 0)
+
+#define USBOTG_RST_CTRL 0x10
+#define USBOTG_RST_RSTI2C (1 << 15)
+#define USBOTG_RST_RSTCTRL (1 << 5)
+#define USBOTG_RST_RSTFC (1 << 4)
+#define USBOTG_RST_RSTFSKE (1 << 3)
+#define USBOTG_RST_RSTRH (1 << 2)
+#define USBOTG_RST_RSTHSIE (1 << 1)
+#define USBOTG_RST_RSTHC (1 << 0)
+
+#define USBOTG_FRM_INTVL 0x14
+#define USBOTG_FRM_REMAIN 0x18
+#define USBOTG_HNP_CSR 0x1c
+#define USBOTG_HNP_ISR 0x2c
+#define USBOTG_HNP_IEN 0x30
+
+#define USBOTG_I2C_TXCVR_REG(x) (0x100 + (x))
+#define USBOTG_I2C_XCVR_DEVAD 0x118
+#define USBOTG_I2C_SEQ_OP_REG 0x119
+#define USBOTG_I2C_SEQ_RD_STARTAD 0x11a
+#define USBOTG_I2C_OP_CTRL_REG 0x11b
+#define USBOTG_I2C_SCLK_TO_SCK_HPER 0x11e
+#define USBOTG_I2C_MASTER_INT_REG 0x11f
+
+#define USBH_HOST_CTRL 0x80
+#define USBH_HOST_CTRL_HCRESET (1 << 31)
+#define USBH_HOST_CTRL_SCHDOVR(x) ((x) << 16)
+#define USBH_HOST_CTRL_RMTWUEN (1 << 4)
+#define USBH_HOST_CTRL_HCUSBSTE_RESET (0 << 2)
+#define USBH_HOST_CTRL_HCUSBSTE_RESUME (1 << 2)
+#define USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL (2 << 2)
+#define USBH_HOST_CTRL_HCUSBSTE_SUSPEND (3 << 2)
+#define USBH_HOST_CTRL_CTLBLKSR_1 (0 << 0)
+#define USBH_HOST_CTRL_CTLBLKSR_2 (1 << 0)
+#define USBH_HOST_CTRL_CTLBLKSR_3 (2 << 0)
+#define USBH_HOST_CTRL_CTLBLKSR_4 (3 << 0)
+
+#define USBH_SYSISR 0x88
+#define USBH_SYSISR_PSCINT (1 << 6)
+#define USBH_SYSISR_FMOFINT (1 << 5)
+#define USBH_SYSISR_HERRINT (1 << 4)
+#define USBH_SYSISR_RESDETINT (1 << 3)
+#define USBH_SYSISR_SOFINT (1 << 2)
+#define USBH_SYSISR_DONEINT (1 << 1)
+#define USBH_SYSISR_SORINT (1 << 0)
+
+#define USBH_SYSIEN 0x8c
+#define USBH_SYSIEN_PSCINT (1 << 6)
+#define USBH_SYSIEN_FMOFINT (1 << 5)
+#define USBH_SYSIEN_HERRINT (1 << 4)
+#define USBH_SYSIEN_RESDETINT (1 << 3)
+#define USBH_SYSIEN_SOFINT (1 << 2)
+#define USBH_SYSIEN_DONEINT (1 << 1)
+#define USBH_SYSIEN_SORINT (1 << 0)
+
+#define USBH_XBUFSTAT 0x98
+#define USBH_YBUFSTAT 0x9c
+#define USBH_XYINTEN 0xa0
+#define USBH_XFILLSTAT 0xa8
+#define USBH_YFILLSTAT 0xac
+#define USBH_ETDENSET 0xc0
+#define USBH_ETDENCLR 0xc4
+#define USBH_IMMEDINT 0xcc
+#define USBH_ETDDONESTAT 0xd0
+#define USBH_ETDDONEEN 0xd4
+#define USBH_FRMNUB 0xe0
+#define USBH_LSTHRESH 0xe4
+
+#define USBH_ROOTHUBA 0xe8
+#define USBH_ROOTHUBA_PWRTOGOOD_MASK (0xff)
+#define USBH_ROOTHUBA_PWRTOGOOD_SHIFT (24)
+#define USBH_ROOTHUBA_NOOVRCURP (1 << 12)
+#define USBH_ROOTHUBA_OVRCURPM (1 << 11)
+#define USBH_ROOTHUBA_DEVTYPE (1 << 10)
+#define USBH_ROOTHUBA_PWRSWTMD (1 << 9)
+#define USBH_ROOTHUBA_NOPWRSWT (1 << 8)
+#define USBH_ROOTHUBA_NDNSTMPRT_MASK (0xff)
+
+#define USBH_ROOTHUBB 0xec
+#define USBH_ROOTHUBB_PRTPWRCM(x) (1 << ((x) + 16))
+#define USBH_ROOTHUBB_DEVREMOVE(x) (1 << (x))
+
+#define USBH_ROOTSTAT 0xf0
+#define USBH_ROOTSTAT_CLRRMTWUE (1 << 31)
+#define USBH_ROOTSTAT_OVRCURCHG (1 << 17)
+#define USBH_ROOTSTAT_DEVCONWUE (1 << 15)
+#define USBH_ROOTSTAT_OVRCURI (1 << 1)
+#define USBH_ROOTSTAT_LOCPWRS (1 << 0)
+
+#define USBH_PORTSTAT(x) (0xf4 + ((x) * 4))
+#define USBH_PORTSTAT_PRTRSTSC (1 << 20)
+#define USBH_PORTSTAT_OVRCURIC (1 << 19)
+#define USBH_PORTSTAT_PRTSTATSC (1 << 18)
+#define USBH_PORTSTAT_PRTENBLSC (1 << 17)
+#define USBH_PORTSTAT_CONNECTSC (1 << 16)
+#define USBH_PORTSTAT_LSDEVCON (1 << 9)
+#define USBH_PORTSTAT_PRTPWRST (1 << 8)
+#define USBH_PORTSTAT_PRTRSTST (1 << 4)
+#define USBH_PORTSTAT_PRTOVRCURI (1 << 3)
+#define USBH_PORTSTAT_PRTSUSPST (1 << 2)
+#define USBH_PORTSTAT_PRTENABST (1 << 1)
+#define USBH_PORTSTAT_CURCONST (1 << 0)
+
+#define USB_DMAREV 0x800
+#define USB_DMAINTSTAT 0x804
+#define USB_DMAINTSTAT_EPERR (1 << 1)
+#define USB_DMAINTSTAT_ETDERR (1 << 0)
+
+#define USB_DMAINTEN 0x808
+#define USB_DMAINTEN_EPERRINTEN (1 << 1)
+#define USB_DMAINTEN_ETDERRINTEN (1 << 0)
+
+#define USB_ETDDMAERSTAT 0x80c
+#define USB_EPDMAERSTAT 0x810
+#define USB_ETDDMAEN 0x820
+#define USB_EPDMAEN 0x824
+#define USB_ETDDMAXTEN 0x828
+#define USB_EPDMAXTEN 0x82c
+#define USB_ETDDMAENXYT 0x830
+#define USB_EPDMAENXYT 0x834
+#define USB_ETDDMABST4EN 0x838
+#define USB_EPDMABST4EN 0x83c
+
+#define USB_MISCCONTROL 0x840
+#define USB_MISCCONTROL_ISOPREVFRM (1 << 3)
+#define USB_MISCCONTROL_SKPRTRY (1 << 2)
+#define USB_MISCCONTROL_ARBMODE (1 << 1)
+#define USB_MISCCONTROL_FILTCC (1 << 0)
+
+#define USB_ETDDMACHANLCLR 0x848
+#define USB_EPDMACHANLCLR 0x84c
+#define USB_ETDSMSA(x) (0x900 + ((x) * 4))
+#define USB_EPSMSA(x) (0x980 + ((x) * 4))
+#define USB_ETDDMABUFPTR(x) (0xa00 + ((x) * 4))
+#define USB_EPDMABUFPTR(x) (0xa80 + ((x) * 4))
+
+#define USB_ETD_DWORD(x, w) (0x200 + ((x) * 16) + ((w) * 4))
+#define DW0_ADDRESS 0
+#define DW0_ENDPNT 7
+#define DW0_DIRECT 11
+#define DW0_SPEED 13
+#define DW0_FORMAT 14
+#define DW0_MAXPKTSIZ 16
+#define DW0_HALTED 27
+#define DW0_TOGCRY 28
+#define DW0_SNDNAK 30
+
+#define DW1_XBUFSRTAD 0
+#define DW1_YBUFSRTAD 16
+
+#define DW2_RTRYDELAY 0
+#define DW2_POLINTERV 0
+#define DW2_STARTFRM 0
+#define DW2_RELPOLPOS 8
+#define DW2_DIRPID 16
+#define DW2_BUFROUND 18
+#define DW2_DELAYINT 19
+#define DW2_DATATOG 22
+#define DW2_ERRORCNT 24
+#define DW2_COMPCODE 28
+
+#define DW3_TOTBYECNT 0
+#define DW3_PKTLEN0 0
+#define DW3_COMPCODE0 12
+#define DW3_PKTLEN1 16
+#define DW3_BUFSIZE 21
+#define DW3_COMPCODE1 28
+
+#define USBCTRL 0x600
+#define USBCTRL_I2C_WU_INT_STAT (1 << 27)
+#define USBCTRL_OTG_WU_INT_STAT (1 << 26)
+#define USBCTRL_HOST_WU_INT_STAT (1 << 25)
+#define USBCTRL_FNT_WU_INT_STAT (1 << 24)
+#define USBCTRL_I2C_WU_INT_EN (1 << 19)
+#define USBCTRL_OTG_WU_INT_EN (1 << 18)
+#define USBCTRL_HOST_WU_INT_EN (1 << 17)
+#define USBCTRL_FNT_WU_INT_EN (1 << 16)
+#define USBCTRL_OTC_RCV_RXDP (1 << 13)
+#define USBCTRL_HOST1_BYP_TLL (1 << 12)
+#define USBCTRL_OTG_BYP_VAL(x) ((x) << 10)
+#define USBCTRL_HOST1_BYP_VAL(x) ((x) << 8)
+#define USBCTRL_OTG_PWR_MASK (1 << 6)
+#define USBCTRL_HOST1_PWR_MASK (1 << 5)
+#define USBCTRL_HOST2_PWR_MASK (1 << 4)
+#define USBCTRL_USB_BYP (1 << 2)
+#define USBCTRL_HOST1_TXEN_OE (1 << 1)
+
+
+/* Values in TD blocks */
+#define TD_DIR_SETUP 0
+#define TD_DIR_OUT 1
+#define TD_DIR_IN 2
+#define TD_FORMAT_CONTROL 0
+#define TD_FORMAT_ISO 1
+#define TD_FORMAT_BULK 2
+#define TD_FORMAT_INT 3
+#define TD_TOGGLE_CARRY 0
+#define TD_TOGGLE_DATA0 2
+#define TD_TOGGLE_DATA1 3
+
+/* control transfer states */
+#define US_CTRL_SETUP 2
+#define US_CTRL_DATA 1
+#define US_CTRL_ACK 0
+
+/* bulk transfer main state and 0-length packet */
+#define US_BULK 1
+#define US_BULK0 0
+
+/* ETD format description */
+#define IMX_FMT_CTRL 0x0
+#define IMX_FMT_ISO 0x1
+#define IMX_FMT_BULK 0x2
+#define IMX_FMT_INT 0x3
+
+static char fmt_urb_to_etd[4] = {
+/*PIPE_ISOCHRONOUS*/ IMX_FMT_ISO,
+/*PIPE_INTERRUPT*/ IMX_FMT_INT,
+/*PIPE_CONTROL*/ IMX_FMT_CTRL,
+/*PIPE_BULK*/ IMX_FMT_BULK
+};
+
+/* condition (error) CC codes and mapping (OHCI like) */
+
+#define TD_CC_NOERROR 0x00
+#define TD_CC_CRC 0x01
+#define TD_CC_BITSTUFFING 0x02
+#define TD_CC_DATATOGGLEM 0x03
+#define TD_CC_STALL 0x04
+#define TD_DEVNOTRESP 0x05
+#define TD_PIDCHECKFAIL 0x06
+/*#define TD_UNEXPECTEDPID 0x07 - reserved, not active on MX2*/
+#define TD_DATAOVERRUN 0x08
+#define TD_DATAUNDERRUN 0x09
+#define TD_BUFFEROVERRUN 0x0C
+#define TD_BUFFERUNDERRUN 0x0D
+#define TD_SCHEDULEOVERRUN 0x0E
+#define TD_NOTACCESSED 0x0F
+
+static const int cc_to_error[16] = {
+ /* No Error */ 0,
+ /* CRC Error */ -EILSEQ,
+ /* Bit Stuff */ -EPROTO,
+ /* Data Togg */ -EILSEQ,
+ /* Stall */ -EPIPE,
+ /* DevNotResp */ -ETIMEDOUT,
+ /* PIDCheck */ -EPROTO,
+ /* UnExpPID */ -EPROTO,
+ /* DataOver */ -EOVERFLOW,
+ /* DataUnder */ -EREMOTEIO,
+ /* (for hw) */ -EIO,
+ /* (for hw) */ -EIO,
+ /* BufferOver */ -ECOMM,
+ /* BuffUnder */ -ENOSR,
+ /* (for HCD) */ -ENOSPC,
+ /* (for HCD) */ -EALREADY
+};
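A sketch of how the two pieces above fit together: the completion code sits at DW2_COMPCODE in ETD dword 2, and the table maps it to a URB status. The helper name is hypothetical, and the 4-bit field width is an assumption implied by the 16-entry table:

static int imx21_cc_to_error(u32 etd_dword2)
{
	return cc_to_error[(etd_dword2 >> DW2_COMPCODE) & 0xf];
}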
+
+/* HCD data associated with a usb core URB */
+struct urb_priv {
+ struct urb *urb;
+ struct usb_host_endpoint *ep;
+ int active;
+ int state;
+ struct td *isoc_td;
+ int isoc_remaining;
+ int isoc_status;
+};
+
+/* HCD data associated with a usb core endpoint */
+struct ep_priv {
+ struct usb_host_endpoint *ep;
+ struct list_head td_list;
+ struct list_head queue;
+ int etd[NUM_ISO_ETDS];
+ int waiting_etd;
+};
+
+/* isoc packet */
+struct td {
+ struct list_head list;
+ struct urb *urb;
+ struct usb_host_endpoint *ep;
+ dma_addr_t data;
+ unsigned long buf_addr;
+ int len;
+ int frame;
+ int isoc_index;
+};
+
+/* HCD data associated with a hardware ETD */
+struct etd_priv {
+ struct usb_host_endpoint *ep;
+ struct urb *urb;
+ struct td *td;
+ struct list_head queue;
+ dma_addr_t dma_handle;
+ int alloc;
+ int len;
+ int dmem_size;
+ int dmem_offset;
+ int active_count;
+#ifdef DEBUG
+ int activated_frame;
+ int disactivated_frame;
+ int last_int_frame;
+ int last_req_frame;
+ u32 submitted_dwords[4];
+#endif
+};
+
+/* Hardware data memory info */
+struct imx21_dmem_area {
+ struct usb_host_endpoint *ep;
+ unsigned int offset;
+ unsigned int size;
+ struct list_head list;
+};
+
+#ifdef DEBUG
+struct debug_usage_stats {
+ unsigned int value;
+ unsigned int maximum;
+};
+
+struct debug_stats {
+ unsigned long submitted;
+ unsigned long completed_ok;
+ unsigned long completed_failed;
+ unsigned long unlinked;
+ unsigned long queue_etd;
+ unsigned long queue_dmem;
+};
+
+struct debug_isoc_trace {
+ int schedule_frame;
+ int submit_frame;
+ int request_len;
+ int done_frame;
+ int done_len;
+ int cc;
+ struct td *td;
+};
+#endif
+
+/* HCD data structure */
+struct imx21 {
+ spinlock_t lock;
+ struct device *dev;
+ struct mx21_usbh_platform_data *pdata;
+ struct list_head dmem_list;
+ struct list_head queue_for_etd; /* eps queued due to etd shortage */
+ struct list_head queue_for_dmem; /* etds queued due to dmem shortage */
+ struct etd_priv etd[USB_NUM_ETD];
+ struct clk *clk;
+ void __iomem *regs;
+#ifdef DEBUG
+ struct dentry *debug_root;
+ struct debug_stats nonisoc_stats;
+ struct debug_stats isoc_stats;
+ struct debug_usage_stats etd_usage;
+ struct debug_usage_stats dmem_usage;
+ struct debug_isoc_trace isoc_trace[20];
+ struct debug_isoc_trace isoc_trace_failed[20];
+ unsigned long debug_unblocks;
+ int isoc_trace_index;
+ int isoc_trace_index_failed;
+#endif
+};
+
+#endif
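Since the DW0_* (and DW1_*/DW2_*/DW3_*) values in the imx21 header above are bit positions rather than masks, a packing helper makes the intended dword layout explicit. A minimal sketch with a hypothetical name and parameter set:

static u32 imx21_pack_etd_dword0(u8 dev_addr, u8 ep_num, u8 dir,
				 u8 speed, u8 format, u16 max_packet)
{
	return (dev_addr << DW0_ADDRESS) | (ep_num << DW0_ENDPNT) |
	       (dir << DW0_DIRECT) | (speed << DW0_SPEED) |
	       (format << DW0_FORMAT) | ((u32)max_packet << DW0_MAXPKTSIZ);
}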
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 4297165..217fb51 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -1257,7 +1257,7 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd,
/* avoid all allocations within spinlocks: request or endpoint */
if (!hep->hcpriv) {
- ep = kcalloc(1, sizeof *ep, mem_flags);
+ ep = kzalloc(sizeof *ep, mem_flags);
if (!ep)
return -ENOMEM;
}
@@ -2719,24 +2719,11 @@ static int __init isp1362_probe(struct platform_device *pdev)
}
irq = irq_res->start;
-#ifdef CONFIG_USB_HCD_DMA
- if (pdev->dev.dma_mask) {
- struct resource *dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-
- if (!dma_res) {
- retval = -ENODEV;
- goto err1;
- }
- isp1362_hcd->data_dma = dma_res->start;
- isp1362_hcd->max_dma_size = resource_len(dma_res);
- }
-#else
if (pdev->dev.dma_mask) {
DBG(1, "won't do DMA");
retval = -ENODEV;
goto err1;
}
-#endif
if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) {
retval = -EBUSY;
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 27b8f7c..9f01293 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -17,7 +17,9 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/mm.h>
#include <asm/unaligned.h>
+#include <asm/cacheflush.h>
#include "../core/hcd.h"
#include "isp1760-hcd.h"
@@ -904,6 +906,14 @@ __acquires(priv->lock)
status = 0;
}
+ if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
+ void *ptr;
+ for (ptr = urb->transfer_buffer;
+ ptr < urb->transfer_buffer + urb->transfer_buffer_length;
+ ptr += PAGE_SIZE)
+ flush_dcache_page(virt_to_page(ptr));
+ }
+
/* complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb);
spin_unlock(&priv->lock);
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 1c9f977..4293cfd 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -109,7 +109,7 @@ static int of_isp1760_remove(struct of_device *dev)
return 0;
}
-static struct of_device_id of_isp1760_match[] = {
+static const struct of_device_id of_isp1760_match[] = {
{
.compatible = "nxp,usb-isp1760",
},
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
new file mode 100644
index 0000000..4aa08d3
--- /dev/null
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -0,0 +1,456 @@
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * TI DA8xx (OMAP-L1x) Bus Glue
+ *
+ * Derived from: ohci-omap.c and ohci-s3c2410.c
+ * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <mach/da8xx.h>
+#include <mach/usb.h>
+
+#ifndef CONFIG_ARCH_DAVINCI_DA8XX
+#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX."
+#endif
+
+#define CFGCHIP2 DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG)
+
+static struct clk *usb11_clk;
+static struct clk *usb20_clk;
+
+/* Over-current indicator change bitmask */
+static volatile u16 ocic_mask;
+
+static void ohci_da8xx_clock(int on)
+{
+ u32 cfgchip2;
+
+ cfgchip2 = __raw_readl(CFGCHIP2);
+ if (on) {
+ clk_enable(usb11_clk);
+
+ /*
+ * If USB 1.1 reference clock is sourced from USB 2.0 PHY, we
+ * need to enable the USB 2.0 module clocking, start its PHY,
+ * and not allow it to stop the clock during USB 2.0 suspend.
+ */
+ if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) {
+ clk_enable(usb20_clk);
+
+ cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN);
+ cfgchip2 |= CFGCHIP2_PHY_PLLON;
+ __raw_writel(cfgchip2, CFGCHIP2);
+
+ pr_info("Waiting for USB PHY clock good...\n");
+ while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD))
+ cpu_relax();
+ }
+
+ /* Enable USB 1.1 PHY */
+ cfgchip2 |= CFGCHIP2_USB1SUSPENDM;
+ } else {
+ clk_disable(usb11_clk);
+ if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX))
+ clk_disable(usb20_clk);
+
+ /* Disable USB 1.1 PHY */
+ cfgchip2 &= ~CFGCHIP2_USB1SUSPENDM;
+ }
+ __raw_writel(cfgchip2, CFGCHIP2);
+}
+
+/*
+ * Handle the port over-current indicator change.
+ */
+static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub,
+ unsigned port)
+{
+ ocic_mask |= 1 << port;
+
+ /* Once over-current is detected, the port needs to be powered down */
+ if (hub->get_oci(port) > 0)
+ hub->set_power(port, 0);
+}
+
+static int ohci_da8xx_init(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev->platform_data;
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int result;
+ u32 rh_a;
+
+ dev_dbg(dev, "starting USB controller\n");
+
+ ohci_da8xx_clock(1);
+
+ /*
+ * The DA8xx has only one port connected to the pins, but the HC root
+ * hub register A reports 2 ports; thus we'll have to override it...
+ */
+ ohci->num_ports = 1;
+
+ result = ohci_init(ohci);
+ if (result < 0)
+ return result;
+
+ /*
+ * Since we're providing a board-specific root hub port power control
+ * and over-current reporting, we have to override the HC root hub A
+ * register's default value, so that ohci_hub_control() could return
+ * the correct hub descriptor...
+ */
+ rh_a = ohci_readl(ohci, &ohci->regs->roothub.a);
+ if (hub->set_power) {
+ rh_a &= ~RH_A_NPS;
+ rh_a |= RH_A_PSM;
+ }
+ if (hub->get_oci) {
+ rh_a &= ~RH_A_NOCP;
+ rh_a |= RH_A_OCPM;
+ }
+ rh_a &= ~RH_A_POTPGT;
+ rh_a |= hub->potpgt << 24;
+ ohci_writel(ohci, rh_a, &ohci->regs->roothub.a);
+
+ return result;
+}
+
+static void ohci_da8xx_stop(struct usb_hcd *hcd)
+{
+ ohci_stop(hcd);
+ ohci_da8xx_clock(0);
+}
+
+static int ohci_da8xx_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int result;
+
+ result = ohci_run(ohci);
+ if (result < 0)
+ ohci_da8xx_stop(hcd);
+
+ return result;
+}
+
+/*
+ * Update the status data from the hub with the over-current indicator change.
+ */
+static int ohci_da8xx_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ int length = ohci_hub_status_data(hcd, buf);
+
+ /* See if we have OCIC bit set on port 1 */
+ if (ocic_mask & (1 << 1)) {
+ dev_dbg(hcd->self.controller, "over-current indicator change "
+ "on port 1\n");
+
+ if (!length)
+ length = 1;
+
+ buf[0] |= 1 << 1;
+ }
+ return length;
+}
+
+/*
+ * Look at the control requests to the root hub and see if we need to override.
+ */
+static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev->platform_data;
+ int temp;
+
+ switch (typeReq) {
+ case GetPortStatus:
+ /* Check the port number */
+ if (wIndex != 1)
+ break;
+
+ dev_dbg(dev, "GetPortStatus(%u)\n", wIndex);
+
+ temp = roothub_portstatus(hcd_to_ohci(hcd), wIndex - 1);
+
+ /* The port power status (PPS) bit defaults to 1 */
+ if (hub->get_power && hub->get_power(wIndex) == 0)
+ temp &= ~RH_PS_PPS;
+
+ /* The port over-current indicator (POCI) bit is always 0 */
+ if (hub->get_oci && hub->get_oci(wIndex) > 0)
+ temp |= RH_PS_POCI;
+
+ /* The over-current indicator change (OCIC) bit is 0 too */
+ if (ocic_mask & (1 << wIndex))
+ temp |= RH_PS_OCIC;
+
+ put_unaligned(cpu_to_le32(temp), (__le32 *)buf);
+ return 0;
+ case SetPortFeature:
+ temp = 1;
+ goto check_port;
+ case ClearPortFeature:
+ temp = 0;
+
+check_port:
+ /* Check the port number */
+ if (wIndex != 1)
+ break;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(dev, "%sPortFeature(%u): %s\n",
+ temp ? "Set" : "Clear", wIndex, "POWER");
+
+ if (!hub->set_power)
+ return -EPIPE;
+
+ return hub->set_power(wIndex, temp) ? -EPIPE : 0;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ dev_dbg(dev, "%sPortFeature(%u): %s\n",
+ temp ? "Set" : "Clear", wIndex,
+ "C_OVER_CURRENT");
+
+ if (temp)
+ ocic_mask |= 1 << wIndex;
+ else
+ ocic_mask &= ~(1 << wIndex);
+ return 0;
+ }
+ }
+
+ return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+}
+
+static const struct hc_driver ohci_da8xx_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "DA8xx OHCI",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ohci_irq,
+ .flags = HCD_USB11 | HCD_MEMORY,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ohci_da8xx_init,
+ .start = ohci_da8xx_start,
+ .stop = ohci_da8xx_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ohci_da8xx_hub_status_data,
+ .hub_control = ohci_da8xx_hub_control,
+
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+/*-------------------------------------------------------------------------*/
+
+
+/**
+ * usb_hcd_da8xx_probe - initialize DA8xx-based HCDs
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
+ struct platform_device *pdev)
+{
+ struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
+ struct usb_hcd *hcd;
+ struct resource *mem;
+ int error, irq;
+
+ if (hub == NULL)
+ return -ENODEV;
+
+ usb11_clk = clk_get(&pdev->dev, "usb11");
+ if (IS_ERR(usb11_clk))
+ return PTR_ERR(usb11_clk);
+
+ usb20_clk = clk_get(&pdev->dev, "usb20");
+ if (IS_ERR(usb20_clk)) {
+ error = PTR_ERR(usb20_clk);
+ goto err0;
+ }
+
+ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ error = -ENOMEM;
+ goto err1;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ error = -ENODEV;
+ goto err2;
+ }
+ hcd->rsrc_start = mem->start;
+ hcd->rsrc_len = mem->end - mem->start + 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+ dev_dbg(&pdev->dev, "request_mem_region failed\n");
+ error = -EBUSY;
+ goto err2;
+ }
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ error = -ENOMEM;
+ goto err3;
+ }
+
+ ohci_hcd_init(hcd_to_ohci(hcd));
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ error = -ENODEV;
+ goto err4;
+ }
+ error = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ if (error)
+ goto err4;
+
+ if (hub->ocic_notify) {
+ error = hub->ocic_notify(ohci_da8xx_ocic_handler);
+ if (!error)
+ return 0;
+ }
+
+ usb_remove_hcd(hcd);
+err4:
+ iounmap(hcd->regs);
+err3:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err2:
+ usb_put_hcd(hcd);
+err1:
+ clk_put(usb20_clk);
+err0:
+ clk_put(usb11_clk);
+ return error;
+}
+
+/**
+ * usb_hcd_da8xx_remove - shutdown processing for DA8xx-based HCDs
+ * @dev: USB Host Controller being removed
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of usb_hcd_da8xx_probe(), first invoking
+ * the HCD's stop() method. It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ */
+static inline void
+usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev)
+{
+ struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
+
+ hub->ocic_notify(NULL);
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+ clk_put(usb20_clk);
+ clk_put(usb11_clk);
+}
+
+static int ohci_hcd_da8xx_drv_probe(struct platform_device *dev)
+{
+ return usb_hcd_da8xx_probe(&ohci_da8xx_hc_driver, dev);
+}
+
+static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+
+ usb_hcd_da8xx_remove(hcd, dev);
+ platform_set_drvdata(dev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ohci_da8xx_suspend(struct platform_device *dev, pm_message_t message)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+
+ ohci_da8xx_clock(0);
+ hcd->state = HC_STATE_SUSPENDED;
+ dev->dev.power.power_state = PMSG_SUSPEND;
+ return 0;
+}
+
+static int ohci_da8xx_resume(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+
+ ohci_da8xx_clock(1);
+ dev->dev.power.power_state = PMSG_ON;
+ usb_hcd_resume_root_hub(hcd);
+ return 0;
+}
+#endif
+
+/*
+ * Driver definition to register with platform structure.
+ */
+static struct platform_driver ohci_hcd_da8xx_driver = {
+ .probe = ohci_hcd_da8xx_drv_probe,
+ .remove = ohci_hcd_da8xx_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+#ifdef CONFIG_PM
+ .suspend = ohci_da8xx_suspend,
+ .resume = ohci_da8xx_resume,
+#endif
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ohci",
+ },
+};
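The glue driver above expects its struct da8xx_ohci_root_hub via platform_data and binds to the platform device named "ohci". A board-file sketch under those assumptions; the evm_* helpers are hypothetical stubs, the da8xx_ocic_handler_t typedef is assumed to come from <mach/usb.h>, and the MEM/IRQ resources are omitted for brevity.

static int evm_usb_set_power(unsigned port, int on)
{
	return 0;	/* drive the board's port-power GPIO here */
}

static int evm_usb_get_power(unsigned port)
{
	return 1;	/* report the current port-power state */
}

static int evm_usb_get_oci(unsigned port)
{
	return 0;	/* sample the board's over-current input */
}

static int evm_usb_ocic_notify(da8xx_ocic_handler_t handler)
{
	return 0;	/* arm an over-current IRQ that calls handler() */
}

static struct da8xx_ohci_root_hub evm_usb11_pdata = {
	.set_power	= evm_usb_set_power,
	.get_power	= evm_usb_get_power,
	.get_oci	= evm_usb_get_oci,
	.ocic_notify	= evm_usb_ocic_notify,
	.potpgt		= (3 + 1) / 2,	/* 3 ms, in 2 ms POTPGT units */
};

static struct platform_device evm_usb11_device = {
	.name	= "ohci",
	.id	= -1,
	.dev	= {
		.platform_data	= &evm_usb11_pdata,
	},
};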
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 811f5dfd..8ad2441 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -53,13 +53,13 @@ urb_print(struct urb * urb, char * str, int small, int status)
int i, len;
if (usb_pipecontrol (pipe)) {
- printk (KERN_DEBUG __FILE__ ": setup(8):");
+ printk (KERN_DEBUG "%s: setup(8):", __FILE__);
for (i = 0; i < 8 ; i++)
printk (" %02x", ((__u8 *) urb->setup_packet) [i]);
printk ("\n");
}
if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) {
- printk (KERN_DEBUG __FILE__ ": data(%d/%d):",
+ printk (KERN_DEBUG "%s: data(%d/%d):", __FILE__,
urb->actual_length,
urb->transfer_buffer_length);
len = usb_pipeout (pipe)?
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 24eb747..afe59be 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1051,6 +1051,11 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER usb_hcd_pnx4008_driver
#endif
+#ifdef CONFIG_ARCH_DAVINCI_DA8XX
+#include "ohci-da8xx.c"
+#define PLATFORM_DRIVER ohci_hcd_da8xx_driver
+#endif
+
#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
defined(CONFIG_CPU_SUBTYPE_SH7721) || \
defined(CONFIG_CPU_SUBTYPE_SH7763) || \
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index de42283..18d39f0 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -28,8 +28,8 @@ extern int usb_disabled(void);
static void lh7a404_start_hc(struct platform_device *dev)
{
- printk(KERN_DEBUG __FILE__
- ": starting LH7A404 OHCI USB Controller\n");
+ printk(KERN_DEBUG "%s: starting LH7A404 OHCI USB Controller\n",
+ __FILE__);
/*
* Now, carefully enable the USB clock, and take
@@ -39,14 +39,13 @@ static void lh7a404_start_hc(struct platform_device *dev)
udelay(1000);
USBH_CMDSTATUS = OHCI_HCR;
- printk(KERN_DEBUG __FILE__
- ": Clock to USB host has been enabled \n");
+ printk(KERN_DEBUG "%s: Clock to USB host has been enabled \n", __FILE__);
}
static void lh7a404_stop_hc(struct platform_device *dev)
{
- printk(KERN_DEBUG __FILE__
- ": stopping LH7A404 OHCI USB Controller\n");
+ printk(KERN_DEBUG "%s: stopping LH7A404 OHCI USB Controller\n",
+ __FILE__);
CSC_PWRCNT &= ~CSC_PWRCNT_USBH_EN; /* Disable clock */
}
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 2769326..cd74bbd 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -327,7 +327,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev)
}
i2c_adap = i2c_get_adapter(2);
memset(&i2c_info, 0, sizeof(struct i2c_board_info));
- strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE);
+ strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE);
isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
normal_i2c);
i2c_put_adapter(i2c_adap);
@@ -411,7 +411,7 @@ out3:
out2:
clk_put(usb_clk);
out1:
- i2c_unregister_client(isp1301_i2c_client);
+ i2c_unregister_device(isp1301_i2c_client);
isp1301_i2c_client = NULL;
out_i2c_driver:
i2c_del_driver(&isp1301_driver);
@@ -430,7 +430,7 @@ static int usb_hcd_pnx4008_remove(struct platform_device *pdev)
pnx4008_unset_usb_bits();
clk_disable(usb_clk);
clk_put(usb_clk);
- i2c_unregister_client(isp1301_i2c_client);
+ i2c_unregister_device(isp1301_i2c_client);
isp1301_i2c_client = NULL;
i2c_del_driver(&isp1301_driver);
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 68a3017..103263c 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -114,21 +114,21 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
hcd->rsrc_len = res.end - res.start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
+ printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
rv = -EBUSY;
goto err_rmr;
}
irq = irq_of_parse_and_map(dn, 0);
if (irq == NO_IRQ) {
- printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
+ printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
rv = -EBUSY;
goto err_irq;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
- printk(KERN_ERR __FILE__ ": ioremap failed\n");
+ printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
rv = -ENOMEM;
goto err_ioremap;
}
@@ -169,7 +169,7 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
} else
release_mem_region(res.start, 0x4);
} else
- pr_debug(__FILE__ ": cannot get ehci offset from fdt\n");
+ pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__);
}
iounmap(hcd->regs);
@@ -212,7 +212,7 @@ static int ohci_hcd_ppc_of_shutdown(struct of_device *op)
}
-static struct of_device_id ohci_hcd_ppc_of_match[] = {
+static const struct of_device_id ohci_hcd_ppc_of_match[] = {
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE
{
.name = "usb",
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
index cd3398b..89e670e 100644
--- a/drivers/usb/host/ohci-ppc-soc.c
+++ b/drivers/usb/host/ohci-ppc-soc.c
@@ -41,14 +41,14 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
- pr_debug(__FILE__ ": no irq\n");
+ pr_debug("%s: no irq\n", __FILE__);
return -ENODEV;
}
irq = res->start;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- pr_debug(__FILE__ ": no reg addr\n");
+ pr_debug("%s: no reg addr\n", __FILE__);
return -ENODEV;
}
@@ -59,14 +59,14 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
hcd->rsrc_len = res->end - res->start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- pr_debug(__FILE__ ": request_mem_region failed\n");
+ pr_debug("%s: request_mem_region failed\n", __FILE__);
retval = -EBUSY;
goto err1;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
- pr_debug(__FILE__ ": ioremap failed\n");
+ pr_debug("%s: ioremap failed\n", __FILE__);
retval = -ENOMEM;
goto err2;
}
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index e4bbe8e..d8eb3bd 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -31,8 +31,8 @@ static void sa1111_start_hc(struct sa1111_dev *dev)
{
unsigned int usb_rst = 0;
- printk(KERN_DEBUG __FILE__
- ": starting SA-1111 OHCI USB Controller\n");
+ printk(KERN_DEBUG "%s: starting SA-1111 OHCI USB Controller\n",
+ __FILE__);
#ifdef CONFIG_SA1100_BADGE4
if (machine_is_badge4()) {
@@ -65,8 +65,8 @@ static void sa1111_start_hc(struct sa1111_dev *dev)
static void sa1111_stop_hc(struct sa1111_dev *dev)
{
unsigned int usb_rst;
- printk(KERN_DEBUG __FILE__
- ": stopping SA-1111 OHCI USB Controller\n");
+ printk(KERN_DEBUG "%s: stopping SA-1111 OHCI USB Controller\n",
+ __FILE__);
/*
* Put the USB host controller into reset.
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 5b22a4d..e11cc3a 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -51,6 +51,7 @@
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include "../core/hcd.h"
#include "sl811.h"
@@ -1272,12 +1273,12 @@ sl811h_hub_control(
sl811h_hub_descriptor(sl811, (struct usb_hub_descriptor *) buf);
break;
case GetHubStatus:
- *(__le32 *) buf = cpu_to_le32(0);
+ put_unaligned_le32(0, buf);
break;
case GetPortStatus:
if (wIndex != 1)
goto error;
- *(__le32 *) buf = cpu_to_le32(sl811->port1);
+ put_unaligned_le32(sl811->port1, buf);
#ifndef VERBOSE
if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 99cd00f..0919706 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -735,6 +735,7 @@ static void uhci_stop(struct usb_hcd *hcd)
uhci_hc_died(uhci);
uhci_scan_schedule(uhci);
spin_unlock_irq(&uhci->lock);
+ synchronize_irq(hcd->irq);
del_timer_sync(&uhci->fsbr_timer);
release_uhci(uhci);
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 33128d5..105fa8b 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -406,6 +406,25 @@ static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
}
}
+char *xhci_get_slot_state(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx)
+{
+ struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+
+ switch (GET_SLOT_STATE(slot_ctx->dev_state)) {
+ case 0:
+ return "enabled/disabled";
+ case 1:
+ return "default";
+ case 2:
+ return "addressed";
+ case 3:
+ return "configured";
+ default:
+ return "reserved";
+ }
+}
+
void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
/* Fields are 32 bits wide, DMA addresses are in bytes */
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index ecc131c..78c4eda 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -101,12 +101,15 @@ static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset)
next = readl(base + ext_offset);
- if (ext_offset == XHCI_HCC_PARAMS_OFFSET)
+ if (ext_offset == XHCI_HCC_PARAMS_OFFSET) {
/* Find the first extended capability */
next = XHCI_HCC_EXT_CAPS(next);
- else
+ ext_offset = 0;
+ } else {
/* Find the next extended capability */
next = XHCI_EXT_CAPS_NEXT(next);
+ }
+
if (!next)
return 0;
/*
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 5e92c72..4cb69e0 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -1007,7 +1007,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
* for usb_set_interface() and usb_set_configuration() claim).
*/
if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
- udev, ep, GFP_KERNEL) < 0) {
+ udev, ep, GFP_NOIO) < 0) {
dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
__func__, ep->desc.bEndpointAddress);
return -ENOMEM;
@@ -1181,6 +1181,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
udev->slot_id);
if (ret < 0) {
+ if (command)
+ list_del(&command->cmd_list);
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
return -ENOMEM;
@@ -1264,30 +1266,13 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
xhci_zero_in_ctx(xhci, virt_dev);
/* Install new rings and free or cache any old rings */
for (i = 1; i < 31; ++i) {
- int rings_cached;
-
if (!virt_dev->eps[i].new_ring)
continue;
/* Only cache or free the old ring if it exists.
* It may not if this is the first add of an endpoint.
*/
if (virt_dev->eps[i].ring) {
- rings_cached = virt_dev->num_rings_cached;
- if (rings_cached < XHCI_MAX_RINGS_CACHED) {
- virt_dev->num_rings_cached++;
- rings_cached = virt_dev->num_rings_cached;
- virt_dev->ring_cache[rings_cached] =
- virt_dev->eps[i].ring;
- xhci_dbg(xhci, "Cached old ring, "
- "%d ring%s cached\n",
- rings_cached,
- (rings_cached > 1) ? "s" : "");
- } else {
- xhci_ring_free(xhci, virt_dev->eps[i].ring);
- xhci_dbg(xhci, "Ring cache full (%d rings), "
- "freeing ring\n",
- virt_dev->num_rings_cached);
- }
+ xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
}
virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
virt_dev->eps[i].new_ring = NULL;
@@ -1458,6 +1443,131 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
}
/*
+ * This submits a Reset Device Command, which will set the device state to 0,
+ * set the device address to 0, and disable all the endpoints except the default
+ * control endpoint. The USB core should come back and call
+ * xhci_address_device(), and then re-set up the configuration. If this is
+ * called because of a usb_reset_and_verify_device(), then the old alternate
+ * settings will be re-installed through the normal bandwidth allocation
+ * functions.
+ *
+ * Wait for the Reset Device command to finish. Remove all structures
+ * associated with the endpoints that were disabled. Clear the input device
+ * structure? Cache the rings? Reset the control endpoint 0 max packet size?
+ */
+int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ int ret, i;
+ unsigned long flags;
+ struct xhci_hcd *xhci;
+ unsigned int slot_id;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_command *reset_device_cmd;
+ int timeleft;
+ int last_freed_endpoint;
+
+ ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+ if (ret <= 0)
+ return ret;
+ xhci = hcd_to_xhci(hcd);
+ slot_id = udev->slot_id;
+ virt_dev = xhci->devs[slot_id];
+ if (!virt_dev) {
+ xhci_dbg(xhci, "%s called with invalid slot ID %u\n",
+ __func__, slot_id);
+ return -EINVAL;
+ }
+
+ xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
+ /* Allocate the command structure that holds the struct completion.
+ * Assume we're in process context, since the normal device reset
+ * process has to wait for the device anyway. Storage devices are
+ * reset as part of error handling, so use GFP_NOIO instead of
+ * GFP_KERNEL.
+ */
+ reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
+ if (!reset_device_cmd) {
+ xhci_dbg(xhci, "Couldn't allocate command structure.\n");
+ return -ENOMEM;
+ }
+
+ /* Attempt to submit the Reset Device command to the command ring */
+ spin_lock_irqsave(&xhci->lock, flags);
+ reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
+ list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
+ ret = xhci_queue_reset_device(xhci, slot_id);
+ if (ret) {
+ xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ list_del(&reset_device_cmd->cmd_list);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto command_cleanup;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* Wait for the Reset Device command to finish */
+ timeleft = wait_for_completion_interruptible_timeout(
+ reset_device_cmd->completion,
+ USB_CTRL_SET_TIMEOUT);
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for reset device command\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* The timeout might have raced with the event ring handler, so
+ * only delete from the list if the item isn't poisoned.
+ */
+ if (reset_device_cmd->cmd_list.next != LIST_POISON1)
+ list_del(&reset_device_cmd->cmd_list);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ ret = -ETIME;
+ goto command_cleanup;
+ }
+
+ /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
+ * unless we tried to reset a slot ID that wasn't enabled,
+ * or the device wasn't in the addressed or configured state.
+ */
+ ret = reset_device_cmd->status;
+ switch (ret) {
+ case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
+ case COMP_CTX_STATE: /* 0.96 completion code for same thing */
+ xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
+ slot_id,
+ xhci_get_slot_state(xhci, virt_dev->out_ctx));
+ xhci_info(xhci, "Not freeing device rings.\n");
+ /* Don't treat this as an error. May change my mind later. */
+ ret = 0;
+ goto command_cleanup;
+ case COMP_SUCCESS:
+ xhci_dbg(xhci, "Successful reset device command.\n");
+ break;
+ default:
+ if (xhci_is_vendor_info_code(xhci, ret))
+ break;
+ xhci_warn(xhci, "Unknown completion code %u for "
+ "reset device command.\n", ret);
+ ret = -EINVAL;
+ goto command_cleanup;
+ }
+
+ /* Everything but endpoint 0 is disabled, so free or cache the rings. */
+ last_freed_endpoint = 1;
+ for (i = 1; i < 31; ++i) {
+ if (!virt_dev->eps[i].ring)
+ continue;
+ xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+ last_freed_endpoint = i;
+ }
+ xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
+ xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
+ ret = 0;
+
+command_cleanup:
+ xhci_free_command(xhci, reset_device_cmd);
+ return ret;
+}
+
+/*
* At this point, the struct usb_device is about to go away, the device has
* disconnected, and all traffic has been stopped and the endpoints have been
* disabled. Free any HC data structures associated with that device.
@@ -1694,7 +1804,7 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
return -EINVAL;
}
- config_cmd = xhci_alloc_command(xhci, true, mem_flags);
+ config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
if (!config_cmd) {
xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
return -ENOMEM;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index eac5b53..208b805 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -129,6 +129,50 @@ static u32 xhci_port_state_to_neutral(u32 state)
return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);
}
+static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex,
+ u32 __iomem *addr, u32 port_status)
+{
+ /* Write 1 to disable the port */
+ xhci_writel(xhci, port_status | PORT_PE, addr);
+ port_status = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "disable port, actual port %d status = 0x%x\n",
+ wIndex, port_status);
+}
+
+static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
+ u16 wIndex, u32 __iomem *addr, u32 port_status)
+{
+ char *port_change_bit;
+ u32 status;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_C_RESET:
+ status = PORT_RC;
+ port_change_bit = "reset";
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ status = PORT_CSC;
+ port_change_bit = "connect";
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ status = PORT_OCC;
+ port_change_bit = "over-current";
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ status = PORT_PEC;
+ port_change_bit = "enable/disable";
+ break;
+ default:
+ /* Should never happen */
+ return;
+ }
+ /* Change bits are all write 1 to clear */
+ xhci_writel(xhci, port_status | status, addr);
+ port_status = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
+ port_change_bit, wIndex, port_status);
+}
+
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
@@ -138,7 +182,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u32 temp, status;
int retval = 0;
u32 __iomem *addr;
- char *port_change_bit;
ports = HCS_MAX_PORTS(xhci->hcs_params1);
@@ -229,26 +272,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = xhci_port_state_to_neutral(temp);
switch (wValue) {
case USB_PORT_FEAT_C_RESET:
- status = PORT_RC;
- port_change_bit = "reset";
- break;
case USB_PORT_FEAT_C_CONNECTION:
- status = PORT_CSC;
- port_change_bit = "connect";
- break;
case USB_PORT_FEAT_C_OVER_CURRENT:
- status = PORT_OCC;
- port_change_bit = "over-current";
+ case USB_PORT_FEAT_C_ENABLE:
+ xhci_clear_port_change_bit(xhci, wValue, wIndex,
+ addr, temp);
+ break;
+ case USB_PORT_FEAT_ENABLE:
+ xhci_disable_port(xhci, wIndex, addr, temp);
break;
default:
goto error;
}
- /* Change bits are all write 1 to clear */
- xhci_writel(xhci, temp | status, addr);
- temp = xhci_readl(xhci, addr);
- xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
- port_change_bit, wIndex, temp);
- temp = xhci_readl(xhci, addr); /* unblock any posted writes */
break;
default:
error:
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bffcef7..49f7d72 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -198,6 +198,31 @@ fail:
return 0;
}
+void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ unsigned int ep_index)
+{
+ int rings_cached;
+
+ rings_cached = virt_dev->num_rings_cached;
+ if (rings_cached < XHCI_MAX_RINGS_CACHED) {
+ virt_dev->num_rings_cached++;
+ rings_cached = virt_dev->num_rings_cached;
+ virt_dev->ring_cache[rings_cached] =
+ virt_dev->eps[ep_index].ring;
+ xhci_dbg(xhci, "Cached old ring, "
+ "%d ring%s cached\n",
+ rings_cached,
+ (rings_cached > 1) ? "s" : "");
+ } else {
+ xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+ xhci_dbg(xhci, "Ring cache full (%d rings), "
+ "freeing ring\n",
+ virt_dev->num_rings_cached);
+ }
+ virt_dev->eps[ep_index].ring = NULL;
+}
+
/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
* pointers to the beginning of the ring.
*/
@@ -242,6 +267,8 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
+ if (!ctx)
+ return;
dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
kfree(ctx);
}
@@ -427,7 +454,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
case USB_SPEED_LOW:
slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
break;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
return -EINVAL;
break;
@@ -471,7 +498,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
case USB_SPEED_LOW:
ep0_ctx->ep_info2 |= MAX_PACKET(8);
break;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
return -EINVAL;
break;
@@ -819,7 +846,8 @@ static void scratchpad_free(struct xhci_hcd *xhci)
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
- bool allocate_completion, gfp_t mem_flags)
+ bool allocate_in_ctx, bool allocate_completion,
+ gfp_t mem_flags)
{
struct xhci_command *command;
@@ -827,11 +855,14 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
if (!command)
return NULL;
- command->in_ctx =
- xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
- if (!command->in_ctx) {
- kfree(command);
- return NULL;
+ if (allocate_in_ctx) {
+ command->in_ctx =
+ xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
+ mem_flags);
+ if (!command->in_ctx) {
+ kfree(command);
+ return NULL;
+ }
}
if (allocate_completion) {
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index e097008..417d37a 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -139,6 +139,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
.reset_bandwidth = xhci_reset_bandwidth,
.address_device = xhci_address_device,
.update_hub_device = xhci_update_hub_device,
+ .reset_device = xhci_reset_device,
/*
* scheduling support
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index ee7bc7e..6ba841b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -953,6 +953,17 @@ bandwidth_change:
case TRB_TYPE(TRB_RESET_EP):
handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
break;
+ case TRB_TYPE(TRB_RESET_DEV):
+ xhci_dbg(xhci, "Completed reset device command.\n");
+ slot_id = TRB_TO_SLOT_ID(
+ xhci->cmd_ring->dequeue->generic.field[3]);
+ virt_dev = xhci->devs[slot_id];
+ if (virt_dev)
+ handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
+ else
+ xhci_warn(xhci, "Reset device command completion "
+ "for disabled slot %u\n", slot_id);
+ break;
default:
/* Skip over unknown commands on the event ring */
xhci->error_bitmask |= 1 << 6;
@@ -1080,6 +1091,20 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
return 0;
}
+int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
+{
+ if (trb_comp_code >= 224 && trb_comp_code <= 255) {
+ /* Vendor defined "informational" completion code,
+ * treat as not-an-error.
+ */
+ xhci_dbg(xhci, "Vendor defined info completion code %u\n",
+ trb_comp_code);
+ xhci_dbg(xhci, "Treating code as success.\n");
+ return 1;
+ }
+ return 0;
+}
+
/*
* If this function returns an error condition, it means it got a Transfer
* event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -1196,13 +1221,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
status = -ENOSR;
break;
default:
- if (trb_comp_code >= 224 && trb_comp_code <= 255) {
- /* Vendor defined "informational" completion code,
- * treat as not-an-error.
- */
- xhci_dbg(xhci, "Vendor defined info completion code %u\n",
- trb_comp_code);
- xhci_dbg(xhci, "Treating code as success.\n");
+ if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
status = 0;
break;
}
@@ -2181,6 +2200,14 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
false);
}
+/* Queue a reset device command TRB */
+int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
+{
+ return queue_command(xhci, 0, 0, 0,
+ TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
+ false);
+}
+
/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id, bool command_must_succeed)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8778135..e5eb09b 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1210,6 +1210,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
+char *xhci_get_slot_state(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx);
/* xHCI memory management */
void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1233,8 +1235,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
struct usb_device *udev, struct usb_host_endpoint *ep,
gfp_t mem_flags);
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ unsigned int ep_index);
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
- bool allocate_completion, gfp_t mem_flags);
+ bool allocate_in_ctx, bool allocate_completion,
+ gfp_t mem_flags);
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command);
@@ -1264,6 +1270,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
+int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
@@ -1272,6 +1279,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
union xhci_trb *start_trb, union xhci_trb *end_trb,
dma_addr_t suspect_dma);
+int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
void *xhci_setup_one_noop(struct xhci_hcd *xhci);
void xhci_handle_event(struct xhci_hcd *xhci);
@@ -1293,6 +1301,7 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id);
int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index);
+int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_td *cur_td, struct xhci_dequeue_state *state);
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index eca355d..e192e8f 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -967,7 +967,7 @@ static const struct file_operations mdc800_device_ops =
-static struct usb_device_id mdc800_table [] = {
+static const struct usb_device_id mdc800_table[] = {
{ USB_DEVICE(MDC800_VENDOR_ID, MDC800_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 459a728..3a6bcd5 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -155,7 +155,7 @@ static int mts_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void mts_usb_disconnect(struct usb_interface *intf);
-static struct usb_device_id mts_usb_ids [];
+static const struct usb_device_id mts_usb_ids[];
static struct usb_driver mts_usb_driver = {
.name = "microtekX6",
@@ -656,7 +656,7 @@ static struct scsi_host_template mts_scsi_host_template = {
/* The entries of microtek_table must correspond, line-by-line to
the entries of mts_supported_products[]. */
-static struct usb_device_id mts_usb_ids [] =
+static const struct usb_device_id mts_usb_ids[] =
{
{ USB_DEVICE(0x4ce, 0x0300) },
{ USB_DEVICE(0x5da, 0x0094) },
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index abe3aa6..55660ea 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -87,17 +87,6 @@ config USB_LCD
To compile this driver as a module, choose M here: the
module will be called usblcd.
-config USB_BERRY_CHARGE
- tristate "USB BlackBerry recharge support"
- depends on USB
- help
- Say Y here if you want to connect a BlackBerry device to your
- computer's USB port and have it automatically switch to "recharge"
- mode.
-
- To compile this driver as a module, choose M here: the
- module will be called berry_charge.
-
config USB_LED
tristate "USB LED driver support"
depends on USB
@@ -242,17 +231,3 @@ config USB_ISIGHTFW
driver beforehand. Tools for doing so are available at
http://bersace03.free.fr
-config USB_VST
- tristate "USB VST driver"
- depends on USB
- help
- This driver is intended for Vernier Software Technologies
- bulk usb devices such as their Ocean-Optics spectrometers or
- Labquest.
- It is a bulk channel driver with configurable read and write
- timeouts.
-
- To compile this driver as a module, choose M here: the
- module will be called vstusb.
-
-
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 0826aab..717703e 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_USB_ADUTUX) += adutux.o
obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o
-obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o
obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o
obj-$(CONFIG_USB_CYTHERM) += cytherm.o
obj-$(CONFIG_USB_EMI26) += emi26.o
@@ -23,7 +22,6 @@ obj-$(CONFIG_USB_TEST) += usbtest.o
obj-$(CONFIG_USB_TRANCEVIBRATOR) += trancevibrator.o
obj-$(CONFIG_USB_USS720) += uss720.o
obj-$(CONFIG_USB_SEVSEG) += usbsevseg.o
-obj-$(CONFIG_USB_VST) += vstusb.o
obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga/
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 2035265..d240de0 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -38,7 +38,7 @@ static int debug = 1;
#define dbg(lvl, format, arg...) \
do { \
if (debug >= lvl) \
- printk(KERN_DEBUG __FILE__ " : " format " \n", ## arg); \
+ printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
} while (0)
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(debug, "Debug enabled or not");
#define ADU_PRODUCT_ID 0x0064
/* table of devices that work with this driver */
-static struct usb_device_id device_table [] = {
+static const struct usb_device_id device_table[] = {
{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID) }, /* ADU100 */
{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+20) }, /* ADU120 */
{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+30) }, /* ADU130 */
@@ -132,8 +132,8 @@ static void adu_debug_data(int level, const char *function, int size,
if (debug < level)
return;
- printk(KERN_DEBUG __FILE__": %s - length = %d, data = ",
- function, size);
+ printk(KERN_DEBUG "%s: %s - length = %d, data = ",
+ __FILE__, function, size);
for (i = 0; i < size; ++i)
printk("%.2x ", data[i]);
printk("\n");
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 1eb9e41..4d2952f 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -57,7 +57,7 @@
.bInterfaceProtocol = 0x00
/* table of devices that work with this driver */
-static struct usb_device_id appledisplay_table [] = {
+static const struct usb_device_id appledisplay_table[] = {
{ APPLEDISPLAY_DEVICE(0x9218) },
{ APPLEDISPLAY_DEVICE(0x9219) },
{ APPLEDISPLAY_DEVICE(0x921c) },
@@ -179,7 +179,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
return pdata->msgdata[1];
}
-static struct backlight_ops appledisplay_bl_data = {
+static const struct backlight_ops appledisplay_bl_data = {
.get_brightness = appledisplay_bl_get_brightness,
.update_status = appledisplay_bl_update_status,
};
@@ -283,6 +283,7 @@ static int appledisplay_probe(struct usb_interface *iface,
&appledisplay_bl_data);
if (IS_ERR(pdata->bd)) {
dev_err(&iface->dev, "Backlight registration failed\n");
+ retval = PTR_ERR(pdata->bd);
goto error;
}
diff --git a/drivers/usb/misc/berry_charge.c b/drivers/usb/misc/berry_charge.c
deleted file mode 100644
index c05a85b..0000000
--- a/drivers/usb/misc/berry_charge.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * USB BlackBerry charging module
- *
- * Copyright (C) 2007 Greg Kroah-Hartman <gregkh@suse.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- * Information on how to switch configs was taken by the bcharge.cc file
- * created by the barry.sf.net project.
- *
- * bcharge.cc has the following copyright:
- * Copyright (C) 2006, Net Direct Inc. (http://www.netdirect.ca/)
- * and is released under the GPLv2.
- *
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/usb.h>
-
-#define RIM_VENDOR 0x0fca
-#define BLACKBERRY 0x0001
-#define BLACKBERRY_PEARL_DUAL 0x0004
-#define BLACKBERRY_PEARL 0x0006
-
-static int debug;
-static int pearl_dual_mode = 1;
-
-#ifdef dbg
-#undef dbg
-#endif
-#define dbg(dev, format, arg...) \
- if (debug) \
- dev_printk(KERN_DEBUG , dev , format , ## arg)
-
-static struct usb_device_id id_table [] = {
- { USB_DEVICE(RIM_VENDOR, BLACKBERRY) },
- { USB_DEVICE(RIM_VENDOR, BLACKBERRY_PEARL) },
- { USB_DEVICE(RIM_VENDOR, BLACKBERRY_PEARL_DUAL) },
- { }, /* Terminating entry */
-};
-MODULE_DEVICE_TABLE(usb, id_table);
-
-static int magic_charge(struct usb_device *udev)
-{
- char *dummy_buffer = kzalloc(2, GFP_KERNEL);
- int retval;
-
- if (!dummy_buffer)
- return -ENOMEM;
-
- /* send two magic commands and then set the configuration. The device
- * will then reset itself with the new power usage and should start
- * charging. */
-
- /* Note, with testing, it only seems that the first message is really
- * needed (at least for the 8700c), but to be safe, we emulate what
- * other operating systems seem to be sending to their device. We
- * really need to get some specs for this device to be sure about what
- * is going on here.
- */
- dbg(&udev->dev, "Sending first magic command\n");
- retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- 0xa5, 0xc0, 0, 1, dummy_buffer, 2, 100);
- if (retval != 2) {
- dev_err(&udev->dev, "First magic command failed: %d.\n",
- retval);
- goto exit;
- }
-
- dbg(&udev->dev, "Sending second magic command\n");
- retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- 0xa2, 0x40, 0, 1, dummy_buffer, 0, 100);
- if (retval != 0) {
- dev_err(&udev->dev, "Second magic command failed: %d.\n",
- retval);
- goto exit;
- }
-
- dbg(&udev->dev, "Calling set_configuration\n");
- retval = usb_driver_set_configuration(udev, 1);
- if (retval)
- dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval);
-
-exit:
- kfree(dummy_buffer);
- return retval;
-}
-
-static int magic_dual_mode(struct usb_device *udev)
-{
- char *dummy_buffer = kzalloc(2, GFP_KERNEL);
- int retval;
-
- if (!dummy_buffer)
- return -ENOMEM;
-
- /* send magic command so that the Blackberry Pearl device exposes
- * two interfaces: both the USB mass-storage one and one which can
- * be used for database access. */
- dbg(&udev->dev, "Sending magic pearl command\n");
- retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- 0xa9, 0xc0, 1, 1, dummy_buffer, 2, 100);
- dbg(&udev->dev, "Magic pearl command returned %d\n", retval);
-
- dbg(&udev->dev, "Calling set_configuration\n");
- retval = usb_driver_set_configuration(udev, 1);
- if (retval)
- dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval);
-
- kfree(dummy_buffer);
- return retval;
-}
-
-static int berry_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *udev = interface_to_usbdev(intf);
-
- if (udev->bus_mA < 500) {
- dbg(&udev->dev, "Not enough power to charge available\n");
- return -ENODEV;
- }
-
- dbg(&udev->dev, "Power is set to %dmA\n",
- udev->actconfig->desc.bMaxPower * 2);
-
- /* check the power usage so we don't try to enable something that is
- * already enabled */
- if ((udev->actconfig->desc.bMaxPower * 2) == 500) {
- dbg(&udev->dev, "device is already charging, power is "
- "set to %dmA\n", udev->actconfig->desc.bMaxPower * 2);
- return -ENODEV;
- }
-
- /* turn the power on */
- magic_charge(udev);
-
- if ((le16_to_cpu(udev->descriptor.idProduct) == BLACKBERRY_PEARL) &&
- (pearl_dual_mode))
- magic_dual_mode(udev);
-
- /* we don't really want to bind to the device, userspace programs can
- * handle the syncing just fine, so get outta here. */
- return -ENODEV;
-}
-
-static void berry_disconnect(struct usb_interface *intf)
-{
-}
-
-static struct usb_driver berry_driver = {
- .name = "berry_charge",
- .probe = berry_probe,
- .disconnect = berry_disconnect,
- .id_table = id_table,
-};
-
-static int __init berry_init(void)
-{
- return usb_register(&berry_driver);
-}
-
-static void __exit berry_exit(void)
-{
- usb_deregister(&berry_driver);
-}
-
-module_init(berry_init);
-module_exit(berry_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
-module_param(pearl_dual_mode, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(pearl_dual_mode, "Change Blackberry Pearl to run in dual mode");
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 5720bfe..1547d8c 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -56,7 +56,7 @@
/* table of devices that work with this driver */
-static struct usb_device_id cypress_table [] = {
+static const struct usb_device_id cypress_table[] = {
{ USB_DEVICE(CYPRESS_VENDOR_ID, CYPRESS_PRODUCT_ID) },
{ }
};
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 4fb3c38..b9cbbbd 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -27,7 +27,7 @@
#define USB_SKEL_VENDOR_ID 0x04b4
#define USB_SKEL_PRODUCT_ID 0x0002
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
{ }
};
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index 879a980..a6521c9 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -245,7 +245,7 @@ wraperr:
return err;
}
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(EMI26_VENDOR_ID, EMI26_PRODUCT_ID) },
{ USB_DEVICE(EMI26_VENDOR_ID, EMI26B_PRODUCT_ID) },
{ } /* Terminating entry */
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index 59860b3..fc15ad4 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -259,7 +259,7 @@ wraperr:
return err;
}
-static __devinitdata struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] __devinitconst = {
{ USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 9d0675e..1edb6d3 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -86,7 +86,7 @@ static struct list_head ftdi_static_list;
#define USB_FTDI_ELAN_VENDOR_ID 0x0403
#define USB_FTDI_ELAN_PRODUCT_ID 0xd6ea
/* table of devices that work with this driver*/
-static struct usb_device_id ftdi_elan_table[] = {
+static const struct usb_device_id ftdi_elan_table[] = {
{USB_DEVICE(USB_FTDI_ELAN_VENDOR_ID, USB_FTDI_ELAN_PRODUCT_ID)},
{ /* Terminating entry */ }
};
@@ -623,9 +623,12 @@ static void ftdi_elan_status_work(struct work_struct *work)
*/
static int ftdi_elan_open(struct inode *inode, struct file *file)
{
- int subminor = iminor(inode);
- struct usb_interface *interface = usb_find_interface(&ftdi_elan_driver,
- subminor);
+ int subminor;
+ struct usb_interface *interface;
+
+ subminor = iminor(inode);
+ interface = usb_find_interface(&ftdi_elan_driver, subminor);
+
if (!interface) {
printk(KERN_ERR "can't find device for minor %d\n", subminor);
return -ENODEV;
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 1337a9c..a54c3cb 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -48,7 +48,7 @@
#define ID_CHERRY 0x0010
/* device ID table */
-static struct usb_device_id idmouse_table[] = {
+static const struct usb_device_id idmouse_table[] = {
{USB_DEVICE(ID_SIEMENS, ID_IDMOUSE)}, /* Siemens ID Mouse (Professional) */
{USB_DEVICE(ID_SIEMENS, ID_CHERRY )}, /* Cherry FingerTIP ID Board */
{} /* terminating null entry */
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index e75bb87..d3c8523 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -139,7 +139,7 @@ static int usb_set_report(struct usb_interface *intf, unsigned char type,
/* driver registration */
/*---------------------*/
/* table of devices that work with this driver */
-static struct usb_device_id iowarrior_ids[] = {
+static const struct usb_device_id iowarrior_ids[] = {
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW40)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)},
@@ -602,10 +602,12 @@ static int iowarrior_open(struct inode *inode, struct file *file)
dbg("%s", __func__);
+ lock_kernel();
subminor = iminor(inode);
interface = usb_find_interface(&iowarrior_driver, subminor);
if (!interface) {
+ unlock_kernel();
err("%s - error, can't find device for minor %d", __func__,
subminor);
return -ENODEV;
@@ -615,6 +617,7 @@ static int iowarrior_open(struct inode *inode, struct file *file)
dev = usb_get_intfdata(interface);
if (!dev) {
mutex_unlock(&iowarrior_open_disc_lock);
+ unlock_kernel();
return -ENODEV;
}
@@ -641,6 +644,7 @@ static int iowarrior_open(struct inode *inode, struct file *file)
out:
mutex_unlock(&dev->mutex);
+ unlock_kernel();
return retval;
}
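The lock_kernel()/unlock_kernel() pairs above are the BKL being pushed down from the chardev layer into the drivers themselves, closing the race between the minor-number lookup and disconnect(). A minimal sketch of the pattern, assuming a hypothetical example_driver and omitting the per-device setup:

    #include <linux/fs.h>        /* iminor() */
    #include <linux/smp_lock.h>  /* lock_kernel()/unlock_kernel() */
    #include <linux/usb.h>

    static struct usb_driver example_driver;  /* registered elsewhere */

    static int example_open(struct inode *inode, struct file *file)
    {
        struct usb_interface *interface;
        int subminor = iminor(inode);

        lock_kernel();  /* BKL, pushed down from the open() caller */
        interface = usb_find_interface(&example_driver, subminor);
        if (!interface) {
            unlock_kernel();
            return -ENODEV;
        }
        /* ... grab intfdata, take the device mutex, etc. ... */
        unlock_kernel();  /* every return path must drop the BKL */
        return 0;
    }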
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
index b897f65..06e990a 100644
--- a/drivers/usb/misc/isight_firmware.c
+++ b/drivers/usb/misc/isight_firmware.c
@@ -26,7 +26,7 @@
#include <linux/errno.h>
#include <linux/module.h>
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x05ac, 0x8300)},
{},
};
@@ -112,6 +112,8 @@ out:
return ret;
}
+MODULE_FIRMWARE("isight.fw");
+
static void isight_firmware_disconnect(struct usb_interface *intf)
{
}
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 90f1301..dd41d87 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -69,7 +69,7 @@
#endif
/* table of devices that work with this driver */
-static struct usb_device_id ld_usb_table [] = {
+static const struct usb_device_id ld_usb_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
@@ -798,7 +798,7 @@ static int __init ld_usb_init(void)
/* register this driver with the USB subsystem */
retval = usb_register(&ld_usb_driver);
if (retval)
- err("usb_register failed for the "__FILE__" driver. Error number %d\n", retval);
+ err("usb_register failed for the %s driver. Error number %d\n", __FILE__, retval);
return retval;
}
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index faa6d62..8547bf9 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -95,8 +95,11 @@
/* Use our own dbg macro */
#undef dbg
-#define dbg(lvl, format, arg...) do { if (debug >= lvl) printk(KERN_DEBUG __FILE__ ": " format "\n", ## arg); } while (0)
-
+#define dbg(lvl, format, arg...) \
+do { \
+ if (debug >= lvl) \
+ printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
+} while (0)
/* Version Information */
#define DRIVER_VERSION "v0.96"
@@ -192,7 +195,7 @@ struct tower_get_version_reply {
/* table of devices that work with this driver */
-static struct usb_device_id tower_table [] = {
+static const struct usb_device_id tower_table[] = {
{ USB_DEVICE(LEGO_USB_TOWER_VENDOR_ID, LEGO_USB_TOWER_PRODUCT_ID) },
{ } /* Terminating entry */
};
@@ -302,7 +305,7 @@ static inline void lego_usb_tower_debug_data (int level, const char *function, i
if (debug < level)
return;
- printk (KERN_DEBUG __FILE__": %s - length = %d, data = ", function, size);
+ printk (KERN_DEBUG "%s: %s - length = %d, data = ", __FILE__, function, size);
for (i = 0; i < size; ++i) {
printk ("%.2x ", data[i]);
}
@@ -1055,7 +1058,7 @@ static int __init lego_usb_tower_init(void)
/* register this driver with the USB subsystem */
result = usb_register(&tower_driver);
if (result < 0) {
- err("usb_register failed for the "__FILE__" driver. Error number %d", result);
+ err("usb_register failed for the %s driver. Error number %d", __FILE__, result);
retval = -1;
goto exit;
}
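The __FILE__ fixes in ldusb and legousbtower stop pasting the file name into the format string itself; if the path ever contained a '%' (out-of-tree builds, for instance), it would be parsed as a conversion specifier. Passing it as a %s argument keeps the format literal:

    /* fragile: a '%' in the path would be parsed as a conversion */
    printk(KERN_ERR "usb_register failed for the " __FILE__ " driver\n");

    /* safe: the file name travels as data, not as format */
    printk(KERN_ERR "usb_register failed for the %s driver\n", __FILE__);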
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 32d0199..a85771b 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -78,10 +78,13 @@ static int open_rio(struct inode *inode, struct file *file)
{
struct rio_usb_data *rio = &rio_instance;
+ /* serialize open() against disconnect() */
+ lock_kernel();
mutex_lock(&(rio->lock));
if (rio->isopen || !rio->present) {
mutex_unlock(&(rio->lock));
+ unlock_kernel();
return -EBUSY;
}
rio->isopen = 1;
@@ -91,6 +94,7 @@ static int open_rio(struct inode *inode, struct file *file)
mutex_unlock(&(rio->lock));
dev_info(&rio->rio_dev->dev, "Rio opened.\n");
+ unlock_kernel();
return 0;
}
@@ -115,7 +119,6 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
int retries;
int retval=0;
- lock_kernel();
mutex_lock(&(rio->lock));
/* Sanity check to make sure rio is connected, powered, etc */
if (rio->present == 0 || rio->rio_dev == NULL) {
@@ -254,7 +257,6 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
err_out:
mutex_unlock(&(rio->lock));
- unlock_kernel();
return retval;
}
@@ -489,6 +491,7 @@ static void disconnect_rio(struct usb_interface *intf)
struct rio_usb_data *rio = usb_get_intfdata (intf);
usb_set_intfdata (intf, NULL);
+ lock_kernel();
if (rio) {
usb_deregister_dev(intf, &usb_rio_class);
@@ -498,6 +501,7 @@ static void disconnect_rio(struct usb_interface *intf)
/* better let it finish - the release will do whats needed */
rio->rio_dev = NULL;
mutex_unlock(&(rio->lock));
+ unlock_kernel();
return;
}
kfree(rio->ibuf);
@@ -508,9 +512,10 @@ static void disconnect_rio(struct usb_interface *intf)
rio->present = 0;
mutex_unlock(&(rio->lock));
}
+ unlock_kernel();
}
-static struct usb_device_id rio_table [] = {
+static const struct usb_device_id rio_table[] = {
{ USB_DEVICE(0x0841, 1) }, /* Rio 500 */
{ } /* Terminating entry */
};
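Note how rio500 ends up: the BKL now brackets only open_rio() and disconnect_rio(), while ioctl_rio() relies solely on rio->lock. A condensed sketch of the resulting ioctl shape, using the driver's own names:

    static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
    {
        struct rio_usb_data *rio = &rio_instance;
        long retval = 0;

        mutex_lock(&(rio->lock));  /* per-device lock is enough here */
        if (rio->present == 0 || rio->rio_dev == NULL) {
            retval = -ENODEV;
            goto err_out;
        }
        /* ... command handling ... */
    err_out:
        mutex_unlock(&(rio->lock));
        return retval;             /* no BKL anywhere on this path */
    }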
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 8b37a4b..aae95a0 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -250,7 +250,7 @@ sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe,
sisusb->urbstatus[index] |= SU_URB_BUSY;
/* Submit URB */
- retval = usb_submit_urb(urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
/* If OK, and if timeout > 0, wait for completion */
if ((retval == 0) && timeout) {
@@ -306,7 +306,7 @@ sisusb_bulkin_msg(struct sisusb_usb_data *sisusb, unsigned int pipe, void *data,
urb->actual_length = 0;
sisusb->completein = 0;
- retval = usb_submit_urb(urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval == 0) {
wait_event_timeout(sisusb->wait_q, sisusb->completein, timeout);
if (!sisusb->completein) {
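The GFP_ATOMIC to GFP_KERNEL switches are safe because sisusb_bulkout_msg() and sisusb_bulkin_msg() run in process context and already sleep (wait_event_timeout()); GFP_KERNEL lets the allocator reclaim memory instead of failing under pressure. The rule of thumb:

    /* process context, sleeping allowed: prefer GFP_KERNEL */
    retval = usb_submit_urb(urb, GFP_KERNEL);

    /* interrupt context or spinlock held: GFP_ATOMIC is mandatory */
    retval = usb_submit_urb(urb, GFP_ATOMIC);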
@@ -2416,21 +2416,28 @@ sisusb_open(struct inode *inode, struct file *file)
struct usb_interface *interface;
int subminor = iminor(inode);
- if (!(interface = usb_find_interface(&sisusb_driver, subminor)))
+ lock_kernel();
+ if (!(interface = usb_find_interface(&sisusb_driver, subminor))) {
+ unlock_kernel();
return -ENODEV;
+ }
- if (!(sisusb = usb_get_intfdata(interface)))
+ if (!(sisusb = usb_get_intfdata(interface))) {
+ unlock_kernel();
return -ENODEV;
+ }
mutex_lock(&sisusb->lock);
if (!sisusb->present || !sisusb->ready) {
mutex_unlock(&sisusb->lock);
+ unlock_kernel();
return -ENODEV;
}
if (sisusb->isopen) {
mutex_unlock(&sisusb->lock);
+ unlock_kernel();
return -EBUSY;
}
@@ -2439,11 +2446,13 @@ sisusb_open(struct inode *inode, struct file *file)
if (sisusb_init_gfxdevice(sisusb, 0)) {
mutex_unlock(&sisusb->lock);
dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n");
+ unlock_kernel();
return -EIO;
}
} else {
mutex_unlock(&sisusb->lock);
dev_err(&sisusb->sisusb_dev->dev, "Device not attached to USB 2.0 hub\n");
+ unlock_kernel();
return -EIO;
}
}
@@ -2456,6 +2465,7 @@ sisusb_open(struct inode *inode, struct file *file)
file->private_data = sisusb;
mutex_unlock(&sisusb->lock);
+ unlock_kernel();
return 0;
}
@@ -3238,7 +3248,7 @@ static void sisusb_disconnect(struct usb_interface *intf)
kref_put(&sisusb->kref, sisusb_delete);
}
-static struct usb_device_id sisusb_table [] = {
+static const struct usb_device_id sisusb_table[] = {
{ USB_DEVICE(0x0711, 0x0550) },
{ USB_DEVICE(0x0711, 0x0900) },
{ USB_DEVICE(0x0711, 0x0901) },
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index 2e14102..5da28ea 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -33,7 +33,7 @@
#define TRANCEVIBRATOR_VENDOR_ID 0x0b49 /* ASCII Corporation */
#define TRANCEVIBRATOR_PRODUCT_ID 0x064f /* Trance Vibrator */
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(TRANCEVIBRATOR_VENDOR_ID, TRANCEVIBRATOR_PRODUCT_ID) },
{ },
};
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 4fb1203..90aede9 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -30,7 +30,7 @@
#define IOCTL_GET_DRV_VERSION 2
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, },
{ },
};
@@ -74,10 +74,12 @@ static int lcd_open(struct inode *inode, struct file *file)
struct usb_interface *interface;
int subminor, r;
+ lock_kernel();
subminor = iminor(inode);
interface = usb_find_interface(&lcd_driver, subminor);
if (!interface) {
+ unlock_kernel();
err ("USBLCD: %s - error, can't find device for minor %d",
__func__, subminor);
return -ENODEV;
@@ -87,6 +89,7 @@ static int lcd_open(struct inode *inode, struct file *file)
dev = usb_get_intfdata(interface);
if (!dev) {
mutex_unlock(&open_disc_mutex);
+ unlock_kernel();
return -ENODEV;
}
@@ -98,11 +101,13 @@ static int lcd_open(struct inode *inode, struct file *file)
r = usb_autopm_get_interface(interface);
if (r < 0) {
kref_put(&dev->kref, lcd_delete);
+ unlock_kernel();
return r;
}
/* save our object in the file's private structure */
file->private_data = dev;
+ unlock_kernel();
return 0;
}
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 06cb719..63da2c3 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -24,7 +24,7 @@
#define PRODUCT_ID 0x1223
/* table of devices that work with this driver */
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(VENDOR_ID, PRODUCT_ID) },
{ },
};
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index 3db2555..a9555cb 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -27,7 +27,7 @@
#define MAXLEN 6
/* table of devices that work with this driver */
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(VENDOR_ID, PRODUCT_ID) },
{ },
};
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 3dab0c0..a21cce6 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1580,10 +1580,6 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
return -ERESTARTSYS;
/* FIXME: What if a system sleep starts while a test is running? */
- if (!intf->is_active) {
- mutex_unlock(&dev->lock);
- return -EHOSTUNREACH;
- }
/* some devices, like ez-usb default devices, need a non-default
* altsetting to have any active endpoints. some tests change
@@ -2101,7 +2097,7 @@ static struct usbtest_info generic_info = {
#endif
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
/*-------------------------------------------------------------*/
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 9a6c27a..f56fed5 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -770,7 +770,7 @@ static void uss720_disconnect(struct usb_interface *intf)
}
/* table of cables that work through this driver */
-static struct usb_device_id uss720_table [] = {
+static const struct usb_device_id uss720_table[] = {
{ USB_DEVICE(0x047e, 0x1001) },
{ USB_DEVICE(0x0557, 0x2001) },
{ USB_DEVICE(0x0729, 0x1284) },
diff --git a/drivers/usb/misc/vstusb.c b/drivers/usb/misc/vstusb.c
deleted file mode 100644
index f26ea8d..0000000
--- a/drivers/usb/misc/vstusb.c
+++ /dev/null
@@ -1,783 +0,0 @@
-/*****************************************************************************
- * File: drivers/usb/misc/vstusb.c
- *
- * Purpose: Support for the bulk USB Vernier Spectrophotometers
- *
- * Author: Johnnie Peters
- * Axian Consulting
- * Beaverton, OR, USA 97005
- *
- * Modified by: EQware Engineering, Inc.
- * Oregon City, OR, USA 97045
- *
- * Copyright: 2007, 2008
- * Vernier Software & Technology
- * Beaverton, OR, USA 97005
- *
- * Web: www.vernier.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *****************************************************************************/
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-#include <linux/usb.h>
-
-#include <linux/usb/vstusb.h>
-
-#define DRIVER_VERSION "VST USB Driver Version 1.5"
-#define DRIVER_DESC "Vernier Software Technology Bulk USB Driver"
-
-#ifdef CONFIG_USB_DYNAMIC_MINORS
- #define VSTUSB_MINOR_BASE 0
-#else
- #define VSTUSB_MINOR_BASE 199
-#endif
-
-#define USB_VENDOR_OCEANOPTICS 0x2457
-#define USB_VENDOR_VERNIER 0x08F7 /* Vernier Software & Technology */
-
-#define USB_PRODUCT_USB2000 0x1002
-#define USB_PRODUCT_ADC1000_FW 0x1003 /* firmware download (renumerates) */
-#define USB_PRODUCT_ADC1000 0x1004
-#define USB_PRODUCT_HR2000_FW 0x1009 /* firmware download (renumerates) */
-#define USB_PRODUCT_HR2000 0x100A
-#define USB_PRODUCT_HR4000_FW 0x1011 /* firmware download (renumerates) */
-#define USB_PRODUCT_HR4000 0x1012
-#define USB_PRODUCT_USB650 0x1014 /* "Red Tide" */
-#define USB_PRODUCT_QE65000 0x1018
-#define USB_PRODUCT_USB4000 0x1022
-#define USB_PRODUCT_USB325 0x1024 /* "Vernier Spectrometer" */
-
-#define USB_PRODUCT_LABPRO 0x0001
-#define USB_PRODUCT_LABQUEST 0x0005
-
-#define VST_MAXBUFFER (64*1024)
-
-static struct usb_device_id id_table[] = {
- { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB2000)},
- { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_HR4000)},
- { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB650)},
- { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB4000)},
- { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB325)},
- { USB_DEVICE(USB_VENDOR_VERNIER, USB_PRODUCT_LABQUEST)},
- { USB_DEVICE(USB_VENDOR_VERNIER, USB_PRODUCT_LABPRO)},
- {},
-};
-
-MODULE_DEVICE_TABLE(usb, id_table);
-
-struct vstusb_device {
- struct kref kref;
- struct mutex lock;
- struct usb_device *usb_dev;
- char present;
- char isopen;
- struct usb_anchor submitted;
- int rd_pipe;
- int rd_timeout_ms;
- int wr_pipe;
- int wr_timeout_ms;
-};
-#define to_vst_dev(d) container_of(d, struct vstusb_device, kref)
-
-static struct usb_driver vstusb_driver;
-
-static void vstusb_delete(struct kref *kref)
-{
- struct vstusb_device *vstdev = to_vst_dev(kref);
-
- usb_put_dev(vstdev->usb_dev);
- kfree(vstdev);
-}
-
-static int vstusb_open(struct inode *inode, struct file *file)
-{
- struct vstusb_device *vstdev;
- struct usb_interface *interface;
-
- interface = usb_find_interface(&vstusb_driver, iminor(inode));
-
- if (!interface) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s - error, can't find device for minor %d\n",
- __func__, iminor(inode));
- return -ENODEV;
- }
-
- vstdev = usb_get_intfdata(interface);
-
- if (!vstdev)
- return -ENODEV;
-
- /* lock this device */
- mutex_lock(&vstdev->lock);
-
- /* can only open one time */
- if ((!vstdev->present) || (vstdev->isopen)) {
- mutex_unlock(&vstdev->lock);
- return -EBUSY;
- }
-
- /* increment our usage count */
- kref_get(&vstdev->kref);
-
- vstdev->isopen = 1;
-
- /* save device in the file's private structure */
- file->private_data = vstdev;
-
- dev_dbg(&vstdev->usb_dev->dev, "%s: opened\n", __func__);
-
- mutex_unlock(&vstdev->lock);
-
- return 0;
-}
-
-static int vstusb_release(struct inode *inode, struct file *file)
-{
- struct vstusb_device *vstdev;
-
- vstdev = file->private_data;
-
- if (vstdev == NULL)
- return -ENODEV;
-
- mutex_lock(&vstdev->lock);
-
- vstdev->isopen = 0;
-
- dev_dbg(&vstdev->usb_dev->dev, "%s: released\n", __func__);
-
- mutex_unlock(&vstdev->lock);
-
- kref_put(&vstdev->kref, vstusb_delete);
-
- return 0;
-}
-
-static void usb_api_blocking_completion(struct urb *urb)
-{
- struct completion *completeit = urb->context;
-
- complete(completeit);
-}
-
-static int vstusb_fill_and_send_urb(struct urb *urb,
- struct usb_device *usb_dev,
- unsigned int pipe, void *data,
- unsigned int len, struct completion *done)
-{
- struct usb_host_endpoint *ep;
- struct usb_host_endpoint **hostep;
- unsigned int pipend;
-
- int status;
-
- hostep = usb_pipein(pipe) ? usb_dev->ep_in : usb_dev->ep_out;
- pipend = usb_pipeendpoint(pipe);
- ep = hostep[pipend];
-
- if (!ep || (len == 0))
- return -EINVAL;
-
- if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- == USB_ENDPOINT_XFER_INT) {
- pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
- usb_fill_int_urb(urb, usb_dev, pipe, data, len,
- (usb_complete_t)usb_api_blocking_completion,
- NULL, ep->desc.bInterval);
- } else
- usb_fill_bulk_urb(urb, usb_dev, pipe, data, len,
- (usb_complete_t)usb_api_blocking_completion,
- NULL);
-
- init_completion(done);
- urb->context = done;
- urb->actual_length = 0;
- status = usb_submit_urb(urb, GFP_KERNEL);
-
- return status;
-}
-
-static int vstusb_complete_urb(struct urb *urb, struct completion *done,
- int timeout, int *actual_length)
-{
- unsigned long expire;
- int status;
-
- expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT;
- if (!wait_for_completion_interruptible_timeout(done, expire)) {
- usb_kill_urb(urb);
- status = urb->status == -ENOENT ? -ETIMEDOUT : urb->status;
-
- dev_dbg(&urb->dev->dev,
- "%s timed out on ep%d%s len=%d/%d, urb status = %d\n",
- current->comm,
- usb_pipeendpoint(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out",
- urb->actual_length,
- urb->transfer_buffer_length,
- urb->status);
-
- } else {
- if (signal_pending(current)) {
- /* if really an error */
- if (urb->status && !((urb->status == -ENOENT) ||
- (urb->status == -ECONNRESET) ||
- (urb->status == -ESHUTDOWN))) {
- status = -EINTR;
- usb_kill_urb(urb);
- } else {
- status = 0;
- }
-
- dev_dbg(&urb->dev->dev,
- "%s: signal pending on ep%d%s len=%d/%d,"
- "urb status = %d\n",
- current->comm,
- usb_pipeendpoint(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out",
- urb->actual_length,
- urb->transfer_buffer_length,
- urb->status);
-
- } else {
- status = urb->status;
- }
- }
-
- if (actual_length)
- *actual_length = urb->actual_length;
-
- return status;
-}
-
-static ssize_t vstusb_read(struct file *file, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct vstusb_device *vstdev;
- int cnt = -1;
- void *buf;
- int retval = 0;
-
- struct urb *urb;
- struct usb_device *dev;
- unsigned int pipe;
- int timeout;
-
- DECLARE_COMPLETION_ONSTACK(done);
-
- vstdev = file->private_data;
-
- if (vstdev == NULL)
- return -ENODEV;
-
- /* verify that we actually want to read some data */
- if ((count == 0) || (count > VST_MAXBUFFER))
- return -EINVAL;
-
- /* lock this object */
- if (mutex_lock_interruptible(&vstdev->lock))
- return -ERESTARTSYS;
-
- /* anyone home */
- if (!vstdev->present) {
- mutex_unlock(&vstdev->lock);
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: device not present\n", __func__);
- return -ENODEV;
- }
-
- /* pull out the necessary data */
- dev = vstdev->usb_dev;
- pipe = usb_rcvbulkpipe(dev, vstdev->rd_pipe);
- timeout = vstdev->rd_timeout_ms;
-
- buf = kmalloc(count, GFP_KERNEL);
- if (buf == NULL) {
- mutex_unlock(&vstdev->lock);
- return -ENOMEM;
- }
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- kfree(buf);
- mutex_unlock(&vstdev->lock);
- return -ENOMEM;
- }
-
- usb_anchor_urb(urb, &vstdev->submitted);
- retval = vstusb_fill_and_send_urb(urb, dev, pipe, buf, count, &done);
- mutex_unlock(&vstdev->lock);
- if (retval) {
- usb_unanchor_urb(urb);
- dev_err(&dev->dev, "%s: error %d filling and sending urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
- if (retval) {
- dev_err(&dev->dev, "%s: error %d completing urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- if (copy_to_user(buffer, buf, cnt)) {
- dev_err(&dev->dev, "%s: can't copy_to_user\n", __func__);
- retval = -EFAULT;
- } else {
- retval = cnt;
- dev_dbg(&dev->dev, "%s: read %d bytes from pipe %d\n",
- __func__, cnt, pipe);
- }
-
-exit:
- usb_free_urb(urb);
- kfree(buf);
- return retval;
-}
-
-static ssize_t vstusb_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct vstusb_device *vstdev;
- int cnt = -1;
- void *buf;
- int retval = 0;
-
- struct urb *urb;
- struct usb_device *dev;
- unsigned int pipe;
- int timeout;
-
- DECLARE_COMPLETION_ONSTACK(done);
-
- vstdev = file->private_data;
-
- if (vstdev == NULL)
- return -ENODEV;
-
- /* verify that we actually have some data to write */
- if ((count == 0) || (count > VST_MAXBUFFER))
- return retval;
-
- /* lock this object */
- if (mutex_lock_interruptible(&vstdev->lock))
- return -ERESTARTSYS;
-
- /* anyone home */
- if (!vstdev->present) {
- mutex_unlock(&vstdev->lock);
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: device not present\n", __func__);
- return -ENODEV;
- }
-
- /* pull out the necessary data */
- dev = vstdev->usb_dev;
- pipe = usb_sndbulkpipe(dev, vstdev->wr_pipe);
- timeout = vstdev->wr_timeout_ms;
-
- buf = kmalloc(count, GFP_KERNEL);
- if (buf == NULL) {
- mutex_unlock(&vstdev->lock);
- return -ENOMEM;
- }
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- kfree(buf);
- mutex_unlock(&vstdev->lock);
- return -ENOMEM;
- }
-
- if (copy_from_user(buf, buffer, count)) {
- mutex_unlock(&vstdev->lock);
- dev_err(&dev->dev, "%s: can't copy_from_user\n", __func__);
- retval = -EFAULT;
- goto exit;
- }
-
- usb_anchor_urb(urb, &vstdev->submitted);
- retval = vstusb_fill_and_send_urb(urb, dev, pipe, buf, count, &done);
- mutex_unlock(&vstdev->lock);
- if (retval) {
- usb_unanchor_urb(urb);
- dev_err(&dev->dev, "%s: error %d filling and sending urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
- if (retval) {
- dev_err(&dev->dev, "%s: error %d completing urb %d\n",
- __func__, retval, pipe);
- goto exit;
- } else {
- retval = cnt;
- dev_dbg(&dev->dev, "%s: sent %d bytes to pipe %d\n",
- __func__, cnt, pipe);
- }
-
-exit:
- usb_free_urb(urb);
- kfree(buf);
- return retval;
-}
-
-static long vstusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int retval = 0;
- int cnt = -1;
- void __user *data = (void __user *)arg;
- struct vstusb_args usb_data;
-
- struct vstusb_device *vstdev;
- void *buffer = NULL; /* must be initialized. buffer is
- * referenced on exit but not all
- * ioctls allocate it */
-
- struct urb *urb = NULL; /* must be initialized. urb is
- * referenced on exit but not all
- * ioctls allocate it */
- struct usb_device *dev;
- unsigned int pipe;
- int timeout;
-
- DECLARE_COMPLETION_ONSTACK(done);
-
- vstdev = file->private_data;
-
- if (_IOC_TYPE(cmd) != VST_IOC_MAGIC) {
- dev_warn(&vstdev->usb_dev->dev,
- "%s: ioctl command %x, bad ioctl magic %x, "
- "expected %x\n", __func__, cmd,
- _IOC_TYPE(cmd), VST_IOC_MAGIC);
- return -EINVAL;
- }
-
- if (vstdev == NULL)
- return -ENODEV;
-
- if (copy_from_user(&usb_data, data, sizeof(struct vstusb_args))) {
- dev_err(&vstdev->usb_dev->dev, "%s: can't copy_from_user\n",
- __func__);
- return -EFAULT;
- }
-
- /* lock this object */
- if (mutex_lock_interruptible(&vstdev->lock)) {
- retval = -ERESTARTSYS;
- goto exit;
- }
-
- /* anyone home */
- if (!vstdev->present) {
- mutex_unlock(&vstdev->lock);
- dev_err(&vstdev->usb_dev->dev, "%s: device not present\n",
- __func__);
- retval = -ENODEV;
- goto exit;
- }
-
- /* pull out the necessary data */
- dev = vstdev->usb_dev;
-
- switch (cmd) {
-
- case IOCTL_VSTUSB_CONFIG_RW:
-
- vstdev->rd_pipe = usb_data.rd_pipe;
- vstdev->rd_timeout_ms = usb_data.rd_timeout_ms;
- vstdev->wr_pipe = usb_data.wr_pipe;
- vstdev->wr_timeout_ms = usb_data.wr_timeout_ms;
-
- mutex_unlock(&vstdev->lock);
-
- dev_dbg(&dev->dev, "%s: setting pipes/timeouts, "
- "rdpipe = %d, rdtimeout = %d, "
- "wrpipe = %d, wrtimeout = %d\n", __func__,
- vstdev->rd_pipe, vstdev->rd_timeout_ms,
- vstdev->wr_pipe, vstdev->wr_timeout_ms);
- break;
-
- case IOCTL_VSTUSB_SEND_PIPE:
-
- if ((usb_data.count == 0) || (usb_data.count > VST_MAXBUFFER)) {
- mutex_unlock(&vstdev->lock);
- retval = -EINVAL;
- goto exit;
- }
-
- buffer = kmalloc(usb_data.count, GFP_KERNEL);
- if (buffer == NULL) {
- mutex_unlock(&vstdev->lock);
- retval = -ENOMEM;
- goto exit;
- }
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- mutex_unlock(&vstdev->lock);
- retval = -ENOMEM;
- goto exit;
- }
-
- timeout = usb_data.timeout_ms;
-
- pipe = usb_sndbulkpipe(dev, usb_data.pipe);
-
- if (copy_from_user(buffer, usb_data.buffer, usb_data.count)) {
- dev_err(&dev->dev, "%s: can't copy_from_user\n",
- __func__);
- mutex_unlock(&vstdev->lock);
- retval = -EFAULT;
- goto exit;
- }
-
- usb_anchor_urb(urb, &vstdev->submitted);
- retval = vstusb_fill_and_send_urb(urb, dev, pipe, buffer,
- usb_data.count, &done);
- mutex_unlock(&vstdev->lock);
- if (retval) {
- usb_unanchor_urb(urb);
- dev_err(&dev->dev,
- "%s: error %d filling and sending urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
- if (retval) {
- dev_err(&dev->dev, "%s: error %d completing urb %d\n",
- __func__, retval, pipe);
- }
-
- break;
- case IOCTL_VSTUSB_RECV_PIPE:
-
- if ((usb_data.count == 0) || (usb_data.count > VST_MAXBUFFER)) {
- mutex_unlock(&vstdev->lock);
- retval = -EINVAL;
- goto exit;
- }
-
- buffer = kmalloc(usb_data.count, GFP_KERNEL);
- if (buffer == NULL) {
- mutex_unlock(&vstdev->lock);
- retval = -ENOMEM;
- goto exit;
- }
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- mutex_unlock(&vstdev->lock);
- retval = -ENOMEM;
- goto exit;
- }
-
- timeout = usb_data.timeout_ms;
-
- pipe = usb_rcvbulkpipe(dev, usb_data.pipe);
-
- usb_anchor_urb(urb, &vstdev->submitted);
- retval = vstusb_fill_and_send_urb(urb, dev, pipe, buffer,
- usb_data.count, &done);
- mutex_unlock(&vstdev->lock);
- if (retval) {
- usb_unanchor_urb(urb);
- dev_err(&dev->dev,
- "%s: error %d filling and sending urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
- if (retval) {
- dev_err(&dev->dev, "%s: error %d completing urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- if (copy_to_user(usb_data.buffer, buffer, cnt)) {
- dev_err(&dev->dev, "%s: can't copy_to_user\n",
- __func__);
- retval = -EFAULT;
- goto exit;
- }
-
- usb_data.count = cnt;
- if (copy_to_user(data, &usb_data, sizeof(struct vstusb_args))) {
- dev_err(&dev->dev, "%s: can't copy_to_user\n",
- __func__);
- retval = -EFAULT;
- } else {
- dev_dbg(&dev->dev, "%s: recv %zd bytes from pipe %d\n",
- __func__, usb_data.count, usb_data.pipe);
- }
-
- break;
-
- default:
- mutex_unlock(&vstdev->lock);
- dev_warn(&dev->dev, "ioctl_vstusb: invalid ioctl cmd %x\n",
- cmd);
- return -EINVAL;
- break;
- }
-exit:
- usb_free_urb(urb);
- kfree(buffer);
- return retval;
-}
-
-static const struct file_operations vstusb_fops = {
- .owner = THIS_MODULE,
- .read = vstusb_read,
- .write = vstusb_write,
- .unlocked_ioctl = vstusb_ioctl,
- .compat_ioctl = vstusb_ioctl,
- .open = vstusb_open,
- .release = vstusb_release,
-};
-
-static struct usb_class_driver usb_vstusb_class = {
- .name = "usb/vstusb%d",
- .fops = &vstusb_fops,
- .minor_base = VSTUSB_MINOR_BASE,
-};
-
-static int vstusb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *dev = interface_to_usbdev(intf);
- struct vstusb_device *vstdev;
- int i;
- int retval = 0;
-
- /* allocate memory for our device state and intialize it */
-
- vstdev = kzalloc(sizeof(*vstdev), GFP_KERNEL);
- if (vstdev == NULL)
- return -ENOMEM;
-
- /* must do usb_get_dev() prior to kref_init() since the kref_put()
- * release function will do a usb_put_dev() */
- usb_get_dev(dev);
- kref_init(&vstdev->kref);
- mutex_init(&vstdev->lock);
-
- i = dev->descriptor.bcdDevice;
-
- dev_dbg(&intf->dev, "Version %1d%1d.%1d%1d found at address %d\n",
- (i & 0xF000) >> 12, (i & 0xF00) >> 8,
- (i & 0xF0) >> 4, (i & 0xF), dev->devnum);
-
- vstdev->present = 1;
- vstdev->isopen = 0;
- vstdev->usb_dev = dev;
- init_usb_anchor(&vstdev->submitted);
-
- usb_set_intfdata(intf, vstdev);
- retval = usb_register_dev(intf, &usb_vstusb_class);
- if (retval) {
- dev_err(&intf->dev,
- "%s: Not able to get a minor for this device.\n",
- __func__);
- usb_set_intfdata(intf, NULL);
- kref_put(&vstdev->kref, vstusb_delete);
- return retval;
- }
-
- /* let the user know what node this device is now attached to */
- dev_info(&intf->dev,
- "VST USB Device #%d now attached to major %d minor %d\n",
- (intf->minor - VSTUSB_MINOR_BASE), USB_MAJOR, intf->minor);
-
- dev_info(&intf->dev, "%s, %s\n", DRIVER_DESC, DRIVER_VERSION);
-
- return retval;
-}
-
-static void vstusb_disconnect(struct usb_interface *intf)
-{
- struct vstusb_device *vstdev = usb_get_intfdata(intf);
-
- usb_deregister_dev(intf, &usb_vstusb_class);
- usb_set_intfdata(intf, NULL);
-
- if (vstdev) {
-
- mutex_lock(&vstdev->lock);
- vstdev->present = 0;
-
- usb_kill_anchored_urbs(&vstdev->submitted);
-
- mutex_unlock(&vstdev->lock);
-
- kref_put(&vstdev->kref, vstusb_delete);
- }
-
-}
-
-static int vstusb_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct vstusb_device *vstdev = usb_get_intfdata(intf);
- int time;
- if (!vstdev)
- return 0;
-
- mutex_lock(&vstdev->lock);
- time = usb_wait_anchor_empty_timeout(&vstdev->submitted, 1000);
- if (!time)
- usb_kill_anchored_urbs(&vstdev->submitted);
- mutex_unlock(&vstdev->lock);
-
- return 0;
-}
-
-static int vstusb_resume(struct usb_interface *intf)
-{
- return 0;
-}
-
-static struct usb_driver vstusb_driver = {
- .name = "vstusb",
- .probe = vstusb_probe,
- .disconnect = vstusb_disconnect,
- .suspend = vstusb_suspend,
- .resume = vstusb_resume,
- .id_table = id_table,
-};
-
-static int __init vstusb_init(void)
-{
- int rc;
-
- rc = usb_register(&vstusb_driver);
- if (rc)
- printk(KERN_ERR "%s: failed to register (%d)", __func__, rc);
-
- return rc;
-}
-
-static void __exit vstusb_exit(void)
-{
- usb_deregister(&vstusb_driver);
-}
-
-module_init(vstusb_init);
-module_exit(vstusb_exit);
-
-MODULE_AUTHOR("Dennis O'Brien/Stephen Ware");
-MODULE_DESCRIPTION(DRIVER_VERSION);
-MODULE_LICENSE("GPL");
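For reference, the deleted vstusb code hand-rolled a synchronous bulk transfer (vstusb_fill_and_send_urb() plus vstusb_complete_urb()); the USB core's usb_bulk_msg() helper covers that common case. A minimal sketch, with the device and endpoint assumed to come from elsewhere:

    #include <linux/usb.h>

    /* synchronous bulk read: blocks until completion or timeout (ms) */
    static int example_bulk_read(struct usb_device *udev, unsigned int ep,
                                 void *buf, int len, int timeout_ms)
    {
        int actual = 0;
        int ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, ep),
                               buf, len, &actual, timeout_ms);
        return ret ? ret : actual;  /* bytes transferred on success */
    }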
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 385ec05..6dd44bc 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -460,8 +460,8 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
char ev_type, int status)
{
const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
- unsigned long flags;
struct timeval ts;
+ unsigned long flags;
unsigned int urb_length;
unsigned int offset;
unsigned int length;
@@ -600,10 +600,13 @@ static void mon_bin_complete(void *data, struct urb *urb, int status)
static void mon_bin_error(void *data, struct urb *urb, int error)
{
struct mon_reader_bin *rp = data;
+ struct timeval ts;
unsigned long flags;
unsigned int offset;
struct mon_bin_hdr *ep;
+ do_gettimeofday(&ts);
+
spin_lock_irqsave(&rp->b_lock, flags);
offset = mon_buff_area_alloc(rp, PKT_SIZE);
@@ -623,6 +626,8 @@ static void mon_bin_error(void *data, struct urb *urb, int error)
ep->devnum = urb->dev->devnum;
ep->busnum = urb->dev->bus->busnum;
ep->id = (unsigned long) urb;
+ ep->ts_sec = ts.tv_sec;
+ ep->ts_usec = ts.tv_usec;
ep->status = error;
ep->flag_setup = '-';
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 047568f..31c1188 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -180,7 +180,7 @@ static inline unsigned int mon_get_timestamp(void)
unsigned int stamp;
do_gettimeofday(&tval);
- stamp = tval.tv_sec & 0xFFFF; /* 2^32 = 4294967296. Limit to 4096s. */
+ stamp = tval.tv_sec & 0xFFF; /* 2^32 = 4294967296. Limit to 4096s. */
stamp = stamp * 1000000 + tval.tv_usec;
return stamp;
}
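The mask change fixes a u32 overflow: stamp is seconds * 10^6 + microseconds, so the seconds part must stay below 2^32 / 10^6 ~= 4294. The old 0xFFFF mask allowed up to 65535 * 10^6, which overflows; 0xFFF caps the result at 4095 * 10^6 + 999999 = 4,095,999,999 < 2^32 = 4,294,967,296, which is what the "limit to 4096s" comment means.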
@@ -273,12 +273,12 @@ static void mon_text_error(void *data, struct urb *urb, int error)
ep->type = 'E';
ep->id = (unsigned long) urb;
- ep->busnum = 0;
+ ep->busnum = urb->dev->bus->busnum;
ep->devnum = urb->dev->devnum;
ep->epnum = usb_endpoint_num(&urb->ep->desc);
ep->xfertype = usb_endpoint_type(&urb->ep->desc);
ep->is_in = usb_urb_dir_in(urb);
- ep->tstamp = 0;
+ ep->tstamp = mon_get_timestamp();
ep->length = 0;
ep->status = error;
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index ad26e65..bcee133 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -30,7 +30,6 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
void __iomem *fifo = hw_ep->fifo;
void __iomem *epio = hw_ep->regs;
u8 epnum = hw_ep->epnum;
- u16 dma_reg = 0;
prefetch((u8 *)src);
@@ -42,15 +41,17 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
dump_fifo_data(src, len);
if (!ANOMALY_05000380 && epnum != 0) {
- flush_dcache_range((unsigned int)src,
- (unsigned int)(src + len));
+ u16 dma_reg;
+
+ flush_dcache_range((unsigned long)src,
+ (unsigned long)(src + len));
/* Setup DMA address register */
- dma_reg = (u16) ((u32) src & 0xFFFF);
+ dma_reg = (u32)src;
bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
SSYNC();
- dma_reg = (u16) (((u32) src >> 16) & 0xFFFF);
+ dma_reg = (u32)src >> 16;
bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
SSYNC();
@@ -79,12 +80,9 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
SSYNC();
if (unlikely((unsigned long)src & 0x01))
- outsw_8((unsigned long)fifo, src,
- len & 0x01 ? (len >> 1) + 1 : len >> 1);
+ outsw_8((unsigned long)fifo, src, (len + 1) >> 1);
else
- outsw((unsigned long)fifo, src,
- len & 0x01 ? (len >> 1) + 1 : len >> 1);
-
+ outsw((unsigned long)fifo, src, (len + 1) >> 1);
}
}
/*
@@ -94,19 +92,19 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
void __iomem *fifo = hw_ep->fifo;
u8 epnum = hw_ep->epnum;
- u16 dma_reg = 0;
if (ANOMALY_05000467 && epnum != 0) {
+ u16 dma_reg;
- invalidate_dcache_range((unsigned int)dst,
- (unsigned int)(dst + len));
+ invalidate_dcache_range((unsigned long)dst,
+ (unsigned long)(dst + len));
/* Setup DMA address register */
- dma_reg = (u16) ((u32) dst & 0xFFFF);
+ dma_reg = (u32)dst;
bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
SSYNC();
- dma_reg = (u16) (((u32) dst >> 16) & 0xFFFF);
+ dma_reg = (u32)dst >> 16;
bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
SSYNC();
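The tidied casts work because USB_DMAx_ADDR_LOW/HIGH are 16-bit registers: assigning a u32 (or the u32 shifted right by 16) to a u16 truncates to exactly the bits each register wants. Equivalently, assuming buf is the DMA buffer:

    u32 addr = (u32)buf;
    u16 lo = addr;        /* implicit truncation to bits 15:0 */
    u16 hi = addr >> 16;  /* bits 31:16 */

    bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), lo);
    bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), hi);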
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index a44a450..3c69a76 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -1191,8 +1191,13 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
bd = tx_ch->head;
+ /*
+ * If the head is NULL, this could mean an abort interrupt
+ * that needs to be acknowledged.
+ */
if (NULL == bd) {
DBG(1, "null BD\n");
+ tx_ram->tx_complete = 0;
continue;
}
@@ -1412,15 +1417,6 @@ static int cppi_channel_abort(struct dma_channel *channel)
if (cppi_ch->transmit) {
struct cppi_tx_stateram __iomem *tx_ram;
- int enabled;
-
- /* mask interrupts raised to signal teardown complete. */
- enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
- & (1 << cppi_ch->index);
- if (enabled)
- musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
- (1 << cppi_ch->index));
-
/* REVISIT put timeouts on these controller handshakes */
cppi_dump_tx(6, cppi_ch, " (teardown)");
@@ -1435,7 +1431,6 @@ static int cppi_channel_abort(struct dma_channel *channel)
do {
value = musb_readl(&tx_ram->tx_complete, 0);
} while (0xFFFFFFFC != value);
- musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);
/* FIXME clean up the transfer state ... here?
* the completion routine should get called with
@@ -1448,23 +1443,15 @@ static int cppi_channel_abort(struct dma_channel *channel)
musb_writew(regs, MUSB_TXCSR, value);
musb_writew(regs, MUSB_TXCSR, value);
- /* While we scrub the TX state RAM, ensure that we clean
- * up any interrupt that's currently asserted:
+ /*
* 1. Write to completion Ptr value 0x1(bit 0 set)
* (write back mode)
- * 2. Write to completion Ptr value 0x0(bit 0 cleared)
- * (compare mode)
- * Value written is compared(for bits 31:2) and when
- * equal, interrupt is deasserted.
+ * 2. Wait for the abort interrupt, then put the channel in
+ * compare mode by writing 1 to the tx_complete register.
*/
cppi_reset_tx(tx_ram, 1);
- musb_writel(&tx_ram->tx_complete, 0, 0);
-
- /* re-enable interrupt */
- if (enabled)
- musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
- (1 << cppi_ch->index));
-
+ cppi_ch->head = 0;
+ musb_writel(&tx_ram->tx_complete, 0, 1);
cppi_dump_tx(5, cppi_ch, " (done teardown)");
/* REVISIT tx side _should_ clean up the same way
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 738efd8..b4bbf8f 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -557,6 +557,69 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
handled = IRQ_HANDLED;
}
+
+ if (int_usb & MUSB_INTR_SUSPEND) {
+ DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
+ otg_state_string(musb), devctl, power);
+ handled = IRQ_HANDLED;
+
+ switch (musb->xceiv->state) {
+#ifdef CONFIG_USB_MUSB_OTG
+ case OTG_STATE_A_PERIPHERAL:
+ /* We also come here if the cable is removed, since
+ * this silicon doesn't report ID-no-longer-grounded.
+ *
+ * We depend on T(a_wait_bcon) to shut us down, and
+ * hope users don't do anything dicey during this
+ * undesired detour through A_WAIT_BCON.
+ */
+ musb_hnp_stop(musb);
+ usb_hcd_resume_root_hub(musb_to_hcd(musb));
+ musb_root_disconnect(musb);
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon
+ ? : OTG_TIME_A_WAIT_BCON));
+
+ break;
+#endif
+ case OTG_STATE_B_IDLE:
+ if (!musb->is_active)
+ break;
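+ /* FALLTHROUGH: an active B_IDLE is handled as B_PERIPHERAL */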
+ case OTG_STATE_B_PERIPHERAL:
+ musb_g_suspend(musb);
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv->gadget->b_hnp_enable;
+ if (musb->is_active) {
+#ifdef CONFIG_USB_MUSB_OTG
+ musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
+ DBG(1, "HNP: Setting timer for b_ase0_brst\n");
+ mod_timer(&musb->otg_timer, jiffies
+ + msecs_to_jiffies(
+ OTG_TIME_B_ASE0_BRST));
+#endif
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (musb->a_wait_bcon != 0)
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
+ break;
+ case OTG_STATE_A_HOST:
+ musb->xceiv->state = OTG_STATE_A_SUSPEND;
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv->host->b_hnp_enable;
+ break;
+ case OTG_STATE_B_HOST:
+ /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
+ DBG(1, "REVISIT: SUSPEND as B_HOST\n");
+ break;
+ default:
+ /* "should not happen" */
+ musb->is_active = 0;
+ break;
+ }
+ }
+
if (int_usb & MUSB_INTR_CONNECT) {
struct usb_hcd *hcd = musb_to_hcd(musb);
@@ -625,10 +688,61 @@ b_host:
}
#endif /* CONFIG_USB_MUSB_HDRC_HCD */
+ if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
+ DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
+ otg_state_string(musb),
+ MUSB_MODE(musb), devctl);
+ handled = IRQ_HANDLED;
+
+ switch (musb->xceiv->state) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_A_SUSPEND:
+ usb_hcd_resume_root_hub(musb_to_hcd(musb));
+ musb_root_disconnect(musb);
+ if (musb->a_wait_bcon != 0 && is_otg_enabled(musb))
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
+ break;
+#endif /* HOST */
+#ifdef CONFIG_USB_MUSB_OTG
+ case OTG_STATE_B_HOST:
+ /* REVISIT this behaves for "real disconnect"
+ * cases; make sure the other transitions from
+ * B_HOST act right too. The B_HOST code
+ * in hnp_stop() is currently not used...
+ */
+ musb_root_disconnect(musb);
+ musb_to_hcd(musb)->self.is_b_host = 0;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ MUSB_DEV_MODE(musb);
+ musb_g_disconnect(musb);
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ musb_hnp_stop(musb);
+ musb_root_disconnect(musb);
+ /* FALLTHROUGH */
+ case OTG_STATE_B_WAIT_ACON:
+ /* FALLTHROUGH */
+#endif /* OTG */
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_IDLE:
+ musb_g_disconnect(musb);
+ break;
+#endif /* GADGET */
+ default:
+ WARNING("unhandled DISCONNECT transition (%s)\n",
+ otg_state_string(musb));
+ break;
+ }
+ }
+
/* mentor saves a bit: bus reset and babble share the same irq.
* only host sees babble; only peripheral sees bus reset.
*/
if (int_usb & MUSB_INTR_RESET) {
+ handled = IRQ_HANDLED;
if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
/*
* Looks like non-HS BABBLE can be ignored, but
@@ -641,7 +755,7 @@ b_host:
DBG(1, "BABBLE devctl: %02x\n", devctl);
else {
ERR("Stopping host session -- babble\n");
- musb_writeb(mbase, MUSB_DEVCTL, 0);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
}
} else if (is_peripheral_capable()) {
DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
@@ -686,29 +800,7 @@ b_host:
otg_state_string(musb));
}
}
-
- handled = IRQ_HANDLED;
}
- schedule_work(&musb->irq_work);
-
- return handled;
-}
-
-/*
- * Interrupt Service Routine to record USB "global" interrupts.
- * Since these do not happen often and signify things of
- * paramount importance, it seems OK to check them individually;
- * the order of the tests is specified in the manual
- *
- * @param musb instance pointer
- * @param int_usb register contents
- * @param devctl
- * @param power
- */
-static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
- u8 devctl, u8 power)
-{
- irqreturn_t handled = IRQ_NONE;
#if 0
/* REVISIT ... this would be for multiplexing periodic endpoints, or
@@ -755,117 +847,7 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
}
#endif
- if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
- DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
- otg_state_string(musb),
- MUSB_MODE(musb), devctl);
- handled = IRQ_HANDLED;
-
- switch (musb->xceiv->state) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- case OTG_STATE_A_HOST:
- case OTG_STATE_A_SUSPEND:
- usb_hcd_resume_root_hub(musb_to_hcd(musb));
- musb_root_disconnect(musb);
- if (musb->a_wait_bcon != 0 && is_otg_enabled(musb))
- musb_platform_try_idle(musb, jiffies
- + msecs_to_jiffies(musb->a_wait_bcon));
- break;
-#endif /* HOST */
-#ifdef CONFIG_USB_MUSB_OTG
- case OTG_STATE_B_HOST:
- /* REVISIT this behaves for "real disconnect"
- * cases; make sure the other transitions from
- * from B_HOST act right too. The B_HOST code
- * in hnp_stop() is currently not used...
- */
- musb_root_disconnect(musb);
- musb_to_hcd(musb)->self.is_b_host = 0;
- musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
- MUSB_DEV_MODE(musb);
- musb_g_disconnect(musb);
- break;
- case OTG_STATE_A_PERIPHERAL:
- musb_hnp_stop(musb);
- musb_root_disconnect(musb);
- /* FALLTHROUGH */
- case OTG_STATE_B_WAIT_ACON:
- /* FALLTHROUGH */
-#endif /* OTG */
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- case OTG_STATE_B_PERIPHERAL:
- case OTG_STATE_B_IDLE:
- musb_g_disconnect(musb);
- break;
-#endif /* GADGET */
- default:
- WARNING("unhandled DISCONNECT transition (%s)\n",
- otg_state_string(musb));
- break;
- }
-
- schedule_work(&musb->irq_work);
- }
-
- if (int_usb & MUSB_INTR_SUSPEND) {
- DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
- otg_state_string(musb), devctl, power);
- handled = IRQ_HANDLED;
-
- switch (musb->xceiv->state) {
-#ifdef CONFIG_USB_MUSB_OTG
- case OTG_STATE_A_PERIPHERAL:
- /* We also come here if the cable is removed, since
- * this silicon doesn't report ID-no-longer-grounded.
- *
- * We depend on T(a_wait_bcon) to shut us down, and
- * hope users don't do anything dicey during this
- * undesired detour through A_WAIT_BCON.
- */
- musb_hnp_stop(musb);
- usb_hcd_resume_root_hub(musb_to_hcd(musb));
- musb_root_disconnect(musb);
- musb_platform_try_idle(musb, jiffies
- + msecs_to_jiffies(musb->a_wait_bcon
- ? : OTG_TIME_A_WAIT_BCON));
- break;
-#endif
- case OTG_STATE_B_PERIPHERAL:
- musb_g_suspend(musb);
- musb->is_active = is_otg_enabled(musb)
- && musb->xceiv->gadget->b_hnp_enable;
- if (musb->is_active) {
-#ifdef CONFIG_USB_MUSB_OTG
- musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
- DBG(1, "HNP: Setting timer for b_ase0_brst\n");
- mod_timer(&musb->otg_timer, jiffies
- + msecs_to_jiffies(
- OTG_TIME_B_ASE0_BRST));
-#endif
- }
- break;
- case OTG_STATE_A_WAIT_BCON:
- if (musb->a_wait_bcon != 0)
- musb_platform_try_idle(musb, jiffies
- + msecs_to_jiffies(musb->a_wait_bcon));
- break;
- case OTG_STATE_A_HOST:
- musb->xceiv->state = OTG_STATE_A_SUSPEND;
- musb->is_active = is_otg_enabled(musb)
- && musb->xceiv->host->b_hnp_enable;
- break;
- case OTG_STATE_B_HOST:
- /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
- DBG(1, "REVISIT: SUSPEND as B_HOST\n");
- break;
- default:
- /* "should not happen" */
- musb->is_active = 0;
- break;
- }
- schedule_work(&musb->irq_work);
- }
-
+ schedule_work(&musb->irq_work);
return handled;
}
@@ -1095,6 +1077,36 @@ static struct fifo_cfg __initdata mode_4_cfg[] = {
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};
+/* mode 5 - fits in 8KB */
+static struct fifo_cfg __initdata mode_5_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
+{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
+{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+};
/*
* configure a fifo; for non-shared endpoints, this may be called
@@ -1210,6 +1222,10 @@ static int __init ep_config_from_table(struct musb *musb)
cfg = mode_4_cfg;
n = ARRAY_SIZE(mode_4_cfg);
break;
+ case 5:
+ cfg = mode_5_cfg;
+ n = ARRAY_SIZE(mode_5_cfg);
+ break;
}
printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
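As a sanity check on the "fits in 8KB" claim: endpoints 1-5 use 2 x 512 bytes each (5120), endpoints 6-12 use 2 x 32 bytes each (448), and the shared RXTX FIFOs add 512 + 1024 + 1024 (2560), for 8128 bytes in total, just inside the 8192-byte budget (assuming single buffering and one maxpacket-sized buffer per RXTX entry).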
@@ -1314,9 +1330,6 @@ enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
*/
static int __init musb_core_init(u16 musb_type, struct musb *musb)
{
-#ifdef MUSB_AHB_ID
- u32 data;
-#endif
u8 reg;
char *type;
char aInfo[90], aRevision[32], aDate[12];
@@ -1328,23 +1341,17 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
reg = musb_read_configdata(mbase);
strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
- if (reg & MUSB_CONFIGDATA_DYNFIFO)
+ if (reg & MUSB_CONFIGDATA_DYNFIFO) {
strcat(aInfo, ", dyn FIFOs");
+ musb->dyn_fifo = true;
+ }
if (reg & MUSB_CONFIGDATA_MPRXE) {
strcat(aInfo, ", bulk combine");
-#ifdef C_MP_RX
musb->bulk_combine = true;
-#else
- strcat(aInfo, " (X)"); /* no driver support */
-#endif
}
if (reg & MUSB_CONFIGDATA_MPTXE) {
strcat(aInfo, ", bulk split");
-#ifdef C_MP_TX
musb->bulk_split = true;
-#else
- strcat(aInfo, " (X)"); /* no driver support */
-#endif
}
if (reg & MUSB_CONFIGDATA_HBRXE) {
strcat(aInfo, ", HB-ISO Rx");
@@ -1360,20 +1367,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
musb_driver_name, reg, aInfo);
-#ifdef MUSB_AHB_ID
- data = musb_readl(mbase, 0x404);
- sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff),
- (data >> 16) & 0xff, (data >> 24) & 0xff);
- /* FIXME ID2 and ID3 are unused */
- data = musb_readl(mbase, 0x408);
- printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data);
- data = musb_readl(mbase, 0x40c);
- printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data);
- reg = musb_readb(mbase, 0x400);
- musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC;
-#else
aDate[0] = 0;
-#endif
if (MUSB_CONTROLLER_MHDRC == musb_type) {
musb->is_multipoint = 1;
type = "M";
@@ -1404,21 +1398,10 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
musb->nr_endpoints = 1;
musb->epmask = 1;
- if (reg & MUSB_CONFIGDATA_DYNFIFO) {
- if (musb->config->dyn_fifo)
- status = ep_config_from_table(musb);
- else {
- ERR("reconfigure software for Dynamic FIFOs\n");
- status = -ENODEV;
- }
- } else {
- if (!musb->config->dyn_fifo)
- status = ep_config_from_hw(musb);
- else {
- ERR("reconfigure software for static FIFOs\n");
- return -ENODEV;
- }
- }
+ if (musb->dyn_fifo)
+ status = ep_config_from_table(musb);
+ else
+ status = ep_config_from_hw(musb);
if (status < 0)
return status;
@@ -1587,11 +1570,6 @@ irqreturn_t musb_interrupt(struct musb *musb)
ep_num++;
}
- /* finish handling "global" interrupts after handling fifos */
- if (musb->int_usb)
- retval |= musb_stage2_irq(musb,
- musb->int_usb, devctl, power);
-
return retval;
}
@@ -1696,7 +1674,7 @@ musb_vbus_store(struct device *dev, struct device_attribute *attr,
unsigned long val;
if (sscanf(buf, "%lu", &val) < 1) {
- printk(KERN_ERR "Invalid VBUS timeout ms value\n");
+ dev_err(dev, "Invalid VBUS timeout ms value\n");
return -EINVAL;
}
@@ -1746,7 +1724,7 @@ musb_srp_store(struct device *dev, struct device_attribute *attr,
if (sscanf(buf, "%hu", &srp) != 1
|| (srp != 1)) {
- printk(KERN_ERR "SRP: Value must be 1\n");
+ dev_err(dev, "SRP: Value must be 1\n");
return -EINVAL;
}
@@ -1759,6 +1737,19 @@ static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
+static struct attribute *musb_attributes[] = {
+ &dev_attr_mode.attr,
+ &dev_attr_vbus.attr,
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ &dev_attr_srp.attr,
+#endif
+ NULL
+};
+
+static const struct attribute_group musb_attr_group = {
+ .attrs = musb_attributes,
+};
+
#endif /* sysfs */
/* Only used to provide driver mode change events */
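The attribute-group conversion replaces several device_create_file() calls, each of which could fail independently (and whose return values were previously clobbered), with one all-or-nothing sysfs_create_group()/sysfs_remove_group() pair. A minimal sketch of the pattern with a hypothetical attribute:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t mode_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
        return sprintf(buf, "example\n");
    }
    static DEVICE_ATTR(mode, 0444, mode_show, NULL);

    static struct attribute *example_attrs[] = {
        &dev_attr_mode.attr,
        NULL                 /* the array must be NULL-terminated */
    };

    static const struct attribute_group example_group = {
        .attrs = example_attrs,
    };

    static int example_add_files(struct device *dev)
    {
        /* creates every file in the group, or none on failure */
        return sysfs_create_group(&dev->kobj, &example_group);
    }

    static void example_remove_files(struct device *dev)
    {
        sysfs_remove_group(&dev->kobj, &example_group);
    }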
@@ -1833,11 +1824,7 @@ static void musb_free(struct musb *musb)
*/
#ifdef CONFIG_SYSFS
- device_remove_file(musb->controller, &dev_attr_mode);
- device_remove_file(musb->controller, &dev_attr_vbus);
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- device_remove_file(musb->controller, &dev_attr_srp);
-#endif
+ sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
#endif
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
@@ -2017,22 +2004,10 @@ bad_config:
musb->irq_wake = 0;
}
- pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
- musb_driver_name,
- ({char *s;
- switch (musb->board_mode) {
- case MUSB_HOST: s = "Host"; break;
- case MUSB_PERIPHERAL: s = "Peripheral"; break;
- default: s = "OTG"; break;
- }; s; }),
- ctrl,
- (is_dma_capable() && musb->dma_controller)
- ? "DMA" : "PIO",
- musb->nIrq);
-
/* host side needs more setup */
if (is_host_enabled(musb)) {
struct usb_hcd *hcd = musb_to_hcd(musb);
+ u8 busctl;
otg_set_host(musb->xceiv, &hcd->self);
@@ -2040,6 +2015,13 @@ bad_config:
hcd->self.otg_port = 1;
musb->xceiv->host = &hcd->self;
hcd->power_budget = 2 * (plat->power ? : 250);
+
+ /* program PHY to use external vBus if required */
+ if (plat->extvbus) {
+ busctl = musb_readb(musb->mregs, MUSB_ULPI_BUSCONTROL);
+ busctl |= MUSB_ULPI_USE_EXTVBUS;
+ musb_writeb(musb->mregs, MUSB_ULPI_BUSCONTROL, busctl);
+ }
}
/* For the host-only role, we can activate right away.
@@ -2079,26 +2061,26 @@ bad_config:
}
#ifdef CONFIG_SYSFS
- status = device_create_file(dev, &dev_attr_mode);
- status = device_create_file(dev, &dev_attr_vbus);
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- status = device_create_file(dev, &dev_attr_srp);
-#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
- status = 0;
+ status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
#endif
if (status)
goto fail2;
+ dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n",
+ ({char *s;
+ switch (musb->board_mode) {
+ case MUSB_HOST: s = "Host"; break;
+ case MUSB_PERIPHERAL: s = "Peripheral"; break;
+ default: s = "OTG"; break;
+ }; s; }),
+ ctrl,
+ (is_dma_capable() && musb->dma_controller)
+ ? "DMA" : "PIO",
+ musb->nIrq);
+
return 0;
fail2:
-#ifdef CONFIG_SYSFS
- device_remove_file(musb->controller, &dev_attr_mode);
- device_remove_file(musb->controller, &dev_attr_vbus);
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- device_remove_file(musb->controller, &dev_attr_srp);
-#endif
-#endif
musb_platform_exit(musb);
fail:
dev_err(musb->controller,
@@ -2127,6 +2109,7 @@ static int __init musb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int irq = platform_get_irq(pdev, 0);
+ int status;
struct resource *iomem;
void __iomem *base;
@@ -2134,7 +2117,7 @@ static int __init musb_probe(struct platform_device *pdev)
if (!iomem || irq == 0)
return -ENODEV;
- base = ioremap(iomem->start, iomem->end - iomem->start + 1);
+ base = ioremap(iomem->start, resource_size(iomem));
if (!base) {
dev_err(dev, "ioremap failed\n");
return -ENOMEM;
@@ -2144,7 +2127,12 @@ static int __init musb_probe(struct platform_device *pdev)
/* clobbered by use_dma=n */
orig_dma_mask = dev->dma_mask;
#endif
- return musb_init_controller(dev, irq, base);
+
+ status = musb_init_controller(dev, irq, base);
+ if (status < 0)
+ iounmap(base);
+
+ return status;
}
static int __exit musb_remove(struct platform_device *pdev)
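Two small hardenings in musb_probe() above: resource_size() is the canonical length computation (end - start + 1, with no off-by-one), and the mapping is now undone when controller init fails. The resulting shape:

    iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = ioremap(iomem->start, resource_size(iomem));
    if (!base)
        return -ENOMEM;

    status = musb_init_controller(dev, irq, base);
    if (status < 0)
        iounmap(base);  /* don't leak the mapping on failure */
    return status;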
@@ -2173,6 +2161,148 @@ static int __exit musb_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
+static struct musb_context_registers musb_context;
+
+void musb_save_context(struct musb *musb)
+{
+ int i;
+ void __iomem *musb_base = musb->mregs;
+
+ if (is_host_enabled(musb)) {
+ musb_context.frame = musb_readw(musb_base, MUSB_FRAME);
+ musb_context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
+ }
+ musb_context.power = musb_readb(musb_base, MUSB_POWER);
+ musb_context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE);
+ musb_context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE);
+ musb_context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
+ musb_context.index = musb_readb(musb_base, MUSB_INDEX);
+ musb_context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
+
+ for (i = 0; i < MUSB_C_NUM_EPS; ++i) {
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ musb_context.index_regs[i].txmaxp =
+ musb_readw(musb_base, 0x10 + MUSB_TXMAXP);
+ musb_context.index_regs[i].txcsr =
+ musb_readw(musb_base, 0x10 + MUSB_TXCSR);
+ musb_context.index_regs[i].rxmaxp =
+ musb_readw(musb_base, 0x10 + MUSB_RXMAXP);
+ musb_context.index_regs[i].rxcsr =
+ musb_readw(musb_base, 0x10 + MUSB_RXCSR);
+
+ if (musb->dyn_fifo) {
+ musb_context.index_regs[i].txfifoadd =
+ musb_read_txfifoadd(musb_base);
+ musb_context.index_regs[i].rxfifoadd =
+ musb_read_rxfifoadd(musb_base);
+ musb_context.index_regs[i].txfifosz =
+ musb_read_txfifosz(musb_base);
+ musb_context.index_regs[i].rxfifosz =
+ musb_read_rxfifosz(musb_base);
+ }
+ if (is_host_enabled(musb)) {
+ musb_context.index_regs[i].txtype =
+ musb_readb(musb_base, 0x10 + MUSB_TXTYPE);
+ musb_context.index_regs[i].txinterval =
+ musb_readb(musb_base, 0x10 + MUSB_TXINTERVAL);
+ musb_context.index_regs[i].rxtype =
+ musb_readb(musb_base, 0x10 + MUSB_RXTYPE);
+ musb_context.index_regs[i].rxinterval =
+ musb_readb(musb_base, 0x10 + MUSB_RXINTERVAL);
+
+ musb_context.index_regs[i].txfunaddr =
+ musb_read_txfunaddr(musb_base, i);
+ musb_context.index_regs[i].txhubaddr =
+ musb_read_txhubaddr(musb_base, i);
+ musb_context.index_regs[i].txhubport =
+ musb_read_txhubport(musb_base, i);
+
+ musb_context.index_regs[i].rxfunaddr =
+ musb_read_rxfunaddr(musb_base, i);
+ musb_context.index_regs[i].rxhubaddr =
+ musb_read_rxhubaddr(musb_base, i);
+ musb_context.index_regs[i].rxhubport =
+ musb_read_rxhubport(musb_base, i);
+ }
+ }
+
+ musb_writeb(musb_base, MUSB_INDEX, musb_context.index);
+
+ musb_platform_save_context(musb, &musb_context);
+}
+
+void musb_restore_context(struct musb *musb)
+{
+ int i;
+ void __iomem *musb_base = musb->mregs;
+ void __iomem *ep_target_regs;
+
+ musb_platform_restore_context(musb, &musb_context);
+
+ if (is_host_enabled(musb)) {
+ musb_writew(musb_base, MUSB_FRAME, musb_context.frame);
+ musb_writeb(musb_base, MUSB_TESTMODE, musb_context.testmode);
+ }
+ musb_writeb(musb_base, MUSB_POWER, musb_context.power);
+ musb_writew(musb_base, MUSB_INTRTXE, musb_context.intrtxe);
+ musb_writew(musb_base, MUSB_INTRRXE, musb_context.intrrxe);
+ musb_writeb(musb_base, MUSB_INTRUSBE, musb_context.intrusbe);
+ musb_writeb(musb_base, MUSB_DEVCTL, musb_context.devctl);
+
+ for (i = 0; i < MUSB_C_NUM_EPS; ++i) {
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ musb_writew(musb_base, 0x10 + MUSB_TXMAXP,
+ musb_context.index_regs[i].txmaxp);
+ musb_writew(musb_base, 0x10 + MUSB_TXCSR,
+ musb_context.index_regs[i].txcsr);
+ musb_writew(musb_base, 0x10 + MUSB_RXMAXP,
+ musb_context.index_regs[i].rxmaxp);
+ musb_writew(musb_base, 0x10 + MUSB_RXCSR,
+ musb_context.index_regs[i].rxcsr);
+
+ if (musb->dyn_fifo) {
+ musb_write_txfifosz(musb_base,
+ musb_context.index_regs[i].txfifosz);
+ musb_write_rxfifosz(musb_base,
+ musb_context.index_regs[i].rxfifosz);
+ musb_write_txfifoadd(musb_base,
+ musb_context.index_regs[i].txfifoadd);
+ musb_write_rxfifoadd(musb_base,
+ musb_context.index_regs[i].rxfifoadd);
+ }
+
+ if (is_host_enabled(musb)) {
+ musb_writeb(musb_base, 0x10 + MUSB_TXTYPE,
+ musb_context.index_regs[i].txtype);
+ musb_writeb(musb_base, 0x10 + MUSB_TXINTERVAL,
+ musb_context.index_regs[i].txinterval);
+ musb_writeb(musb_base, 0x10 + MUSB_RXTYPE,
+ musb_context.index_regs[i].rxtype);
+ musb_writeb(musb_base, 0x10 + MUSB_RXINTERVAL,
+ musb_context.index_regs[i].rxinterval);
+ musb_write_txfunaddr(musb_base, i,
+ musb_context.index_regs[i].txfunaddr);
+ musb_write_txhubaddr(musb_base, i,
+ musb_context.index_regs[i].txhubaddr);
+ musb_write_txhubport(musb_base, i,
+ musb_context.index_regs[i].txhubport);
+
+ ep_target_regs =
+ musb_read_target_reg_base(i, musb_base);
+
+ musb_write_rxfunaddr(ep_target_regs,
+ musb_context.index_regs[i].rxfunaddr);
+ musb_write_rxhubaddr(ep_target_regs,
+ musb_context.index_regs[i].rxhubaddr);
+ musb_write_rxhubport(ep_target_regs,
+ musb_context.index_regs[i].rxhubport);
+ }
+ }
+
+ musb_writeb(musb_base, MUSB_INDEX, musb_context.index);
+}
+
static int musb_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -2194,6 +2324,8 @@ static int musb_suspend(struct device *dev)
*/
}
+ musb_save_context(musb);
+
if (musb->set_clock)
musb->set_clock(musb->clock, 0);
else
@@ -2215,6 +2347,8 @@ static int musb_resume_noirq(struct device *dev)
else
clk_enable(musb->clock);
+ musb_restore_context(musb);
+
/* for static cmos like DaVinci, register values were preserved
* unless for some reason the whole soc powered down or the USB
* module got reset through the PSC (vs just being disabled).
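
The save/restore loops above rely on the MUSB indexed-register model: writing MUSB_INDEX selects which endpoint's control/status bank appears in the window at offset 0x10, which is why every iteration writes the index first and why the saved index is written back once the loop finishes. A minimal standalone sketch of that indexed-window idiom (simulated registers; the 0x0e index offset and helper names are illustrative assumptions, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

#define MUSB_INDEX 0x0e   /* endpoint-select register (offset assumed) */
#define CSR_WINDOW 0x10   /* indexed per-endpoint CSR window */

static uint8_t regs[16][0x20]; /* fake per-endpoint register banks */
static uint8_t selected;       /* currently selected endpoint */

static void reg_writeb(unsigned off, uint8_t v)
{
	if (off == MUSB_INDEX)
		selected = v;
	else
		regs[selected][off] = v;
}

static uint8_t reg_readb(unsigned off)
{
	return off == MUSB_INDEX ? selected : regs[selected][off];
}

int main(void)
{
	uint8_t saved = reg_readb(MUSB_INDEX); /* save current selection */

	for (int ep = 0; ep < 16; ep++) {
		reg_writeb(MUSB_INDEX, ep);      /* select endpoint bank */
		reg_writeb(CSR_WINDOW, ep + 1);  /* access a windowed register */
	}
	reg_writeb(MUSB_INDEX, saved); /* restore index, as the driver does */

	printf("ep3 windowed reg = %u\n", regs[3][CSR_WINDOW]); /* 4 */
	return 0;
}
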
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 5514c7e..d849fb8 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -52,6 +52,15 @@ struct musb;
struct musb_hw_ep;
struct musb_ep;
+/* Helper defines for struct musb->hwvers */
+#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
+#define MUSB_HWVERS_MINOR(x) (x & 0x3ff)
+#define MUSB_HWVERS_RC 0x8000
+#define MUSB_HWVERS_1300 0x52C
+#define MUSB_HWVERS_1400 0x590
+#define MUSB_HWVERS_1800 0x720
+#define MUSB_HWVERS_1900 0x784
+#define MUSB_HWVERS_2000 0x800
#include "musb_debug.h"
#include "musb_dma.h"
@@ -322,13 +331,6 @@ struct musb {
struct clk *clock;
irqreturn_t (*isr)(int, void *);
struct work_struct irq_work;
-#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
-#define MUSB_HWVERS_MINOR(x) (x & 0x3ff)
-#define MUSB_HWVERS_RC 0x8000
-#define MUSB_HWVERS_1300 0x52C
-#define MUSB_HWVERS_1400 0x590
-#define MUSB_HWVERS_1800 0x720
-#define MUSB_HWVERS_2000 0x800
u16 hwvers;
/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
@@ -411,22 +413,15 @@ struct musb {
unsigned hb_iso_rx:1; /* high bandwidth iso rx? */
unsigned hb_iso_tx:1; /* high bandwidth iso tx? */
+ unsigned dyn_fifo:1; /* dynamic FIFO supported? */
-#ifdef C_MP_TX
- unsigned bulk_split:1;
+ unsigned bulk_split:1;
#define can_bulk_split(musb,type) \
- (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
-#else
-#define can_bulk_split(musb, type) 0
-#endif
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
-#ifdef C_MP_RX
- unsigned bulk_combine:1;
+ unsigned bulk_combine:1;
#define can_bulk_combine(musb,type) \
- (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
-#else
-#define can_bulk_combine(musb, type) 0
-#endif
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
/* is_suspended means USB B_PERIPHERAL suspend */
@@ -461,6 +456,45 @@ struct musb {
#endif
};
+#ifdef CONFIG_PM
+struct musb_csr_regs {
+ /* FIFO registers */
+ u16 txmaxp, txcsr, rxmaxp, rxcsr;
+ u16 rxfifoadd, txfifoadd;
+ u8 txtype, txinterval, rxtype, rxinterval;
+ u8 rxfifosz, txfifosz;
+ u8 txfunaddr, txhubaddr, txhubport;
+ u8 rxfunaddr, rxhubaddr, rxhubport;
+};
+
+struct musb_context_registers {
+
+#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430)
+ u32 otg_sysconfig, otg_forcestandby;
+#endif
+ u8 power;
+ u16 intrtxe, intrrxe;
+ u8 intrusbe;
+ u16 frame;
+ u8 index, testmode;
+
+ u8 devctl, misc;
+
+ struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
+};
+
+#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430)
+extern void musb_platform_save_context(struct musb *musb,
+ struct musb_context_registers *musb_context);
+extern void musb_platform_restore_context(struct musb *musb,
+ struct musb_context_registers *musb_context);
+#else
+#define musb_platform_save_context(m, x) do {} while (0)
+#define musb_platform_restore_context(m, x) do {} while (0)
+#endif
+
+#endif
+
static inline void musb_set_vbus(struct musb *musb, int is_on)
{
musb->board_set_vbus(musb, is_on);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index cbcf14a2..a9f288c 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -895,7 +895,14 @@ static int musb_gadget_enable(struct usb_ep *ep,
/* REVISIT if can_bulk_split(), use by updating "tmp";
* likewise high bandwidth periodic tx
*/
- musb_writew(regs, MUSB_TXMAXP, tmp);
+ /* Set TXMAXP with the FIFO size of the endpoint
+ * to disable double buffering mode. Currently, it seems that double
+ * buffering has problems if the musb RTL revision number is < 2.0.
+ */
+ if (musb->hwvers < MUSB_HWVERS_2000)
+ musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
+ else
+ musb_writew(regs, MUSB_TXMAXP, tmp);
csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
if (musb_readw(regs, MUSB_TXCSR)
@@ -925,7 +932,13 @@ static int musb_gadget_enable(struct usb_ep *ep,
/* REVISIT if can_bulk_combine() use by updating "tmp"
* likewise high bandwidth periodic rx
*/
- musb_writew(regs, MUSB_RXMAXP, tmp);
+ /* Set RXMAXP with the FIFO size of the endpoint
+ * to disable double buffering mode.
+ */
+ if (musb->hwvers < MUSB_HWVERS_2000)
+ musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
+ else
+ musb_writew(regs, MUSB_RXMAXP, tmp);
/* force shared fifo to OUT-only mode */
if (hw_ep->is_shared_fifo) {
@@ -1697,8 +1710,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
return -EINVAL;
/* driver must be initialized to support peripheral mode */
- if (!musb || !(musb->board_mode == MUSB_OTG
- || musb->board_mode != MUSB_OTG)) {
+ if (!musb) {
DBG(1, "%s, no dev??\n", __func__);
return -ENODEV;
}
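
The MAXP writes in both hunks pack the endpoint payload size into the low 11 bits and the high-bandwidth multiplier minus one into bits 12:11, exactly as the `qh->maxpacket | ((qh->hb_mult - 1) << 11)` form in musb_host.c below. Writing the plain FIFO size instead leaves the multiplier field at zero, which is what disables double buffering on pre-2.0 RTL. A standalone illustration of the packing:

#include <stdint.h>
#include <stdio.h>

static uint16_t musb_maxp(uint16_t maxpacket, uint8_t hb_mult)
{
	/* bits 10:0 = packet size, bits 12:11 = (multiplier - 1) */
	return maxpacket | ((uint16_t)(hb_mult - 1) << 11);
}

int main(void)
{
	/* high-bandwidth iso: 3 transactions of 1024 bytes per microframe */
	printf("maxp = 0x%04X\n", musb_maxp(1024, 3)); /* 0x1400 */
	/* plain bulk: multiplier 1 leaves the upper field clear */
	printf("maxp = 0x%04X\n", musb_maxp(512, 1));  /* 0x0200 */
	return 0;
}
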
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 74c4c36..3421cf9 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -605,8 +605,14 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
/* NOTE: bulk combining rewrites high bits of maxpacket */
- musb_writew(ep->regs, MUSB_RXMAXP,
- qh->maxpacket | ((qh->hb_mult - 1) << 11));
+ /* Set RXMAXP with the FIFO size of the endpoint
+ * to disable double buffer mode.
+ */
+ if (musb->hwvers < MUSB_HWVERS_2000)
+ musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
+ else
+ musb_writew(ep->regs, MUSB_RXMAXP,
+ qh->maxpacket | ((qh->hb_mult - 1) << 11));
ep->rx_reinit = 0;
}
@@ -1771,6 +1777,9 @@ static int musb_schedule(
int best_end, epnum;
struct musb_hw_ep *hw_ep = NULL;
struct list_head *head = NULL;
+ u8 toggle;
+ u8 txtype;
+ struct urb *urb = next_urb(qh);
/* use fixed hardware for control and bulk */
if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
@@ -1809,6 +1818,27 @@ static int musb_schedule(
diff -= (qh->maxpacket * qh->hb_mult);
if (diff >= 0 && best_diff > diff) {
+
+ /*
+ * The Mentor controller has a bug: if we schedule
+ * a BULK Tx transfer on an endpoint that earlier
+ * handled ISOC, the BULK transfer has to start on
+ * a zero toggle. If it starts on a 1 toggle, the
+ * transfer will fail, because the Mentor controller
+ * begins the BULK transfer on a 0 toggle
+ * irrespective of the toggle bits programmed in
+ * the TXCSR register. Check for this condition
+ * while allocating the EP for a Tx BULK transfer,
+ * and if it holds, skip this EP.
+ */
+ hw_ep = musb->endpoints + epnum;
+ toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
+ txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
+ >> 4) & 0x3;
+ if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
+ toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
+ continue;
+
best_diff = diff;
best_end = epnum;
}
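
The scheduling workaround above combines two inputs: the data toggle usbcore tracks for the device endpoint, and the protocol field that the previous transfer left in the hardware TXTYPE register (bits 5:4, hence the shift-and-mask). A standalone sketch of the skip test, with the transfer-type values as in linux/usb/ch9.h and everything else simplified:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XFER_ISOC 1  /* USB_ENDPOINT_XFER_ISOC */
#define XFER_BULK 2  /* USB_ENDPOINT_XFER_BULK */

/* Skip this hw endpoint for a bulk TX if it last carried ISOC and the
 * transfer would have to start on a 1 toggle (Mentor RTL quirk). */
static bool must_skip_ep(bool is_in, int qh_type, int toggle, uint8_t txtype)
{
	int last_proto = (txtype >> 4) & 0x3;

	return !is_in && qh_type == XFER_BULK &&
	       toggle && last_proto == XFER_ISOC;
}

int main(void)
{
	uint8_t txtype = 1 << 4; /* protocol field left at ISOC */

	printf("skip: %d\n", must_skip_ep(false, XFER_BULK, 1, txtype)); /* 1 */
	printf("skip: %d\n", must_skip_ep(false, XFER_BULK, 0, txtype)); /* 0 */
	return 0;
}
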
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 473a94e..292894a 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -72,6 +72,10 @@
#define MUSB_DEVCTL_HR 0x02
#define MUSB_DEVCTL_SESSION 0x01
+/* MUSB ULPI VBUSCONTROL */
+#define MUSB_ULPI_USE_EXTVBUS 0x01
+#define MUSB_ULPI_USE_EXTVBUSIND 0x02
+
/* TESTMODE */
#define MUSB_TEST_FORCE_HOST 0x80
#define MUSB_TEST_FIFO_ACCESS 0x40
@@ -246,6 +250,7 @@
/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
#define MUSB_HWVERS 0x6C /* 8 bit */
+#define MUSB_ULPI_BUSCONTROL 0x70 /* 8 bit */
#define MUSB_EPINFO 0x78 /* 8 bit */
#define MUSB_RAMINFO 0x79 /* 8 bit */
@@ -321,6 +326,26 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
musb_writew(mbase, MUSB_RXFIFOADD, c_off);
}
+static inline u8 musb_read_txfifosz(void __iomem *mbase)
+{
+ return musb_readb(mbase, MUSB_TXFIFOSZ);
+}
+
+static inline u16 musb_read_txfifoadd(void __iomem *mbase)
+{
+ return musb_readw(mbase, MUSB_TXFIFOADD);
+}
+
+static inline u8 musb_read_rxfifosz(void __iomem *mbase)
+{
+ return musb_readb(mbase, MUSB_RXFIFOSZ);
+}
+
+static inline u16 musb_read_rxfifoadd(void __iomem *mbase)
+{
+ return musb_readw(mbase, MUSB_RXFIFOADD);
+}
+
static inline u8 musb_read_configdata(void __iomem *mbase)
{
musb_writeb(mbase, MUSB_INDEX, 0);
@@ -376,6 +401,36 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum,
qh_h_port_reg);
}
+static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXFUNCADDR));
+}
+
+static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBADDR));
+}
+
+static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBPORT));
+}
+
+static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR));
+}
+
+static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR));
+}
+
+static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT));
+}
+
#else /* CONFIG_BLACKFIN */
#define USB_BASE USB_FADDR
@@ -455,6 +510,22 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
{
}
+static inline u8 musb_read_txfifosz(void __iomem *mbase)
+{
+ return 0;
+}
+
+static inline u16 musb_read_txfifoadd(void __iomem *mbase)
+{
+ return 0;
+}
+
+static inline u8 musb_read_rxfifosz(void __iomem *mbase)
+{
+ return 0;
+}
+
+static inline u16 musb_read_rxfifoadd(void __iomem *mbase)
+{
+ return 0;
+}
+
static inline u8 musb_read_configdata(void __iomem *mbase)
{
return 0;
@@ -462,7 +533,11 @@ static inline u8 musb_read_configdata(void __iomem *mbase)
static inline u16 musb_read_hwvers(void __iomem *mbase)
{
- return 0;
+ /*
+ * This register is not visible on the Blackfin port; the MUSB
+ * RTL version there is 1.9, so just hardcode its value.
+ */
+ return MUSB_HWVERS_1900;
}
static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase)
@@ -500,6 +575,30 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum,
{
}
+static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
+{
+ return 0;
+}
+
+static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
+{
+ return 0;
+}
+
+static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
+{
+ return 0;
+}
+
+static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
+{
+ return 0;
+}
+
+static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
+{
+ return 0;
+}
+
+static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
+{
+ return 0;
+}
+
#endif /* CONFIG_BLACKFIN */
#endif /* __MUSB_REGS_H__ */
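
The non-Blackfin accessors added above all index the per-endpoint target registers through MUSB_BUSCTL_OFFSET(). Assuming the usual MUSB layout of an 8-byte target block per endpoint starting at 0x80 (the macro and register offsets below are recalled from the driver headers, not shown in this hunk), the address math works out as:

#include <stdio.h>

#define MUSB_BUSCTL_OFFSET(ep, off) (0x80 + 8 * (ep) + (off))
#define MUSB_TXFUNCADDR 0x00   /* assumed block-internal offsets */
#define MUSB_RXFUNCADDR 0x04

int main(void)
{
	printf("ep2 TXFUNCADDR at 0x%02x\n",
	       MUSB_BUSCTL_OFFSET(2, MUSB_TXFUNCADDR)); /* 0x90 */
	printf("ep2 RXFUNCADDR at 0x%02x\n",
	       MUSB_BUSCTL_OFFSET(2, MUSB_RXFUNCADDR)); /* 0x94 */
	return 0;
}
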
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index a237550..2fa7d5c 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -250,20 +250,39 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
u8 bchannel;
u8 int_hsdma;
- u32 addr;
+ u32 addr, count;
u16 csr;
spin_lock_irqsave(&musb->lock, flags);
int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
- if (!int_hsdma)
- goto done;
#ifdef CONFIG_BLACKFIN
/* Clear DMA interrupt flags */
musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
#endif
+ if (!int_hsdma) {
+ DBG(2, "spurious DMA irq\n");
+
+ for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
+ musb_channel = (struct musb_dma_channel *)
+ &(controller->channel[bchannel]);
+ channel = &musb_channel->channel;
+ if (channel->status == MUSB_DMA_STATUS_BUSY) {
+ count = musb_read_hsdma_count(mbase, bchannel);
+
+ if (count == 0)
+ int_hsdma |= (1 << bchannel);
+ }
+ }
+
+ DBG(2, "int_hsdma = 0x%x\n", int_hsdma);
+
+ if (!int_hsdma)
+ goto done;
+ }
+
for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
if (int_hsdma & (1 << bchannel)) {
musb_channel = (struct musb_dma_channel *)
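
The new fallback path handles controllers where MUSB_HSDMA_INTR reads back zero even though a channel completed: it walks the busy channels, treats a drained transfer count as an implied interrupt bit, and then falls through to the normal per-channel loop. The bit synthesis in standalone form (hypothetical channel state):

#include <stdint.h>
#include <stdio.h>

#define CHANNELS 8

struct chan { int busy; uint32_t count; };

static uint8_t recover_int_bits(const struct chan *ch)
{
	uint8_t bits = 0;

	for (int i = 0; i < CHANNELS; i++)
		if (ch[i].busy && ch[i].count == 0)
			bits |= 1 << i; /* count drained: channel done */
	return bits;
}

int main(void)
{
	struct chan ch[CHANNELS] = {
		[2] = { .busy = 1, .count = 0 },   /* finished */
		[5] = { .busy = 1, .count = 512 }, /* still moving data */
	};

	printf("int_hsdma = 0x%02x\n", recover_int_bits(ch)); /* 0x04 */
	return 0;
}
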
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index 1299d92..613f95a 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -55,6 +55,10 @@
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS), \
addr)
+#define musb_read_hsdma_count(mbase, bchannel) \
+ musb_readl(mbase, \
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT))
+
#define musb_write_hsdma_count(mbase, bchannel, len) \
musb_writel(mbase, \
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT), \
@@ -96,6 +100,19 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase,
((u16)(((u32) dma_addr >> 16) & 0xFFFF)));
}
+static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
+{
+ u32 count = musb_readw(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
+
+ count = count << 16;
+
+ count |= musb_readw(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
+
+ return count;
+}
+
static inline void musb_write_hsdma_count(void __iomem *mbase,
u8 bchannel, u32 len)
{
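
On Blackfin the DMA count is not a single 32-bit register, so the inline helper above rebuilds it from the 16-bit COUNT_HIGH and COUNT_LOW halves. The same composition, standalone:

#include <stdint.h>
#include <stdio.h>

static uint32_t compose_count(uint16_t high, uint16_t low)
{
	return ((uint32_t)high << 16) | low;
}

int main(void)
{
	/* 0x0001:0x8000 -> 98304 bytes remaining in the transfer */
	printf("%u\n", compose_count(0x0001, 0x8000));
	return 0;
}
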
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 83beeac..3fe1686 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -220,7 +220,7 @@ int __init musb_platform_init(struct musb *musb)
musb_platform_resume(musb);
- l = omap_readl(OTG_SYSCONFIG);
+ l = musb_readl(musb->mregs, OTG_SYSCONFIG);
l &= ~ENABLEWAKEUP; /* disable wakeup */
l &= ~NOSTDBY; /* remove possible nostdby */
l |= SMARTSTDBY; /* enable smart standby */
@@ -233,17 +233,19 @@ int __init musb_platform_init(struct musb *musb)
*/
if (!cpu_is_omap3430())
l |= AUTOIDLE; /* enable auto idle */
- omap_writel(l, OTG_SYSCONFIG);
+ musb_writel(musb->mregs, OTG_SYSCONFIG, l);
- l = omap_readl(OTG_INTERFSEL);
+ l = musb_readl(musb->mregs, OTG_INTERFSEL);
l |= ULPI_12PIN;
- omap_writel(l, OTG_INTERFSEL);
+ musb_writel(musb->mregs, OTG_INTERFSEL, l);
pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
"sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
- omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG),
- omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL),
- omap_readl(OTG_SIMENABLE));
+ musb_readl(musb->mregs, OTG_REVISION),
+ musb_readl(musb->mregs, OTG_SYSCONFIG),
+ musb_readl(musb->mregs, OTG_SYSSTATUS),
+ musb_readl(musb->mregs, OTG_INTERFSEL),
+ musb_readl(musb->mregs, OTG_SIMENABLE));
omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
@@ -255,6 +257,22 @@ int __init musb_platform_init(struct musb *musb)
return 0;
}
+#ifdef CONFIG_PM
+void musb_platform_save_context(struct musb *musb,
+ struct musb_context_registers *musb_context)
+{
+ musb_context->otg_sysconfig = musb_readl(musb->mregs, OTG_SYSCONFIG);
+ musb_context->otg_forcestandby = musb_readl(musb->mregs, OTG_FORCESTDBY);
+}
+
+void musb_platform_restore_context(struct musb *musb,
+ struct musb_context_registers *musb_context)
+{
+ musb_writel(musb->mregs, OTG_SYSCONFIG, musb_context->otg_sysconfig);
+ musb_writel(musb->mregs, OTG_FORCESTDBY, musb_context->otg_forcestandby);
+}
+#endif
+
int musb_platform_suspend(struct musb *musb)
{
u32 l;
@@ -263,13 +281,13 @@ int musb_platform_suspend(struct musb *musb)
return 0;
/* in any role */
- l = omap_readl(OTG_FORCESTDBY);
+ l = musb_readl(musb->mregs, OTG_FORCESTDBY);
l |= ENABLEFORCE; /* enable MSTANDBY */
- omap_writel(l, OTG_FORCESTDBY);
+ musb_writel(musb->mregs, OTG_FORCESTDBY, l);
- l = omap_readl(OTG_SYSCONFIG);
+ l = musb_readl(musb->mregs, OTG_SYSCONFIG);
l |= ENABLEWAKEUP; /* enable wakeup */
- omap_writel(l, OTG_SYSCONFIG);
+ musb_writel(musb->mregs, OTG_SYSCONFIG, l);
otg_set_suspend(musb->xceiv, 1);
@@ -295,13 +313,13 @@ static int musb_platform_resume(struct musb *musb)
else
clk_enable(musb->clock);
- l = omap_readl(OTG_SYSCONFIG);
+ l = musb_readl(musb->mregs, OTG_SYSCONFIG);
l &= ~ENABLEWAKEUP; /* disable wakeup */
- omap_writel(l, OTG_SYSCONFIG);
+ musb_writel(musb->mregs, OTG_SYSCONFIG, l);
- l = omap_readl(OTG_FORCESTDBY);
+ l = musb_readl(musb->mregs, OTG_FORCESTDBY);
l &= ~ENABLEFORCE; /* disable MSTANDBY */
- omap_writel(l, OTG_FORCESTDBY);
+ musb_writel(musb->mregs, OTG_FORCESTDBY, l);
return 0;
}
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
index fbede77..40b3c02 100644
--- a/drivers/usb/musb/omap2430.h
+++ b/drivers/usb/musb/omap2430.h
@@ -10,47 +10,43 @@
#ifndef __MUSB_OMAP243X_H__
#define __MUSB_OMAP243X_H__
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
-#include <mach/hardware.h>
#include <plat/usb.h>
/*
* OMAP2430-specific definitions
*/
-#define MENTOR_BASE_OFFSET 0
-#if defined(CONFIG_ARCH_OMAP2430)
-#define OMAP_HSOTG_BASE (OMAP243X_HS_BASE)
-#elif defined(CONFIG_ARCH_OMAP3430)
-#define OMAP_HSOTG_BASE (OMAP34XX_HSUSB_OTG_BASE)
-#endif
-#define OMAP_HSOTG(offset) (OMAP_HSOTG_BASE + 0x400 + (offset))
-#define OTG_REVISION OMAP_HSOTG(0x0)
-#define OTG_SYSCONFIG OMAP_HSOTG(0x4)
+#define OTG_REVISION 0x400
+
+#define OTG_SYSCONFIG 0x404
# define MIDLEMODE 12 /* bit position */
# define FORCESTDBY (0 << MIDLEMODE)
# define NOSTDBY (1 << MIDLEMODE)
# define SMARTSTDBY (2 << MIDLEMODE)
+
# define SIDLEMODE 3 /* bit position */
# define FORCEIDLE (0 << SIDLEMODE)
# define NOIDLE (1 << SIDLEMODE)
# define SMARTIDLE (2 << SIDLEMODE)
+
# define ENABLEWAKEUP (1 << 2)
# define SOFTRST (1 << 1)
# define AUTOIDLE (1 << 0)
-#define OTG_SYSSTATUS OMAP_HSOTG(0x8)
+
+#define OTG_SYSSTATUS 0x408
# define RESETDONE (1 << 0)
-#define OTG_INTERFSEL OMAP_HSOTG(0xc)
+
+#define OTG_INTERFSEL 0x40c
# define EXTCP (1 << 2)
-# define PHYSEL 0 /* bit position */
+# define PHYSEL 0 /* bit position */
# define UTMI_8BIT (0 << PHYSEL)
# define ULPI_12PIN (1 << PHYSEL)
# define ULPI_8PIN (2 << PHYSEL)
-#define OTG_SIMENABLE OMAP_HSOTG(0x10)
+
+#define OTG_SIMENABLE 0x410
# define TM1 (1 << 0)
-#define OTG_FORCESTDBY OMAP_HSOTG(0x14)
-# define ENABLEFORCE (1 << 0)
-#endif /* CONFIG_ARCH_OMAP2430 */
+#define OTG_FORCESTDBY 0x414
+# define ENABLEFORCE (1 << 0)
#endif /* __MUSB_OMAP243X_H__ */
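
With the OTG_* constants now plain offsets from the ioremapped MUSB base, the register bits keep their meaning: OTG_SYSCONFIG carries two 2-bit idle-mode fields, MIDLEMODE at bit 12 and SIDLEMODE at bit 3, plus the wakeup/reset/autoidle bits at the bottom. A worked encoding using the values defined above:

#include <stdio.h>

#define MIDLEMODE  12
#define SMARTSTDBY (2 << MIDLEMODE)
#define SIDLEMODE  3
#define SMARTIDLE  (2 << SIDLEMODE)
#define AUTOIDLE   (1 << 0)

int main(void)
{
	/* the combination musb_platform_init() aims for on non-3430 parts */
	unsigned l = SMARTSTDBY | SMARTIDLE | AUTOIDLE;

	printf("sysconfig = 0x%04x\n", l); /* 0x2011 */
	return 0;
}
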
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 88b587c..ab776a8 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -1118,7 +1118,7 @@ int __init musb_platform_init(struct musb *musb)
}
musb->sync = mem->start;
- sync = ioremap(mem->start, mem->end - mem->start + 1);
+ sync = ioremap(mem->start, resource_size(mem));
if (!sync) {
pr_debug("ioremap for sync failed\n");
ret = -ENOMEM;
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index e13c770..1c86809 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -648,7 +648,7 @@ void dma_controller_destroy(struct dma_controller *c)
}
}
- if (!tusb_dma->multichannel && tusb_dma && tusb_dma->ch >= 0)
+ if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0)
omap_free_dma(tusb_dma->ch);
kfree(tusb_dma);
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 2be9f2f..3e4e9f4 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -36,7 +36,7 @@
#include <linux/i2c/twl.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
-
+#include <linux/notifier.h>
/* Register defines */
@@ -236,15 +236,6 @@
#define PMBR1 0x0D
#define GPIO_USB_4PIN_ULPI_2430C (3 << 0)
-
-
-enum linkstat {
- USB_LINK_UNKNOWN = 0,
- USB_LINK_NONE,
- USB_LINK_VBUS,
- USB_LINK_ID,
-};
-
struct twl4030_usb {
struct otg_transceiver otg;
struct device *dev;
@@ -347,10 +338,10 @@ twl4030_usb_clear_bits(struct twl4030_usb *twl, u8 reg, u8 bits)
/*-------------------------------------------------------------------------*/
-static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl)
+static enum usb_xceiv_events twl4030_usb_linkstat(struct twl4030_usb *twl)
{
int status;
- int linkstat = USB_LINK_UNKNOWN;
+ int linkstat = USB_EVENT_NONE;
/*
* For ID/VBUS sensing, see manual section 15.4.8 ...
@@ -368,11 +359,11 @@ static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl)
dev_err(twl->dev, "USB link status err %d\n", status);
else if (status & (BIT(7) | BIT(2))) {
if (status & BIT(2))
- linkstat = USB_LINK_ID;
+ linkstat = USB_EVENT_ID;
else
- linkstat = USB_LINK_VBUS;
+ linkstat = USB_EVENT_VBUS;
} else
- linkstat = USB_LINK_NONE;
+ linkstat = USB_EVENT_NONE;
dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
status, status, linkstat);
@@ -383,7 +374,7 @@ static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl)
spin_lock_irq(&twl->lock);
twl->linkstat = linkstat;
- if (linkstat == USB_LINK_ID) {
+ if (linkstat == USB_EVENT_ID) {
twl->otg.default_a = true;
twl->otg.state = OTG_STATE_A_IDLE;
} else {
@@ -564,7 +555,7 @@ static ssize_t twl4030_usb_vbus_show(struct device *dev,
spin_lock_irqsave(&twl->lock, flags);
ret = sprintf(buf, "%s\n",
- (twl->linkstat == USB_LINK_VBUS) ? "on" : "off");
+ (twl->linkstat == USB_EVENT_VBUS) ? "on" : "off");
spin_unlock_irqrestore(&twl->lock, flags);
return ret;
@@ -576,17 +567,8 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
struct twl4030_usb *twl = _twl;
int status;
-#ifdef CONFIG_LOCKDEP
- /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
- * we don't want and can't tolerate. Although it might be
- * friendlier not to borrow this thread context...
- */
- local_irq_enable();
-#endif
-
status = twl4030_usb_linkstat(twl);
- if (status != USB_LINK_UNKNOWN) {
-
+ if (status >= 0) {
/* FIXME add a set_power() method so that B-devices can
* configure the charger appropriately. It's not always
* correct to consume VBUS power, and how much current to
@@ -598,12 +580,13 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
* USB_LINK_VBUS state. musb_hdrc won't care until it
* starts to handle softconnect right.
*/
- if (status == USB_LINK_NONE)
+ if (status == USB_EVENT_NONE)
twl4030_phy_suspend(twl, 0);
else
twl4030_phy_resume(twl);
- twl4030charger_usb_en(status == USB_LINK_VBUS);
+ blocking_notifier_call_chain(&twl->otg.notifier, status,
+ twl->otg.gadget);
}
sysfs_notify(&twl->dev->kobj, NULL, "vbus");
@@ -693,6 +676,8 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
if (device_create_file(&pdev->dev, &dev_attr_vbus))
dev_warn(&pdev->dev, "could not create sysfs file\n");
+ BLOCKING_INIT_NOTIFIER_HEAD(&twl->otg.notifier);
+
/* Our job is to use irqs and status from the power module
* to keep the transceiver disabled when nothing's connected.
*
@@ -702,7 +687,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
* need both handles, otherwise just one suffices.
*/
twl->irq_enabled = true;
- status = request_irq(twl->irq, twl4030_usb_irq,
+ status = request_threaded_irq(twl->irq, NULL, twl4030_usb_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"twl4030_usb", twl);
if (status < 0) {
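
The twl4030 change decouples the transceiver from the charger: instead of calling twl4030charger_usb_en() directly, the IRQ handler publishes the link event (USB_EVENT_NONE/VBUS/ID) on the otg notifier chain, and any interested driver subscribes. A miniature of that publish/subscribe shape (deliberately simplified; the kernel side uses struct notifier_block and blocking_notifier_call_chain):

#include <stdio.h>

enum usb_event { USB_EVENT_NONE, USB_EVENT_VBUS, USB_EVENT_ID };

typedef void (*notifier_fn)(enum usb_event ev, void *data);

static notifier_fn chain[4];
static int nchain;

static void notifier_register(notifier_fn fn) { chain[nchain++] = fn; }

static void call_chain(enum usb_event ev, void *data)
{
	for (int i = 0; i < nchain; i++)
		chain[i](ev, data);
}

static void charger_cb(enum usb_event ev, void *data)
{
	(void)data;
	printf("charger: %s\n", ev == USB_EVENT_VBUS ? "enable" : "disable");
}

int main(void)
{
	notifier_register(charger_cb);
	call_chain(USB_EVENT_VBUS, NULL); /* cable plugged in */
	call_chain(USB_EVENT_NONE, NULL); /* cable removed */
	return 0;
}
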
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index c480ea4..c78b255 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -472,6 +472,17 @@ config USB_SERIAL_OTI6858
To compile this driver as a module, choose M here: the
module will be called oti6858.
+config USB_SERIAL_QCAUX
+ tristate "USB Qualcomm Auxiliary Serial Port Driver"
+ ---help---
+ Say Y here if you want to use the auxiliary serial ports provided
+ by many modems based on Qualcomm chipsets. These ports often use
+ a proprietary protocol called DM and cannot be used for AT- or
+ PPP-based communication.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qcaux. If unsure, choose N.
+
config USB_SERIAL_QUALCOMM
tristate "USB Qualcomm Serial modem"
help
@@ -600,6 +611,14 @@ config USB_SERIAL_OPTICON
To compile this driver as a module, choose M here: the
module will be called opticon.
+config USB_SERIAL_VIVOPAY_SERIAL
+ tristate "USB ViVOpay serial interface driver"
+ help
+ Say Y here if you want to use a ViVOtech ViVOpay USB device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vivopay-serial.
+
config USB_SERIAL_DEBUG
tristate "USB Debugging Device"
help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 66619be..83c9e43 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o
obj-$(CONFIG_USB_SERIAL_OPTION) += option.o
obj-$(CONFIG_USB_SERIAL_OTI6858) += oti6858.o
obj-$(CONFIG_USB_SERIAL_PL2303) += pl2303.o
+obj-$(CONFIG_USB_SERIAL_QCAUX) += qcaux.o
obj-$(CONFIG_USB_SERIAL_QUALCOMM) += qcserial.o
obj-$(CONFIG_USB_SERIAL_SAFE) += safe_serial.o
obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o
@@ -55,4 +56,5 @@ obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o
obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o
obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o
obj-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda.o
+obj-$(CONFIG_USB_SERIAL_VIVOPAY_SERIAL) += vivopay-serial.o
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index b10ac84..365db10 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -78,7 +78,7 @@ static int debug;
#define DRIVER_DESC "AIRcable USB Driver"
/* ID table that will be registered with USB core */
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(AIRCABLE_VID, AIRCABLE_USB_PID) },
{ },
};
@@ -468,10 +468,6 @@ static void aircable_read_bulk_callback(struct urb *urb)
if (status) {
dbg("%s - urb status = %d", __func__, status);
- if (!port->port.count) {
- dbg("%s - port is closed, exiting.", __func__);
- return;
- }
if (status == -EPROTO) {
dbg("%s - caught -EPROTO, resubmitting the urb",
__func__);
@@ -530,23 +526,19 @@ static void aircable_read_bulk_callback(struct urb *urb)
}
tty_kref_put(tty);
- /* Schedule the next read _if_ we are still open */
- if (port->port.count) {
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- aircable_read_bulk_callback, port);
-
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- }
-
- return;
+ /* Schedule the next read */
+ usb_fill_bulk_urb(port->read_urb, port->serial->dev,
+ usb_rcvbulkpipe(port->serial->dev,
+ port->bulk_in_endpointAddress),
+ port->read_urb->transfer_buffer,
+ port->read_urb->transfer_buffer_length,
+ aircable_read_bulk_callback, port);
+
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result && result != -EPERM)
+ dev_err(&urb->dev->dev,
+ "%s - failed resubmitting read urb, error %d\n",
+ __func__, result);
}
/* Based on ftdi_sio.c throttle */
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index a9c2dec..547c944 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -50,7 +50,7 @@ static int debug;
/* usb timeout of 1 second */
#define ARK_TIMEOUT (1*HZ)
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x6547, 0x0232) },
{ USB_DEVICE(0x18ec, 0x3118) }, /* USB to IrDA adapter */
{ },
@@ -733,7 +733,6 @@ static void ark3116_read_bulk_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (tty) {
- tty_buffer_request_room(tty, urb->actual_length + 1);
/* overrun is special, not associated with a char */
if (unlikely(lsr & UART_LSR_OE))
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index a0467bc..1295e44 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -103,7 +103,7 @@ static int belkin_sa_tiocmset(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(BELKIN_SA_VID, BELKIN_SA_PID) },
{ USB_DEVICE(BELKIN_OLD_VID, BELKIN_OLD_PID) },
{ USB_DEVICE(PERACOM_VID, PERACOM_PID) },
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 59eff72..9f4fed1 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -22,6 +22,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
+#include <asm/unaligned.h>
#define DEFAULT_BAUD_RATE 9600
#define DEFAULT_TIMEOUT 1000
@@ -70,7 +71,7 @@
static int debug;
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x4348, 0x5523) },
{ USB_DEVICE(0x1a86, 0x7523) },
{ },
@@ -392,16 +393,22 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
struct usb_serial_port *port = tty->driver_data;
int r;
uint16_t reg_contents;
- uint8_t break_reg[2];
+ uint8_t *break_reg;
dbg("%s()", __func__);
+ break_reg = kmalloc(2, GFP_KERNEL);
+ if (!break_reg) {
+ dev_err(&port->dev, "%s - kmalloc failed\n", __func__);
+ return;
+ }
+
r = ch341_control_in(port->serial->dev, CH341_REQ_READ_REG,
- ch341_break_reg, 0, break_reg, sizeof(break_reg));
+ ch341_break_reg, 0, break_reg, 2);
if (r < 0) {
- printk(KERN_WARNING "%s: USB control read error whilst getting"
- " break register contents.\n", __FILE__);
- return;
+ dev_err(&port->dev, "%s - USB control read error (%d)\n",
+ __func__, r);
+ goto out;
}
dbg("%s - initial ch341 break register contents - reg1: %x, reg2: %x",
__func__, break_reg[0], break_reg[1]);
@@ -416,12 +423,14 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
}
dbg("%s - New ch341 break register contents - reg1: %x, reg2: %x",
__func__, break_reg[0], break_reg[1]);
- reg_contents = (uint16_t)break_reg[0] | ((uint16_t)break_reg[1] << 8);
+ reg_contents = get_unaligned_le16(break_reg);
r = ch341_control_out(port->serial->dev, CH341_REQ_WRITE_REG,
ch341_break_reg, reg_contents);
if (r < 0)
- printk(KERN_WARNING "%s: USB control write error whilst setting"
- " break register contents.\n", __FILE__);
+ dev_err(&port->dev, "%s - USB control write error (%d)\n",
+ __func__, r);
+out:
+ kfree(break_reg);
}
static int ch341_tiocmset(struct tty_struct *tty, struct file *file,
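
Two distinct fixes land in the ch341 break handling above: the transfer buffer moves off the stack into kmalloc'd memory, since the USB core may DMA straight into it and stack memory is not DMA-safe, and the two register bytes are combined with get_unaligned_le16() instead of hand-rolled casts. The little-endian composition it performs, standalone:

#include <stdint.h>
#include <stdio.h>

/* equivalent of get_unaligned_le16() on a byte buffer */
static uint16_t le16_from_bytes(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	uint8_t break_reg[2] = { 0x5f, 0xee }; /* hypothetical register pair */

	printf("reg_contents = 0x%04x\n", le16_from_bytes(break_reg)); /* 0xee5f */
	return 0;
}
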
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index bd254ec..507382b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -55,7 +55,7 @@ static int cp210x_carrier_raised(struct usb_serial_port *p);
static int debug;
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
@@ -91,11 +91,12 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
+ { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
{ USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */
{ USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
{ USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demostration module */
- { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesys ETRX2USB */
{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -612,7 +613,7 @@ static void cp210x_set_termios(struct tty_struct *tty,
baud);
if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV,
((BAUD_RATE_GEN_FREQ + baud/2) / baud))) {
- dbg("Baud rate requested not supported by device\n");
+ dbg("Baud rate requested not supported by device");
baud = tty_termios_baud_rate(old_termios);
}
}
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index b0f6402..f744ab7 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -70,7 +70,7 @@ static void cyberjack_read_int_callback(struct urb *urb);
static void cyberjack_read_bulk_callback(struct urb *urb);
static void cyberjack_write_bulk_callback(struct urb *urb);
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(CYBERJACK_VENDOR_ID, CYBERJACK_PRODUCT_ID) },
{ } /* Terminating entry */
};
@@ -391,11 +391,10 @@ static void cyberjack_read_bulk_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (!tty) {
- dbg("%s - ignoring since device not open\n", __func__);
+ dbg("%s - ignoring since device not open", __func__);
return;
}
if (urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index a591ebe..baf74b4 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -66,17 +66,15 @@
#include <linux/serial.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
+#include <asm/unaligned.h>
#include "cypress_m8.h"
-#ifdef CONFIG_USB_SERIAL_DEBUG
- static int debug = 1;
-#else
- static int debug;
-#endif
+static int debug;
static int stats;
static int interval;
+static int unstable_bauds;
/*
* Version Information
@@ -89,24 +87,24 @@ static int interval;
#define CYPRESS_BUF_SIZE 1024
#define CYPRESS_CLOSING_WAIT (30*HZ)
-static struct usb_device_id id_table_earthmate [] = {
+static const struct usb_device_id id_table_earthmate[] = {
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_cyphidcomrs232 [] = {
+static const struct usb_device_id id_table_cyphidcomrs232[] = {
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
{ USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_nokiaca42v2 [] = {
+static const struct usb_device_id id_table_nokiaca42v2[] = {
{ USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
@@ -295,6 +293,9 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
struct cypress_private *priv;
priv = usb_get_serial_port_data(port);
+ if (unstable_bauds)
+ return new_rate;
+
/*
* The general purpose firmware for the Cypress M8 allows for
* a maximum speed of 57600bps (I have no idea whether DeLorme
@@ -344,7 +345,8 @@ static int cypress_serial_control(struct tty_struct *tty,
{
int new_baudrate = 0, retval = 0, tries = 0;
struct cypress_private *priv;
- __u8 feature_buffer[5];
+ u8 *feature_buffer;
+ const unsigned int feature_len = 5;
unsigned long flags;
dbg("%s", __func__);
@@ -354,17 +356,18 @@ static int cypress_serial_control(struct tty_struct *tty,
if (!priv->comm_is_ok)
return -ENODEV;
+ feature_buffer = kcalloc(feature_len, sizeof(u8), GFP_KERNEL);
+ if (!feature_buffer)
+ return -ENOMEM;
+
switch (cypress_request_type) {
case CYPRESS_SET_CONFIG:
- new_baudrate = priv->baud_rate;
/* 0 means 'Hang up' so doesn't change the true bit rate */
- if (baud_rate == 0)
- new_baudrate = priv->baud_rate;
- /* Change of speed ? */
- else if (baud_rate != priv->baud_rate) {
+ new_baudrate = priv->baud_rate;
+ if (baud_rate && baud_rate != priv->baud_rate) {
dbg("%s - baud rate is changing", __func__);
retval = analyze_baud_rate(port, baud_rate);
- if (retval >= 0) {
+ if (retval >= 0) {
new_baudrate = retval;
dbg("%s - New baud rate set to %d",
__func__, new_baudrate);
@@ -373,9 +376,8 @@ static int cypress_serial_control(struct tty_struct *tty,
dbg("%s - baud rate is being sent as %d",
__func__, new_baudrate);
- memset(feature_buffer, 0, sizeof(feature_buffer));
/* fill the feature_buffer with new configuration */
- *((u_int32_t *)feature_buffer) = new_baudrate;
+ put_unaligned_le32(new_baudrate, feature_buffer);
feature_buffer[4] |= data_bits; /* assign data bits in 2 bit space ( max 3 ) */
/* 1 bit gap */
feature_buffer[4] |= (stop_bits << 3); /* assign stop bits in 1 bit space */
@@ -397,15 +399,15 @@ static int cypress_serial_control(struct tty_struct *tty,
HID_REQ_SET_REPORT,
USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
0x0300, 0, feature_buffer,
- sizeof(feature_buffer), 500);
+ feature_len, 500);
if (tries++ >= 3)
break;
- } while (retval != sizeof(feature_buffer) &&
+ } while (retval != feature_len &&
retval != -ENODEV);
- if (retval != sizeof(feature_buffer)) {
+ if (retval != feature_len) {
dev_err(&port->dev, "%s - failed sending serial "
"line settings - %d\n", __func__, retval);
cypress_set_dead(port);
@@ -425,43 +427,42 @@ static int cypress_serial_control(struct tty_struct *tty,
/* Not implemented for this device,
and if we try to do it we're likely
to crash the hardware. */
- return -ENOTTY;
+ retval = -ENOTTY;
+ goto out;
}
dbg("%s - retreiving serial line settings", __func__);
- /* set initial values in feature buffer */
- memset(feature_buffer, 0, sizeof(feature_buffer));
-
do {
retval = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
HID_REQ_GET_REPORT,
USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
0x0300, 0, feature_buffer,
- sizeof(feature_buffer), 500);
+ feature_len, 500);
if (tries++ >= 3)
break;
- } while (retval != sizeof(feature_buffer)
+ } while (retval != feature_len
&& retval != -ENODEV);
- if (retval != sizeof(feature_buffer)) {
+ if (retval != feature_len) {
dev_err(&port->dev, "%s - failed to retrieve serial "
"line settings - %d\n", __func__, retval);
cypress_set_dead(port);
- return retval;
+ goto out;
} else {
spin_lock_irqsave(&priv->lock, flags);
/* store the config in one byte, and later
use bit masks to check values */
priv->current_config = feature_buffer[4];
- priv->baud_rate = *((u_int32_t *)feature_buffer);
+ priv->baud_rate = get_unaligned_le32(feature_buffer);
spin_unlock_irqrestore(&priv->lock, flags);
}
}
spin_lock_irqsave(&priv->lock, flags);
++priv->cmd_count;
spin_unlock_irqrestore(&priv->lock, flags);
-
+out:
+ kfree(feature_buffer);
return retval;
} /* cypress_serial_control */
@@ -690,7 +691,6 @@ static void cypress_dtr_rts(struct usb_serial_port *port, int on)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
/* drop dtr and rts */
- priv = usb_get_serial_port_data(port);
spin_lock_irq(&priv->lock);
if (on == 0)
priv->line_control = 0;
@@ -1307,13 +1307,9 @@ static void cypress_read_int_callback(struct urb *urb)
spin_unlock_irqrestore(&priv->lock, flags);
/* process read if there is data other than line status */
- if (tty && (bytes > i)) {
- bytes = tty_buffer_request_room(tty, bytes);
- for (; i < bytes ; ++i) {
- dbg("pushing byte number %d - %d - %c", i, data[i],
- data[i]);
- tty_insert_flip_char(tty, data[i], tty_flag);
- }
+ if (tty && bytes > i) {
+ tty_insert_flip_string_fixed_flag(tty, data + i,
+ bytes - i, tty_flag);
tty_flip_buffer_push(tty);
}
@@ -1325,9 +1321,9 @@ static void cypress_read_int_callback(struct urb *urb)
continue_read:
tty_kref_put(tty);
- /* Continue trying to always read... unless the port has closed. */
+ /* Continue trying to always read */
- if (port->port.count > 0 && priv->comm_is_ok) {
+ if (priv->comm_is_ok) {
usb_fill_int_urb(port->interrupt_in_urb, port->serial->dev,
usb_rcvintpipe(port->serial->dev,
port->interrupt_in_endpointAddress),
@@ -1336,7 +1332,7 @@ continue_read:
cypress_read_int_callback, port,
priv->read_urb_interval);
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
- if (result) {
+ if (result && result != -EPERM) {
dev_err(&urb->dev->dev, "%s - failed resubmitting "
"read urb, error %d\n", __func__,
result);
@@ -1650,3 +1646,5 @@ module_param(stats, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(stats, "Enable statistics or not");
module_param(interval, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(interval, "Overrides interrupt interval");
+module_param(unstable_bauds, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(unstable_bauds, "Allow unstable baud rates");
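
The cypress feature report that put_unaligned_le32()/get_unaligned_le32() now handle is five bytes: the baud rate little-endian in bytes 0-3 and the line settings packed into byte 4, data bits in the low two bits, a one-bit gap, then stop bits at bit 3, matching the comments in the hunk. A standalone sketch of that encoding:

#include <stdint.h>
#include <stdio.h>

static void encode_feature(uint8_t buf[5], uint32_t baud,
			   uint8_t data_bits, uint8_t stop_bits)
{
	/* bytes 0..3: baud rate, little endian (put_unaligned_le32) */
	for (int i = 0; i < 4; i++)
		buf[i] = baud >> (8 * i);
	/* byte 4: data bits in bits 1:0, one-bit gap, stop bits at bit 3 */
	buf[4] = (data_bits & 0x3) | (uint8_t)(stop_bits << 3);
}

int main(void)
{
	uint8_t buf[5];

	encode_feature(buf, 4800, 3, 0); /* 8N1 at 4800 baud */
	for (int i = 0; i < 5; i++)
		printf("%02x ", buf[i]);
	printf("\n"); /* c0 12 00 00 03 */
	return 0;
}
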
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 68e80be..68b0aa5 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -470,18 +470,18 @@ static int digi_read_oob_callback(struct urb *urb);
static int debug;
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_2 [] = {
+static const struct usb_device_id id_table_2[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_4 [] = {
+static const struct usb_device_id id_table_4[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
{ } /* Terminating entry */
};
@@ -1262,10 +1262,10 @@ static void digi_write_bulk_callback(struct urb *urb)
return;
}
- /* try to send any buffered data on this port, if it is open */
+ /* try to send any buffered data on this port */
spin_lock(&priv->dp_port_lock);
priv->dp_write_urb_in_use = 0;
- if (port->port.count && priv->dp_out_buf_len > 0) {
+ if (priv->dp_out_buf_len > 0) {
*((unsigned char *)(port->write_urb->transfer_buffer))
= (unsigned char)DIGI_CMD_SEND_DATA;
*((unsigned char *)(port->write_urb->transfer_buffer) + 1)
@@ -1288,7 +1288,7 @@ static void digi_write_bulk_callback(struct urb *urb)
schedule_work(&priv->dp_wakeup_work);
spin_unlock(&priv->dp_port_lock);
- if (ret)
+ if (ret && ret != -EPERM)
dev_err(&port->dev,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
@@ -1353,8 +1353,7 @@ static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
struct digi_port *priv = usb_get_serial_port_data(port);
struct ktermios not_termios;
- dbg("digi_open: TOP: port=%d, open_count=%d",
- priv->dp_port_num, port->port.count);
+ dbg("digi_open: TOP: port=%d", priv->dp_port_num);
/* be sure the device is started up */
if (digi_startup_device(port->serial) != 0)
@@ -1393,8 +1392,7 @@ static void digi_close(struct usb_serial_port *port)
unsigned char buf[32];
struct digi_port *priv = usb_get_serial_port_data(port);
- dbg("digi_close: TOP: port=%d, open_count=%d",
- priv->dp_port_num, port->port.count);
+ dbg("digi_close: TOP: port=%d", priv->dp_port_num);
mutex_lock(&port->serial->disc_mutex);
/* if disconnected, just clear flags */
@@ -1629,7 +1627,7 @@ static void digi_read_bulk_callback(struct urb *urb)
/* continue read */
urb->dev = port->serial->dev;
ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret != 0) {
+ if (ret != 0 && ret != -EPERM) {
dev_err(&port->dev,
"%s: failed resubmitting urb, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
@@ -1658,12 +1656,11 @@ static int digi_read_inb_callback(struct urb *urb)
int port_status = ((unsigned char *)urb->transfer_buffer)[2];
unsigned char *data = ((unsigned char *)urb->transfer_buffer) + 3;
int flag, throttled;
- int i;
int status = urb->status;
/* do not process callbacks on closed ports */
/* but do continue the read chain */
- if (port->port.count == 0)
+ if (urb->status == -ENOENT)
return 0;
/* short/multiple packet check */
@@ -1705,17 +1702,9 @@ static int digi_read_inb_callback(struct urb *urb)
/* data length is len-1 (one byte of len is port_status) */
--len;
-
- len = tty_buffer_request_room(tty, len);
if (len > 0) {
- /* Hot path */
- if (flag == TTY_NORMAL)
- tty_insert_flip_string(tty, data, len);
- else {
- for (i = 0; i < len; i++)
- tty_insert_flip_char(tty,
- data[i], flag);
- }
+ tty_insert_flip_string_fixed_flag(tty, data, len,
+ flag);
tty_flip_buffer_push(tty);
}
}
@@ -1776,8 +1765,7 @@ static int digi_read_oob_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
rts = 0;
- if (port->port.count)
- rts = tty->termios->c_cflag & CRTSCTS;
+ rts = tty->termios->c_cflag & CRTSCTS;
if (opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
spin_lock(&priv->dp_port_lock);
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 7dd0e3e..5f740a1 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -93,7 +93,7 @@ static void empeg_init_termios(struct tty_struct *tty);
static void empeg_write_bulk_callback(struct urb *urb);
static void empeg_read_bulk_callback(struct urb *urb);
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(EMPEG_VENDOR_ID, EMPEG_PRODUCT_ID) },
{ } /* Terminating entry */
};
@@ -346,7 +346,6 @@ static void empeg_read_bulk_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
bytes_in += urb->actual_length;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7638828..6af0dfa 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -33,12 +33,12 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/serial.h>
@@ -88,10 +88,10 @@ struct ftdi_private {
unsigned int latency; /* latency setting in use */
spinlock_t tx_lock; /* spinlock for transmit state */
- unsigned long tx_bytes;
unsigned long tx_outstanding_bytes;
unsigned long tx_outstanding_urbs;
unsigned short max_packet_size;
+ struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() */
};
/* struct ftdi_sio_quirk is used by devices requiring special attention. */
@@ -614,6 +614,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
{ USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
+ { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
@@ -737,6 +738,10 @@ static struct usb_device_id id_table_combined [] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
{ USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
@@ -812,7 +817,7 @@ static struct usb_serial_driver ftdi_sio_device = {
.name = "ftdi_sio",
},
.description = "FTDI USB Serial Device",
- .usb_driver = &ftdi_driver ,
+ .usb_driver = &ftdi_driver,
.id_table = id_table_combined,
.num_ports = 1,
.probe = ftdi_sio_probe,
@@ -828,8 +833,8 @@ static struct usb_serial_driver ftdi_sio_device = {
.chars_in_buffer = ftdi_chars_in_buffer,
.read_bulk_callback = ftdi_read_bulk_callback,
.write_bulk_callback = ftdi_write_bulk_callback,
- .tiocmget = ftdi_tiocmget,
- .tiocmset = ftdi_tiocmset,
+ .tiocmget = ftdi_tiocmget,
+ .tiocmset = ftdi_tiocmset,
.ioctl = ftdi_ioctl,
.set_termios = ftdi_set_termios,
.break_ctl = ftdi_break_ctl,
@@ -935,7 +940,6 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
unsigned int clear)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
- char *buf;
unsigned urb_value;
int rv;
@@ -944,10 +948,6 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
return 0; /* no change */
}
- buf = kmalloc(1, GFP_NOIO);
- if (!buf)
- return -ENOMEM;
-
clear &= ~set; /* 'set' takes precedence over 'clear' */
urb_value = 0;
if (clear & TIOCM_DTR)
@@ -963,9 +963,7 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
FTDI_SIO_SET_MODEM_CTRL_REQUEST,
FTDI_SIO_SET_MODEM_CTRL_REQUEST_TYPE,
urb_value, priv->interface,
- buf, 0, WDR_TIMEOUT);
-
- kfree(buf);
+ NULL, 0, WDR_TIMEOUT);
if (rv < 0) {
dbg("%s Error from MODEM_CTRL urb: DTR %s, RTS %s",
__func__,
@@ -1124,16 +1122,11 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
- char *buf;
__u16 urb_value;
__u16 urb_index;
__u32 urb_index_value;
int rv;
- buf = kmalloc(1, GFP_NOIO);
- if (!buf)
- return -ENOMEM;
-
urb_index_value = get_ftdi_divisor(tty, port);
urb_value = (__u16)urb_index_value;
urb_index = (__u16)(urb_index_value >> 16);
@@ -1146,9 +1139,7 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
FTDI_SIO_SET_BAUDRATE_REQUEST,
FTDI_SIO_SET_BAUDRATE_REQUEST_TYPE,
urb_value, urb_index,
- buf, 0, WDR_SHORT_TIMEOUT);
-
- kfree(buf);
+ NULL, 0, WDR_SHORT_TIMEOUT);
return rv;
}
@@ -1156,8 +1147,7 @@ static int write_latency_timer(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
- char buf[1];
- int rv = 0;
+ int rv;
int l = priv->latency;
if (priv->flags & ASYNC_LOW_LATENCY)
@@ -1170,8 +1160,7 @@ static int write_latency_timer(struct usb_serial_port *port)
FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
l, priv->interface,
- buf, 0, WDR_TIMEOUT);
-
+ NULL, 0, WDR_TIMEOUT);
if (rv < 0)
dev_err(&port->dev, "Unable to write latency timer: %i\n", rv);
return rv;
@@ -1181,24 +1170,29 @@ static int read_latency_timer(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
- unsigned short latency = 0;
- int rv = 0;
-
+ unsigned char *buf;
+ int rv;
dbg("%s", __func__);
+ buf = kmalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
rv = usb_control_msg(udev,
usb_rcvctrlpipe(udev, 0),
FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
0, priv->interface,
- (char *) &latency, 1, WDR_TIMEOUT);
-
- if (rv < 0) {
+ buf, 1, WDR_TIMEOUT);
+ if (rv < 0)
dev_err(&port->dev, "Unable to read latency timer: %i\n", rv);
- return -EIO;
- }
- return latency;
+ else
+ priv->latency = buf[0];
+
+ kfree(buf);
+
+ return rv;
}
static int get_serial_info(struct usb_serial_port *port,
@@ -1229,7 +1223,7 @@ static int set_serial_info(struct tty_struct *tty,
if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
return -EFAULT;
- lock_kernel();
+ mutex_lock(&priv->cfg_lock);
old_priv = *priv;
/* Do error checking and permission checking */
@@ -1237,7 +1231,7 @@ static int set_serial_info(struct tty_struct *tty,
if (!capable(CAP_SYS_ADMIN)) {
if (((new_serial.flags & ~ASYNC_USR_MASK) !=
(priv->flags & ~ASYNC_USR_MASK))) {
- unlock_kernel();
+ mutex_unlock(&priv->cfg_lock);
return -EPERM;
}
priv->flags = ((priv->flags & ~ASYNC_USR_MASK) |
@@ -1248,7 +1242,7 @@ static int set_serial_info(struct tty_struct *tty,
if ((new_serial.baud_base != priv->baud_base) &&
(new_serial.baud_base < 9600)) {
- unlock_kernel();
+ mutex_unlock(&priv->cfg_lock);
return -EINVAL;
}
@@ -1278,11 +1272,11 @@ check_and_exit:
(priv->flags & ASYNC_SPD_MASK)) ||
(((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) &&
(old_priv.custom_divisor != priv->custom_divisor))) {
- unlock_kernel();
+ mutex_unlock(&priv->cfg_lock);
change_speed(tty, port);
}
else
- unlock_kernel();
+ mutex_unlock(&priv->cfg_lock);
return 0;
} /* set_serial_info */
@@ -1338,20 +1332,20 @@ static void ftdi_determine_type(struct usb_serial_port *port)
__func__);
}
} else if (version < 0x200) {
- /* Old device. Assume its the original SIO. */
+ /* Old device. Assume it's the original SIO. */
priv->chip_type = SIO;
priv->baud_base = 12000000 / 16;
priv->write_offset = 1;
} else if (version < 0x400) {
- /* Assume its an FT8U232AM (or FT8U245AM) */
+ /* Assume it's an FT8U232AM (or FT8U245AM) */
/* (It might be a BM because of the iSerialNumber bug,
* but it will still work as an AM device.) */
priv->chip_type = FT8U232AM;
} else if (version < 0x600) {
- /* Assume its an FT232BM (or FT245BM) */
+ /* Assume it's an FT232BM (or FT245BM) */
priv->chip_type = FT232BM;
} else {
- /* Assume its an FT232R */
+ /* Assume it's an FT232R */
priv->chip_type = FT232RL;
}
dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
@@ -1371,7 +1365,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
unsigned num_endpoints;
- int i = 0;
+ int i;
num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
@@ -1423,7 +1417,7 @@ static ssize_t store_latency_timer(struct device *dev,
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
int v = simple_strtoul(valbuf, NULL, 10);
- int rv = 0;
+ int rv;
priv->latency = v;
rv = write_latency_timer(port);
@@ -1440,9 +1434,8 @@ static ssize_t store_event_char(struct device *dev,
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
- char buf[1];
int v = simple_strtoul(valbuf, NULL, 10);
- int rv = 0;
+ int rv;
dbg("%s: setting event char = %i", __func__, v);
@@ -1451,8 +1444,7 @@ static ssize_t store_event_char(struct device *dev,
FTDI_SIO_SET_EVENT_CHAR_REQUEST,
FTDI_SIO_SET_EVENT_CHAR_REQUEST_TYPE,
v, priv->interface,
- buf, 0, WDR_TIMEOUT);
-
+ NULL, 0, WDR_TIMEOUT);
if (rv < 0) {
dbg("Unable to write event character: %i", rv);
return -EIO;
@@ -1551,9 +1543,9 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
kref_init(&priv->kref);
spin_lock_init(&priv->tx_lock);
+ mutex_init(&priv->cfg_lock);
init_waitqueue_head(&priv->delta_msr_wait);
- /* This will push the characters through immediately rather
- than queue a task to deliver them */
+
priv->flags = ASYNC_LOW_LATENCY;
if (quirk && quirk->port_probe)
@@ -1585,7 +1577,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
ftdi_determine_type(port);
ftdi_set_max_packet_size(port);
- read_latency_timer(port);
+ if (read_latency_timer(port) < 0)
+ priv->latency = 16;
create_sysfs_attrs(port);
return 0;
}
@@ -1630,8 +1623,6 @@ static int ftdi_NDI_device_setup(struct usb_serial *serial)
{
struct usb_device *udev = serial->dev;
int latency = ndi_latency_timer;
- int rv = 0;
- char buf[1];
if (latency == 0)
latency = 1;
@@ -1641,10 +1632,11 @@ static int ftdi_NDI_device_setup(struct usb_serial *serial)
dbg("%s setting NDI device latency to %d", __func__, latency);
dev_info(&udev->dev, "NDI device with a latency value of %d", latency);
- rv = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ /* FIXME: errors are not returned */
+ usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
- latency, 0, buf, 0, WDR_TIMEOUT);
+ latency, 0, NULL, 0, WDR_TIMEOUT);
return 0;
}
@@ -1720,7 +1712,7 @@ static int ftdi_submit_read_urb(struct usb_serial_port *port, gfp_t mem_flags)
urb->transfer_buffer_length,
ftdi_read_bulk_callback, port);
result = usb_submit_urb(urb, mem_flags);
- if (result)
+ if (result && result != -EPERM)
dev_err(&port->dev,
"%s - failed submitting read urb, error %d\n",
__func__, result);
@@ -1732,16 +1724,10 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
struct usb_device *dev = port->serial->dev;
struct ftdi_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
-
- int result = 0;
- char buf[1]; /* Needed for the usb_control_msg I think */
+ int result;
dbg("%s", __func__);
- spin_lock_irqsave(&priv->tx_lock, flags);
- priv->tx_bytes = 0;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
-
write_latency_timer(port);
/* No error checking for this (will get errors later anyway) */
@@ -1749,7 +1735,7 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
FTDI_SIO_RESET_REQUEST, FTDI_SIO_RESET_REQUEST_TYPE,
FTDI_SIO_RESET_SIO,
- priv->interface, buf, 0, WDR_TIMEOUT);
+ priv->interface, NULL, 0, WDR_TIMEOUT);
/* Termios defaults are set by usb_serial_init. We don't change
port->tty->termios - this would lose speed settings, etc.
@@ -1777,7 +1763,6 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
- char buf[1];
mutex_lock(&port->serial->disc_mutex);
if (!port->serial->disconnected) {
@@ -1786,7 +1771,7 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
usb_sndctrlpipe(port->serial->dev, 0),
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
- 0, priv->interface, buf, 0,
+ 0, priv->interface, NULL, 0,
WDR_TIMEOUT) < 0) {
dev_err(&port->dev, "error from flowcontrol urb\n");
}
@@ -1847,7 +1832,7 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
spin_lock_irqsave(&priv->tx_lock, flags);
if (priv->tx_outstanding_urbs > URB_UPPER_LIMIT) {
spin_unlock_irqrestore(&priv->tx_lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return 0;
}
priv->tx_outstanding_urbs++;
@@ -1927,7 +1912,6 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
} else {
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_outstanding_bytes += count;
- priv->tx_bytes += count;
spin_unlock_irqrestore(&priv->tx_lock, flags);
}
@@ -2154,8 +2138,7 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
- __u16 urb_value = 0;
- char buf[1];
+ __u16 urb_value;
/* break_state = -1 to turn on break, and 0 to turn off break */
/* see drivers/char/tty_io.c to see it used */
@@ -2171,7 +2154,7 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
FTDI_SIO_SET_DATA_REQUEST,
FTDI_SIO_SET_DATA_REQUEST_TYPE,
urb_value , priv->interface,
- buf, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev, "%s FAILED to enable/disable break state "
"(state was %d)\n", __func__, break_state);
}
@@ -2195,7 +2178,6 @@ static void ftdi_set_termios(struct tty_struct *tty,
struct ktermios *termios = tty->termios;
unsigned int cflag = termios->c_cflag;
__u16 urb_value; /* will hold the new flags */
- char buf[1]; /* Perhaps I should dynamically alloc this? */
/* Added for xon/xoff support */
unsigned int iflag = termios->c_iflag;
@@ -2246,12 +2228,10 @@ static void ftdi_set_termios(struct tty_struct *tty,
}
if (cflag & CSIZE) {
switch (cflag & CSIZE) {
- case CS5: urb_value |= 5; dbg("Setting CS5"); break;
- case CS6: urb_value |= 6; dbg("Setting CS6"); break;
case CS7: urb_value |= 7; dbg("Setting CS7"); break;
case CS8: urb_value |= 8; dbg("Setting CS8"); break;
default:
- dev_err(&port->dev, "CSIZE was set but not CS5-CS8\n");
+ dev_err(&port->dev, "CSIZE was set but not CS7-CS8\n");
}
}
@@ -2263,7 +2243,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
FTDI_SIO_SET_DATA_REQUEST,
FTDI_SIO_SET_DATA_REQUEST_TYPE,
urb_value , priv->interface,
- buf, 0, WDR_SHORT_TIMEOUT) < 0) {
+ NULL, 0, WDR_SHORT_TIMEOUT) < 0) {
dev_err(&port->dev, "%s FAILED to set "
"databits/stopbits/parity\n", __func__);
}
@@ -2275,7 +2255,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
0, priv->interface,
- buf, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev,
"%s error from disable flowcontrol urb\n",
__func__);
@@ -2301,7 +2281,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
0 , (FTDI_SIO_RTS_CTS_HS | priv->interface),
- buf, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev,
"urb failed to set to rts/cts flow control\n");
}
@@ -2333,7 +2313,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
urb_value , (FTDI_SIO_XON_XOFF_HS
| priv->interface),
- buf, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev, "urb failed to set to "
"xon/xoff flow control\n");
}
@@ -2347,7 +2327,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
0, priv->interface,
- buf, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev,
"urb failed to clear flow control\n");
}
@@ -2361,21 +2341,22 @@ static int ftdi_tiocmget(struct tty_struct *tty, struct file *file)
{
struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
- unsigned char buf[2];
+ unsigned char *buf;
+ int len;
int ret;
dbg("%s TIOCMGET", __func__);
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ /*
+	 * The 8U232AM returns a two-byte value (the SIO a one-byte value) in
+	 * the same format as the data returned from the IN endpoint.
+ */
switch (priv->chip_type) {
case SIO:
- /* Request the status from the device */
- ret = usb_control_msg(port->serial->dev,
- usb_rcvctrlpipe(port->serial->dev, 0),
- FTDI_SIO_GET_MODEM_STATUS_REQUEST,
- FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
- 0, 0,
- buf, 1, WDR_TIMEOUT);
- if (ret < 0)
- return ret;
+ len = 1;
break;
case FT8U232AM:
case FT232BM:
@@ -2383,27 +2364,30 @@ static int ftdi_tiocmget(struct tty_struct *tty, struct file *file)
case FT232RL:
case FT2232H:
case FT4232H:
- /* the 8U232AM returns a two byte value (the sio is a 1 byte
- value) - in the same format as the data returned from the in
- point */
- ret = usb_control_msg(port->serial->dev,
- usb_rcvctrlpipe(port->serial->dev, 0),
- FTDI_SIO_GET_MODEM_STATUS_REQUEST,
- FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
- 0, priv->interface,
- buf, 2, WDR_TIMEOUT);
- if (ret < 0)
- return ret;
+ len = 2;
break;
default:
- return -EFAULT;
+ ret = -EFAULT;
+ goto out;
}
- return (buf[0] & FTDI_SIO_DSR_MASK ? TIOCM_DSR : 0) |
+ ret = usb_control_msg(port->serial->dev,
+ usb_rcvctrlpipe(port->serial->dev, 0),
+ FTDI_SIO_GET_MODEM_STATUS_REQUEST,
+ FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
+ 0, priv->interface,
+ buf, len, WDR_TIMEOUT);
+ if (ret < 0)
+ goto out;
+
+ ret = (buf[0] & FTDI_SIO_DSR_MASK ? TIOCM_DSR : 0) |
(buf[0] & FTDI_SIO_CTS_MASK ? TIOCM_CTS : 0) |
(buf[0] & FTDI_SIO_RI_MASK ? TIOCM_RI : 0) |
(buf[0] & FTDI_SIO_RLSD_MASK ? TIOCM_CD : 0) |
priv->last_dtr_rts;
+out:
+ kfree(buf);
+ return ret;
}
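
The rewritten ftdi_tiocmget() folds both chip branches into one control message whose length depends only on the chip type; the first status byte is then decoded identically for all types. A sketch of that decode, using the FTDI_SIO_*_MASK constants from ftdi_sio.h:

static int ftdi_msr_to_tiocm(u8 msr, int last_dtr_rts)
{
	return (msr & FTDI_SIO_DSR_MASK  ? TIOCM_DSR : 0) |
	       (msr & FTDI_SIO_CTS_MASK  ? TIOCM_CTS : 0) |
	       (msr & FTDI_SIO_RI_MASK   ? TIOCM_RI  : 0) |
	       (msr & FTDI_SIO_RLSD_MASK ? TIOCM_CD  : 0) |
	       last_dtr_rts;		/* DTR/RTS are shadowed locally */
}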
static int ftdi_tiocmset(struct tty_struct *tty, struct file *file,
@@ -2508,8 +2492,7 @@ void ftdi_unthrottle(struct tty_struct *tty)
port->throttled = port->throttle_req = 0;
spin_unlock_irqrestore(&port->lock, flags);
- /* Resubmit urb if throttled and open. */
- if (was_throttled && test_bit(ASYNCB_INITIALIZED, &port->port.flags))
+ if (was_throttled)
ftdi_submit_read_urb(port, GFP_KERNEL);
}
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index b0e0d64..ff9bf80 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -28,13 +28,13 @@
#define FTDI_SIO_SET_FLOW_CTRL 2 /* Set flow control register */
#define FTDI_SIO_SET_BAUD_RATE 3 /* Set baud rate */
#define FTDI_SIO_SET_DATA 4 /* Set the data characteristics of the port */
-#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modern status register */
+#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modem status register */
#define FTDI_SIO_SET_EVENT_CHAR 6 /* Set the event character */
#define FTDI_SIO_SET_ERROR_CHAR 7 /* Set the error character */
#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */
#define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */
-/* Interface indicies for FT2232, FT2232H and FT4232H devices*/
+/* Interface indices for FT2232, FT2232H and FT4232H devices */
#define INTERFACE_A 1
#define INTERFACE_B 2
#define INTERFACE_C 3
@@ -270,7 +270,7 @@ typedef enum {
* BmRequestType: 0100 0000b
* bRequest: FTDI_SIO_SET_FLOW_CTRL
* wValue: Xoff/Xon
- * wIndex: Protocol/Port - hIndex is protocl / lIndex is port
+ * wIndex: Protocol/Port - hIndex is protocol / lIndex is port
* wLength: 0
* Data: None
*
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index c8951ae..0727e19 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -22,7 +22,7 @@
#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
-#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
+#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
@@ -49,7 +49,7 @@
#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
-#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */
+#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmbH */
/* OpenDCC (www.opendcc.de) product id */
#define FTDI_OPENDCC_PID 0xBFD8
@@ -185,7 +185,7 @@
#define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */
#define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */
#define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */
-#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */
+#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Energy monitor EM 1010 PC */
#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */
#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */
@@ -212,8 +212,8 @@
* drivers, or possibly the Comedi drivers in some cases. */
#define FTDI_ELV_CLI7000_PID 0xFB59 /* Computer-Light-Interface (CLI 7000) */
#define FTDI_ELV_PPS7330_PID 0xFB5C /* Processor-Power-Supply (PPS 7330) */
-#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperartur-Feuchte Messgeraet (TFM 100) */
-#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkurh (UDF 77) */
+#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperatur-Feuchte-Messgeraet (TFM 100) */
+#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkuhr (UDF 77) */
#define FTDI_ELV_UIO88_PID 0xFB5F /* USB-I/O Interface (UIO 88) */
/*
@@ -320,7 +320,7 @@
/*
* 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485,
- * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices
+ * USB-TTY aktiv, USB-TTY passiv. Some PIDs are used by several devices
* and I'm not entirely sure which are used by which.
*/
#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
@@ -330,10 +330,10 @@
* Linx Technologies product ids
*/
#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
-#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */
-#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */
-#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */
-#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
+#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */
+#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */
+#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */
+#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
/*
* Oceanic product ids
@@ -494,6 +494,13 @@
#define RATOC_PRODUCT_ID_USB60F 0xb020
/*
+ * Contec products (http://www.contec.com)
+ * Submitted by Daniel Sangorrin
+ */
+#define CONTEC_VID 0x06CE /* Vendor ID */
+#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
+
+/*
* Definitions for B&B Electronics products.
*/
#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
@@ -642,7 +649,7 @@
#define FALCOM_TWIST_PID 0x0001 /* Falcom Twist USB GPRS modem */
#define FALCOM_SAMBA_PID 0x0005 /* Falcom Samba USB GPRS modem */
-/* Larsen and Brusgaard AltiTrack/USBtrack */
+/* Larsen and Brusgaard AltiTrack/USBtrack */
#define LARSENBRUSGAARD_VID 0x0FD8
#define LB_ALTITRACK_PID 0x0001
@@ -971,7 +978,7 @@
#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
/*
- * Dresden Elektronic Sensor Terminal Board
+ * Dresden Elektronik Sensor Terminal Board
*/
#define DE_VID 0x1cf1 /* Vendor ID */
#define STB_PID 0x0001 /* Sensor Terminal Board */
@@ -1002,3 +1009,11 @@
#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/
#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/
#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */
+
+/*
+ * MJS Gadgets HD Radio / XM Radio / Sirius Radio interfaces (using VID 0x0403)
+ */
+#define MJSG_GENERIC_PID 0x9378
+#define MJSG_SR_RADIO_PID 0x9379
+#define MJSG_XM_RADIO_PID 0x937A
+#define MJSG_HD_RADIO_PID 0x937C
diff --git a/drivers/usb/serial/funsoft.c b/drivers/usb/serial/funsoft.c
index d30f736..e21ce9d 100644
--- a/drivers/usb/serial/funsoft.c
+++ b/drivers/usb/serial/funsoft.c
@@ -18,7 +18,7 @@
static int debug;
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1404, 0xcddc) },
{ },
};
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 5ac900e..a42b29a 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -210,7 +210,7 @@ static unsigned char const PRIVATE_REQ[]
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
/* the same device id seems to be used by all
usb enabled GPS devices */
{ USB_DEVICE(GARMIN_VENDOR_ID, 3) },
@@ -271,7 +271,6 @@ static void send_to_tty(struct usb_serial_port *port,
usb_serial_debug_data(debug, &port->dev,
__func__, actual_length, data);
- tty_buffer_request_room(tty, actual_length);
tty_insert_flip_string(tty, data, actual_length);
tty_flip_buffer_push(tty);
}
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 83443d6..89fac36 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -20,6 +20,7 @@
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
#include <linux/kfifo.h>
+#include <linux/serial.h>
static int debug;
@@ -41,7 +42,7 @@ static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
/* we want to look at all devices, as the vendor/product id can change
* depending on the command line argument */
-static struct usb_device_id generic_serial_ids[] = {
+static const struct usb_device_id generic_serial_ids[] = {
{.driver_info = 42},
{}
};
@@ -194,7 +195,7 @@ static int usb_serial_multi_urb_write(struct tty_struct *tty,
if (port->urbs_in_flight >
port->serial->type->max_in_flight_urbs) {
spin_unlock_irqrestore(&port->lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return bwrite;
}
port->tx_bytes_flight += towrite;
@@ -585,7 +586,7 @@ int usb_serial_generic_resume(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
- if (!port->port.count)
+ if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
continue;
if (port->read_urb) {
diff --git a/drivers/usb/serial/hp4x.c b/drivers/usb/serial/hp4x.c
index 4313292..8093791 100644
--- a/drivers/usb/serial/hp4x.c
+++ b/drivers/usb/serial/hp4x.c
@@ -29,7 +29,7 @@
#define HP_VENDOR_ID 0x03f0
#define HP49GP_PRODUCT_ID 0x0121
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(HP_VENDOR_ID, HP49GP_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index b97960a..3ef8df0 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -364,42 +364,6 @@ static void update_edgeport_E2PROM(struct edgeport_serial *edge_serial)
release_firmware(fw);
}
-
-/************************************************************************
- * *
- * Get string descriptor from device *
- * *
- ************************************************************************/
-static int get_string(struct usb_device *dev, int Id, char *string, int buflen)
-{
- struct usb_string_descriptor StringDesc;
- struct usb_string_descriptor *pStringDesc;
-
- dbg("%s - USB String ID = %d", __func__, Id);
-
- if (!usb_get_descriptor(dev, USB_DT_STRING, Id,
- &StringDesc, sizeof(StringDesc)))
- return 0;
-
- pStringDesc = kmalloc(StringDesc.bLength, GFP_KERNEL);
- if (!pStringDesc)
- return 0;
-
- if (!usb_get_descriptor(dev, USB_DT_STRING, Id,
- pStringDesc, StringDesc.bLength)) {
- kfree(pStringDesc);
- return 0;
- }
-
- unicode_to_ascii(string, buflen,
- pStringDesc->wData, pStringDesc->bLength/2);
-
- kfree(pStringDesc);
- dbg("%s - USB String %s", __func__, string);
- return strlen(string);
-}
-
-
#if 0
/************************************************************************
*
@@ -2007,7 +1971,7 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
return;
case IOSP_EXT_STATUS_RX_CHECK_RSP:
- dbg("%s ========== Port %u CHECK_RSP Sequence = %02x =============\n", __func__, edge_serial->rxPort, byte3);
+ dbg("%s ========== Port %u CHECK_RSP Sequence = %02x =============", __func__, edge_serial->rxPort, byte3);
/* Port->RxCheckRsp = true; */
return;
}
@@ -2075,7 +2039,7 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
break;
default:
- dbg("%s - Unrecognized IOSP status code %u\n", __func__, code);
+ dbg("%s - Unrecognized IOSP status code %u", __func__, code);
break;
}
return;
@@ -2091,18 +2055,13 @@ static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
{
int cnt;
- do {
- cnt = tty_buffer_request_room(tty, length);
- if (cnt < length) {
- dev_err(dev, "%s - dropping data, %d bytes lost\n",
- __func__, length - cnt);
- if (cnt == 0)
- break;
- }
- tty_insert_flip_string(tty, data, cnt);
- data += cnt;
- length -= cnt;
- } while (length > 0);
+ cnt = tty_insert_flip_string(tty, data, length);
+ if (cnt < length) {
+ dev_err(dev, "%s - dropping data, %d bytes lost\n",
+ __func__, length - cnt);
+ }
+ data += cnt;
+ length -= cnt;
tty_flip_buffer_push(tty);
}
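
tty_insert_flip_string() sizes the flip buffer itself and returns how much it actually queued, which is why the tty_buffer_request_room() calls and loops disappear across this series. A minimal sketch of the simplified receive path (the function name is illustrative):

static void recv_to_tty(struct tty_struct *tty,
			const unsigned char *data, int length)
{
	int copied = tty_insert_flip_string(tty, data, length);

	if (copied < length)		/* flip buffer full: data is lost */
		pr_err("dropping data, %d bytes lost\n", length - copied);
	tty_flip_buffer_push(tty);
}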
@@ -2530,7 +2489,7 @@ static int calc_baud_rate_divisor(int baudrate, int *divisor)
*divisor = custom;
- dbg("%s - Baud %d = %d\n", __func__, baudrate, custom);
+ dbg("%s - Baud %d = %d", __func__, baudrate, custom);
return 0;
}
@@ -2915,7 +2874,7 @@ static void load_application_firmware(struct edgeport_serial *edge_serial)
break;
case EDGE_DOWNLOAD_FILE_NONE:
- dbg ("No download file specified, skipping download\n");
+ dbg("No download file specified, skipping download");
return;
default:
@@ -2997,10 +2956,12 @@ static int edge_startup(struct usb_serial *serial)
usb_set_serial_data(serial, edge_serial);
/* get the name for the device from the device */
- i = get_string(dev, dev->descriptor.iManufacturer,
+ i = usb_string(dev, dev->descriptor.iManufacturer,
&edge_serial->name[0], MAX_NAME_LEN+1);
+ if (i < 0)
+ i = 0;
edge_serial->name[i++] = ' ';
- get_string(dev, dev->descriptor.iProduct,
+ usb_string(dev, dev->descriptor.iProduct,
&edge_serial->name[i], MAX_NAME_LEN+2 - i);
dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name);
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index 9241d31..feb56a4 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -14,7 +14,7 @@
#ifndef IO_TABLES_H
#define IO_TABLES_H
-static struct usb_device_id edgeport_2port_id_table [] = {
+static const struct usb_device_id edgeport_2port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_421) },
@@ -23,7 +23,7 @@ static struct usb_device_id edgeport_2port_id_table [] = {
{ }
};
-static struct usb_device_id edgeport_4port_id_table [] = {
+static const struct usb_device_id edgeport_4port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) },
@@ -37,7 +37,7 @@ static struct usb_device_id edgeport_4port_id_table [] = {
{ }
};
-static struct usb_device_id edgeport_8port_id_table [] = {
+static const struct usb_device_id edgeport_8port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8I) },
@@ -47,7 +47,7 @@ static struct usb_device_id edgeport_8port_id_table [] = {
{ }
};
-static struct usb_device_id Epic_port_id_table [] = {
+static const struct usb_device_id Epic_port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) },
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) },
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) },
@@ -60,7 +60,7 @@ static struct usb_device_id Epic_port_id_table [] = {
};
/* Devices that this driver supports */
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) },
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index d4cc0f7..aa876f7 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -134,7 +134,7 @@ struct edgeport_serial {
/* Devices that this driver supports */
-static struct usb_device_id edgeport_1port_id_table [] = {
+static const struct usb_device_id edgeport_1port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
@@ -154,7 +154,7 @@ static struct usb_device_id edgeport_1port_id_table [] = {
{ }
};
-static struct usb_device_id edgeport_2port_id_table [] = {
+static const struct usb_device_id edgeport_2port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) },
@@ -177,7 +177,7 @@ static struct usb_device_id edgeport_2port_id_table [] = {
};
/* Devices that this driver supports */
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
@@ -413,11 +413,18 @@ static int write_boot_mem(struct edgeport_serial *serial,
{
int status = 0;
int i;
- __u8 temp;
+ u8 *temp;
/* Must do a read before write */
if (!serial->TiReadI2C) {
- status = read_boot_mem(serial, 0, 1, &temp);
+ temp = kmalloc(1, GFP_KERNEL);
+ if (!temp) {
+ dev_err(&serial->serial->dev->dev,
+ "%s - out of memory\n", __func__);
+ return -ENOMEM;
+ }
+ status = read_boot_mem(serial, 0, 1, temp);
+ kfree(temp);
if (status)
return status;
}
@@ -935,37 +942,47 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
static int i2c_type_bootmode(struct edgeport_serial *serial)
{
int status;
- __u8 data;
+ u8 *data;
+
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data) {
+ dev_err(&serial->serial->dev->dev,
+ "%s - out of memory\n", __func__);
+ return -ENOMEM;
+ }
/* Try to read type 2 */
status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
- DTK_ADDR_SPACE_I2C_TYPE_II, 0, &data, 0x01);
+ DTK_ADDR_SPACE_I2C_TYPE_II, 0, data, 0x01);
if (status)
dbg("%s - read 2 status error = %d", __func__, status);
else
- dbg("%s - read 2 data = 0x%x", __func__, data);
- if ((!status) && (data == UMP5152 || data == UMP3410)) {
+ dbg("%s - read 2 data = 0x%x", __func__, *data);
+ if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
dbg("%s - ROM_TYPE_II", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
- return 0;
+ goto out;
}
/* Try to read type 3 */
status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
- DTK_ADDR_SPACE_I2C_TYPE_III, 0, &data, 0x01);
+ DTK_ADDR_SPACE_I2C_TYPE_III, 0, data, 0x01);
if (status)
dbg("%s - read 3 status error = %d", __func__, status);
else
- dbg("%s - read 2 data = 0x%x", __func__, data);
- if ((!status) && (data == UMP5152 || data == UMP3410)) {
+ dbg("%s - read 2 data = 0x%x", __func__, *data);
+ if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
dbg("%s - ROM_TYPE_III", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III;
- return 0;
+ goto out;
}
dbg("%s - Unknown", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
- return -ENODEV;
+ status = -ENODEV;
+out:
+ kfree(data);
+ return status;
}
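
The conversion above also moves to single-exit cleanup: every path funnels through one kfree(), so no early return can leak the probe buffer. A sketch of the idiom, where try_type() is a hypothetical helper standing in for the two ti_vread_sync() probes:

static int probe_i2c_type(struct usb_device *dev)
{
	u8 *data;
	int status;

	data = kmalloc(1, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = try_type(dev, data);	/* hypothetical probe helper */
	if (!status)
		goto out;		/* success: fall through to cleanup */
	status = -ENODEV;
out:
	kfree(data);			/* the one and only free */
	return status;
}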
static int bulk_xfer(struct usb_serial *serial, void *buffer,
@@ -1113,7 +1130,7 @@ static int download_fw(struct edgeport_serial *serial)
I2C_DESC_TYPE_FIRMWARE_BASIC, rom_desc);
if (start_address != 0) {
struct ti_i2c_firmware_rec *firmware_version;
- __u8 record;
+ u8 *record;
dbg("%s - Found Type FIRMWARE (Type 2) record",
__func__);
@@ -1165,6 +1182,15 @@ static int download_fw(struct edgeport_serial *serial)
OperationalMajorVersion,
OperationalMinorVersion);
+ record = kmalloc(1, GFP_KERNEL);
+ if (!record) {
+ dev_err(dev, "%s - out of memory.\n",
+ __func__);
+ kfree(firmware_version);
+ kfree(rom_desc);
+ kfree(ti_manuf_desc);
+ return -ENOMEM;
+ }
/* In order to update the I2C firmware we must
* change the type 2 record to type 0xF2. This
* will force the UMP to come up in Boot Mode.
@@ -1177,13 +1203,14 @@ static int download_fw(struct edgeport_serial *serial)
* firmware will update the record type from
* 0xf2 to 0x02.
*/
- record = I2C_DESC_TYPE_FIRMWARE_BLANK;
+ *record = I2C_DESC_TYPE_FIRMWARE_BLANK;
/* Change the I2C Firmware record type to
0xf2 to trigger an update */
status = write_rom(serial, start_address,
- sizeof(record), &record);
+ sizeof(*record), record);
if (status) {
+ kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
@@ -1196,19 +1223,21 @@ static int download_fw(struct edgeport_serial *serial)
*/
status = read_rom(serial,
start_address,
- sizeof(record),
- &record);
+ sizeof(*record),
+ record);
if (status) {
+ kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
- if (record != I2C_DESC_TYPE_FIRMWARE_BLANK) {
+ if (*record != I2C_DESC_TYPE_FIRMWARE_BLANK) {
dev_err(dev,
"%s - error resetting device\n",
__func__);
+ kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
@@ -1226,6 +1255,7 @@ static int download_fw(struct edgeport_serial *serial)
__func__, status);
/* return an error on purpose. */
+ kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
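
The surrounding hunks implement the boot-mode trigger described in the comments: blank the type-2 firmware record (0xf2), read it back to verify the write stuck, and let the subsequent reset bring the UMP up in boot mode. A condensed sketch using the driver's write_rom()/read_rom() helpers as they appear above:

static int force_boot_mode(struct edgeport_serial *serial, int start_address)
{
	u8 *record;
	int status;

	record = kmalloc(1, GFP_KERNEL);	/* DMA-able record byte */
	if (!record)
		return -ENOMEM;

	*record = I2C_DESC_TYPE_FIRMWARE_BLANK;	/* 0xf2 triggers update */
	status = write_rom(serial, start_address, 1, record);
	if (!status) {
		status = read_rom(serial, start_address, 1, record);
		if (!status && *record != I2C_DESC_TYPE_FIRMWARE_BLANK)
			status = -ENODEV;	/* write did not stick */
	}

	kfree(record);
	return status;
}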
@@ -1686,7 +1716,7 @@ static void edge_interrupt_callback(struct urb *urb)
case TIUMP_INTERRUPT_CODE_MSR: /* MSR */
/* Copy MSR from UMP */
msr = data[1];
- dbg("%s - ===== Port %u MSR Status = %02x ======\n",
+ dbg("%s - ===== Port %u MSR Status = %02x ======",
__func__, port_number, msr);
handle_new_msr(edge_port, msr);
break;
@@ -1790,7 +1820,6 @@ static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
{
int queued;
- tty_buffer_request_room(tty, length);
queued = tty_insert_flip_string(tty, data, length);
if (queued < length)
dev_err(dev, "%s - dropping data, %d bytes lost\n",
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index d6231c3..3fea929 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -747,7 +747,6 @@ static void ipaq_read_bulk_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
bytes_in += urb->actual_length;
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 727d323..e1d0784 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -134,7 +134,7 @@ enum {
#define IPW_WANTS_TO_SEND 0x30
-static struct usb_device_id usb_ipw_ids[] = {
+static const struct usb_device_id usb_ipw_ids[] = {
{ USB_DEVICE(IPW_VID, IPW_PID) },
{ },
};
@@ -172,7 +172,6 @@ static void ipw_read_bulk_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 95d8d26..4a0f519 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -100,7 +100,7 @@ static u8 ir_baud;
static u8 ir_xbof;
static u8 ir_add_bof;
-static struct usb_device_id ir_id_table[] = {
+static const struct usb_device_id ir_id_table[] = {
{ USB_DEVICE(0x050f, 0x0180) }, /* KC Technology, KC-180 */
{ USB_DEVICE(0x08e9, 0x0100) }, /* XTNDAccess */
{ USB_DEVICE(0x09c4, 0x0011) }, /* ACTiSys ACT-IR2000U */
@@ -445,11 +445,6 @@ static void ir_read_bulk_callback(struct urb *urb)
dbg("%s - port %d", __func__, port->number);
- if (!port->port.count) {
- dbg("%s - port closed.", __func__);
- return;
- }
-
switch (status) {
case 0: /* Successful */
/*
@@ -462,10 +457,8 @@ static void ir_read_bulk_callback(struct urb *urb)
usb_serial_debug_data(debug, &port->dev, __func__,
urb->actual_length, data);
tty = tty_port_tty_get(&port->port);
- if (tty_buffer_request_room(tty, urb->actual_length - 1)) {
- tty_insert_flip_string(tty, data+1, urb->actual_length - 1);
- tty_flip_buffer_push(tty);
- }
+ tty_insert_flip_string(tty, data+1, urb->actual_length - 1);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
/*
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index e6e02b1..43f13cf 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -43,7 +43,7 @@ static int debug;
#define DRIVER_VERSION "v0.11"
#define DRIVER_DESC "Infinity USB Unlimited Phoenix driver"
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{USB_DEVICE(IUU_USB_VENDOR_ID, IUU_USB_PRODUCT_ID)},
{} /* Terminating entry */
};
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index f8c4b07..297163c 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -464,13 +464,9 @@ static void usa26_indat_callback(struct urb *urb)
/* Resubmit urb so we continue receiving */
urb->dev = port->serial->dev;
- if (port->port.count) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err != 0)
- dbg("%s - resubmit read urb failed. (%d)",
- __func__, err);
- }
- return;
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err != 0)
+ dbg("%s - resubmit read urb failed. (%d)", __func__, err);
}
/* Outdat handling is common for all devices */
@@ -483,8 +479,7 @@ static void usa2x_outdat_callback(struct urb *urb)
p_priv = usb_get_serial_port_data(port);
dbg("%s - urb %d", __func__, urb == p_priv->out_urbs[1]);
- if (port->port.count)
- usb_serial_port_softint(port);
+ usb_serial_port_softint(port);
}
static void usa26_inack_callback(struct urb *urb)
@@ -615,12 +610,10 @@ static void usa28_indat_callback(struct urb *urb)
/* Resubmit urb so we continue receiving */
urb->dev = port->serial->dev;
- if (port->port.count) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err != 0)
- dbg("%s - resubmit read urb failed. (%d)",
- __func__, err);
- }
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err != 0)
+ dbg("%s - resubmit read urb failed. (%d)",
+ __func__, err);
p_priv->in_flip ^= 1;
urb = p_priv->in_urbs[p_priv->in_flip];
@@ -856,12 +849,9 @@ static void usa49_indat_callback(struct urb *urb)
/* Resubmit urb so we continue receiving */
urb->dev = port->serial->dev;
- if (port->port.count) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err != 0)
- dbg("%s - resubmit read urb failed. (%d)",
- __func__, err);
- }
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err != 0)
+ dbg("%s - resubmit read urb failed. (%d)", __func__, err);
}
static void usa49wg_indat_callback(struct urb *urb)
@@ -904,11 +894,7 @@ static void usa49wg_indat_callback(struct urb *urb)
/* no error on any byte */
i++;
for (x = 1; x < len ; ++x)
- if (port->port.count)
- tty_insert_flip_char(tty,
- data[i++], 0);
- else
- i++;
+ tty_insert_flip_char(tty, data[i++], 0);
} else {
/*
* some bytes had errors, every byte has status
@@ -922,14 +908,12 @@ static void usa49wg_indat_callback(struct urb *urb)
if (stat & RXERROR_PARITY)
flag |= TTY_PARITY;
/* XXX should handle break (0x10) */
- if (port->port.count)
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(tty,
data[i+1], flag);
i += 2;
}
}
- if (port->port.count)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
}
@@ -1013,13 +997,9 @@ static void usa90_indat_callback(struct urb *urb)
/* Resubmit urb so we continue receiving */
urb->dev = port->serial->dev;
- if (port->port.count) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err != 0)
- dbg("%s - resubmit read urb failed. (%d)",
- __func__, err);
- }
- return;
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err != 0)
+ dbg("%s - resubmit read urb failed. (%d)", __func__, err);
}
@@ -2418,8 +2398,7 @@ static int keyspan_usa90_send_setup(struct usb_serial *serial,
msg.portEnabled = 0;
/* Sending intermediate configs */
else {
- if (port->port.count)
- msg.portEnabled = 1;
+ msg.portEnabled = 1;
msg.txBreak = (p_priv->break_on);
}
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 30771e5..bf3297d 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -456,7 +456,7 @@ static const struct keyspan_device_details *keyspan_devices[] = {
NULL,
};
-static struct usb_device_id keyspan_ids_combined[] = {
+static const struct usb_device_id keyspan_ids_combined[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_pre_product_id) },
@@ -497,7 +497,7 @@ static struct usb_driver keyspan_driver = {
};
/* usb_device_id table for the pre-firmware download keyspan devices */
-static struct usb_device_id keyspan_pre_ids[] = {
+static const struct usb_device_id keyspan_pre_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_pre_product_id) },
@@ -513,7 +513,7 @@ static struct usb_device_id keyspan_pre_ids[] = {
{ } /* Terminating entry */
};
-static struct usb_device_id keyspan_1port_ids[] = {
+static const struct usb_device_id keyspan_1port_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_product_id) },
@@ -524,7 +524,7 @@ static struct usb_device_id keyspan_1port_ids[] = {
{ } /* Terminating entry */
};
-static struct usb_device_id keyspan_2port_ids[] = {
+static const struct usb_device_id keyspan_2port_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) },
@@ -532,7 +532,7 @@ static struct usb_device_id keyspan_2port_ids[] = {
{ } /* Terminating entry */
};
-static struct usb_device_id keyspan_4port_ids[] = {
+static const struct usb_device_id keyspan_4port_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)},
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wg_product_id)},
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 1296a09..185fe9a 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -125,7 +125,7 @@ struct keyspan_pda_private {
#define ENTREGRA_VENDOR_ID 0x1645
#define ENTREGRA_FAKE_ID 0x8093
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
#ifdef KEYSPAN
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) },
#endif
@@ -147,20 +147,20 @@ static struct usb_driver keyspan_pda_driver = {
.no_dynamic_id = 1,
};
-static struct usb_device_id id_table_std [] = {
+static const struct usb_device_id id_table_std[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
{ } /* Terminating entry */
};
#ifdef KEYSPAN
-static struct usb_device_id id_table_fake [] = {
+static const struct usb_device_id id_table_fake[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) },
{ } /* Terminating entry */
};
#endif
#ifdef XIRCOM
-static struct usb_device_id id_table_fake_xircom [] = {
+static const struct usb_device_id id_table_fake_xircom[] = {
{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
{ USB_DEVICE(ENTREGRA_VENDOR_ID, ENTREGRA_FAKE_ID) },
{ }
@@ -429,13 +429,20 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
unsigned char *value)
{
int rc;
- unsigned char data;
+ u8 *data;
+
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
3, /* get pins */
USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
- 0, 0, &data, 1, 2000);
+ 0, 0, data, 1, 2000);
if (rc >= 0)
- *value = data;
+ *value = *data;
+
+ kfree(data);
return rc;
}
@@ -543,7 +550,14 @@ static int keyspan_pda_write(struct tty_struct *tty,
device how much room it really has. This is done only on
scheduler time, since usb_control_msg() sleeps. */
if (count > priv->tx_room && !in_interrupt()) {
- unsigned char room;
+ u8 *room;
+
+ room = kmalloc(1, GFP_KERNEL);
+ if (!room) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
rc = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
6, /* write_room */
@@ -551,9 +565,14 @@ static int keyspan_pda_write(struct tty_struct *tty,
| USB_DIR_IN,
0, /* value: 0 means "remaining room" */
0, /* index */
- &room,
+ room,
1,
2000);
+ if (rc > 0) {
+ dbg(" roomquery says %d", *room);
+ priv->tx_room = *room;
+ }
+ kfree(room);
if (rc < 0) {
dbg(" roomquery failed");
goto exit;
@@ -563,8 +582,6 @@ static int keyspan_pda_write(struct tty_struct *tty,
rc = -EIO; /* device didn't return any data */
goto exit;
}
- dbg(" roomquery says %d", room);
- priv->tx_room = room;
}
if (count > priv->tx_room) {
/* we're about to completely fill the Tx buffer, so
@@ -684,18 +701,22 @@ static int keyspan_pda_open(struct tty_struct *tty,
struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- unsigned char room;
+ u8 *room;
int rc = 0;
struct keyspan_pda_private *priv;
/* find out how much room is in the Tx ring */
+ room = kmalloc(1, GFP_KERNEL);
+ if (!room)
+ return -ENOMEM;
+
rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
6, /* write_room */
USB_TYPE_VENDOR | USB_RECIP_INTERFACE
| USB_DIR_IN,
0, /* value */
0, /* index */
- &room,
+ room,
1,
2000);
if (rc < 0) {
@@ -708,8 +729,8 @@ static int keyspan_pda_open(struct tty_struct *tty,
goto error;
}
priv = usb_get_serial_port_data(port);
- priv->tx_room = room;
- priv->tx_throttled = room ? 0 : 1;
+ priv->tx_room = *room;
+ priv->tx_throttled = *room ? 0 : 1;
/*Start reading from the device*/
port->interrupt_in_urb->dev = serial->dev;
@@ -718,8 +739,8 @@ static int keyspan_pda_open(struct tty_struct *tty,
dbg("%s - usb_submit_urb(read int) failed", __func__);
goto error;
}
-
error:
+ kfree(room);
return rc;
}
static void keyspan_pda_close(struct usb_serial_port *port)
@@ -789,6 +810,13 @@ static int keyspan_pda_fake_startup(struct usb_serial *serial)
return 1;
}
+#ifdef KEYSPAN
+MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
+#endif
+#ifdef XIRCOM
+MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
+#endif
+
static int keyspan_pda_startup(struct usb_serial *serial)
{
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 3a78738..8eef91b 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -94,7 +94,7 @@ static int klsi_105_tiocmset(struct tty_struct *tty, struct file *file,
/*
* All of the device info needed for the KLSI converters.
*/
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PALMCONNECT_VID, PALMCONNECT_PID) },
{ USB_DEVICE(KLSI_VID, KLSI_KL5KUSB105D_PID) },
{ } /* Terminating entry */
@@ -212,10 +212,19 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
unsigned long *line_state_p)
{
int rc;
- __u8 status_buf[KLSI_STATUSBUF_LEN] = { -1, -1};
+ u8 *status_buf;
__u16 status;
dev_info(&port->serial->dev->dev, "sending SIO Poll request\n");
+
+ status_buf = kmalloc(KLSI_STATUSBUF_LEN, GFP_KERNEL);
+ if (!status_buf) {
+ dev_err(&port->dev, "%s - out of memory for status buffer.\n",
+ __func__);
+ return -ENOMEM;
+ }
+ status_buf[0] = 0xff;
+ status_buf[1] = 0xff;
rc = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
KL5KUSB105A_SIO_POLL,
@@ -236,6 +245,8 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
*line_state_p = klsi_105_status2linestate(status);
}
+
+ kfree(status_buf);
return rc;
}
@@ -364,7 +375,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
int rc;
int i;
unsigned long line_state;
- struct klsi_105_port_settings cfg;
+ struct klsi_105_port_settings *cfg;
unsigned long flags;
dbg("%s port %d", __func__, port->number);
@@ -376,12 +387,18 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
* Then read the modem line control and store values in
* priv->line_state.
*/
- cfg.pktlen = 5;
- cfg.baudrate = kl5kusb105a_sio_b9600;
- cfg.databits = kl5kusb105a_dtb_8;
- cfg.unknown1 = 0;
- cfg.unknown2 = 1;
- klsi_105_chg_port_settings(port, &cfg);
+ cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ dev_err(&port->dev, "%s - out of memory for config buffer.\n",
+ __func__);
+ return -ENOMEM;
+ }
+ cfg->pktlen = 5;
+ cfg->baudrate = kl5kusb105a_sio_b9600;
+ cfg->databits = kl5kusb105a_dtb_8;
+ cfg->unknown1 = 0;
+ cfg->unknown2 = 1;
+ klsi_105_chg_port_settings(port, cfg);
/* set up termios structure */
spin_lock_irqsave(&priv->lock, flags);
@@ -391,11 +408,11 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
priv->termios.c_lflag = tty->termios->c_lflag;
for (i = 0; i < NCCS; i++)
priv->termios.c_cc[i] = tty->termios->c_cc[i];
- priv->cfg.pktlen = cfg.pktlen;
- priv->cfg.baudrate = cfg.baudrate;
- priv->cfg.databits = cfg.databits;
- priv->cfg.unknown1 = cfg.unknown1;
- priv->cfg.unknown2 = cfg.unknown2;
+ priv->cfg.pktlen = cfg->pktlen;
+ priv->cfg.baudrate = cfg->baudrate;
+ priv->cfg.databits = cfg->databits;
+ priv->cfg.unknown1 = cfg->unknown1;
+ priv->cfg.unknown2 = cfg->unknown2;
spin_unlock_irqrestore(&priv->lock, flags);
/* READ_ON and urb submission */
@@ -441,6 +458,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
retval = rc;
exit:
+ kfree(cfg);
return retval;
} /* klsi_105_open */
@@ -681,7 +699,6 @@ static void klsi_105_read_bulk_callback(struct urb *urb)
bytes_sent = urb->actual_length - 2;
}
- tty_buffer_request_room(tty, bytes_sent);
tty_insert_flip_string(tty, data + 2, bytes_sent);
tty_flip_buffer_push(tty);
tty_kref_put(tty);
@@ -714,10 +731,17 @@ static void klsi_105_set_termios(struct tty_struct *tty,
unsigned int old_iflag = old_termios->c_iflag;
unsigned int cflag = tty->termios->c_cflag;
unsigned int old_cflag = old_termios->c_cflag;
- struct klsi_105_port_settings cfg;
+ struct klsi_105_port_settings *cfg;
unsigned long flags;
speed_t baud;
+ cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ dev_err(&port->dev, "%s - out of memory for config buffer.\n",
+ __func__);
+ return;
+ }
+
/* lock while we are modifying the settings */
spin_lock_irqsave(&priv->lock, flags);
@@ -793,11 +817,11 @@ static void klsi_105_set_termios(struct tty_struct *tty,
case CS5:
dbg("%s - 5 bits/byte not supported", __func__);
spin_unlock_irqrestore(&priv->lock, flags);
- return ;
+ goto err;
case CS6:
dbg("%s - 6 bits/byte not supported", __func__);
spin_unlock_irqrestore(&priv->lock, flags);
- return ;
+ goto err;
case CS7:
priv->cfg.databits = kl5kusb105a_dtb_7;
break;
@@ -856,11 +880,13 @@ static void klsi_105_set_termios(struct tty_struct *tty,
#endif
;
}
- memcpy(&cfg, &priv->cfg, sizeof(cfg));
+ memcpy(cfg, &priv->cfg, sizeof(*cfg));
spin_unlock_irqrestore(&priv->lock, flags);
/* now commit changes to device */
- klsi_105_chg_port_settings(port, &cfg);
+ klsi_105_chg_port_settings(port, cfg);
+err:
+ kfree(cfg);
} /* klsi_105_set_termios */
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 45ea694..c113a2a 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -86,7 +86,7 @@ static void kobil_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
static void kobil_init_termios(struct tty_struct *tty);
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_B_PRODUCT_ID) },
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_K_PRODUCT_ID) },
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_USBTWIN_PRODUCT_ID) },
@@ -388,7 +388,6 @@ static void kobil_read_int_callback(struct urb *urb)
*/
/* END DEBUG */
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
@@ -624,7 +623,6 @@ static void kobil_set_termios(struct tty_struct *tty,
unsigned short urb_val = 0;
int c_cflag = tty->termios->c_cflag;
speed_t speed;
- void *settings;
priv = usb_get_serial_port_data(port);
if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID ||
@@ -647,25 +645,13 @@ static void kobil_set_termios(struct tty_struct *tty,
}
urb_val |= (c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits :
SUSBCR_SPASB_1StopBit;
-
- settings = kzalloc(50, GFP_KERNEL);
- if (!settings)
- return;
-
- sprintf(settings, "%d ", speed);
-
if (c_cflag & PARENB) {
- if (c_cflag & PARODD) {
+ if (c_cflag & PARODD)
urb_val |= SUSBCR_SPASB_OddParity;
- strcat(settings, "Odd Parity");
- } else {
+ else
urb_val |= SUSBCR_SPASB_EvenParity;
- strcat(settings, "Even Parity");
- }
- } else {
+ } else
urb_val |= SUSBCR_SPASB_NoParity;
- strcat(settings, "No Parity");
- }
tty->termios->c_cflag &= ~CMSPAR;
tty_encode_baud_rate(tty, speed, speed);
@@ -675,11 +661,10 @@ static void kobil_set_termios(struct tty_struct *tty,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
urb_val,
0,
- settings,
+ NULL,
0,
KOBIL_TIMEOUT
);
- kfree(settings);
}
static int kobil_ioctl(struct tty_struct *tty, struct file *file,
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index cd009cb..2849f8c 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -75,6 +75,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
+#include <asm/unaligned.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "mct_u232.h"
@@ -110,7 +111,7 @@ static void mct_u232_unthrottle(struct tty_struct *tty);
/*
* All of the device info needed for the MCT USB-RS232 converter.
*/
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(MCT_U232_VID, MCT_U232_PID) },
{ USB_DEVICE(MCT_U232_VID, MCT_U232_SITECOM_PID) },
{ USB_DEVICE(MCT_U232_VID, MCT_U232_DU_H3SP_PID) },
@@ -231,19 +232,22 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial,
static int mct_u232_set_baud_rate(struct tty_struct *tty,
struct usb_serial *serial, struct usb_serial_port *port, speed_t value)
{
- __le32 divisor;
+ unsigned int divisor;
int rc;
- unsigned char zero_byte = 0;
+ unsigned char *buf;
unsigned char cts_enable_byte = 0;
speed_t speed;
- divisor = cpu_to_le32(mct_u232_calculate_baud_rate(serial, value,
- &speed));
+ buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ divisor = mct_u232_calculate_baud_rate(serial, value, &speed);
+	put_unaligned_le32(divisor, buf);
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_BAUD_RATE_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &divisor, MCT_U232_SET_BAUD_RATE_SIZE,
+ 0, 0, buf, MCT_U232_SET_BAUD_RATE_SIZE,
WDR_TIMEOUT);
if (rc < 0) /*FIXME: What value speed results */
dev_err(&port->dev, "Set BAUD RATE %d failed (error = %d)\n",
@@ -269,10 +273,11 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty,
a device which is not asserting 'CTS'.
*/
+ buf[0] = 0;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_UNKNOWN1_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &zero_byte, MCT_U232_SET_UNKNOWN1_SIZE,
+ 0, 0, buf, MCT_U232_SET_UNKNOWN1_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&port->dev, "Sending USB device request code %d "
@@ -284,30 +289,40 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty,
dbg("set_baud_rate: send second control message, data = %02X",
cts_enable_byte);
+ buf[0] = cts_enable_byte;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_CTS_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &cts_enable_byte, MCT_U232_SET_CTS_SIZE,
+ 0, 0, buf, MCT_U232_SET_CTS_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&port->dev, "Sending USB device request code %d "
"failed (error = %d)\n", MCT_U232_SET_CTS_REQUEST, rc);
+ kfree(buf);
return rc;
} /* mct_u232_set_baud_rate */
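
One note on the divisor serialization above: put_unaligned_le32() takes a CPU-order value and stores it little-endian, so wrapping its argument in cpu_to_le32() would swap the bytes twice on big-endian machines. A sketch of the intended store (helper name is illustrative):

#include <asm/unaligned.h>

static void mct_u232_store_divisor(unsigned char *buf, u32 divisor)
{
	/* Stores little-endian regardless of host endianness; no
	 * explicit cpu_to_le32() conversion is needed (or wanted). */
	put_unaligned_le32(divisor, buf);
}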
static int mct_u232_set_line_ctrl(struct usb_serial *serial, unsigned char lcr)
{
int rc;
+ unsigned char *buf;
+
+ buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ buf[0] = lcr;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_LINE_CTRL_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &lcr, MCT_U232_SET_LINE_CTRL_SIZE,
+ 0, 0, buf, MCT_U232_SET_LINE_CTRL_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&serial->dev->dev,
"Set LINE CTRL 0x%x failed (error = %d)\n", lcr, rc);
dbg("set_line_ctrl: 0x%x", lcr);
+ kfree(buf);
return rc;
} /* mct_u232_set_line_ctrl */
@@ -315,23 +330,31 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial,
unsigned int control_state)
{
int rc;
- unsigned char mcr = MCT_U232_MCR_NONE;
+ unsigned char mcr;
+ unsigned char *buf;
+
+ buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ mcr = MCT_U232_MCR_NONE;
if (control_state & TIOCM_DTR)
mcr |= MCT_U232_MCR_DTR;
if (control_state & TIOCM_RTS)
mcr |= MCT_U232_MCR_RTS;
+ buf[0] = mcr;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_MODEM_CTRL_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &mcr, MCT_U232_SET_MODEM_CTRL_SIZE,
+ 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&serial->dev->dev,
"Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
+ kfree(buf);
return rc;
} /* mct_u232_set_modem_ctrl */
@@ -339,17 +362,27 @@ static int mct_u232_get_modem_stat(struct usb_serial *serial,
unsigned char *msr)
{
int rc;
+ unsigned char *buf;
+
+ buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+ if (buf == NULL) {
+ *msr = 0;
+ return -ENOMEM;
+ }
rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
MCT_U232_GET_MODEM_STAT_REQUEST,
MCT_U232_GET_REQUEST_TYPE,
- 0, 0, msr, MCT_U232_GET_MODEM_STAT_SIZE,
+ 0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE,
WDR_TIMEOUT);
if (rc < 0) {
dev_err(&serial->dev->dev,
"Get MODEM STATus failed (error = %d)\n", rc);
*msr = 0;
+ } else {
+ *msr = buf[0];
}
dbg("get_modem_stat: 0x%x", *msr);
+ kfree(buf);
return rc;
} /* mct_u232_get_modem_stat */
diff --git a/drivers/usb/serial/mct_u232.h b/drivers/usb/serial/mct_u232.h
index 07b6bec..7417d5c 100644
--- a/drivers/usb/serial/mct_u232.h
+++ b/drivers/usb/serial/mct_u232.h
@@ -73,6 +73,8 @@
#define MCT_U232_SET_CTS_REQUEST 12
#define MCT_U232_SET_CTS_SIZE 1
+#define MCT_U232_MAX_SIZE       4  /* maximum of the MCT_U232_*_SIZE transfer lengths */
+
/*
* Baud rate (divisor)
* Actually, there are two of them, MCT website calls them "Philips solution"
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 763e32a..0d47f2c 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -81,12 +81,15 @@ struct moschip_serial {
static int debug;
+static struct usb_serial_driver moschip7720_2port_driver;
+
#define USB_VENDOR_ID_MOSCHIP 0x9710
#define MOSCHIP_DEVICE_ID_7720 0x7720
#define MOSCHIP_DEVICE_ID_7715 0x7715
-static struct usb_device_id moschip_port_id_table[] = {
+static const struct usb_device_id moschip_port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7720) },
+ { USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7715) },
{ } /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, moschip_port_id_table);
@@ -106,7 +109,7 @@ static void mos7720_interrupt_callback(struct urb *urb)
__u8 sp1;
__u8 sp2;
- dbg("%s", " : Entering\n");
+ dbg(" : Entering");
switch (status) {
case 0:
@@ -186,6 +189,75 @@ exit:
}
/*
+ * mos7715_interrupt_callback
+ * this is the 7715's callback function for when we have received data on
+ * the interrupt endpoint.
+ */
+static void mos7715_interrupt_callback(struct urb *urb)
+{
+ int result;
+ int length;
+ int status = urb->status;
+ __u8 *data;
+ __u8 iir;
+
+ switch (status) {
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ dbg("%s - urb shutting down with status: %d", __func__,
+ status);
+ return;
+ default:
+ dbg("%s - nonzero urb status received: %d", __func__,
+ status);
+ goto exit;
+ }
+
+ length = urb->actual_length;
+ data = urb->transfer_buffer;
+
+ /* Structure of data from 7715 device:
+ * Byte 1: IIR of the serial port
+ * Byte 2: unused
+ * Byte 3: DSR of the parallel port
+ * Byte 4: FIFO status for both */
+
+ if (unlikely(length != 4)) {
+ dbg("Wrong data !!!");
+ return;
+ }
+
+ iir = data[0];
+ if (!(iir & 0x01)) { /* serial port interrupt pending */
+ switch (iir & 0x0f) {
+ case SERIAL_IIR_RLS:
+ dbg("Serial Port: Receiver status error or address "
+ "bit detected in 9-bit mode\n");
+ break;
+ case SERIAL_IIR_CTI:
+ dbg("Serial Port: Receiver time out");
+ break;
+ case SERIAL_IIR_MS:
+ dbg("Serial Port: Modem status change");
+ break;
+ }
+ }
+
+exit:
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result)
+ dev_err(&urb->dev->dev,
+ "%s - Error %d submitting control urb\n",
+ __func__, result);
+ return;
+}
+
+/*
* mos7720_bulk_in_callback
* this is the callback function for when we have received data on the
* bulk in endpoint.
@@ -206,7 +278,7 @@ static void mos7720_bulk_in_callback(struct urb *urb)
mos7720_port = urb->context;
if (!mos7720_port) {
- dbg("%s", "NULL mos7720_port pointer \n");
+ dbg("NULL mos7720_port pointer");
return ;
}
@@ -218,7 +290,6 @@ static void mos7720_bulk_in_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
@@ -275,17 +346,15 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
* this function will be used for sending command to device
*/
static int send_mos_cmd(struct usb_serial *serial, __u8 request, __u16 value,
- __u16 index, void *data)
+ __u16 index, u8 *data)
{
int status;
- unsigned int pipe;
+ u8 *buf;
u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
- __u8 requesttype;
- __u16 size = 0x0000;
if (value < MOS_MAX_PORT) {
if (product == MOSCHIP_DEVICE_ID_7715)
- value = value*0x100+0x100;
+ value = 0x0200; /* identifies the 7715's serial port */
else
value = value*0x100+0x200;
} else {
@@ -298,27 +367,58 @@ static int send_mos_cmd(struct usb_serial *serial, __u8 request, __u16 value,
}
if (request == MOS_WRITE) {
- request = (__u8)MOS_WRITE;
- requesttype = (__u8)0x40;
- value = value + (__u16)*((unsigned char *)data);
- data = NULL;
- pipe = usb_sndctrlpipe(serial->dev, 0);
+ value = value + *data;
+ status = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0), MOS_WRITE,
+ 0x40, value, index, NULL, 0, MOS_WDR_TIMEOUT);
} else {
- request = (__u8)MOS_READ;
- requesttype = (__u8)0xC0;
- size = 0x01;
- pipe = usb_rcvctrlpipe(serial->dev, 0);
+ buf = kmalloc(1, GFP_KERNEL);
+ if (!buf) {
+ status = -ENOMEM;
+ goto out;
+ }
+ status = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0), MOS_READ,
+ 0xc0, value, index, buf, 1, MOS_WDR_TIMEOUT);
+ *data = *buf;
+ kfree(buf);
}
-
- status = usb_control_msg(serial->dev, pipe, request, requesttype,
- value, index, data, size, MOS_WDR_TIMEOUT);
-
+out:
if (status < 0)
- dbg("Command Write failed Value %x index %x\n", value, index);
+ dbg("Command Write failed Value %x index %x", value, index);
return status;
}
+
+/*
+ * mos77xx_probe
+ * this function installs the appropriate read interrupt endpoint callback
+ * depending on whether the device is a 7720 or 7715, thus avoiding costly
+ * run-time checks in the high-frequency callback routine itself.
+ */
+static int mos77xx_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
+{
+ if (id->idProduct == MOSCHIP_DEVICE_ID_7715)
+ moschip7720_2port_driver.read_int_callback =
+ mos7715_interrupt_callback;
+ else
+ moschip7720_2port_driver.read_int_callback =
+ mos7720_interrupt_callback;
+
+ return 0;
+}
+
+static int mos77xx_calc_num_ports(struct usb_serial *serial)
+{
+ u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ if (product == MOSCHIP_DEVICE_ID_7715)
+ return 1;
+
+ return 2;
+}
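For context, the usb-serial core consults both of these hooks early in usb_serial_probe(), before any ports or urbs are set up; the flow is roughly as follows (a simplified sketch of the core, not code from this patch; the error label is hypothetical):

	/* inside usb_serial_probe(), simplified */
	if (type->probe) {
		retval = type->probe(serial, id);
		if (retval)
			goto err;	/* hypothetical label */
	}
	if (type->calc_num_ports)
		num_ports = type->calc_num_ports(serial);

so the read_int_callback assigned in mos77xx_probe() is in place before the first interrupt urb is submitted.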
+
static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial;
@@ -390,7 +490,7 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
*/
port_number = port->number - port->serial->minor;
send_mos_cmd(port->serial, MOS_READ, port_number, UART_LSR, &data);
- dbg("SS::%p LSR:%x\n", mos7720_port, data);
+ dbg("SS::%p LSR:%x", mos7720_port, data);
dbg("Check:Sending Command ..........");
@@ -729,7 +829,7 @@ static void mos7720_throttle(struct tty_struct *tty)
struct moschip_port *mos7720_port;
int status;
- dbg("%s- port %d\n", __func__, port->number);
+ dbg("%s- port %d", __func__, port->number);
mos7720_port = usb_get_serial_port_data(port);
@@ -1208,7 +1308,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
return;
}
- dbg("%s\n", "setting termios - ASPIRE");
+ dbg("setting termios - ASPIRE");
cflag = tty->termios->c_cflag;
@@ -1226,7 +1326,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
change_port_settings(tty, mos7720_port, old_termios);
if (!port->read_urb) {
- dbg("%s", "URB KILLED !!!!!\n");
+ dbg("URB KILLED !!!!!");
return;
}
@@ -1495,6 +1595,7 @@ static int mos7720_startup(struct usb_serial *serial)
struct usb_device *dev;
int i;
char data;
+ u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
dbg("%s: Entering ..........", __func__);
@@ -1514,6 +1615,29 @@ static int mos7720_startup(struct usb_serial *serial)
usb_set_serial_data(serial, mos7720_serial);
+ /*
+ * The 7715 uses the first bulk in/out endpoint pair for the parallel
+ * port, and the second for the serial port. Because the usbserial core
+ * assumes both pairs are serial ports, we must engage in a bit of
+ * subterfuge and swap the pointers for ports 0 and 1 in order to make
+ * port 0 point to the serial port. However, both moschip devices use a
+ * single interrupt-in endpoint for both ports (as mentioned a little
+ * further down), and this endpoint was assigned to port 0. So after
+ * the swap, we must copy the interrupt endpoint elements from port 1
+ * (as newly assigned) to port 0, and null out port 1 pointers.
+ */
+ if (product == MOSCHIP_DEVICE_ID_7715) {
+ struct usb_serial_port *tmp = serial->port[0];
+ serial->port[0] = serial->port[1];
+ serial->port[1] = tmp;
+ serial->port[0]->interrupt_in_urb = tmp->interrupt_in_urb;
+ serial->port[0]->interrupt_in_buffer = tmp->interrupt_in_buffer;
+ serial->port[0]->interrupt_in_endpointAddress =
+ tmp->interrupt_in_endpointAddress;
+ serial->port[1]->interrupt_in_urb = NULL;
+ serial->port[1]->interrupt_in_buffer = NULL;
+ }
+
/* we set up the pointers to the endpoints in the mos7720_open *
* function, as the structures aren't created yet. */
@@ -1529,7 +1653,7 @@ static int mos7720_startup(struct usb_serial *serial)
/* Initialize all port interrupt end point to port 0 int
* endpoint. Our device has only one interrupt endpoint
- * comman to all ports */
+ * common to all ports */
serial->port[i]->interrupt_in_endpointAddress =
serial->port[0]->interrupt_in_endpointAddress;
@@ -1584,11 +1708,12 @@ static struct usb_serial_driver moschip7720_2port_driver = {
.description = "Moschip 2 port adapter",
.usb_driver = &usb_driver,
.id_table = moschip_port_id_table,
- .num_ports = 2,
+ .calc_num_ports = mos77xx_calc_num_ports,
.open = mos7720_open,
.close = mos7720_close,
.throttle = mos7720_throttle,
.unthrottle = mos7720_unthrottle,
+ .probe = mos77xx_probe,
.attach = mos7720_startup,
.release = mos7720_release,
.ioctl = mos7720_ioctl,
@@ -1600,7 +1725,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
.chars_in_buffer = mos7720_chars_in_buffer,
.break_ctl = mos7720_break,
.read_bulk_callback = mos7720_bulk_in_callback,
- .read_int_callback = mos7720_interrupt_callback,
+ .read_int_callback = NULL, /* dynamically assigned in probe() */
};
static int __init moschip7720_init(void)
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 2cfe245..2fda1c0 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -181,7 +181,7 @@
#define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */
-static struct usb_device_id moschip_port_id_table[] = {
+static const struct usb_device_id moschip_port_id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
@@ -198,7 +198,7 @@ static struct usb_device_id moschip_port_id_table[] = {
{} /* terminating entry */
};
-static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
+static const struct usb_device_id moschip_id_table_combined[] __devinitconst = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
@@ -283,12 +283,19 @@ static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg,
{
struct usb_device *dev = port->serial->dev;
int ret = 0;
+ u8 *buf;
+
+ buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
- MCS_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH,
+ MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
+ *val = buf[0];
dbg("mos7840_get_reg_sync offset is %x, return val %x", reg, *val);
- *val = (*val) & 0x00ff;
+
+ kfree(buf);
return ret;
}
@@ -341,6 +348,11 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
struct usb_device *dev = port->serial->dev;
int ret = 0;
__u16 Wval;
+ u8 *buf;
+
+ buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
/* dbg("application number is %4x",
(((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */
@@ -364,9 +376,11 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
}
}
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
- MCS_RD_RTYPE, Wval, reg, val, VENDOR_READ_LENGTH,
+ MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
- *val = (*val) & 0x00ff;
+ *val = buf[0];
+
+ kfree(buf);
return ret;
}
@@ -750,7 +764,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
if (urb->actual_length) {
tty = tty_port_tty_get(&mos7840_port->port->port);
if (tty) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
dbg(" %s ", data);
tty_flip_buffer_push(tty);
diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
index 99bd00f5..cf17183 100644
--- a/drivers/usb/serial/moto_modem.c
+++ b/drivers/usb/serial/moto_modem.c
@@ -21,7 +21,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */
{ USB_DEVICE(0x0c44, 0x0022) }, /* unknown Motorola phone */
{ USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 5ceaa4c6..04a6cbb 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -22,7 +22,7 @@
static int debug;
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
{ },
};
@@ -66,7 +66,6 @@ static void navman_read_int_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 0622650..89c724c 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -75,7 +75,7 @@ static void omninet_disconnect(struct usb_serial *serial);
static void omninet_release(struct usb_serial *serial);
static int omninet_attach(struct usb_serial *serial);
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
{ USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
{ } /* Terminating entry */
@@ -218,8 +218,8 @@ static void omninet_read_bulk_callback(struct urb *urb)
if (debug && header->oh_xxx != 0x30) {
if (urb->actual_length) {
- printk(KERN_DEBUG __FILE__
- ": omninet_read %d: ", header->oh_len);
+ printk(KERN_DEBUG "%s: omninet_read %d: ",
+ __FILE__, header->oh_len);
for (i = 0; i < (header->oh_len +
OMNINET_HEADERLEN); i++)
printk("%.2x ", data[i]);
@@ -332,7 +332,7 @@ static void omninet_write_bulk_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
int status = urb->status;
- dbg("%s - port %0x\n", __func__, port->number);
+ dbg("%s - port %0x", __func__, port->number);
port->write_urb_busy = 0;
if (status) {
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 4cdb975..f37476e 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -22,7 +22,7 @@
static int debug;
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x065a, 0x0009) },
{ },
};
@@ -55,7 +55,6 @@ static void opticon_bulk_callback(struct urb *urb)
int status = urb->status;
struct tty_struct *tty;
int result;
- int available_room = 0;
int data_length;
dbg("%s - port %d", __func__, port->number);
@@ -96,13 +95,9 @@ static void opticon_bulk_callback(struct urb *urb)
/* real data, send it to the tty layer */
tty = tty_port_tty_get(&port->port);
if (tty) {
- available_room = tty_buffer_request_room(tty,
- data_length);
- if (available_room) {
- tty_insert_flip_string(tty, data,
- available_room);
- tty_flip_buffer_push(tty);
- }
+ tty_insert_flip_string(tty, data,
+ data_length);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
} else {
@@ -217,7 +212,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return 0;
}
priv->outstanding_urbs++;
@@ -288,7 +283,7 @@ static int opticon_write_room(struct tty_struct *tty)
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) {
spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return 0;
}
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6e94a67..847b805 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -336,15 +336,42 @@ static int option_resume(struct usb_serial *serial);
#define AIRPLUS_VENDOR_ID 0x1011
#define AIRPLUS_PRODUCT_MCD650 0x3198
+/* Longcheer/Longsung vendor ID; Longcheer makes white-label devices
+ * that many other vendors, such as 4G Systems, Alcatel, ChinaBird,
+ * and Mobidata, sell under their own brand names.
+ */
+#define LONGCHEER_VENDOR_ID 0x1c9e
+
/* 4G Systems products */
-#define FOUR_G_SYSTEMS_VENDOR_ID 0x1c9e
+/* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick;
+ * it seems to contain a Qualcomm QSC6240/6290 chipset. */
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
/* Haier products */
#define HAIER_VENDOR_ID 0x201e
#define HAIER_PRODUCT_CE100 0x2009
-static struct usb_device_id option_ids[] = {
+/* some device interfaces need special handling for a number of reasons */
+enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+ OPTION_BLACKLIST_SENDSETUP = 1,
+ OPTION_BLACKLIST_RESERVED_IF = 2
+};
+
+struct option_blacklist_info {
+ const u32 infolen; /* number of interface numbers on blacklist */
+ const u8 *ifaceinfo; /* pointer to the array holding the numbers */
+ enum option_blacklist_reason reason;
+};
+
+static const u8 four_g_w14_no_sendsetup[] = { 0, 1 };
+static const struct option_blacklist_info four_g_w14_blacklist = {
+ .infolen = ARRAY_SIZE(four_g_w14_no_sendsetup),
+ .ifaceinfo = four_g_w14_no_sendsetup,
+ .reason = OPTION_BLACKLIST_SENDSETUP
+};
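For illustration, a device whose interface 2 had to be left to another driver would get an entry of the same shape, using the OPTION_BLACKLIST_RESERVED_IF reason (the names and numbers below are invented for this sketch, not part of the patch):

	static const u8 example_reserved_ifaces[] = { 2 };
	static const struct option_blacklist_info example_blacklist = {
		.infolen = ARRAY_SIZE(example_reserved_ifaces),
		.ifaceinfo = example_reserved_ifaces,
		.reason = OPTION_BLACKLIST_RESERVED_IF
	};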
+
+static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_LIGHT) },
@@ -644,7 +671,9 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
- { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+ .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
+ },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
{ } /* Terminating entry */
};
@@ -709,6 +738,7 @@ struct option_intf_private {
spinlock_t susp_lock;
unsigned int suspended:1;
int in_flight;
+ const struct option_blacklist_info *blacklist_info;
};
struct option_port_private {
@@ -778,9 +808,27 @@ static int option_probe(struct usb_serial *serial,
if (!data)
return -ENOMEM;
spin_lock_init(&data->susp_lock);
+ data->blacklist_info = (const struct option_blacklist_info *)id->driver_info;
return 0;
}
+static enum option_blacklist_reason is_blacklisted(const u8 ifnum,
+ const struct option_blacklist_info *blacklist)
+{
+ const u8 *info;
+ int i;
+
+ if (blacklist) {
+ info = blacklist->ifaceinfo;
+
+ for (i = 0; i < blacklist->infolen; i++) {
+ if (info[i] == ifnum)
+ return blacklist->reason;
+ }
+ }
+ return OPTION_BLACKLIST_NONE;
+}
+
static void option_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
@@ -921,7 +969,6 @@ static void option_indat_callback(struct urb *urb)
} else {
tty = tty_port_tty_get(&port->port);
if (urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
} else
@@ -929,9 +976,9 @@ static void option_indat_callback(struct urb *urb)
tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
- if (port->port.count && status != -ESHUTDOWN) {
+ if (status != -ESHUTDOWN) {
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err)
+ if (err && err != -EPERM)
printk(KERN_ERR "%s: resubmit read urb failed. "
"(%d)", __func__, err);
else
@@ -985,7 +1032,7 @@ static void option_instat_callback(struct urb *urb)
(struct usb_ctrlrequest *)urb->transfer_buffer;
if (!req_pkt) {
- dbg("%s: NULL req_pkt\n", __func__);
+ dbg("%s: NULL req_pkt", __func__);
return;
}
if ((req_pkt->bRequestType == 0xA1) &&
@@ -1211,11 +1258,19 @@ static void option_setup_urbs(struct usb_serial *serial)
static int option_send_setup(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
+ struct option_intf_private *intfdata =
+ (struct option_intf_private *) serial->private;
struct option_port_private *portdata;
int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
int val = 0;
dbg("%s", __func__);
+ if (is_blacklisted(ifNum, intfdata->blacklist_info) ==
+ OPTION_BLACKLIST_SENDSETUP) {
+ dbg("No send_setup on blacklisted interface #%d\n", ifNum);
+ return -EIO;
+ }
+
portdata = usb_get_serial_port_data(port);
if (portdata->dtr_state)
@@ -1401,7 +1456,7 @@ static int option_resume(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
if (!port->interrupt_in_urb) {
- dbg("%s: No interrupt URB for port %d\n", __func__, i);
+ dbg("%s: No interrupt URB for port %d", __func__, i);
continue;
}
err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index c644e26..deeacde 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -58,7 +58,7 @@
#define OTI6858_AUTHOR "Tomasz Michal Lukaszewski <FIXME@FIXME>"
#define OTI6858_VERSION "0.1"
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(OTI6858_VENDOR_ID, OTI6858_PRODUCT_ID) },
{ }
};
@@ -302,7 +302,7 @@ void send_data(struct work_struct *work)
struct usb_serial_port *port = priv->port;
int count = 0, result;
unsigned long flags;
- unsigned char allow;
+ u8 *allow;
dbg("%s(port = %d)", __func__, port->number);
@@ -321,13 +321,20 @@ void send_data(struct work_struct *work)
count = port->bulk_out_size;
if (count != 0) {
+ allow = kmalloc(1, GFP_KERNEL);
+ if (!allow) {
+ dev_err(&port->dev, "%s(): kmalloc failed\n",
+ __func__);
+ return;
+ }
result = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
OTI6858_REQ_T_CHECK_TXBUFF,
OTI6858_REQ_CHECK_TXBUFF,
- count, 0, &allow, 1, 100);
- if (result != 1 || allow != 0)
+ count, 0, allow, 1, 100);
+ if (result != 1 || *allow != 0)
count = 0;
+ kfree(allow);
}
if (count == 0) {
@@ -578,9 +585,6 @@ static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port)
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
- if (port->port.count != 1)
- return 0;
-
buf = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL);
if (buf == NULL) {
dev_err(&port->dev, "%s(): out of memory!\n", __func__);
@@ -927,10 +931,6 @@ static void oti6858_read_bulk_callback(struct urb *urb)
spin_unlock_irqrestore(&priv->lock, flags);
if (status != 0) {
- if (!port->port.count) {
- dbg("%s(): port is closed, exiting", __func__);
- return;
- }
/*
if (status == -EPROTO) {
* PL2303 mysteriously fails with -EPROTO reschedule
@@ -954,14 +954,12 @@ static void oti6858_read_bulk_callback(struct urb *urb)
}
tty_kref_put(tty);
- /* schedule the interrupt urb if we are still open */
- if (port->port.count != 0) {
- port->interrupt_in_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
- if (result != 0) {
- dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
- " error %d\n", __func__, result);
- }
+ /* schedule the interrupt urb */
+ port->interrupt_in_urb->dev = port->serial->dev;
+ result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
+ if (result != 0 && result != -EPERM) {
+ dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
+ " error %d\n", __func__, result);
}
}
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9ec1a49..73d5f34 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -50,7 +50,7 @@ struct pl2303_buf {
char *buf_put;
};
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
@@ -451,7 +451,6 @@ static void pl2303_send(struct usb_serial_port *port)
port->write_urb->transfer_buffer);
port->write_urb->transfer_buffer_length = count;
- port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err(&port->dev, "%s - failed submitting write urb,"
@@ -769,7 +768,6 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
pl2303_set_termios(tty, port, &tmp_termios);
dbg("%s - submitting read urb", __func__);
- port->read_urb->dev = serial->dev;
result = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "%s - failed submitting read urb,"
@@ -779,7 +777,6 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
}
dbg("%s - submitting interrupt urb", __func__);
- port->interrupt_in_urb->dev = serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
@@ -895,10 +892,23 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
static int pl2303_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
+ struct serial_struct ser;
struct usb_serial_port *port = tty->driver_data;
dbg("%s (%d) cmd = 0x%04x", __func__, port->number, cmd);
switch (cmd) {
+ case TIOCGSERIAL:
+ memset(&ser, 0, sizeof ser);
+ ser.type = PORT_16654;
+ ser.line = port->serial->minor;
+ ser.port = port->number;
+ ser.baud_base = 460800;
+
+ if (copy_to_user((void __user *)arg, &ser, sizeof ser))
+ return -EFAULT;
+
+ return 0;
+
case TIOCMIWAIT:
dbg("%s (%d) TIOCMIWAIT", __func__, port->number);
return wait_modem_info(port, arg);
@@ -1042,7 +1052,6 @@ static void pl2303_push_data(struct tty_struct *tty,
tty_flag = TTY_FRAME;
dbg("%s - tty_flag = %d", __func__, tty_flag);
- tty_buffer_request_room(tty, urb->actual_length + 1);
/* overrun is special, not associated with a char */
if (line_status & UART_OVERRUN_ERROR)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
@@ -1072,16 +1081,11 @@ static void pl2303_read_bulk_callback(struct urb *urb)
if (status) {
dbg("%s - urb status = %d", __func__, status);
- if (!port->port.count) {
- dbg("%s - port is closed, exiting.", __func__);
- return;
- }
if (status == -EPROTO) {
/* PL2303 mysteriously fails with -EPROTO reschedule
* the read */
dbg("%s - caught -EPROTO, resubmitting the urb",
__func__);
- urb->dev = port->serial->dev;
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(&urb->dev->dev, "%s - failed"
@@ -1108,15 +1112,10 @@ static void pl2303_read_bulk_callback(struct urb *urb)
}
tty_kref_put(tty);
- /* Schedule the next read _if_ we are still open */
+ /* Schedule the next read */
- if (port->port.count) {
- urb->dev = port->serial->dev;
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev, "%s - failed resubmitting"
- " read urb, error %d\n", __func__, result);
- }
-
- return;
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result && result != -EPERM)
+ dev_err(&urb->dev->dev, "%s - failed resubmitting"
+ " read urb, error %d\n", __func__, result);
}
static void pl2303_write_bulk_callback(struct urb *urb)
@@ -1146,7 +1145,6 @@ static void pl2303_write_bulk_callback(struct urb *urb)
dbg("%s - nonzero write bulk status received: %d", __func__,
status);
port->write_urb->transfer_buffer_length = 1;
- port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result)
dev_err(&urb->dev->dev, "%s - failed resubmitting write"
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
new file mode 100644
index 0000000..0b93620
--- /dev/null
+++ b/drivers/usb/serial/qcaux.c
@@ -0,0 +1,96 @@
+/*
+ * Qualcomm USB Auxiliary Serial Port driver
+ *
+ * Copyright (C) 2008 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2010 Dan Williams <dcbw@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Devices listed here usually provide a CDC ACM port on which normal modem
+ * AT commands and PPP can be used. But when that port is in-use by PPP it
+ * cannot be used simultaneously for status or signal strength. Instead, the
+ * ports here can be queried for that information using the Qualcomm DM
+ * protocol.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+
+/* NOTE: for now, only use this driver for devices that provide a CDC-ACM port
+ * for normal AT commands, but also provide secondary USB interfaces for the
+ * QCDM-capable ports. Devices that do not provide a CDC-ACM port should
+ * probably be driven by option.ko.
+ */
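The id_table below enforces this by matching with USB_DEVICE_AND_INTERFACE_INFO(), so the driver binds only to the vendor-specific (class 0xff) interfaces and never claims the CDC-ACM port. As a reminder, that macro from <linux/usb.h> expands to a match on both the device IDs and the interface class triple, roughly:

	.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
		       USB_DEVICE_ID_MATCH_INT_INFO,
	.idVendor = (vend), .idProduct = (prod),
	.bInterfaceClass = (cl),
	.bInterfaceSubClass = (sc),
	.bInterfaceProtocol = (pr)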
+
+/* UTStarcom/Pantech/Curitel devices */
+#define UTSTARCOM_VENDOR_ID 0x106c
+#define UTSTARCOM_PRODUCT_PC5740 0x3701
+#define UTSTARCOM_PRODUCT_PC5750 0x3702 /* aka Pantech PX-500 */
+#define UTSTARCOM_PRODUCT_UM150 0x3711
+#define UTSTARCOM_PRODUCT_UM175_V1 0x3712
+#define UTSTARCOM_PRODUCT_UM175_V2 0x3714
+#define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715
+
+/* CMOTECH devices */
+#define CMOTECH_VENDOR_ID 0x16d8
+#define CMOTECH_PRODUCT_CDU550 0x5553
+#define CMOTECH_PRODUCT_CDX650 0x6512
+
+static const struct usb_device_id id_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM150, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_V1, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_V2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver qcaux_driver = {
+ .name = "qcaux",
+ .probe = usb_serial_probe,
+ .disconnect = usb_serial_disconnect,
+ .id_table = id_table,
+ .no_dynamic_id = 1,
+};
+
+static struct usb_serial_driver qcaux_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "qcaux",
+ },
+ .id_table = id_table,
+ .num_ports = 1,
+};
+
+static int __init qcaux_init(void)
+{
+ int retval;
+
+ retval = usb_serial_register(&qcaux_device);
+ if (retval)
+ return retval;
+ retval = usb_register(&qcaux_driver);
+ if (retval)
+ usb_serial_deregister(&qcaux_device);
+ return retval;
+}
+
+static void __exit qcaux_exit(void)
+{
+ usb_deregister(&qcaux_driver);
+ usb_serial_deregister(&qcaux_device);
+}
+
+module_init(qcaux_init);
+module_exit(qcaux_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 7528b8d..310ff6e 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -21,7 +21,7 @@
static int debug;
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x05c6, 0x9211)}, /* Acer Gobi QDL device */
{USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
{USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
diff --git a/drivers/usb/serial/siemens_mpi.c b/drivers/usb/serial/siemens_mpi.c
index 951ea0c..cb8195c 100644
--- a/drivers/usb/serial/siemens_mpi.c
+++ b/drivers/usb/serial/siemens_mpi.c
@@ -22,7 +22,7 @@
#define DRIVER_DESC "Driver for Siemens USB/MPI adapter"
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
/* Vendor and product id for 6ES7-972-0CB20-0XA0 */
{ USB_DEVICE(0x908, 0x0004) },
{ },
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 3eb6143..34e6f89 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -226,7 +226,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = {
.ifaceinfo = direct_ip_non_serial_ifaces,
};
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
{ USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
{ USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
@@ -304,16 +304,6 @@ static struct usb_device_id id_table [] = {
};
MODULE_DEVICE_TABLE(usb, id_table);
-static struct usb_driver sierra_driver = {
- .name = "sierra",
- .probe = usb_serial_probe,
- .disconnect = usb_serial_disconnect,
- .suspend = usb_serial_suspend,
- .resume = usb_serial_resume,
- .id_table = id_table,
- .no_dynamic_id = 1,
- .supports_autosuspend = 1,
-};
struct sierra_port_private {
spinlock_t lock; /* lock the structure */
@@ -477,7 +467,7 @@ static void sierra_outdat_callback(struct urb *urb)
static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
- struct sierra_port_private *portdata = usb_get_serial_port_data(port);
+ struct sierra_port_private *portdata;
struct sierra_intf_private *intfdata;
struct usb_serial *serial = port->serial;
unsigned long flags;
@@ -604,14 +594,15 @@ static void sierra_indat_callback(struct urb *urb)
} else {
if (urb->actual_length) {
tty = tty_port_tty_get(&port->port);
-
- tty_buffer_request_room(tty, urb->actual_length);
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
+ if (tty) {
+ tty_insert_flip_string(tty, data,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
+
+ tty_kref_put(tty);
+ usb_serial_debug_data(debug, &port->dev,
+ __func__, urb->actual_length, data);
+ }
} else {
dev_dbg(&port->dev, "%s: empty read urb"
" received\n", __func__);
@@ -619,10 +610,10 @@ static void sierra_indat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- if (port->port.count && status != -ESHUTDOWN && status != -EPERM) {
+ if (status != -ESHUTDOWN && status != -EPERM) {
usb_mark_last_busy(port->serial->dev);
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err)
+ if (err && err != -EPERM)
dev_err(&port->dev, "resubmit read urb failed."
"(%d)\n", err);
}
@@ -681,11 +672,11 @@ static void sierra_instat_callback(struct urb *urb)
dev_dbg(&port->dev, "%s: error %d\n", __func__, status);
/* Resubmit urb so we continue receiving IRQ data */
- if (port->port.count && status != -ESHUTDOWN && status != -ENOENT) {
+ if (status != -ESHUTDOWN && status != -ENOENT) {
usb_mark_last_busy(serial->dev);
urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err)
+ if (err && err != -EPERM)
dev_err(&port->dev, "%s: resubmit intr urb "
"failed. (%d)\n", __func__, err);
}
@@ -1061,11 +1052,31 @@ static int sierra_resume(struct usb_serial *serial)
return ec ? -EIO : 0;
}
+
+static int sierra_reset_resume(struct usb_interface *intf)
+{
+ struct usb_serial *serial = usb_get_intfdata(intf);
+ dev_err(&serial->dev->dev, "%s\n", __func__);
+ return usb_serial_resume(intf);
+}
#else
#define sierra_suspend NULL
#define sierra_resume NULL
+#define sierra_reset_resume NULL
#endif
+static struct usb_driver sierra_driver = {
+ .name = "sierra",
+ .probe = usb_serial_probe,
+ .disconnect = usb_serial_disconnect,
+ .suspend = usb_serial_suspend,
+ .resume = usb_serial_resume,
+ .reset_resume = sierra_reset_resume,
+ .id_table = id_table,
+ .no_dynamic_id = 1,
+ .supports_autosuspend = 1,
+};
+
static struct usb_serial_driver sierra_device = {
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 1e58220..5d39191 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -45,7 +45,7 @@ static int debug;
#define SPCP8x5_835_VID 0x04fc
#define SPCP8x5_835_PID 0x0231
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SPCP8x5_PHILIPS_VID , SPCP8x5_PHILIPS_PID)},
{ USB_DEVICE(SPCP8x5_INTERMATIC_VID, SPCP8x5_INTERMATIC_PID)},
{ USB_DEVICE(SPCP8x5_835_VID, SPCP8x5_835_PID)},
@@ -609,7 +609,7 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
if (i < 0)
dev_err(&port->dev, "Set UART format %#x failed (error = %d)\n",
uartdata, i);
- dbg("0x21:0x40:0:0 %d\n", i);
+ dbg("0x21:0x40:0:0 %d", i);
if (cflag & CRTSCTS) {
/* enable hardware flow control */
@@ -677,7 +677,6 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
- int i;
int result = urb->status;
u8 status;
char tty_flag;
@@ -687,8 +686,6 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
/* check the urb status */
if (result) {
- if (!port->port.count)
- return;
if (result == -EPROTO) {
/* spcp8x5 mysteriously fails with -EPROTO */
/* reschedule the read */
@@ -726,26 +723,20 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length + 1);
/* overrun is special, not associated with a char */
if (status & UART_OVERRUN_ERROR)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- for (i = 0; i < urb->actual_length; ++i)
- tty_insert_flip_char(tty, data[i], tty_flag);
+ tty_insert_flip_string_fixed_flag(tty, data,
+ urb->actual_length, tty_flag);
tty_flip_buffer_push(tty);
}
tty_kref_put(tty);
- /* Schedule the next read _if_ we are still open */
- if (port->port.count) {
- urb->dev = port->serial->dev;
- result = usb_submit_urb(urb , GFP_ATOMIC);
- if (result)
- dev_dbg(&port->dev, "failed submitting read urb %d\n",
- result);
- }
-
- return;
+ /* Schedule the next read */
+ urb->dev = port->serial->dev;
+ result = usb_submit_urb(urb , GFP_ATOMIC);
+ if (result)
+ dev_dbg(&port->dev, "failed submitting read urb %d\n", result);
}
/* get data from ring buffer and then write to usb bus */
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index b282c0f..7239888 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -21,7 +21,7 @@
static int debug;
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x05e0, 0x0600) },
{ },
};
@@ -51,7 +51,6 @@ static void symbol_int_callback(struct urb *urb)
int status = urb->status;
struct tty_struct *tty;
int result;
- int available_room = 0;
int data_length;
dbg("%s - port %d", __func__, port->number);
@@ -89,13 +88,8 @@ static void symbol_int_callback(struct urb *urb)
*/
tty = tty_port_tty_get(&port->port);
if (tty) {
- available_room = tty_buffer_request_room(tty,
- data_length);
- if (available_room) {
- tty_insert_flip_string(tty, &data[1],
- available_room);
- tty_flip_buffer_push(tty);
- }
+ tty_insert_flip_string(tty, &data[1], data_length);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
} else {
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 1e9dc88..0afe5c7 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1271,14 +1271,13 @@ static void ti_recv(struct device *dev, struct tty_struct *tty,
int cnt;
do {
- cnt = tty_buffer_request_room(tty, length);
+ cnt = tty_insert_flip_string(tty, data, length);
if (cnt < length) {
dev_err(dev, "%s - dropping data, %d bytes lost\n",
__func__, length - cnt);
if (cnt == 0)
break;
}
- tty_insert_flip_string(tty, data, cnt);
tty_flip_buffer_push(tty);
data += cnt;
length -= cnt;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 33c85f7..3873660 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -358,10 +358,6 @@ static int serial_write(struct tty_struct *tty, const unsigned char *buf,
dbg("%s - port %d, %d byte(s)", __func__, port->number, count);
- /* count is managed under the mutex lock for the tty so cannot
- drop to zero until after the last close completes */
- WARN_ON(!port->port.count);
-
/* pass on to the driver specific version of this function */
retval = port->serial->type->write(tty, port, buf, count);
@@ -373,7 +369,6 @@ static int serial_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
/* pass on to the driver specific version of this function */
return port->serial->type->write_room(tty);
}
@@ -381,7 +376,7 @@ static int serial_write_room(struct tty_struct *tty)
static int serial_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- dbg("%s = port %d", __func__, port->number);
+ dbg("%s - port %d", __func__, port->number);
/* if the device was unplugged then any remaining characters
fell out of the connector ;) */
@@ -396,7 +391,6 @@ static void serial_throttle(struct tty_struct *tty)
struct usb_serial_port *port = tty->driver_data;
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
/* pass on to the driver specific version of this function */
if (port->serial->type->throttle)
port->serial->type->throttle(tty);
@@ -407,7 +401,6 @@ static void serial_unthrottle(struct tty_struct *tty)
struct usb_serial_port *port = tty->driver_data;
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
/* pass on to the driver specific version of this function */
if (port->serial->type->unthrottle)
port->serial->type->unthrottle(tty);
@@ -421,8 +414,6 @@ static int serial_ioctl(struct tty_struct *tty, struct file *file,
dbg("%s - port %d, cmd 0x%.4x", __func__, port->number, cmd);
- WARN_ON(!port->port.count);
-
/* pass on to the driver specific version of this function
if it is available */
if (port->serial->type->ioctl) {
@@ -437,7 +428,6 @@ static void serial_set_termios(struct tty_struct *tty, struct ktermios *old)
struct usb_serial_port *port = tty->driver_data;
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
/* pass on to the driver specific version of this function
if it is available */
if (port->serial->type->set_termios)
@@ -452,7 +442,6 @@ static int serial_break(struct tty_struct *tty, int break_state)
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
/* pass on to the driver specific version of this function
if it is available */
if (port->serial->type->break_ctl)
@@ -513,7 +502,6 @@ static int serial_tiocmget(struct tty_struct *tty, struct file *file)
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
if (port->serial->type->tiocmget)
return port->serial->type->tiocmget(tty, file);
return -EINVAL;
@@ -526,7 +514,6 @@ static int serial_tiocmset(struct tty_struct *tty, struct file *file,
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
if (port->serial->type->tiocmset)
return port->serial->type->tiocmset(tty, file, set, clear);
return -EINVAL;
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 7b5bfc4..252cc2d 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -29,7 +29,7 @@ static char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = {
0xff,
};
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0525, 0x127a) },
{ },
};
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index ad1f923..0949427 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -368,7 +368,7 @@ static int visor_write(struct tty_struct *tty, struct usb_serial_port *port,
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return 0;
}
priv->outstanding_urbs++;
@@ -446,7 +446,7 @@ static int visor_write_room(struct tty_struct *tty)
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) {
spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return 0;
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -503,13 +503,9 @@ static void visor_read_bulk_callback(struct urb *urb)
if (urb->actual_length) {
tty = tty_port_tty_get(&port->port);
if (tty) {
- available_room = tty_buffer_request_room(tty,
- urb->actual_length);
- if (available_room) {
- tty_insert_flip_string(tty, data,
- available_room);
- tty_flip_buffer_push(tty);
- }
+ tty_insert_flip_string(tty, data,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
spin_lock(&priv->lock);
@@ -807,10 +803,14 @@ static int clie_3_5_startup(struct usb_serial *serial)
{
struct device *dev = &serial->dev->dev;
int result;
- u8 data;
+ u8 *data;
dbg("%s", __func__);
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
/*
* Note that PEG-300 series devices expect the following two calls.
*/
@@ -818,36 +818,42 @@ static int clie_3_5_startup(struct usb_serial *serial)
/* get the config number */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
USB_REQ_GET_CONFIGURATION, USB_DIR_IN,
- 0, 0, &data, 1, 3000);
+ 0, 0, data, 1, 3000);
if (result < 0) {
dev_err(dev, "%s: get config number failed: %d\n",
__func__, result);
- return result;
+ goto out;
}
if (result != 1) {
dev_err(dev, "%s: get config number bad return length: %d\n",
__func__, result);
- return -EIO;
+ result = -EIO;
+ goto out;
}
/* get the interface number */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
USB_REQ_GET_INTERFACE,
USB_DIR_IN | USB_RECIP_INTERFACE,
- 0, 0, &data, 1, 3000);
+ 0, 0, data, 1, 3000);
if (result < 0) {
dev_err(dev, "%s: get interface number failed: %d\n",
__func__, result);
- return result;
+ goto out;
}
if (result != 1) {
dev_err(dev,
"%s: get interface number bad return length: %d\n",
__func__, result);
- return -EIO;
+ result = -EIO;
+ goto out;
}
- return generic_startup(serial);
+ result = generic_startup(serial);
+out:
+ kfree(data);
+
+ return result;
}
static int treo_attach(struct usb_serial *serial)
diff --git a/drivers/usb/serial/vivopay-serial.c b/drivers/usb/serial/vivopay-serial.c
new file mode 100644
index 0000000..f719d00
--- /dev/null
+++ b/drivers/usb/serial/vivopay-serial.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2001-2005 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2009 Outpost Embedded, LLC
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+
+
+#define DRIVER_VERSION "v1.0"
+#define DRIVER_DESC "ViVOpay USB Serial Driver"
+
+#define VIVOPAY_VENDOR_ID 0x1d5f
+
+
+static const struct usb_device_id id_table[] = {
+ /* ViVOpay 8800 */
+ { USB_DEVICE(VIVOPAY_VENDOR_ID, 0x1004) },
+ { },
+};
+
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver vivopay_serial_driver = {
+ .name = "vivopay-serial",
+ .probe = usb_serial_probe,
+ .disconnect = usb_serial_disconnect,
+ .id_table = id_table,
+ .no_dynamic_id = 1,
+};
+
+static struct usb_serial_driver vivopay_serial_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "vivopay-serial",
+ },
+ .id_table = id_table,
+ .usb_driver = &vivopay_serial_driver,
+ .num_ports = 1,
+};
+
+static int __init vivopay_serial_init(void)
+{
+ int retval;
+ retval = usb_serial_register(&vivopay_serial_device);
+ if (retval)
+ goto failed_usb_serial_register;
+ retval = usb_register(&vivopay_serial_driver);
+ if (retval)
+ goto failed_usb_register;
+ printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ": "
+ DRIVER_DESC "\n");
+ return 0;
+failed_usb_register:
+ usb_serial_deregister(&vivopay_serial_device);
+failed_usb_serial_register:
+ return retval;
+}
+
+static void __exit vivopay_serial_exit(void)
+{
+ usb_deregister(&vivopay_serial_driver);
+ usb_serial_deregister(&vivopay_serial_device);
+}
+
+module_init(vivopay_serial_init);
+module_exit(vivopay_serial_exit);
+
+MODULE_AUTHOR("Forest Bond <forest.bond@outpostembedded.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 1093d2e..12ed820 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -111,17 +111,17 @@ static int debug;
separate ID tables, and then a third table that combines them
just for the purpose of exporting the autoloading information.
*/
-static struct usb_device_id id_table_std [] = {
+static const struct usb_device_id id_table_std[] = {
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_prerenumeration [] = {
+static const struct usb_device_id id_table_prerenumeration[] = {
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) },
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) },
{ } /* Terminating entry */
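(The combined table exists purely for autoloading: whiteheat.c exports it to depmod with the usual idiom, along the lines of

	MODULE_DEVICE_TABLE(usb, id_table_combined);

while the two single-ID tables are what the pre- and post-renumeration device entries actually match against.)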
@@ -1492,21 +1492,9 @@ static void rx_data_softint(struct work_struct *work)
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
- if (tty && urb->actual_length) {
- int len = tty_buffer_request_room(tty,
- urb->actual_length);
- /* This stuff can go away now I suspect */
- if (unlikely(len < urb->actual_length)) {
- spin_lock_irqsave(&info->lock, flags);
- list_add(tmp, &info->rx_urb_q);
- spin_unlock_irqrestore(&info->lock, flags);
- tty_flip_buffer_push(tty);
- schedule_work(&info->rx_work);
- goto out;
- }
- tty_insert_flip_string(tty, urb->transfer_buffer, len);
- sent += len;
- }
+ if (tty && urb->actual_length)
+ sent += tty_insert_flip_string(tty,
+ urb->transfer_buffer, urb->actual_length);
urb->dev = port->serial->dev;
result = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 80e65f2..198bb3e 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -202,7 +202,7 @@ static int onetouch_connect_input(struct us_data *ss)
goto fail1;
onetouch->data = usb_buffer_alloc(udev, ONETOUCH_PKT_LEN,
- GFP_ATOMIC, &onetouch->data_dma);
+ GFP_KERNEL, &onetouch->data_dma);
if (!onetouch->data)
goto fail1;
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index aadc16b..4cc0355 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -133,7 +133,7 @@ static int slave_configure(struct scsi_device *sdev)
if (us->fflags & US_FL_MAX_SECTORS_MIN)
max_sectors = PAGE_CACHE_SIZE >> 9;
- if (queue_max_sectors(sdev->request_queue) > max_sectors)
+ if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
blk_queue_max_hw_sectors(sdev->request_queue,
max_sectors);
} else if (sdev->type == TYPE_TAPE) {
@@ -484,7 +484,7 @@ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *att
{
struct scsi_device *sdev = to_scsi_device(dev);
- return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue));
+ return sprintf(buf, "%u\n", queue_max_hw_sectors(sdev->request_queue));
}
/* Input routine for the sysfs max_sectors file */
@@ -494,9 +494,9 @@ static ssize_t store_max_sectors(struct device *dev, struct device_attribute *at
struct scsi_device *sdev = to_scsi_device(dev);
unsigned short ms;
- if (sscanf(buf, "%hu", &ms) > 0 && ms <= SCSI_DEFAULT_MAX_SECTORS) {
+ if (sscanf(buf, "%hu", &ms) > 0) {
blk_queue_max_hw_sectors(sdev->request_queue, ms);
- return strlen(buf);
+ return count;
}
return -EINVAL;
}
@@ -539,7 +539,7 @@ struct scsi_host_template usb_stor_host_template = {
.slave_configure = slave_configure,
/* lots of sg segments can be handled */
- .sg_tablesize = SG_ALL,
+ .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
/* limit the total size of a transfer to 120 KB */
.max_sectors = 240,
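(240 is counted in 512-byte sectors: 240 x 512 = 122880 bytes = 120 KB, which is where the figure in the comment above comes from.)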
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index b62a288..bd3f415 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1628,10 +1628,10 @@ static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us)
return USB_STOR_TRANSPORT_ERROR;
}
- if ( (result = usbat_multiple_write(us,
- registers, data, 7)) != USB_STOR_TRANSPORT_GOOD) {
+ result = usbat_multiple_write(us, registers, data, 7);
+
+ if (result != USB_STOR_TRANSPORT_GOOD)
return result;
- }
/*
* Write the 12-byte command header.
@@ -1643,12 +1643,11 @@ static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us)
* AT SPEED 4 IS UNRELIABLE!!!
*/
- if ((result = usbat_write_block(us,
- USBAT_ATA, srb->cmnd, 12,
- (srb->cmnd[0]==GPCMD_BLANK ? 75 : 10), 0) !=
- USB_STOR_TRANSPORT_GOOD)) {
+ result = usbat_write_block(us, USBAT_ATA, srb->cmnd, 12,
+ srb->cmnd[0] == GPCMD_BLANK ? 75 : 10, 0);
+
+ if (result != USB_STOR_TRANSPORT_GOOD)
return result;
- }
/* If there is response data to be read in then do it here. */
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index cc313d1..4680381 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -47,6 +47,8 @@
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/usb/quirks.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
@@ -1297,6 +1299,10 @@ int usb_stor_port_reset(struct us_data *us)
{
int result;
+ /* for these devices we must use the class-specific method */
+ if (us->pusb_dev->quirks & USB_QUIRK_RESET_MORPHS)
+ return -EPERM;
+
result = usb_lock_device_for_reset(us->pusb_dev, us->pusb_intf);
if (result < 0)
US_DEBUGP("unable to lock device for reset: %d\n", result);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 49575fba..98b549b 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1147,8 +1147,8 @@ UNUSUAL_DEV( 0x0af0, 0x7401, 0x0000, 0x0000,
0 ),
/* Reported by Jan Dumon <j.dumon@option.com>
- * This device (wrongly) has a vendor-specific device descriptor.
- * The entry is needed so usb-storage can bind to it's mass-storage
+ * These devices (wrongly) have a vendor-specific device descriptor.
+ * These entries are needed so usb-storage can bind to their mass-storage
* interface as an interface driver */
UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
"Option",
@@ -1156,6 +1156,90 @@ UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
US_SC_DEVICE, US_PR_DEVICE, NULL,
0 ),
+UNUSUAL_DEV( 0x0af0, 0x7701, 0x0000, 0x0000,
+ "Option",
+ "GI 0451 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7706, 0x0000, 0x0000,
+ "Option",
+ "GI 0451 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7901, 0x0000, 0x0000,
+ "Option",
+ "GI 0452 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7A01, 0x0000, 0x0000,
+ "Option",
+ "GI 0461 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7A05, 0x0000, 0x0000,
+ "Option",
+ "GI 0461 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x8300, 0x0000, 0x0000,
+ "Option",
+ "GI 033x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x8302, 0x0000, 0x0000,
+ "Option",
+ "GI 033x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x8304, 0x0000, 0x0000,
+ "Option",
+ "GI 033x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xc100, 0x0000, 0x0000,
+ "Option",
+ "GI 070x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd057, 0x0000, 0x0000,
+ "Option",
+ "GI 1505 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd058, 0x0000, 0x0000,
+ "Option",
+ "GI 1509 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd157, 0x0000, 0x0000,
+ "Option",
+ "GI 1515 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd257, 0x0000, 0x0000,
+ "Option",
+ "GI 1215 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd357, 0x0000, 0x0000,
+ "Option",
+ "GI 1505 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
/* Reported by Ben Efros <ben@pc-doctor.com> */
UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
"Seagate",
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index b1e579c..6152278 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -28,7 +28,7 @@
#define USB_SKEL_PRODUCT_ID 0xfff0
/* table of devices that work with this driver */
-static struct usb_device_id skel_table[] = {
+static const struct usb_device_id skel_table[] = {
{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index 25eae40..51a8e0d 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -641,7 +641,7 @@ static void cbaf_disconnect(struct usb_interface *iface)
kzfree(cbaf);
}
-static struct usb_device_id cbaf_id_table[] = {
+static const struct usb_device_id cbaf_id_table[] = {
{ USB_INTERFACE_INFO(0xef, 0x03, 0x01), },
{ },
};
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index dced419..1c91828 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -868,7 +868,7 @@ static struct usb_wireless_cap_descriptor wusb_cap_descr_default = {
* reference that we'll drop.
*
* First we need to determine if the device is a WUSB device (else we
- * ignore it). For that we use the speed setting (USB_SPEED_VARIABLE)
+ * ignore it). For that we use the speed setting (USB_SPEED_WIRELESS)
* [FIXME: maybe we'd need something more definitive]. If so, we track
* its usb_bus and from there, the WUSB HC.
*
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
index 3b52161..2d82739 100644
--- a/drivers/usb/wusbcore/mmc.c
+++ b/drivers/usb/wusbcore/mmc.c
@@ -263,7 +263,7 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
{
int result = 0;
- if (memcmp(chid, &wusb_ckhdid_zero, sizeof(chid)) == 0)
+ if (memcmp(chid, &wusb_ckhdid_zero, sizeof(*chid)) == 0)
chid = NULL;
mutex_lock(&wusbhc->mutex);
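
The one-character change above fixes the classic sizeof-on-a-pointer bug: chid is a pointer, so sizeof(chid) is 4 or 8 and only a prefix of the 16-byte CHID was compared. A self-contained userspace sketch of the bug class (the struct layout is assumed, not taken from wusbcore):

#include <stdio.h>
#include <string.h>

struct ckhdid {
	unsigned char data[16];
};

static const struct ckhdid ckhdid_zero;	/* all-zero sentinel */

static int chid_is_zero(const struct ckhdid *chid)
{
	/* sizeof(chid) would compare only sizeof(void *) bytes and miss
	 * differences in the tail; sizeof(*chid) compares all 16 bytes. */
	return memcmp(chid, &ckhdid_zero, sizeof(*chid)) == 0;
}

int main(void)
{
	struct ckhdid id = { .data = { [15] = 1 } };

	/* prints 0; the buggy form would wrongly print 1 on 64-bit */
	printf("%d\n", chid_is_zero(&id));
	return 0;
}
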
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 3681c6a..b0a3fa0 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3025,6 +3025,20 @@ static int fbcon_fb_unregistered(struct fb_info *info)
return 0;
}
+static void fbcon_remap_all(int idx)
+{
+ int i;
+ for (i = first_fb_vc; i <= last_fb_vc; i++)
+ set_con2fb_map(i, idx, 0);
+
+ if (con_is_bound(&fb_con)) {
+ printk(KERN_INFO "fbcon: Remapping primary device, "
+ "fb%i, to tty %i-%i\n", idx,
+ first_fb_vc + 1, last_fb_vc + 1);
+ info_idx = idx;
+ }
+}
+
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
static void fbcon_select_primary(struct fb_info *info)
{
@@ -3225,6 +3239,10 @@ static int fbcon_event_notify(struct notifier_block *self,
caps = event->data;
fbcon_get_requirement(info, caps);
break;
+ case FB_EVENT_REMAP_ALL_CONSOLE:
+ idx = info->node;
+ fbcon_remap_all(idx);
+ break;
}
done:
return ret;
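
fbcon_remap_all() above is the consumer half of a new notifier event. The producer would be whichever driver decides a different framebuffer should own the consoles; a hedged sketch of firing the event through the existing framebuffer notifier chain (the caller is hypothetical, the event constant is the one this patch handles):

#include <linux/fb.h>

static void remap_all_consoles_to(struct fb_info *info)
{
	struct fb_event event = { .info = info };

	fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
}
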
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 99bbd28..a15b44e 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1513,7 +1513,6 @@ register_framebuffer(struct fb_info *fb_info)
fb_info->fix.id,
registered_fb[i]->fix.id);
unregister_framebuffer(registered_fb[i]);
- break;
}
}
}
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index ce602dd..2f84137 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -649,9 +649,13 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
int bit_idx = __ffs(pending_bits);
int port = (word_idx * BITS_PER_LONG) + bit_idx;
int irq = evtchn_to_irq[port];
+ struct irq_desc *desc;
- if (irq != -1)
- handle_irq(irq, regs);
+ if (irq != -1) {
+ desc = irq_to_desc(irq);
+ if (desc)
+ generic_handle_irq_desc(irq, desc);
+ }
}
}
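
Dispatching through irq_to_desc() instead of handle_irq() means a stale event-channel-to-IRQ mapping can no longer hand the handler a descriptor that does not exist. The guarded shape in isolation, a sketch assuming the <linux/irq.h> helpers used above:

#include <linux/irq.h>

static void dispatch_event_irq(int irq)
{
	struct irq_desc *desc;

	if (irq == -1)
		return;		/* port not bound to an IRQ */

	desc = irq_to_desc(irq);
	if (!desc)
		return;		/* mapping went away; drop the event */

	generic_handle_irq_desc(irq, desc);
}
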
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 9cc1877..2ff622f 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -121,7 +121,7 @@ struct adfs_discmap {
/* Inode stuff */
struct inode *adfs_iget(struct super_block *sb, struct object_info *obj);
-int adfs_write_inode(struct inode *inode,int unused);
+int adfs_write_inode(struct inode *inode, struct writeback_control *wbc);
int adfs_notify_change(struct dentry *dentry, struct iattr *attr);
/* map.c */
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 3f57ce4..0f5e309 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -9,6 +9,7 @@
*/
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
+#include <linux/writeback.h>
#include "adfs.h"
/*
@@ -360,7 +361,7 @@ out:
* The adfs-specific inode data has already been updated by
* adfs_notify_change()
*/
-int adfs_write_inode(struct inode *inode, int wait)
+int adfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct super_block *sb = inode->i_sb;
struct object_info obj;
@@ -375,7 +376,7 @@ int adfs_write_inode(struct inode *inode, int wait)
obj.attr = ADFS_I(inode)->attr;
obj.size = inode->i_size;
- ret = adfs_dir_update(sb, &obj, wait);
+ ret = adfs_dir_update(sb, &obj, wbc->sync_mode == WB_SYNC_ALL);
unlock_kernel();
return ret;
}
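
adfs is the first of several filesystems in this series converted to the new ->write_inode() prototype: the bare int wait argument becomes a struct writeback_control, and synchronous behaviour is derived from wbc->sync_mode. The conversion pattern, sketched for a hypothetical filesystem (the examplefs_* names are invented):

#include <linux/fs.h>
#include <linux/writeback.h>

/* hypothetical low-level helper: write the raw inode, optionally waiting */
static int examplefs_sync_inode(struct inode *inode, int wait)
{
	return 0;
}

static int examplefs_write_inode(struct inode *inode,
				 struct writeback_control *wbc)
{
	int wait = (wbc->sync_mode == WB_SYNC_ALL);

	return examplefs_sync_inode(inode, wait);
}
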
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index 0e40caa..861dae6 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -175,7 +175,8 @@ extern void affs_delete_inode(struct inode *inode);
extern void affs_clear_inode(struct inode *inode);
extern struct inode *affs_iget(struct super_block *sb,
unsigned long ino);
-extern int affs_write_inode(struct inode *inode, int);
+extern int affs_write_inode(struct inode *inode,
+ struct writeback_control *wbc);
extern int affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s32 type);
/* file.c */
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 3c4ec7d..c9744d7 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -166,7 +166,7 @@ bad_inode:
}
int
-affs_write_inode(struct inode *inode, int unused)
+affs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct super_block *sb = inode->i_sb;
struct buffer_head *bh;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 6ece2a1..c54dad4 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -733,7 +733,6 @@ extern int afs_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata);
extern int afs_writepage(struct page *, struct writeback_control *);
extern int afs_writepages(struct address_space *, struct writeback_control *);
-extern int afs_write_inode(struct inode *, int);
extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
extern ssize_t afs_file_write(struct kiocb *, const struct iovec *,
unsigned long, loff_t);
diff --git a/fs/afs/super.c b/fs/afs/super.c
index e1ea1c2..14f6431 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -48,7 +48,6 @@ struct file_system_type afs_fs_type = {
static const struct super_operations afs_super_ops = {
.statfs = afs_statfs,
.alloc_inode = afs_alloc_inode,
- .write_inode = afs_write_inode,
.destroy_inode = afs_destroy_inode,
.clear_inode = afs_clear_inode,
.put_super = afs_put_super,
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 5e15a21..3bed54a 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -585,27 +585,6 @@ int afs_writepages(struct address_space *mapping,
}
/*
- * write an inode back
- */
-int afs_write_inode(struct inode *inode, int sync)
-{
- struct afs_vnode *vnode = AFS_FS_I(inode);
- int ret;
-
- _enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
-
- ret = 0;
- if (sync) {
- ret = filemap_fdatawait(inode->i_mapping);
- if (ret < 0)
- __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
- }
-
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
* completion of write to server
*/
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 0118d67..3d283ab 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -60,11 +60,6 @@ do { \
current->pid, __func__, ##args); \
} while (0)
-struct rehash_entry {
- struct task_struct *task;
- struct list_head list;
-};
-
/* Unified info structure. This is pointed to by both the dentry and
inode structures. Each file in the filesystem has an instance of this
structure. It holds a reference to the dentry, so dentries are never
@@ -81,7 +76,6 @@ struct autofs_info {
struct list_head active;
int active_count;
- struct list_head rehash_list;
struct list_head expiring;
@@ -104,7 +98,6 @@ struct autofs_info {
#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */
#define AUTOFS_INF_MOUNTPOINT (1<<1) /* mountpoint status for direct expire */
#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
-#define AUTOFS_INF_REHASH (1<<3) /* dentry in transit to ->lookup() */
struct autofs_wait_queue {
wait_queue_head_t queue;
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 00bf8fc..c8a80df 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -544,10 +544,9 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
goto out;
devid = new_encode_dev(path.mnt->mnt_sb->s_dev);
err = 0;
- if (path.dentry->d_inode &&
- path.mnt->mnt_root == path.dentry) {
+ if (path.mnt->mnt_root == path.dentry) {
err = 1;
- magic = path.dentry->d_inode->i_sb->s_magic;
+ magic = path.mnt->mnt_sb->s_magic;
}
} else {
dev_t dev = sbi->sb->s_dev;
@@ -560,10 +559,8 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
err = have_submounts(path.dentry);
- if (path.mnt->mnt_mountpoint != path.mnt->mnt_root) {
- if (follow_down(&path))
- magic = path.mnt->mnt_sb->s_magic;
- }
+ if (follow_down(&path))
+ magic = path.mnt->mnt_sb->s_magic;
}
param->ismountpoint.out.devid = devid;
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 74bc9aa..a796c94 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -279,7 +279,6 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
root->d_mounted--;
}
ino->flags |= AUTOFS_INF_EXPIRING;
- autofs4_add_expiring(root);
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
return root;
@@ -407,7 +406,6 @@ found:
expired, (int)expired->d_name.len, expired->d_name.name);
ino = autofs4_dentry_ino(expired);
ino->flags |= AUTOFS_INF_EXPIRING;
- autofs4_add_expiring(expired);
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
spin_lock(&dcache_lock);
@@ -435,7 +433,7 @@ int autofs4_expire_wait(struct dentry *dentry)
DPRINTK("expire done status=%d", status);
- if (d_unhashed(dentry) && IS_DEADDIR(dentry->d_inode))
+ if (d_unhashed(dentry))
return -EAGAIN;
return status;
@@ -475,7 +473,6 @@ int autofs4_expire_run(struct super_block *sb,
spin_lock(&sbi->fs_lock);
ino = autofs4_dentry_ino(dentry);
ino->flags &= ~AUTOFS_INF_EXPIRING;
- autofs4_del_expiring(dentry);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
@@ -506,7 +503,6 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
ino->flags &= ~AUTOFS_INF_MOUNTPOINT;
}
ino->flags &= ~AUTOFS_INF_EXPIRING;
- autofs4_del_expiring(dentry);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
dput(dentry);
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index d0a3de2..821b2b9 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -49,7 +49,6 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino,
ino->dentry = NULL;
ino->size = 0;
INIT_LIST_HEAD(&ino->active);
- INIT_LIST_HEAD(&ino->rehash_list);
ino->active_count = 0;
INIT_LIST_HEAD(&ino->expiring);
atomic_set(&ino->count, 0);
@@ -97,63 +96,6 @@ void autofs4_free_ino(struct autofs_info *ino)
kfree(ino);
}
-/*
- * Deal with the infamous "Busy inodes after umount ..." message.
- *
- * Clean up the dentry tree. This happens with autofs if the user
- * space program goes away due to a SIGKILL, SIGSEGV etc.
- */
-static void autofs4_force_release(struct autofs_sb_info *sbi)
-{
- struct dentry *this_parent = sbi->sb->s_root;
- struct list_head *next;
-
- if (!sbi->sb->s_root)
- return;
-
- spin_lock(&dcache_lock);
-repeat:
- next = this_parent->d_subdirs.next;
-resume:
- while (next != &this_parent->d_subdirs) {
- struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child);
-
- /* Negative dentry - don`t care */
- if (!simple_positive(dentry)) {
- next = next->next;
- continue;
- }
-
- if (!list_empty(&dentry->d_subdirs)) {
- this_parent = dentry;
- goto repeat;
- }
-
- next = next->next;
- spin_unlock(&dcache_lock);
-
- DPRINTK("dentry %p %.*s",
- dentry, (int)dentry->d_name.len, dentry->d_name.name);
-
- dput(dentry);
- spin_lock(&dcache_lock);
- }
-
- if (this_parent != sbi->sb->s_root) {
- struct dentry *dentry = this_parent;
-
- next = this_parent->d_u.d_child.next;
- this_parent = this_parent->d_parent;
- spin_unlock(&dcache_lock);
- DPRINTK("parent dentry %p %.*s",
- dentry, (int)dentry->d_name.len, dentry->d_name.name);
- dput(dentry);
- spin_lock(&dcache_lock);
- goto resume;
- }
- spin_unlock(&dcache_lock);
-}
-
void autofs4_kill_sb(struct super_block *sb)
{
struct autofs_sb_info *sbi = autofs4_sbi(sb);
@@ -170,15 +112,12 @@ void autofs4_kill_sb(struct super_block *sb)
/* Free wait queues, close pipe */
autofs4_catatonic_mode(sbi);
- /* Clean up and release dangling references */
- autofs4_force_release(sbi);
-
sb->s_fs_info = NULL;
kfree(sbi);
out_kill_sb:
DPRINTK("shutting down");
- kill_anon_super(sb);
+ kill_litter_super(sb);
}
static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt)
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 30cc9dd..a015b49 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -104,99 +104,6 @@ static void autofs4_del_active(struct dentry *dentry)
return;
}
-static void autofs4_add_rehash_entry(struct autofs_info *ino,
- struct rehash_entry *entry)
-{
- entry->task = current;
- INIT_LIST_HEAD(&entry->list);
- list_add(&entry->list, &ino->rehash_list);
- return;
-}
-
-static void autofs4_remove_rehash_entry(struct autofs_info *ino)
-{
- struct list_head *head = &ino->rehash_list;
- struct rehash_entry *entry;
- list_for_each_entry(entry, head, list) {
- if (entry->task == current) {
- list_del(&entry->list);
- kfree(entry);
- break;
- }
- }
- return;
-}
-
-static void autofs4_remove_rehash_entrys(struct autofs_info *ino)
-{
- struct autofs_sb_info *sbi = ino->sbi;
- struct rehash_entry *entry, *next;
- struct list_head *head;
-
- spin_lock(&sbi->fs_lock);
- spin_lock(&sbi->lookup_lock);
- if (!(ino->flags & AUTOFS_INF_REHASH)) {
- spin_unlock(&sbi->lookup_lock);
- spin_unlock(&sbi->fs_lock);
- return;
- }
- ino->flags &= ~AUTOFS_INF_REHASH;
- head = &ino->rehash_list;
- list_for_each_entry_safe(entry, next, head, list) {
- list_del(&entry->list);
- kfree(entry);
- }
- spin_unlock(&sbi->lookup_lock);
- spin_unlock(&sbi->fs_lock);
- dput(ino->dentry);
-
- return;
-}
-
-static void autofs4_revalidate_drop(struct dentry *dentry,
- struct rehash_entry *entry)
-{
- struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
- /*
- * Add to the active list so we can pick this up in
- * ->lookup(). Also add an entry to a rehash list so
- * we know when there are no dentrys in flight so we
- * know when we can rehash the dentry.
- */
- spin_lock(&sbi->lookup_lock);
- if (list_empty(&ino->active))
- list_add(&ino->active, &sbi->active_list);
- autofs4_add_rehash_entry(ino, entry);
- spin_unlock(&sbi->lookup_lock);
- if (!(ino->flags & AUTOFS_INF_REHASH)) {
- ino->flags |= AUTOFS_INF_REHASH;
- dget(dentry);
- spin_lock(&dentry->d_lock);
- __d_drop(dentry);
- spin_unlock(&dentry->d_lock);
- }
- return;
-}
-
-static void autofs4_revalidate_rehash(struct dentry *dentry)
-{
- struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
- if (ino->flags & AUTOFS_INF_REHASH) {
- spin_lock(&sbi->lookup_lock);
- autofs4_remove_rehash_entry(ino);
- if (list_empty(&ino->rehash_list)) {
- spin_unlock(&sbi->lookup_lock);
- ino->flags &= ~AUTOFS_INF_REHASH;
- d_rehash(dentry);
- dput(ino->dentry);
- } else
- spin_unlock(&sbi->lookup_lock);
- }
- return;
-}
-
static unsigned int autofs4_need_mount(unsigned int flags)
{
unsigned int res = 0;
@@ -236,7 +143,7 @@ out:
return dcache_dir_open(inode, file);
}
-static int try_to_fill_dentry(struct dentry *dentry)
+static int try_to_fill_dentry(struct dentry *dentry, int flags)
{
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
struct autofs_info *ino = autofs4_dentry_ino(dentry);
@@ -249,17 +156,55 @@ static int try_to_fill_dentry(struct dentry *dentry)
* Wait for a pending mount, triggering one if there
* isn't one already
*/
- DPRINTK("waiting for mount name=%.*s",
- dentry->d_name.len, dentry->d_name.name);
+ if (dentry->d_inode == NULL) {
+ DPRINTK("waiting for mount name=%.*s",
+ dentry->d_name.len, dentry->d_name.name);
- status = autofs4_wait(sbi, dentry, NFY_MOUNT);
+ status = autofs4_wait(sbi, dentry, NFY_MOUNT);
- DPRINTK("mount done status=%d", status);
+ DPRINTK("mount done status=%d", status);
- /* Update expiry counter */
- ino->last_used = jiffies;
+ /* Turn this into a real negative dentry? */
+ if (status == -ENOENT) {
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
+ return status;
+ } else if (status) {
+ /* Return a negative dentry, but leave it "pending" */
+ return status;
+ }
+ /* Trigger mount for path component or follow link */
+ } else if (ino->flags & AUTOFS_INF_PENDING ||
+ autofs4_need_mount(flags) ||
+ current->link_count) {
+ DPRINTK("waiting for mount name=%.*s",
+ dentry->d_name.len, dentry->d_name.name);
- return status;
+ spin_lock(&sbi->fs_lock);
+ ino->flags |= AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
+ status = autofs4_wait(sbi, dentry, NFY_MOUNT);
+
+ DPRINTK("mount done status=%d", status);
+
+ if (status) {
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
+ return status;
+ }
+ }
+
+ /* Initialize expiry counter after successful mount */
+ if (ino)
+ ino->last_used = jiffies;
+
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
+
+ return 0;
}
/* For autofs direct mounts the follow link triggers the mount */
@@ -313,16 +258,10 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
*/
if (ino->flags & AUTOFS_INF_PENDING ||
(!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) {
- ino->flags |= AUTOFS_INF_PENDING;
spin_unlock(&dcache_lock);
spin_unlock(&sbi->fs_lock);
- status = try_to_fill_dentry(dentry);
-
- spin_lock(&sbi->fs_lock);
- ino->flags &= ~AUTOFS_INF_PENDING;
- spin_unlock(&sbi->fs_lock);
-
+ status = try_to_fill_dentry(dentry, 0);
if (status)
goto out_error;
@@ -361,47 +300,18 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
{
struct inode *dir = dentry->d_parent->d_inode;
struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
- struct rehash_entry *entry;
+ int oz_mode = autofs4_oz_mode(sbi);
int flags = nd ? nd->flags : 0;
- unsigned int mutex_aquired;
+ int status = 1;
- DPRINTK("name = %.*s oz_mode = %d",
- dentry->d_name.len, dentry->d_name.name, oz_mode);
-
- /* Daemon never causes a mount to trigger */
- if (autofs4_oz_mode(sbi))
- return 1;
-
- entry = kmalloc(sizeof(struct rehash_entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- mutex_aquired = mutex_trylock(&dir->i_mutex);
-
- spin_lock(&sbi->fs_lock);
- spin_lock(&dcache_lock);
/* Pending dentry */
+ spin_lock(&sbi->fs_lock);
if (autofs4_ispending(dentry)) {
- int status;
-
- /*
- * We can only unhash and send this to ->lookup() if
- * the directory mutex is held over d_revalidate() and
- * ->lookup(). This prevents the VFS from incorrectly
- * seeing the dentry as non-existent.
- */
- ino->flags |= AUTOFS_INF_PENDING;
- if (!mutex_aquired) {
- autofs4_revalidate_drop(dentry, entry);
- spin_unlock(&dcache_lock);
- spin_unlock(&sbi->fs_lock);
- return 0;
- }
- spin_unlock(&dcache_lock);
+ /* The daemon never causes a mount to trigger */
spin_unlock(&sbi->fs_lock);
- mutex_unlock(&dir->i_mutex);
- kfree(entry);
+
+ if (oz_mode)
+ return 1;
/*
* If the directory has gone away due to an expire
@@ -415,82 +325,45 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
* A zero status is success otherwise we have a
* negative error code.
*/
- status = try_to_fill_dentry(dentry);
-
- spin_lock(&sbi->fs_lock);
- ino->flags &= ~AUTOFS_INF_PENDING;
- spin_unlock(&sbi->fs_lock);
-
+ status = try_to_fill_dentry(dentry, flags);
if (status == 0)
return 1;
return status;
}
+ spin_unlock(&sbi->fs_lock);
+
+ /* Negative dentry.. invalidate if "old" */
+ if (dentry->d_inode == NULL)
+ return 0;
/* Check for a non-mountpoint directory with no contents */
+ spin_lock(&dcache_lock);
if (S_ISDIR(dentry->d_inode->i_mode) &&
!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
DPRINTK("dentry=%p %.*s, emptydir",
dentry, dentry->d_name.len, dentry->d_name.name);
+ spin_unlock(&dcache_lock);
- if (autofs4_need_mount(flags) || current->link_count) {
- int status;
-
- /*
- * We can only unhash and send this to ->lookup() if
- * the directory mutex is held over d_revalidate() and
- * ->lookup(). This prevents the VFS from incorrectly
- * seeing the dentry as non-existent.
- */
- ino->flags |= AUTOFS_INF_PENDING;
- if (!mutex_aquired) {
- autofs4_revalidate_drop(dentry, entry);
- spin_unlock(&dcache_lock);
- spin_unlock(&sbi->fs_lock);
- return 0;
- }
- spin_unlock(&dcache_lock);
- spin_unlock(&sbi->fs_lock);
- mutex_unlock(&dir->i_mutex);
- kfree(entry);
-
- /*
- * A zero status is success otherwise we have a
- * negative error code.
- */
- status = try_to_fill_dentry(dentry);
-
- spin_lock(&sbi->fs_lock);
- ino->flags &= ~AUTOFS_INF_PENDING;
- spin_unlock(&sbi->fs_lock);
+ /* The daemon never causes a mount to trigger */
+ if (oz_mode)
+ return 1;
- if (status == 0)
- return 1;
+ /*
+ * A zero status is success; otherwise we have a
+ * negative error code.
+ */
+ status = try_to_fill_dentry(dentry, flags);
+ if (status == 0)
+ return 1;
- return status;
- }
+ return status;
}
spin_unlock(&dcache_lock);
- spin_unlock(&sbi->fs_lock);
-
- if (mutex_aquired)
- mutex_unlock(&dir->i_mutex);
-
- kfree(entry);
return 1;
}
-static void autofs4_free_rehash_entrys(struct autofs_info *inf)
-{
- struct list_head *head = &inf->rehash_list;
- struct rehash_entry *entry, *next;
- list_for_each_entry_safe(entry, next, head, list) {
- list_del(&entry->list);
- kfree(entry);
- }
-}
-
void autofs4_dentry_release(struct dentry *de)
{
struct autofs_info *inf;
@@ -509,8 +382,6 @@ void autofs4_dentry_release(struct dentry *de)
list_del(&inf->active);
if (!list_empty(&inf->expiring))
list_del(&inf->expiring);
- if (!list_empty(&inf->rehash_list))
- autofs4_free_rehash_entrys(inf);
spin_unlock(&sbi->lookup_lock);
}
@@ -543,7 +414,6 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
const unsigned char *str = name->name;
struct list_head *p, *head;
-restart:
spin_lock(&dcache_lock);
spin_lock(&sbi->lookup_lock);
head = &sbi->active_list;
@@ -561,19 +431,6 @@ restart:
if (atomic_read(&active->d_count) == 0)
goto next;
- if (active->d_inode && IS_DEADDIR(active->d_inode)) {
- if (!list_empty(&ino->rehash_list)) {
- dget(active);
- spin_unlock(&active->d_lock);
- spin_unlock(&sbi->lookup_lock);
- spin_unlock(&dcache_lock);
- autofs4_remove_rehash_entrys(ino);
- dput(active);
- goto restart;
- }
- goto next;
- }
-
qstr = &active->d_name;
if (active->d_name.hash != hash)
@@ -586,11 +443,13 @@ restart:
if (memcmp(qstr->name, str, len))
goto next;
- dget(active);
- spin_unlock(&active->d_lock);
- spin_unlock(&sbi->lookup_lock);
- spin_unlock(&dcache_lock);
- return active;
+ if (d_unhashed(active)) {
+ dget(active);
+ spin_unlock(&active->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ spin_unlock(&dcache_lock);
+ return active;
+ }
next:
spin_unlock(&active->d_lock);
}
@@ -639,11 +498,13 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
if (memcmp(qstr->name, str, len))
goto next;
- dget(expiring);
- spin_unlock(&expiring->d_lock);
- spin_unlock(&sbi->lookup_lock);
- spin_unlock(&dcache_lock);
- return expiring;
+ if (d_unhashed(expiring)) {
+ dget(expiring);
+ spin_unlock(&expiring->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ spin_unlock(&dcache_lock);
+ return expiring;
+ }
next:
spin_unlock(&expiring->d_lock);
}
@@ -653,48 +514,6 @@ next:
return NULL;
}
-static struct autofs_info *init_new_dentry(struct autofs_sb_info *sbi,
- struct dentry *dentry, int oz_mode)
-{
- struct autofs_info *ino;
-
- /*
- * Mark the dentry incomplete but don't hash it. We do this
- * to serialize our inode creation operations (symlink and
- * mkdir) which prevents deadlock during the callback to
- * the daemon. Subsequent user space lookups for the same
- * dentry are placed on the wait queue while the daemon
- * itself is allowed passage unresticted so the create
- * operation itself can then hash the dentry. Finally,
- * we check for the hashed dentry and return the newly
- * hashed dentry.
- */
- dentry->d_op = &autofs4_root_dentry_operations;
-
- /*
- * And we need to ensure that the same dentry is used for
- * all following lookup calls until it is hashed so that
- * the dentry flags are persistent throughout the request.
- */
- ino = autofs4_init_ino(NULL, sbi, 0555);
- if (!ino)
- return ERR_PTR(-ENOMEM);
-
- dentry->d_fsdata = ino;
- ino->dentry = dentry;
-
- /*
- * Only set the mount pending flag for new dentrys not created
- * by the daemon.
- */
- if (!oz_mode)
- ino->flags |= AUTOFS_INF_PENDING;
-
- d_instantiate(dentry, NULL);
-
- return ino;
-}
-
/* Lookups in the root directory */
static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
@@ -702,7 +521,6 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
struct autofs_info *ino;
struct dentry *expiring, *active;
int oz_mode;
- int status = 0;
DPRINTK("name = %.*s",
dentry->d_name.len, dentry->d_name.name);
@@ -717,26 +535,44 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d",
current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode);
- spin_lock(&sbi->fs_lock);
active = autofs4_lookup_active(dentry);
if (active) {
dentry = active;
ino = autofs4_dentry_ino(dentry);
- /* If this came from revalidate, rehash it */
- autofs4_revalidate_rehash(dentry);
- spin_unlock(&sbi->fs_lock);
} else {
- spin_unlock(&sbi->fs_lock);
- ino = init_new_dentry(sbi, dentry, oz_mode);
- if (IS_ERR(ino))
- return (struct dentry *) ino;
- }
+ /*
+ * Mark the dentry incomplete but don't hash it. We do this
+ * to serialize our inode creation operations (symlink and
+ * mkdir) which prevents deadlock during the callback to
+ * the daemon. Subsequent user space lookups for the same
+ * dentry are placed on the wait queue while the daemon
+ * itself is allowed passage unrestricted so the create
+ * operation itself can then hash the dentry. Finally,
+ * we check for the hashed dentry and return the newly
+ * hashed dentry.
+ */
+ dentry->d_op = &autofs4_root_dentry_operations;
+
+ /*
+ * And we need to ensure that the same dentry is used for
+ * all following lookup calls until it is hashed so that
+ * the dentry flags are persistent throughout the request.
+ */
+ ino = autofs4_init_ino(NULL, sbi, 0555);
+ if (!ino)
+ return ERR_PTR(-ENOMEM);
- autofs4_add_active(dentry);
+ dentry->d_fsdata = ino;
+ ino->dentry = dentry;
+
+ autofs4_add_active(dentry);
+
+ d_instantiate(dentry, NULL);
+ }
if (!oz_mode) {
- expiring = autofs4_lookup_expiring(dentry);
mutex_unlock(&dir->i_mutex);
+ expiring = autofs4_lookup_expiring(dentry);
if (expiring) {
/*
* If we are racing with expire the request might not
@@ -744,22 +580,23 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
* so it must have been successful, so just wait for it.
*/
autofs4_expire_wait(expiring);
+ autofs4_del_expiring(expiring);
dput(expiring);
}
- status = try_to_fill_dentry(dentry);
- mutex_lock(&dir->i_mutex);
+
spin_lock(&sbi->fs_lock);
- ino->flags &= ~AUTOFS_INF_PENDING;
+ ino->flags |= AUTOFS_INF_PENDING;
spin_unlock(&sbi->fs_lock);
+ if (dentry->d_op && dentry->d_op->d_revalidate)
+ (dentry->d_op->d_revalidate)(dentry, nd);
+ mutex_lock(&dir->i_mutex);
}
- autofs4_del_active(dentry);
-
/*
- * If we had a mount fail, check if we had to handle
+ * If we are still pending, check if we had to handle
* a signal. If so we can force a restart..
*/
- if (status) {
+ if (ino->flags & AUTOFS_INF_PENDING) {
/* See if we were interrupted */
if (signal_pending(current)) {
sigset_t *sigset = &current->pending.signal;
@@ -771,46 +608,43 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
return ERR_PTR(-ERESTARTNOINTR);
}
}
- }
-
- /*
- * User space can (and has done in the past) remove and re-create
- * this directory during the callback. This can leave us with an
- * unhashed dentry, but a successful mount! So we need to
- * perform another cached lookup in case the dentry now exists.
- */
- if (!oz_mode && !have_submounts(dentry)) {
- struct dentry *new;
- new = d_lookup(dentry->d_parent, &dentry->d_name);
- if (new) {
- if (active)
- dput(active);
- return new;
- } else {
- if (!status)
- status = -ENOENT;
+ if (!oz_mode) {
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
}
}
/*
- * If we had a mount failure, return status to user space.
- * If the mount succeeded and we used a dentry from the active queue
- * return it.
+ * If this dentry is unhashed, then we shouldn't honour this
+ * lookup. Returning ENOENT here doesn't do the right thing
+ * for all system calls, but it should be OK for the operations
+ * we permit from an autofs.
*/
- if (status) {
- dentry = ERR_PTR(status);
- if (active)
- dput(active);
- return dentry;
- } else {
+ if (!oz_mode && d_unhashed(dentry)) {
/*
- * Valid successful mount, return active dentry or NULL
- * for a new dentry.
+ * A user space application can (and has done in the past)
+ * remove and re-create this directory during the callback.
+ * This can leave us with an unhashed dentry, but a
+ * successful mount! So we need to perform another
+ * cached lookup in case the dentry now exists.
*/
+ struct dentry *parent = dentry->d_parent;
+ struct dentry *new = d_lookup(parent, &dentry->d_name);
+ if (new != NULL)
+ dentry = new;
+ else
+ dentry = ERR_PTR(-ENOENT);
+
if (active)
- return active;
+ dput(active);
+
+ return dentry;
}
+ if (active)
+ return active;
+
return NULL;
}
@@ -834,6 +668,8 @@ static int autofs4_dir_symlink(struct inode *dir,
if (!ino)
return -ENOMEM;
+ autofs4_del_active(dentry);
+
ino->size = strlen(symname);
cp = kmalloc(ino->size + 1, GFP_KERNEL);
if (!cp) {
@@ -910,6 +746,7 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
dir->i_mtime = CURRENT_TIME;
spin_lock(&dcache_lock);
+ autofs4_add_expiring(dentry);
spin_lock(&dentry->d_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
@@ -935,6 +772,7 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
spin_unlock(&dcache_lock);
return -ENOTEMPTY;
}
+ autofs4_add_expiring(dentry);
spin_lock(&dentry->d_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
@@ -972,6 +810,8 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (!ino)
return -ENOMEM;
+ autofs4_del_active(dentry);
+
inode = autofs4_get_inode(dir->i_sb, ino);
if (!inode) {
if (!dentry->d_fsdata)
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 8f3d9fd..f22a7d3 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -15,6 +15,7 @@
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
+#include <linux/writeback.h>
#include <asm/uaccess.h>
#include "bfs.h"
@@ -98,7 +99,7 @@ error:
return ERR_PTR(-EIO);
}
-static int bfs_write_inode(struct inode *inode, int wait)
+static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct bfs_sb_info *info = BFS_SB(inode->i_sb);
unsigned int ino = (u16)inode->i_ino;
@@ -147,7 +148,7 @@ static int bfs_write_inode(struct inode *inode, int wait)
di->i_eoffset = cpu_to_le32(i_sblock * BFS_BSIZE + inode->i_size - 1);
mark_buffer_dirty(bh);
- if (wait) {
+ if (wbc->sync_mode == WB_SYNC_ALL) {
sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh))
err = -EIO;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2aa8ec6..8b5cfdd 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2326,7 +2326,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_delete_inode(struct inode *inode);
void btrfs_put_inode(struct inode *inode);
-int btrfs_write_inode(struct inode *inode, int wait);
+int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
void btrfs_dirty_inode(struct inode *inode);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 4deb280..c41db6d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3968,7 +3968,7 @@ err:
return ret;
}
-int btrfs_write_inode(struct inode *inode, int wait)
+int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
@@ -3977,7 +3977,7 @@ int btrfs_write_inode(struct inode *inode, int wait)
if (root->fs_info->btree_inode == inode)
return 0;
- if (wait) {
+ if (wbc->sync_mode == WB_SYNC_ALL) {
trans = btrfs_join_transaction(root, 1);
btrfs_set_trans_block_group(trans, inode);
ret = btrfs_commit_transaction(trans, root);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 49503d2..bc0025c 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,6 +1,7 @@
Version 1.62
------------
-Add sockopt=TCP_NODELAY mount option.
+Add sockopt=TCP_NODELAY mount option. EA (xattr) routines hardened
+to more strictly handle corrupt frames.
Version 1.61
------------
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index ed751bb..a1c817e 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -205,7 +205,7 @@ struct cifsUidInfo {
struct cifsSesInfo {
struct list_head smb_ses_list;
struct list_head tcon_list;
- struct semaphore sesSem;
+ struct mutex session_mutex;
#if 0
struct cifsUidInfo *uidInfo; /* pointer to user info */
#endif
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 3877737..14d036d 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -415,10 +415,10 @@ struct smb_hdr {
__u8 WordCount;
} __attribute__((packed));
/* given a pointer to an smb_hdr retrieve the value of byte count */
-#define BCC(smb_var) (*(__u16 *)((char *)smb_var + sizeof(struct smb_hdr) + (2 * smb_var->WordCount)))
-#define BCC_LE(smb_var) (*(__le16 *)((char *)smb_var + sizeof(struct smb_hdr) + (2 * smb_var->WordCount)))
+#define BCC(smb_var) (*(__u16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
+#define BCC_LE(smb_var) (*(__le16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
/* given a pointer to an smb_hdr retrieve the pointer to the byte area */
-#define pByteArea(smb_var) ((unsigned char *)smb_var + sizeof(struct smb_hdr) + (2 * smb_var->WordCount) + 2)
+#define pByteArea(smb_var) ((unsigned char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount) + 2)
/*
* Computer Name Length (since Netbios name was length 16 with last byte 0x20)
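
Parenthesising smb_var in BCC()/BCC_LE()/pByteArea() is standard macro hygiene: without it, passing anything other than a plain identifier binds to the cast and the arithmetic at the wrong precedence. A minimal demonstration of the failure mode:

#include <stdio.h>

#define DOUBLE_BAD(x)	(x * 2)
#define DOUBLE_GOOD(x)	((x) * 2)

int main(void)
{
	/* DOUBLE_BAD(1 + 2) expands to (1 + 2 * 2) == 5, not 6 */
	printf("%d %d\n", DOUBLE_BAD(1 + 2), DOUBLE_GOOD(1 + 2));
	return 0;
}
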
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 5646727..88e2bc4 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -363,13 +363,10 @@ extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
__u32 filter, struct file *file, int multishot,
const struct nls_table *nls_codepage);
extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName, char *EAData,
+ const unsigned char *searchName,
+ const unsigned char *ea_name, char *EAData,
size_t bufsize, const struct nls_table *nls_codepage,
int remap_special_chars);
-extern ssize_t CIFSSMBQueryEA(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName, const unsigned char *ea_name,
- unsigned char *ea_value, size_t buf_size,
- const struct nls_table *nls_codepage, int remap_special_chars);
extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
const char *fileName, const char *ea_name,
const void *ea_value, const __u16 ea_value_len,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 941441d..9d17df3 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -170,19 +170,19 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
* need to prevent multiple threads trying to simultaneously
* reconnect the same SMB session
*/
- down(&ses->sesSem);
+ mutex_lock(&ses->session_mutex);
if (ses->need_reconnect)
rc = cifs_setup_session(0, ses, nls_codepage);
/* do we need to reconnect tcon? */
if (rc || !tcon->need_reconnect) {
- up(&ses->sesSem);
+ mutex_unlock(&ses->session_mutex);
goto out;
}
mark_open_files_invalid(tcon);
rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage);
- up(&ses->sesSem);
+ mutex_unlock(&ses->session_mutex);
cFYI(1, ("reconnect tcon rc = %d", rc));
if (rc)
@@ -700,13 +700,13 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
if (!ses || !ses->server)
return -EIO;
- down(&ses->sesSem);
+ mutex_lock(&ses->session_mutex);
if (ses->need_reconnect)
goto session_already_dead; /* no need to send SMBlogoff if uid
already closed due to reconnect */
rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB);
if (rc) {
- up(&ses->sesSem);
+ mutex_unlock(&ses->session_mutex);
return rc;
}
@@ -721,7 +721,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
pSMB->AndXCommand = 0xFF;
rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0);
session_already_dead:
- up(&ses->sesSem);
+ mutex_unlock(&ses->session_mutex);
/* if session dead then we do not need to do ulogoff,
since server closed smb session, no sense reporting
@@ -5269,22 +5269,34 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
cifs_buf_release(pSMB);
return rc;
}
+
#ifdef CONFIG_CIFS_XATTR
+/*
+ * Do a path-based QUERY_ALL_EAS call and parse the result. This is a common
+ * function used by listxattr and getxattr type calls. When ea_name is set,
+ * it looks for that attribute name and stuffs that value into the EAData
+ * buffer. When ea_name is NULL, it stuffs a list of attribute names into the
+ * buffer. In both cases, the return value is either the length of the
+ * resulting data or a negative error code. If EAData is a NULL pointer then
+ * the data isn't copied to it, but the length is returned.
+ */
ssize_t
CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName,
- char *EAData, size_t buf_size,
- const struct nls_table *nls_codepage, int remap)
+ const unsigned char *searchName, const unsigned char *ea_name,
+ char *EAData, size_t buf_size,
+ const struct nls_table *nls_codepage, int remap)
{
/* BB assumes one setup word */
TRANSACTION2_QPI_REQ *pSMB = NULL;
TRANSACTION2_QPI_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned;
- int name_len;
+ int list_len;
+ struct fealist *ea_response_data;
struct fea *temp_fea;
char *temp_ptr;
- __u16 params, byte_count;
+ char *end_of_smb;
+ __u16 params, byte_count, data_offset;
cFYI(1, ("In Query All EAs path %s", searchName));
QAllEAsRetry:
@@ -5294,22 +5306,22 @@ QAllEAsRetry:
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len =
+ list_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
- name_len++; /* trailing null */
- name_len *= 2;
+ list_len++; /* trailing null */
+ list_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ list_len = strnlen(searchName, PATH_MAX);
+ list_len++; /* trailing null */
+ strncpy(pSMB->FileName, searchName, list_len);
}
- params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
+ params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max SMB PDU from sess structure BB */
- pSMB->MaxDataCount = cpu_to_le16(4000);
+ pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
@@ -5334,237 +5346,117 @@ QAllEAsRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, ("Send error in QueryAllEAs = %d", rc));
- } else { /* decode response */
- rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+ goto QAllEAsOut;
+ }
- /* BB also check enough total bytes returned */
- /* BB we need to improve the validity checking
- of these trans2 responses */
- if (rc || (pSMBr->ByteCount < 4))
- rc = -EIO; /* bad smb */
- /* else if (pFindData){
- memcpy((char *) pFindData,
- (char *) &pSMBr->hdr.Protocol +
- data_offset, kl);
- }*/ else {
- /* check that length of list is not more than bcc */
- /* check that each entry does not go beyond length
- of list */
- /* check that each element of each entry does not
- go beyond end of list */
- __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- struct fealist *ea_response_data;
- rc = 0;
- /* validate_trans2_offsets() */
- /* BB check if start of smb + data_offset > &bcc+ bcc */
- ea_response_data = (struct fealist *)
- (((char *) &pSMBr->hdr.Protocol) +
- data_offset);
- name_len = le32_to_cpu(ea_response_data->list_len);
- cFYI(1, ("ea length %d", name_len));
- if (name_len <= 8) {
- /* returned EA size zeroed at top of function */
- cFYI(1, ("empty EA list returned from server"));
- } else {
- /* account for ea list len */
- name_len -= 4;
- temp_fea = ea_response_data->list;
- temp_ptr = (char *)temp_fea;
- while (name_len > 0) {
- __u16 value_len;
- name_len -= 4;
- temp_ptr += 4;
- rc += temp_fea->name_len;
- /* account for prefix user. and trailing null */
- rc = rc + 5 + 1;
- if (rc < (int)buf_size) {
- memcpy(EAData, "user.", 5);
- EAData += 5;
- memcpy(EAData, temp_ptr,
- temp_fea->name_len);
- EAData += temp_fea->name_len;
- /* null terminate name */
- *EAData = 0;
- EAData = EAData + 1;
- } else if (buf_size == 0) {
- /* skip copy - calc size only */
- } else {
- /* stop before overrun buffer */
- rc = -ERANGE;
- break;
- }
- name_len -= temp_fea->name_len;
- temp_ptr += temp_fea->name_len;
- /* account for trailing null */
- name_len--;
- temp_ptr++;
- value_len =
- le16_to_cpu(temp_fea->value_len);
- name_len -= value_len;
- temp_ptr += value_len;
- /* BB check that temp_ptr is still
- within the SMB BB*/
-
- /* no trailing null to account for
- in value len */
- /* go on to next EA */
- temp_fea = (struct fea *)temp_ptr;
- }
- }
- }
+
+ /* BB also check enough total bytes returned */
+ /* BB we need to improve the validity checking
+ of these trans2 responses */
+
+ rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+ if (rc || (pSMBr->ByteCount < 4)) {
+ rc = -EIO; /* bad smb */
+ goto QAllEAsOut;
}
- cifs_buf_release(pSMB);
- if (rc == -EAGAIN)
- goto QAllEAsRetry;
- return (ssize_t)rc;
-}
+ /* check that length of list is not more than bcc */
+ /* check that each entry does not go beyond length
+ of list */
+ /* check that each element of each entry does not
+ go beyond end of list */
+ /* validate_trans2_offsets() */
+ /* BB check if start of smb + data_offset > &bcc+ bcc */
-ssize_t CIFSSMBQueryEA(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName, const unsigned char *ea_name,
- unsigned char *ea_value, size_t buf_size,
- const struct nls_table *nls_codepage, int remap)
-{
- TRANSACTION2_QPI_REQ *pSMB = NULL;
- TRANSACTION2_QPI_RSP *pSMBr = NULL;
- int rc = 0;
- int bytes_returned;
- int name_len;
- struct fea *temp_fea;
- char *temp_ptr;
- __u16 params, byte_count;
+ data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+ ea_response_data = (struct fealist *)
+ (((char *) &pSMBr->hdr.Protocol) + data_offset);
- cFYI(1, ("In Query EA path %s", searchName));
-QEARetry:
- rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
- if (rc)
- return rc;
+ list_len = le32_to_cpu(ea_response_data->list_len);
+ cFYI(1, ("ea length %d", list_len));
+ if (list_len <= 8) {
+ cFYI(1, ("empty EA list returned from server"));
+ goto QAllEAsOut;
+ }
- if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len =
- cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
- PATH_MAX, nls_codepage, remap);
- name_len++; /* trailing null */
- name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ /* make sure list_len doesn't go past end of SMB */
+ end_of_smb = (char *)pByteArea(&pSMBr->hdr) + BCC(&pSMBr->hdr);
+ if ((char *)ea_response_data + list_len > end_of_smb) {
+ cFYI(1, ("EA list appears to go beyond SMB"));
+ rc = -EIO;
+ goto QAllEAsOut;
}
- params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
- pSMB->TotalDataCount = 0;
- pSMB->MaxParameterCount = cpu_to_le16(2);
- /* BB find exact max SMB PDU from sess structure BB */
- pSMB->MaxDataCount = cpu_to_le16(4000);
- pSMB->MaxSetupCount = 0;
- pSMB->Reserved = 0;
- pSMB->Flags = 0;
- pSMB->Timeout = 0;
- pSMB->Reserved2 = 0;
- pSMB->ParameterOffset = cpu_to_le16(offsetof(
- struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
- pSMB->DataCount = 0;
- pSMB->DataOffset = 0;
- pSMB->SetupCount = 1;
- pSMB->Reserved3 = 0;
- pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
- byte_count = params + 1 /* pad */ ;
- pSMB->TotalParameterCount = cpu_to_le16(params);
- pSMB->ParameterCount = pSMB->TotalParameterCount;
- pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS);
- pSMB->Reserved4 = 0;
- pSMB->hdr.smb_buf_length += byte_count;
- pSMB->ByteCount = cpu_to_le16(byte_count);
+ /* account for ea list len */
+ list_len -= 4;
+ temp_fea = ea_response_data->list;
+ temp_ptr = (char *)temp_fea;
+ while (list_len > 0) {
+ unsigned int name_len;
+ __u16 value_len;
+
+ list_len -= 4;
+ temp_ptr += 4;
+ /* make sure we can read name_len and value_len */
+ if (list_len < 0) {
+ cFYI(1, ("EA entry goes beyond length of list"));
+ rc = -EIO;
+ goto QAllEAsOut;
+ }
- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
- cFYI(1, ("Send error in Query EA = %d", rc));
- } else { /* decode response */
- rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+ name_len = temp_fea->name_len;
+ value_len = le16_to_cpu(temp_fea->value_len);
+ list_len -= name_len + 1 + value_len;
+ if (list_len < 0) {
+ cFYI(1, ("EA entry goes beyond length of list"));
+ rc = -EIO;
+ goto QAllEAsOut;
+ }
- /* BB also check enough total bytes returned */
- /* BB we need to improve the validity checking
- of these trans2 responses */
- if (rc || (pSMBr->ByteCount < 4))
- rc = -EIO; /* bad smb */
- /* else if (pFindData){
- memcpy((char *) pFindData,
- (char *) &pSMBr->hdr.Protocol +
- data_offset, kl);
- }*/ else {
- /* check that length of list is not more than bcc */
- /* check that each entry does not go beyond length
- of list */
- /* check that each element of each entry does not
- go beyond end of list */
- __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- struct fealist *ea_response_data;
- rc = -ENODATA;
- /* validate_trans2_offsets() */
- /* BB check if start of smb + data_offset > &bcc+ bcc*/
- ea_response_data = (struct fealist *)
- (((char *) &pSMBr->hdr.Protocol) +
- data_offset);
- name_len = le32_to_cpu(ea_response_data->list_len);
- cFYI(1, ("ea length %d", name_len));
- if (name_len <= 8) {
- /* returned EA size zeroed at top of function */
- cFYI(1, ("empty EA list returned from server"));
- } else {
- /* account for ea list len */
- name_len -= 4;
- temp_fea = ea_response_data->list;
- temp_ptr = (char *)temp_fea;
- /* loop through checking if we have a matching
- name and then return the associated value */
- while (name_len > 0) {
- __u16 value_len;
- name_len -= 4;
- temp_ptr += 4;
- value_len =
- le16_to_cpu(temp_fea->value_len);
- /* BB validate that value_len falls within SMB,
- even though maximum for name_len is 255 */
- if (memcmp(temp_fea->name, ea_name,
- temp_fea->name_len) == 0) {
- /* found a match */
- rc = value_len;
- /* account for prefix user. and trailing null */
- if (rc <= (int)buf_size) {
- memcpy(ea_value,
- temp_fea->name+temp_fea->name_len+1,
- rc);
- /* ea values, unlike ea
- names, are not null
- terminated */
- } else if (buf_size == 0) {
- /* skip copy - calc size only */
- } else {
- /* stop before overrun buffer */
- rc = -ERANGE;
- }
- break;
- }
- name_len -= temp_fea->name_len;
- temp_ptr += temp_fea->name_len;
- /* account for trailing null */
- name_len--;
- temp_ptr++;
- name_len -= value_len;
- temp_ptr += value_len;
- /* No trailing null to account for in
- value_len. Go on to next EA */
- temp_fea = (struct fea *)temp_ptr;
+ if (ea_name) {
+ if (strncmp(ea_name, temp_ptr, name_len) == 0) {
+ temp_ptr += name_len + 1;
+ rc = value_len;
+ if (buf_size == 0)
+ goto QAllEAsOut;
+ if ((size_t)value_len > buf_size) {
+ rc = -ERANGE;
+ goto QAllEAsOut;
}
+ memcpy(EAData, temp_ptr, value_len);
+ goto QAllEAsOut;
+ }
+ } else {
+ /* account for prefix user. and trailing null */
+ rc += (5 + 1 + name_len);
+ if (rc < (int) buf_size) {
+ memcpy(EAData, "user.", 5);
+ EAData += 5;
+ memcpy(EAData, temp_ptr, name_len);
+ EAData += name_len;
+ /* null terminate name */
+ *EAData = 0;
+ ++EAData;
+ } else if (buf_size == 0) {
+ /* skip copy - calc size only */
+ } else {
+ /* stop before overrun buffer */
+ rc = -ERANGE;
+ break;
}
}
+ temp_ptr += name_len + 1 + value_len;
+ temp_fea = (struct fea *)temp_ptr;
}
+
+ /* didn't find the named attribute */
+ if (ea_name)
+ rc = -ENODATA;
+
+QAllEAsOut:
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
- goto QEARetry;
+ goto QAllEAsRetry;
return (ssize_t)rc;
}
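
The rewritten loop above is the hardening promised by the CHANGES entry: every length field taken from the wire is subtracted from the remaining list length and validated before the bytes it covers are read. The same pattern in a self-contained form (header layout simplified from struct fea; alignment and endianness are glossed over):

#include <stdio.h>

/* simplified 4-byte wire header: flags, name_len, 16-bit value_len */
struct fea_hdr {
	unsigned char flags;
	unsigned char name_len;
	unsigned short value_len;
};

static int walk_ea_list(const char *p, int list_len)
{
	while (list_len > 0) {
		const struct fea_hdr *hdr = (const struct fea_hdr *)p;

		list_len -= (int)sizeof(*hdr);
		p += sizeof(*hdr);
		if (list_len < 0)
			return -1;	/* header ran past end of list */

		list_len -= hdr->name_len + 1 + hdr->value_len;
		if (list_len < 0)
			return -1;	/* entry body ran past end of list */

		printf("ea: %.*s (%u value bytes)\n",
		       hdr->name_len, p, (unsigned)hdr->value_len);
		p += hdr->name_len + 1 + hdr->value_len;
	}
	return 0;
}

int main(void)
{
	/* one entry: name "user", its NUL terminator, no value */
	const char buf[] = { 0, 4, 0, 0, 'u', 's', 'e', 'r', 0 };

	return walk_ea_list(buf, (int)sizeof(buf));
}
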
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2e9e09c..45eb6cb 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2388,13 +2388,13 @@ try_mount_again:
*/
cifs_put_tcp_session(srvTcp);
- down(&pSesInfo->sesSem);
+ mutex_lock(&pSesInfo->session_mutex);
if (pSesInfo->need_reconnect) {
cFYI(1, ("Session needs reconnect"));
rc = cifs_setup_session(xid, pSesInfo,
cifs_sb->local_nls);
}
- up(&pSesInfo->sesSem);
+ mutex_unlock(&pSesInfo->session_mutex);
} else if (!rc) {
cFYI(1, ("Existing smb sess not found"));
pSesInfo = sesInfoAlloc();
@@ -2437,12 +2437,12 @@ try_mount_again:
}
pSesInfo->linux_uid = volume_info->linux_uid;
pSesInfo->overrideSecFlg = volume_info->secFlg;
- down(&pSesInfo->sesSem);
+ mutex_lock(&pSesInfo->session_mutex);
/* BB FIXME need to pass vol->secFlgs BB */
rc = cifs_setup_session(xid, pSesInfo,
cifs_sb->local_nls);
- up(&pSesInfo->sesSem);
+ mutex_unlock(&pSesInfo->session_mutex);
}
/* search for existing tcon to this server share */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 057e1da..3d8f8a9 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2289,9 +2289,9 @@ cifs_oplock_break(struct slow_work *work)
if (inode && S_ISREG(inode->i_mode)) {
#ifdef CONFIG_CIFS_EXPERIMENTAL
if (cinode->clientCanCacheAll == 0)
- break_lease(inode, FMODE_READ);
+ break_lease(inode, O_RDONLY);
else if (cinode->clientCanCacheRead == 0)
- break_lease(inode, FMODE_WRITE);
+ break_lease(inode, O_WRONLY);
#endif
rc = filemap_fdatawrite(inode->i_mapping);
if (cinode->clientCanCacheRead == 0) {
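
The break_lease() change is subtler than it looks: the argument is interpreted as O_* open flags, not FMODE_* bits, and the two encodings collide numerically (FMODE_READ == O_WRONLY, FMODE_WRITE == O_RDWR), so the old code requested lease breaks in exactly the wrong direction. A standalone check of the values (the FMODE_* constants are copied from their Linux definitions):

#include <stdio.h>
#include <fcntl.h>

#define FMODE_READ	0x1	/* as in <linux/fs.h> */
#define FMODE_WRITE	0x2

int main(void)
{
	printf("FMODE_READ=%#x  O_WRONLY=%#x\n", FMODE_READ, O_WRONLY);
	printf("FMODE_WRITE=%#x O_RDWR=%#x\n", FMODE_WRITE, O_RDWR);
	return 0;
}
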
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index e3fda97..8bdbc81 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -111,6 +111,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;
+ cifs_i->server_eof = fattr->cf_eof;
/*
* Can't safely change the file size here if the client is writing to
* it due to potential races.
@@ -366,7 +367,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
char ea_value[4];
__u32 mode;
- rc = CIFSSMBQueryEA(xid, cifs_sb->tcon, path, "SETFILEBITS",
+ rc = CIFSSMBQAllEAs(xid, cifs_sb->tcon, path, "SETFILEBITS",
ea_value, 4 /* size of buf */, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index d27d4ec..d147499 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -79,7 +79,7 @@ sesInfoAlloc(void)
++ret_buf->ses_count;
INIT_LIST_HEAD(&ret_buf->smb_ses_list);
INIT_LIST_HEAD(&ret_buf->tcon_list);
- init_MUTEX(&ret_buf->sesSem);
+ mutex_init(&ret_buf->session_mutex);
}
return ret_buf;
}
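
sesSem was only ever used as a binary lock (paired down()/up()), so converting it to a mutex preserves semantics while gaining lockdep coverage. The conversion pattern, on a hypothetical structure:

#include <linux/mutex.h>

struct example_session {
	struct mutex session_mutex;	/* was: struct semaphore sesSem */
};

static void example_session_init(struct example_session *ses)
{
	mutex_init(&ses->session_mutex);	/* was: init_MUTEX(...) */
}

static void example_session_reconnect(struct example_session *ses)
{
	mutex_lock(&ses->session_mutex);	/* was: down(...) */
	/* ...serialized session setup... */
	mutex_unlock(&ses->session_mutex);	/* was: up(...) */
}
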
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index a75afa3..3e2ef0d 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -244,7 +244,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
/* revalidate/getattr then populate from inode */
} /* BB add else when above is implemented */
ea_name += 5; /* skip past user. prefix */
- rc = CIFSSMBQueryEA(xid, pTcon, full_path, ea_name, ea_value,
+ rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
} else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
@@ -252,7 +252,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
goto get_ea_exit;
ea_name += 4; /* skip past os2. prefix */
- rc = CIFSSMBQueryEA(xid, pTcon, full_path, ea_name, ea_value,
+ rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
} else if (strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
@@ -364,8 +364,8 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
/* if proc/fs/cifs/streamstoxattr is set then
search server for EAs or streams to
returns as xattrs */
- rc = CIFSSMBQAllEAs(xid, pTcon, full_path, data, buf_size,
- cifs_sb->local_nls,
+ rc = CIFSSMBQAllEAs(xid, pTcon, full_path, NULL, data,
+ buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
diff --git a/fs/dcache.c b/fs/dcache.c
index 953173a..f1358e5 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -257,6 +257,7 @@ kill_it:
if (dentry)
goto repeat;
}
+EXPORT_SYMBOL(dput);
/**
* d_invalidate - invalidate a dentry
@@ -314,6 +315,7 @@ int d_invalidate(struct dentry * dentry)
spin_unlock(&dcache_lock);
return 0;
}
+EXPORT_SYMBOL(d_invalidate);
/* This should be called _only_ with dcache_lock held */
@@ -328,6 +330,7 @@ struct dentry * dget_locked(struct dentry *dentry)
{
return __dget_locked(dentry);
}
+EXPORT_SYMBOL(dget_locked);
/**
* d_find_alias - grab a hashed alias of inode
@@ -384,6 +387,7 @@ struct dentry * d_find_alias(struct inode *inode)
}
return de;
}
+EXPORT_SYMBOL(d_find_alias);
/*
* Try to kill dentries associated with this inode.
@@ -408,6 +412,7 @@ restart:
}
spin_unlock(&dcache_lock);
}
+EXPORT_SYMBOL(d_prune_aliases);
/*
* Throw away a dentry - free the inode, dput the parent. This requires that
@@ -610,6 +615,7 @@ void shrink_dcache_sb(struct super_block * sb)
{
__shrink_dcache_sb(sb, NULL, 0);
}
+EXPORT_SYMBOL(shrink_dcache_sb);
/*
* destroy a single subtree of dentries for unmount
@@ -792,6 +798,7 @@ positive:
spin_unlock(&dcache_lock);
return 1;
}
+EXPORT_SYMBOL(have_submounts);
/*
* Search the dentry child list for the specified parent,
@@ -876,6 +883,7 @@ void shrink_dcache_parent(struct dentry * parent)
while ((found = select_parent(parent)) != 0)
__shrink_dcache_sb(sb, &found, 0);
}
+EXPORT_SYMBOL(shrink_dcache_parent);
/*
* Scan `nr' dentries and return the number which remain.
@@ -968,6 +976,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
return dentry;
}
+EXPORT_SYMBOL(d_alloc);
struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
@@ -1012,6 +1021,7 @@ void d_instantiate(struct dentry *entry, struct inode * inode)
spin_unlock(&dcache_lock);
security_d_instantiate(entry, inode);
}
+EXPORT_SYMBOL(d_instantiate);
/**
* d_instantiate_unique - instantiate a non-aliased dentry
@@ -1108,6 +1118,7 @@ struct dentry * d_alloc_root(struct inode * root_inode)
}
return res;
}
+EXPORT_SYMBOL(d_alloc_root);
static inline struct hlist_head *d_hash(struct dentry *parent,
unsigned long hash)
@@ -1211,7 +1222,6 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
spin_unlock(&dcache_lock);
security_d_instantiate(new, inode);
- d_rehash(dentry);
d_move(new, dentry);
iput(inode);
} else {
@@ -1225,6 +1235,7 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
d_add(dentry, inode);
return new;
}
+EXPORT_SYMBOL(d_splice_alias);
/**
* d_add_ci - lookup or allocate new dentry with case-exact name
@@ -1314,6 +1325,7 @@ err_out:
iput(inode);
return ERR_PTR(error);
}
+EXPORT_SYMBOL(d_add_ci);
/**
* d_lookup - search for a dentry
@@ -1357,6 +1369,7 @@ struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
} while (read_seqretry(&rename_lock, seq));
return dentry;
}
+EXPORT_SYMBOL(d_lookup);
struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
{
@@ -1483,6 +1496,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent)
out:
return 0;
}
+EXPORT_SYMBOL(d_validate);
/*
* When a file is deleted, we have two options:
@@ -1528,6 +1542,7 @@ void d_delete(struct dentry * dentry)
fsnotify_nameremove(dentry, isdir);
}
+EXPORT_SYMBOL(d_delete);
static void __d_rehash(struct dentry * entry, struct hlist_head *list)
{
@@ -1556,6 +1571,7 @@ void d_rehash(struct dentry * entry)
spin_unlock(&entry->d_lock);
spin_unlock(&dcache_lock);
}
+EXPORT_SYMBOL(d_rehash);
/*
* When switching names, the actual string doesn't strictly have to
@@ -1702,6 +1718,7 @@ void d_move(struct dentry * dentry, struct dentry * target)
d_move_locked(dentry, target);
spin_unlock(&dcache_lock);
}
+EXPORT_SYMBOL(d_move);
/**
* d_ancestor - search for an ancestor
@@ -1868,6 +1885,7 @@ shouldnt_be_hashed:
spin_unlock(&dcache_lock);
BUG();
}
+EXPORT_SYMBOL_GPL(d_materialise_unique);
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
@@ -2005,6 +2023,7 @@ char *d_path(const struct path *path, char *buf, int buflen)
path_put(&root);
return res;
}
+EXPORT_SYMBOL(d_path);
/*
* Helper function for dentry_operations.d_dname() members
@@ -2171,6 +2190,30 @@ int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
return result;
}
+int path_is_under(struct path *path1, struct path *path2)
+{
+ struct vfsmount *mnt = path1->mnt;
+ struct dentry *dentry = path1->dentry;
+ int res;
+ spin_lock(&vfsmount_lock);
+ if (mnt != path2->mnt) {
+ for (;;) {
+ if (mnt->mnt_parent == mnt) {
+ spin_unlock(&vfsmount_lock);
+ return 0;
+ }
+ if (mnt->mnt_parent == path2->mnt)
+ break;
+ mnt = mnt->mnt_parent;
+ }
+ dentry = mnt->mnt_mountpoint;
+ }
+ res = is_subdir(dentry, path2->dentry);
+ spin_unlock(&vfsmount_lock);
+ return res;
+}
+EXPORT_SYMBOL(path_is_under);
+
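For context, a minimal usage sketch of the new helper (the caller below is hypothetical and not part of this patch; only path_is_under() itself is):

/* Hypothetical caller: fail unless @child lies within the subtree rooted
 * at @root, following mount crossings the way path_is_under() does.
 * Both paths are assumed to be held by the caller for the check.
 */
static int confine_to_subtree(struct path *child, struct path *root)
{
	if (!path_is_under(child, root))
		return -EINVAL;
	return 0;
}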
void d_genocide(struct dentry *root)
{
struct dentry *this_parent = root;
@@ -2228,6 +2271,7 @@ ino_t find_inode_number(struct dentry *dir, struct qstr *name)
}
return ino;
}
+EXPORT_SYMBOL(find_inode_number);
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
@@ -2297,6 +2341,7 @@ static void __init dcache_init(void)
/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
+EXPORT_SYMBOL(names_cachep);
EXPORT_SYMBOL(d_genocide);
@@ -2326,26 +2371,3 @@ void __init vfs_caches_init(unsigned long mempages)
bdev_cache_init();
chrdev_init();
}
-
-EXPORT_SYMBOL(d_alloc);
-EXPORT_SYMBOL(d_alloc_root);
-EXPORT_SYMBOL(d_delete);
-EXPORT_SYMBOL(d_find_alias);
-EXPORT_SYMBOL(d_instantiate);
-EXPORT_SYMBOL(d_invalidate);
-EXPORT_SYMBOL(d_lookup);
-EXPORT_SYMBOL(d_move);
-EXPORT_SYMBOL_GPL(d_materialise_unique);
-EXPORT_SYMBOL(d_path);
-EXPORT_SYMBOL(d_prune_aliases);
-EXPORT_SYMBOL(d_rehash);
-EXPORT_SYMBOL(d_splice_alias);
-EXPORT_SYMBOL(d_add_ci);
-EXPORT_SYMBOL(d_validate);
-EXPORT_SYMBOL(dget_locked);
-EXPORT_SYMBOL(dput);
-EXPORT_SYMBOL(find_inode_number);
-EXPORT_SYMBOL(have_submounts);
-EXPORT_SYMBOL(names_cachep);
-EXPORT_SYMBOL(shrink_dcache_parent);
-EXPORT_SYMBOL(shrink_dcache_sb);
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 274ac86..049d6c3 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -496,7 +496,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
}
d_move(old_dentry, dentry);
fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name,
- old_dentry->d_name.name, S_ISDIR(old_dentry->d_inode->i_mode),
+ S_ISDIR(old_dentry->d_inode->i_mode),
NULL, old_dentry);
fsnotify_oldname_free(old_name);
unlock_rename(new_dir, old_dir);
diff --git a/fs/exofs/common.h b/fs/exofs/common.h
index b1b178e..f0d5203 100644
--- a/fs/exofs/common.h
+++ b/fs/exofs/common.h
@@ -55,6 +55,8 @@
/* exofs Application specific page/attribute */
# define EXOFS_APAGE_FS_DATA (OSD_APAGE_APP_DEFINED_FIRST + 3)
# define EXOFS_ATTR_INODE_DATA 1
+# define EXOFS_ATTR_INODE_FILE_LAYOUT 2
+# define EXOFS_ATTR_INODE_DIR_LAYOUT 3
/*
* The maximum number of files we can have is limited by the size of the
@@ -206,4 +208,41 @@ enum {
(((name_len) + offsetof(struct exofs_dir_entry, name) + \
EXOFS_DIR_ROUND) & ~EXOFS_DIR_ROUND)
+/*
+ * The on-disk (optional) layout structure.
+ * It sits in an EXOFS_ATTR_INODE_FILE_LAYOUT or EXOFS_ATTR_INODE_DIR_LAYOUT
+ * attribute, attached to any inode, usually a directory.
+ */
+
+enum exofs_inode_layout_gen_functions {
+ LAYOUT_MOVING_WINDOW = 0,
+ LAYOUT_IMPLICT = 1,
+};
+
+struct exofs_on_disk_inode_layout {
+ __le16 gen_func; /* One of enum exofs_inode_layout_gen_functions */
+ __le16 pad;
+ union {
+ /* gen_func == LAYOUT_MOVING_WINDOW (default) */
+ struct exofs_layout_sliding_window {
+ __le32 num_devices; /* first n devices in global table */
+ } sliding_window __packed;
+
+ /* gen_func == LAYOUT_IMPLICT */
+ struct exofs_layout_implict_list {
+ struct exofs_dt_data_map data_map;
+ /* Variable array of size data_map.cb_num_comps. These
+ * are device indexes of the devices in the global table
+ */
+ __le32 dev_indexes[];
+ } implict __packed;
+ };
+} __packed;
+
+static inline size_t exofs_on_disk_inode_layout_size(unsigned max_devs)
+{
+ return sizeof(struct exofs_on_disk_inode_layout) +
+ max_devs * sizeof(__le32);
+}
+
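As a quick sanity check of the size helper above (illustrative numbers, not from the patch):

/* For a table of 4 devices:
 *   exofs_on_disk_inode_layout_size(4)
 *     == sizeof(struct exofs_on_disk_inode_layout) + 4 * sizeof(__le32)
 * i.e. the fixed header plus one little-endian device index per device,
 * which is exactly what the implicit-list variant's dev_indexes[] needs.
 */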
#endif /*ifndef __EXOFS_COM_H__*/
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index c35fd46..8442e35 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -55,12 +55,28 @@
/* u64 has problems with printk this will cast it to unsigned long long */
#define _LLU(x) (unsigned long long)(x)
+struct exofs_layout {
+ osd_id s_pid; /* partition ID of file system*/
+
+ /* Our way of looking at the data_map */
+ unsigned stripe_unit;
+ unsigned mirrors_p1;
+
+ unsigned group_width;
+ u64 group_depth;
+ unsigned group_count;
+
+ enum exofs_inode_layout_gen_functions lay_func;
+
+ unsigned s_numdevs; /* Num of devices in array */
+ struct osd_dev *s_ods[0]; /* Variable length */
+};
+
/*
* our extension to the in-memory superblock
*/
struct exofs_sb_info {
struct exofs_fscb s_fscb; /* Written often, pre-allocate*/
- osd_id s_pid; /* partition ID of file system*/
int s_timeout; /* timeout for OSD operations */
uint64_t s_nextid; /* highest object ID used */
uint32_t s_numfiles; /* number of files on fs */
@@ -69,22 +85,27 @@ struct exofs_sb_info {
atomic_t s_curr_pending; /* number of pending commands */
uint8_t s_cred[OSD_CAP_LEN]; /* credential for the fscb */
- struct pnfs_osd_data_map data_map; /* Default raid to use */
- unsigned s_numdevs; /* Num of devices in array */
- struct osd_dev *s_ods[1]; /* Variable length, minimum 1 */
+ struct pnfs_osd_data_map data_map; /* Default raid to use
+ * FIXME: Needed ?
+ */
+/* struct exofs_layout dir_layout;*/ /* Default dir layout */
+ struct exofs_layout layout; /* Default files layout,
+ * contains the variable osd_dev
+ * array. Keep last */
+ struct osd_dev *_min_one_dev[1]; /* Place holder for one dev */
};
/*
* our extension to the in-memory inode
*/
struct exofs_i_info {
+ struct inode vfs_inode; /* normal in-memory inode */
+ wait_queue_head_t i_wq; /* wait queue for inode */
unsigned long i_flags; /* various atomic flags */
uint32_t i_data[EXOFS_IDATA];/*short symlink names and device #s*/
uint32_t i_dir_start_lookup; /* which page to start lookup */
- wait_queue_head_t i_wq; /* wait queue for inode */
uint64_t i_commit_size; /* the object's written length */
uint8_t i_cred[OSD_CAP_LEN];/* all-powerful credential */
- struct inode vfs_inode; /* normal in-memory inode */
};
static inline osd_id exofs_oi_objno(struct exofs_i_info *oi)
@@ -101,7 +122,7 @@ struct exofs_io_state {
void *private;
exofs_io_done_fn done;
- struct exofs_sb_info *sbi;
+ struct exofs_layout *layout;
struct osd_obj_id obj;
u8 *cred;
@@ -109,7 +130,11 @@ struct exofs_io_state {
loff_t offset;
unsigned long length;
void *kern_buff;
- struct bio *bio;
+
+ struct page **pages;
+ unsigned nr_pages;
+ unsigned pgbase;
+ unsigned pages_consumed;
/* Attributes */
unsigned in_attr_len;
@@ -122,6 +147,9 @@ struct exofs_io_state {
struct exofs_per_dev_state {
struct osd_request *or;
struct bio *bio;
+ loff_t offset;
+ unsigned length;
+ unsigned dev;
} per_dev[];
};
@@ -175,6 +203,12 @@ static inline struct exofs_i_info *exofs_i(struct inode *inode)
}
/*
+ * Given a layout, object_number and stripe_index return the associated global
+ * dev_index
+ */
+unsigned exofs_layout_od_id(struct exofs_layout *layout,
+ osd_id obj_no, unsigned layout_index);
+/*
* Maximum count of links to a file
*/
#define EXOFS_LINK_MAX 32000
@@ -189,7 +223,8 @@ void exofs_make_credential(u8 cred_a[OSD_CAP_LEN],
int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
u64 offset, void *p, unsigned length);
-int exofs_get_io_state(struct exofs_sb_info *sbi, struct exofs_io_state** ios);
+int exofs_get_io_state(struct exofs_layout *layout,
+ struct exofs_io_state **ios);
void exofs_put_io_state(struct exofs_io_state *ios);
int exofs_check_io(struct exofs_io_state *ios, u64 *resid);
@@ -226,7 +261,7 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
struct page **pagep, void **fsdata);
extern struct inode *exofs_iget(struct super_block *, unsigned long);
struct inode *exofs_new_inode(struct inode *, int);
-extern int exofs_write_inode(struct inode *, int);
+extern int exofs_write_inode(struct inode *, struct writeback_control *wbc);
extern void exofs_delete_inode(struct inode *);
/* dir.c: */
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 2afbceb..a17e4b7 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -41,16 +41,18 @@
enum { BIO_MAX_PAGES_KMALLOC =
(PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),
+ MAX_PAGES_KMALLOC =
+ PAGE_SIZE / sizeof(struct page *),
};
struct page_collect {
struct exofs_sb_info *sbi;
- struct request_queue *req_q;
struct inode *inode;
unsigned expected_pages;
struct exofs_io_state *ios;
- struct bio *bio;
+ struct page **pages;
+ unsigned alloc_pages;
unsigned nr_pages;
unsigned long length;
loff_t pg_first; /* keep 64bit also in 32-arches */
@@ -62,15 +64,12 @@ static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
pcol->sbi = sbi;
- /* Create master bios on first Q, later on cloning, each clone will be
- * allocated on it's destination Q
- */
- pcol->req_q = osd_request_queue(sbi->s_ods[0]);
pcol->inode = inode;
pcol->expected_pages = expected_pages;
pcol->ios = NULL;
- pcol->bio = NULL;
+ pcol->pages = NULL;
+ pcol->alloc_pages = 0;
pcol->nr_pages = 0;
pcol->length = 0;
pcol->pg_first = -1;
@@ -80,7 +79,8 @@ static void _pcol_reset(struct page_collect *pcol)
{
pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);
- pcol->bio = NULL;
+ pcol->pages = NULL;
+ pcol->alloc_pages = 0;
pcol->nr_pages = 0;
pcol->length = 0;
pcol->pg_first = -1;
@@ -90,38 +90,43 @@ static void _pcol_reset(struct page_collect *pcol)
* it might not end here. don't be left with nothing
*/
if (!pcol->expected_pages)
- pcol->expected_pages = BIO_MAX_PAGES_KMALLOC;
+ pcol->expected_pages = MAX_PAGES_KMALLOC;
}
static int pcol_try_alloc(struct page_collect *pcol)
{
- int pages = min_t(unsigned, pcol->expected_pages,
- BIO_MAX_PAGES_KMALLOC);
+ unsigned pages = min_t(unsigned, pcol->expected_pages,
+ MAX_PAGES_KMALLOC);
if (!pcol->ios) { /* First time allocate io_state */
- int ret = exofs_get_io_state(pcol->sbi, &pcol->ios);
+ int ret = exofs_get_io_state(&pcol->sbi->layout, &pcol->ios);
if (ret)
return ret;
}
+ /* TODO: easily support bio chaining */
+ pages = min_t(unsigned, pages,
+ pcol->sbi->layout.group_width * BIO_MAX_PAGES_KMALLOC);
+
for (; pages; pages >>= 1) {
- pcol->bio = bio_kmalloc(GFP_KERNEL, pages);
- if (likely(pcol->bio))
+ pcol->pages = kmalloc(pages * sizeof(struct page *),
+ GFP_KERNEL);
+ if (likely(pcol->pages)) {
+ pcol->alloc_pages = pages;
return 0;
+ }
}
- EXOFS_ERR("Failed to bio_kmalloc expected_pages=%u\n",
+ EXOFS_ERR("Failed to kmalloc expected_pages=%u\n",
pcol->expected_pages);
return -ENOMEM;
}
static void pcol_free(struct page_collect *pcol)
{
- if (pcol->bio) {
- bio_put(pcol->bio);
- pcol->bio = NULL;
- }
+ kfree(pcol->pages);
+ pcol->pages = NULL;
if (pcol->ios) {
exofs_put_io_state(pcol->ios);
@@ -132,11 +137,10 @@ static void pcol_free(struct page_collect *pcol)
static int pcol_add_page(struct page_collect *pcol, struct page *page,
unsigned len)
{
- int added_len = bio_add_pc_page(pcol->req_q, pcol->bio, page, len, 0);
- if (unlikely(len != added_len))
+ if (unlikely(pcol->nr_pages >= pcol->alloc_pages))
return -ENOMEM;
- ++pcol->nr_pages;
+ pcol->pages[pcol->nr_pages++] = page;
pcol->length += len;
return 0;
}
@@ -181,7 +185,6 @@ static void update_write_page(struct page *page, int ret)
*/
static int __readpages_done(struct page_collect *pcol, bool do_unlock)
{
- struct bio_vec *bvec;
int i;
u64 resid;
u64 good_bytes;
@@ -193,13 +196,13 @@ static int __readpages_done(struct page_collect *pcol, bool do_unlock)
else
good_bytes = pcol->length - resid;
- EXOFS_DBGMSG("readpages_done(0x%lx) good_bytes=0x%llx"
+ EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx"
" length=0x%lx nr_pages=%u\n",
pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
pcol->nr_pages);
- __bio_for_each_segment(bvec, pcol->bio, i, 0) {
- struct page *page = bvec->bv_page;
+ for (i = 0; i < pcol->nr_pages; i++) {
+ struct page *page = pcol->pages[i];
struct inode *inode = page->mapping->host;
int page_stat;
@@ -218,11 +221,11 @@ static int __readpages_done(struct page_collect *pcol, bool do_unlock)
ret = update_read_page(page, page_stat);
if (do_unlock)
unlock_page(page);
- length += bvec->bv_len;
+ length += PAGE_SIZE;
}
pcol_free(pcol);
- EXOFS_DBGMSG("readpages_done END\n");
+ EXOFS_DBGMSG2("readpages_done END\n");
return ret;
}
@@ -238,11 +241,10 @@ static void readpages_done(struct exofs_io_state *ios, void *p)
static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
{
- struct bio_vec *bvec;
int i;
- __bio_for_each_segment(bvec, pcol->bio, i, 0) {
- struct page *page = bvec->bv_page;
+ for (i = 0; i < pcol->nr_pages; i++) {
+ struct page *page = pcol->pages[i];
if (rw == READ)
update_read_page(page, ret);
@@ -260,13 +262,14 @@ static int read_exec(struct page_collect *pcol, bool is_sync)
struct page_collect *pcol_copy = NULL;
int ret;
- if (!pcol->bio)
+ if (!pcol->pages)
return 0;
/* see comment in _readpage() about sync reads */
WARN_ON(is_sync && (pcol->nr_pages != 1));
- ios->bio = pcol->bio;
+ ios->pages = pcol->pages;
+ ios->nr_pages = pcol->nr_pages;
ios->length = pcol->length;
ios->offset = pcol->pg_first << PAGE_CACHE_SHIFT;
@@ -290,7 +293,7 @@ static int read_exec(struct page_collect *pcol, bool is_sync)
atomic_inc(&pcol->sbi->s_curr_pending);
- EXOFS_DBGMSG("read_exec obj=0x%llx start=0x%llx length=0x%lx\n",
+ EXOFS_DBGMSG2("read_exec obj=0x%llx start=0x%llx length=0x%lx\n",
ios->obj.id, _LLU(ios->offset), pcol->length);
/* pages ownership was passed to pcol_copy */
@@ -366,7 +369,7 @@ try_again:
goto try_again;
}
- if (!pcol->bio) {
+ if (!pcol->pages) {
ret = pcol_try_alloc(pcol);
if (unlikely(ret))
goto fail;
@@ -448,7 +451,6 @@ static int exofs_readpage(struct file *file, struct page *page)
static void writepages_done(struct exofs_io_state *ios, void *p)
{
struct page_collect *pcol = p;
- struct bio_vec *bvec;
int i;
u64 resid;
u64 good_bytes;
@@ -462,13 +464,13 @@ static void writepages_done(struct exofs_io_state *ios, void *p)
else
good_bytes = pcol->length - resid;
- EXOFS_DBGMSG("writepages_done(0x%lx) good_bytes=0x%llx"
+ EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx"
" length=0x%lx nr_pages=%u\n",
pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
pcol->nr_pages);
- __bio_for_each_segment(bvec, pcol->bio, i, 0) {
- struct page *page = bvec->bv_page;
+ for (i = 0; i < pcol->nr_pages; i++) {
+ struct page *page = pcol->pages[i];
struct inode *inode = page->mapping->host;
int page_stat;
@@ -485,12 +487,12 @@ static void writepages_done(struct exofs_io_state *ios, void *p)
EXOFS_DBGMSG2(" writepages_done(0x%lx, 0x%lx) status=%d\n",
inode->i_ino, page->index, page_stat);
- length += bvec->bv_len;
+ length += PAGE_SIZE;
}
pcol_free(pcol);
kfree(pcol);
- EXOFS_DBGMSG("writepages_done END\n");
+ EXOFS_DBGMSG2("writepages_done END\n");
}
static int write_exec(struct page_collect *pcol)
@@ -500,7 +502,7 @@ static int write_exec(struct page_collect *pcol)
struct page_collect *pcol_copy = NULL;
int ret;
- if (!pcol->bio)
+ if (!pcol->pages)
return 0;
pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
@@ -512,9 +514,8 @@ static int write_exec(struct page_collect *pcol)
*pcol_copy = *pcol;
- pcol_copy->bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
-
- ios->bio = pcol_copy->bio;
+ ios->pages = pcol_copy->pages;
+ ios->nr_pages = pcol_copy->nr_pages;
ios->offset = pcol_copy->pg_first << PAGE_CACHE_SHIFT;
ios->length = pcol_copy->length;
ios->done = writepages_done;
@@ -527,7 +528,7 @@ static int write_exec(struct page_collect *pcol)
}
atomic_inc(&pcol->sbi->s_curr_pending);
- EXOFS_DBGMSG("write_exec(0x%lx, 0x%llx) start=0x%llx length=0x%lx\n",
+ EXOFS_DBGMSG2("write_exec(0x%lx, 0x%llx) start=0x%llx length=0x%lx\n",
pcol->inode->i_ino, pcol->pg_first, _LLU(ios->offset),
pcol->length);
/* pages ownership was passed to pcol_copy */
@@ -605,7 +606,7 @@ try_again:
goto try_again;
}
- if (!pcol->bio) {
+ if (!pcol->pages) {
ret = pcol_try_alloc(pcol);
if (unlikely(ret))
goto fail;
@@ -616,7 +617,7 @@ try_again:
ret = pcol_add_page(pcol, page, len);
if (unlikely(ret)) {
- EXOFS_DBGMSG("Failed pcol_add_page "
+ EXOFS_DBGMSG2("Failed pcol_add_page "
"nr_pages=%u total_length=0x%lx\n",
pcol->nr_pages, pcol->length);
@@ -663,7 +664,7 @@ static int exofs_writepages(struct address_space *mapping,
if (expected_pages < 32L)
expected_pages = 32L;
- EXOFS_DBGMSG("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
+ EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
"nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
mapping->host->i_ino, wbc->range_start, wbc->range_end,
mapping->nrpages, start, end, expected_pages);
@@ -859,20 +860,33 @@ int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
return error;
}
+static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF(
+ EXOFS_APAGE_FS_DATA,
+ EXOFS_ATTR_INODE_FILE_LAYOUT,
+ 0);
+static const struct osd_attr g_attr_inode_dir_layout = ATTR_DEF(
+ EXOFS_APAGE_FS_DATA,
+ EXOFS_ATTR_INODE_DIR_LAYOUT,
+ 0);
+
/*
- * Read an inode from the OSD, and return it as is. We also return the size
- * attribute in the 'obj_size' argument.
+ * Read the Linux inode info from the OSD, and return it as is. In exofs the
+ * inode info is in an application specific page/attribute of the osd-object.
*/
static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
- struct exofs_fcb *inode, uint64_t *obj_size)
+ struct exofs_fcb *inode)
{
struct exofs_sb_info *sbi = sb->s_fs_info;
- struct osd_attr attrs[2];
+ struct osd_attr attrs[] = {
+ [0] = g_attr_inode_data,
+ [1] = g_attr_inode_file_layout,
+ [2] = g_attr_inode_dir_layout,
+ };
struct exofs_io_state *ios;
+ struct exofs_on_disk_inode_layout *layout;
int ret;
- *obj_size = ~0;
- ret = exofs_get_io_state(sbi, &ios);
+ ret = exofs_get_io_state(&sbi->layout, &ios);
if (unlikely(ret)) {
EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
return ret;
@@ -882,14 +896,25 @@ static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
exofs_make_credential(oi->i_cred, &ios->obj);
ios->cred = oi->i_cred;
- attrs[0] = g_attr_inode_data;
- attrs[1] = g_attr_logical_length;
+ attrs[1].len = exofs_on_disk_inode_layout_size(sbi->layout.s_numdevs);
+ attrs[2].len = exofs_on_disk_inode_layout_size(sbi->layout.s_numdevs);
+
ios->in_attr = attrs;
ios->in_attr_len = ARRAY_SIZE(attrs);
ret = exofs_sbi_read(ios);
- if (ret)
+ if (unlikely(ret)) {
+ EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
+ _LLU(ios->obj.id), ret);
+ memset(inode, 0, sizeof(*inode));
+ inode->i_mode = 0040000 | (0777 & ~022);
+ /* If the object is lost on the target we might as well
+ * enable its deletion.
+ */
+ if ((ret == -ENOENT) || (ret == -EINVAL))
+ ret = 0;
goto out;
+ }
ret = extract_attr_from_ios(ios, &attrs[0]);
if (ret) {
@@ -901,11 +926,33 @@ static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
ret = extract_attr_from_ios(ios, &attrs[1]);
if (ret) {
- EXOFS_ERR("%s: extract_attr of logical_length failed\n",
- __func__);
+ EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
+ goto out;
+ }
+ if (attrs[1].len) {
+ layout = attrs[1].val_ptr;
+ if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
+ EXOFS_ERR("%s: unsupported files layout %d\n",
+ __func__, layout->gen_func);
+ ret = -ENOTSUPP;
+ goto out;
+ }
+ }
+
+ ret = extract_attr_from_ios(ios, &attrs[2]);
+ if (ret) {
+ EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
goto out;
}
- *obj_size = get_unaligned_be64(attrs[1].val_ptr);
+ if (attrs[2].len) {
+ layout = attrs[2].val_ptr;
+ if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
+ EXOFS_ERR("%s: unsupported meta-data layout %d\n",
+ __func__, layout->gen_func);
+ ret = -ENOTSUPP;
+ goto out;
+ }
+ }
out:
exofs_put_io_state(ios);
@@ -925,7 +972,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
struct exofs_i_info *oi;
struct exofs_fcb fcb;
struct inode *inode;
- uint64_t obj_size;
int ret;
inode = iget_locked(sb, ino);
@@ -937,7 +983,7 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
__oi_init(oi);
/* read the inode from the osd */
- ret = exofs_get_inode(sb, oi, &fcb, &obj_size);
+ ret = exofs_get_inode(sb, oi, &fcb);
if (ret)
goto bad_inode;
@@ -958,13 +1004,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
inode->i_blkbits = EXOFS_BLKSHIFT;
inode->i_generation = le32_to_cpu(fcb.i_generation);
- if ((inode->i_size != obj_size) &&
- (!exofs_inode_is_fast_symlink(inode))) {
- EXOFS_ERR("WARNING: Size of inode=%llu != object=%llu\n",
- inode->i_size, _LLU(obj_size));
- /* FIXME: call exofs_inode_recovery() */
- }
-
oi->i_dir_start_lookup = 0;
if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
@@ -1043,7 +1082,7 @@ static void create_done(struct exofs_io_state *ios, void *p)
if (unlikely(ret)) {
EXOFS_ERR("object=0x%llx creation faild in pid=0x%llx",
- _LLU(exofs_oi_objno(oi)), _LLU(sbi->s_pid));
+ _LLU(exofs_oi_objno(oi)), _LLU(sbi->layout.s_pid));
/*TODO: When FS is corrupted creation can fail, object already
* exist. Get rid of this asynchronous creation, if exist
* increment the obj counter and try the next object. Until we
@@ -1104,7 +1143,7 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
mark_inode_dirty(inode);
- ret = exofs_get_io_state(sbi, &ios);
+ ret = exofs_get_io_state(&sbi->layout, &ios);
if (unlikely(ret)) {
EXOFS_ERR("exofs_new_inode: exofs_get_io_state failed\n");
return ERR_PTR(ret);
@@ -1170,8 +1209,10 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
int ret;
args = kzalloc(sizeof(*args), GFP_KERNEL);
- if (!args)
+ if (!args) {
+ EXOFS_DBGMSG("Faild kzalloc of args\n");
return -ENOMEM;
+ }
fcb = &args->fcb;
@@ -1200,7 +1241,7 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
} else
memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));
- ret = exofs_get_io_state(sbi, &ios);
+ ret = exofs_get_io_state(&sbi->layout, &ios);
if (unlikely(ret)) {
EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
goto free_args;
@@ -1234,13 +1275,14 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
free_args:
kfree(args);
out:
- EXOFS_DBGMSG("ret=>%d\n", ret);
+ EXOFS_DBGMSG("(0x%lx) do_sync=%d ret=>%d\n",
+ inode->i_ino, do_sync, ret);
return ret;
}
-int exofs_write_inode(struct inode *inode, int wait)
+int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
- return exofs_update_inode(inode, wait);
+ return exofs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}
/*
@@ -1283,7 +1325,7 @@ void exofs_delete_inode(struct inode *inode)
clear_inode(inode);
- ret = exofs_get_io_state(sbi, &ios);
+ ret = exofs_get_io_state(&sbi->layout, &ios);
if (unlikely(ret)) {
EXOFS_ERR("%s: exofs_get_io_state failed\n", __func__);
return;
diff --git a/fs/exofs/ios.c b/fs/exofs/ios.c
index 5bad01f..5293bc4 100644
--- a/fs/exofs/ios.c
+++ b/fs/exofs/ios.c
@@ -23,9 +23,13 @@
*/
#include <scsi/scsi_device.h>
+#include <asm/div64.h>
#include "exofs.h"
+#define EXOFS_DBGMSG2(M...) do {} while (0)
+/* #define EXOFS_DBGMSG2 EXOFS_DBGMSG */
+
void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj)
{
osd_sec_init_nosec_doall_caps(cred_a, obj, false, true);
@@ -64,21 +68,24 @@ out:
return ret;
}
-int exofs_get_io_state(struct exofs_sb_info *sbi, struct exofs_io_state** pios)
+int exofs_get_io_state(struct exofs_layout *layout,
+ struct exofs_io_state **pios)
{
struct exofs_io_state *ios;
/*TODO: Maybe use kmem_cache per sbi of size
- * exofs_io_state_size(sbi->s_numdevs)
+ * exofs_io_state_size(layout->s_numdevs)
*/
- ios = kzalloc(exofs_io_state_size(sbi->s_numdevs), GFP_KERNEL);
+ ios = kzalloc(exofs_io_state_size(layout->s_numdevs), GFP_KERNEL);
if (unlikely(!ios)) {
+ EXOFS_DBGMSG("Faild kzalloc bytes=%d\n",
+ exofs_io_state_size(layout->s_numdevs));
*pios = NULL;
return -ENOMEM;
}
- ios->sbi = sbi;
- ios->obj.partition = sbi->s_pid;
+ ios->layout = layout;
+ ios->obj.partition = layout->s_pid;
*pios = ios;
return 0;
}
@@ -101,6 +108,29 @@ void exofs_put_io_state(struct exofs_io_state *ios)
}
}
+unsigned exofs_layout_od_id(struct exofs_layout *layout,
+ osd_id obj_no, unsigned layout_index)
+{
+/* switch (layout->lay_func) {
+ case LAYOUT_MOVING_WINDOW:
+ {*/
+ unsigned dev_mod = obj_no;
+
+ return (layout_index + dev_mod * layout->mirrors_p1) %
+ layout->s_numdevs;
+/* }
+ case LAYOUT_FUNC_IMPLICT:
+ return layout->devs[layout_index];
+ }*/
+}
+
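A worked instance of the moving-window mapping above (parameters illustrative, not from the patch):

/* With layout->s_numdevs = 6 and layout->mirrors_p1 = 2, object 7 maps:
 *   layout_index 0 -> (0 + 7 * 2) % 6 = 2
 *   layout_index 1 -> (1 + 7 * 2) % 6 = 3
 * so each object starts its stripe on a different device pair and the
 * load spreads across the whole device table.
 */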
+static inline struct osd_dev *exofs_ios_od(struct exofs_io_state *ios,
+ unsigned layout_index)
+{
+ return ios->layout->s_ods[
+ exofs_layout_od_id(ios->layout, ios->obj.id, layout_index)];
+}
+
static void _sync_done(struct exofs_io_state *ios, void *p)
{
struct completion *waiting = p;
@@ -168,6 +198,21 @@ static int exofs_io_execute(struct exofs_io_state *ios)
return ret;
}
+static void _clear_bio(struct bio *bio)
+{
+ struct bio_vec *bv;
+ unsigned i;
+
+ __bio_for_each_segment(bv, bio, i, 0) {
+ unsigned this_count = bv->bv_len;
+
+ if (likely(PAGE_SIZE == this_count))
+ clear_highpage(bv->bv_page);
+ else
+ zero_user(bv->bv_page, bv->bv_offset, this_count);
+ }
+}
+
int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
{
enum osd_err_priority acumulated_osd_err = 0;
@@ -176,16 +221,25 @@ int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
for (i = 0; i < ios->numdevs; i++) {
struct osd_sense_info osi;
- int ret = osd_req_decode_sense(ios->per_dev[i].or, &osi);
+ struct osd_request *or = ios->per_dev[i].or;
+ int ret;
+
+ if (unlikely(!or))
+ continue;
+ ret = osd_req_decode_sense(or, &osi);
if (likely(!ret))
continue;
- if (unlikely(ret == -EFAULT)) {
- EXOFS_DBGMSG("%s: EFAULT Need page clear\n", __func__);
- /*FIXME: All the pages in this device range should:
- * clear_highpage(page);
- */
+ if (OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) {
+ /* start read offset is past the end of file */
+ _clear_bio(ios->per_dev[i].bio);
+ EXOFS_DBGMSG("start read offset passed end of file "
+ "offset=0x%llx, length=0x%llx\n",
+ _LLU(ios->per_dev[i].offset),
+ _LLU(ios->per_dev[i].length));
+
+ continue; /* we recovered */
}
if (osi.osd_err_pri >= acumulated_osd_err) {
@@ -205,14 +259,259 @@ int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
return acumulated_lin_err;
}
+/*
+ * L - logical offset into the file
+ *
+ * U - The number of bytes in a stripe within a group
+ *
+ * U = stripe_unit * group_width
+ *
+ * T - The number of bytes striped within a group of component objects
+ * (before advancing to the next group)
+ *
+ * T = stripe_unit * group_width * group_depth
+ *
+ * S - The number of bytes striped across all component objects
+ * before the pattern repeats
+ *
+ * S = stripe_unit * group_width * group_depth * group_count
+ *
+ * M - The "major" (i.e., across all components) stripe number
+ *
+ * M = L / S
+ *
+ * G - Counts the groups from the beginning of the major stripe
+ *
+ * G = (L - (M * S)) / T [or (L % S) / T]
+ *
+ * H - The byte offset within the group
+ *
+ * H = (L - (M * S)) % T [or (L % S) % T]
+ *
+ * N - The "minor" (i.e., across the group) stripe number
+ *
+ * N = H / U
+ *
+ * C - The component index corresponding to L
+ *
+ * C = (H - (N * U)) / stripe_unit + G * group_width
+ * [or (L % U) / stripe_unit + G * group_width]
+ *
+ * O - The component offset corresponding to L
+ *
+ * O = L % stripe_unit + N * stripe_unit + M * group_depth * stripe_unit
+ */
+struct _striping_info {
+ u64 obj_offset;
+ u64 group_length;
+ u64 total_group_length;
+ u64 Major;
+ unsigned dev;
+ unsigned unit_off;
+};
+
+static void _calc_stripe_info(struct exofs_io_state *ios, u64 file_offset,
+ struct _striping_info *si)
+{
+ u32 stripe_unit = ios->layout->stripe_unit;
+ u32 group_width = ios->layout->group_width;
+ u64 group_depth = ios->layout->group_depth;
+
+ u32 U = stripe_unit * group_width;
+ u64 T = U * group_depth;
+ u64 S = T * ios->layout->group_count;
+ u64 M = div64_u64(file_offset, S);
+
+ /*
+ G = (L - (M * S)) / T
+ H = (L - (M * S)) % T
+ */
+ u64 LmodS = file_offset - M * S;
+ u32 G = div64_u64(LmodS, T);
+ u64 H = LmodS - G * T;
+
+ u32 N = div_u64(H, U);
+
+ /* "H - (N * U)" is just "H % U" so it's bound to u32 */
+ si->dev = (u32)(H - (N * U)) / stripe_unit + G * group_width;
+ si->dev *= ios->layout->mirrors_p1;
+
+ div_u64_rem(file_offset, stripe_unit, &si->unit_off);
+
+ si->obj_offset = si->unit_off + (N * stripe_unit) +
+ (M * group_depth * stripe_unit);
+
+ si->group_length = T - H;
+ si->total_group_length = T;
+ si->Major = M;
+}
+
+static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
+ unsigned pgbase, struct exofs_per_dev_state *per_dev,
+ int cur_len)
+{
+ unsigned pg = *cur_pg;
+ struct request_queue *q =
+ osd_request_queue(exofs_ios_od(ios, per_dev->dev));
+
+ per_dev->length += cur_len;
+
+ if (per_dev->bio == NULL) {
+ unsigned pages_in_stripe = ios->layout->group_width *
+ (ios->layout->stripe_unit / PAGE_SIZE);
+ unsigned bio_size = (ios->nr_pages + pages_in_stripe) /
+ ios->layout->group_width;
+
+ per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
+ if (unlikely(!per_dev->bio)) {
+ EXOFS_DBGMSG("Faild to allocate BIO size=%u\n",
+ bio_size);
+ return -ENOMEM;
+ }
+ }
+
+ while (cur_len > 0) {
+ unsigned pglen = min_t(unsigned, PAGE_SIZE - pgbase, cur_len);
+ unsigned added_len;
+
+ BUG_ON(ios->nr_pages <= pg);
+ cur_len -= pglen;
+
+ added_len = bio_add_pc_page(q, per_dev->bio, ios->pages[pg],
+ pglen, pgbase);
+ if (unlikely(pglen != added_len))
+ return -ENOMEM;
+ pgbase = 0;
+ ++pg;
+ }
+ BUG_ON(cur_len);
+
+ *cur_pg = pg;
+ return 0;
+}
+
+static int _prepare_one_group(struct exofs_io_state *ios, u64 length,
+ struct _striping_info *si, unsigned first_comp)
+{
+ unsigned stripe_unit = ios->layout->stripe_unit;
+ unsigned mirrors_p1 = ios->layout->mirrors_p1;
+ unsigned devs_in_group = ios->layout->group_width * mirrors_p1;
+ unsigned dev = si->dev;
+ unsigned first_dev = dev - (dev % devs_in_group);
+ unsigned comp = first_comp + (dev - first_dev);
+ unsigned max_comp = ios->numdevs ? ios->numdevs - mirrors_p1 : 0;
+ unsigned cur_pg = ios->pages_consumed;
+ int ret = 0;
+
+ while (length) {
+ struct exofs_per_dev_state *per_dev = &ios->per_dev[comp];
+ unsigned cur_len, page_off = 0;
+
+ if (!per_dev->length) {
+ per_dev->dev = dev;
+ if (dev < si->dev) {
+ per_dev->offset = si->obj_offset + stripe_unit -
+ si->unit_off;
+ cur_len = stripe_unit;
+ } else if (dev == si->dev) {
+ per_dev->offset = si->obj_offset;
+ cur_len = stripe_unit - si->unit_off;
+ page_off = si->unit_off & ~PAGE_MASK;
+ BUG_ON(page_off && (page_off != ios->pgbase));
+ } else { /* dev > si->dev */
+ per_dev->offset = si->obj_offset - si->unit_off;
+ cur_len = stripe_unit;
+ }
+
+ if (max_comp < comp)
+ max_comp = comp;
+
+ dev += mirrors_p1;
+ dev = (dev % devs_in_group) + first_dev;
+ } else {
+ cur_len = stripe_unit;
+ }
+ if (cur_len >= length)
+ cur_len = length;
+
+ ret = _add_stripe_unit(ios, &cur_pg, page_off , per_dev,
+ cur_len);
+ if (unlikely(ret))
+ goto out;
+
+ comp += mirrors_p1;
+ comp = (comp % devs_in_group) + first_comp;
+
+ length -= cur_len;
+ }
+out:
+ ios->numdevs = max_comp + mirrors_p1;
+ ios->pages_consumed = cur_pg;
+ return ret;
+}
+
+static int _prepare_for_striping(struct exofs_io_state *ios)
+{
+ u64 length = ios->length;
+ struct _striping_info si;
+ unsigned devs_in_group = ios->layout->group_width *
+ ios->layout->mirrors_p1;
+ unsigned first_comp = 0;
+ int ret = 0;
+
+ _calc_stripe_info(ios, ios->offset, &si);
+
+ if (!ios->pages) {
+ if (ios->kern_buff) {
+ struct exofs_per_dev_state *per_dev = &ios->per_dev[0];
+
+ per_dev->offset = si.obj_offset;
+ per_dev->dev = si.dev;
+
+ /* no cross device without page array */
+ BUG_ON((ios->layout->group_width > 1) &&
+ (si.unit_off + ios->length >
+ ios->layout->stripe_unit));
+ }
+ ios->numdevs = ios->layout->mirrors_p1;
+ return 0;
+ }
+
+ while (length) {
+ if (length < si.group_length)
+ si.group_length = length;
+
+ ret = _prepare_one_group(ios, si.group_length, &si, first_comp);
+ if (unlikely(ret))
+ goto out;
+
+ length -= si.group_length;
+
+ si.group_length = si.total_group_length;
+ si.unit_off = 0;
+ ++si.Major;
+ si.obj_offset = si.Major * ios->layout->stripe_unit *
+ ios->layout->group_depth;
+
+ si.dev = (si.dev - (si.dev % devs_in_group)) + devs_in_group;
+ si.dev %= ios->layout->s_numdevs;
+
+ first_comp += devs_in_group;
+ first_comp %= ios->layout->s_numdevs;
+ }
+
+out:
+ return ret;
+}
+
int exofs_sbi_create(struct exofs_io_state *ios)
{
int i, ret;
- for (i = 0; i < ios->sbi->s_numdevs; i++) {
+ for (i = 0; i < ios->layout->s_numdevs; i++) {
struct osd_request *or;
- or = osd_start_request(ios->sbi->s_ods[i], GFP_KERNEL);
+ or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
if (unlikely(!or)) {
EXOFS_ERR("%s: osd_start_request failed\n", __func__);
ret = -ENOMEM;
@@ -233,10 +532,10 @@ int exofs_sbi_remove(struct exofs_io_state *ios)
{
int i, ret;
- for (i = 0; i < ios->sbi->s_numdevs; i++) {
+ for (i = 0; i < ios->layout->s_numdevs; i++) {
struct osd_request *or;
- or = osd_start_request(ios->sbi->s_ods[i], GFP_KERNEL);
+ or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
if (unlikely(!or)) {
EXOFS_ERR("%s: osd_start_request failed\n", __func__);
ret = -ENOMEM;
@@ -253,51 +552,74 @@ out:
return ret;
}
-int exofs_sbi_write(struct exofs_io_state *ios)
+static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
{
- int i, ret;
+ struct exofs_per_dev_state *master_dev = &ios->per_dev[cur_comp];
+ unsigned dev = ios->per_dev[cur_comp].dev;
+ unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
+ int ret = 0;
- for (i = 0; i < ios->sbi->s_numdevs; i++) {
+ if (ios->pages && !master_dev->length)
+ return 0; /* Just an empty slot */
+
+ for (; cur_comp < last_comp; ++cur_comp, ++dev) {
+ struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
struct osd_request *or;
- or = osd_start_request(ios->sbi->s_ods[i], GFP_KERNEL);
+ or = osd_start_request(exofs_ios_od(ios, dev), GFP_KERNEL);
if (unlikely(!or)) {
EXOFS_ERR("%s: osd_start_request failed\n", __func__);
ret = -ENOMEM;
goto out;
}
- ios->per_dev[i].or = or;
- ios->numdevs++;
+ per_dev->or = or;
+ per_dev->offset = master_dev->offset;
- if (ios->bio) {
+ if (ios->pages) {
struct bio *bio;
- if (i != 0) {
+ if (per_dev != master_dev) {
bio = bio_kmalloc(GFP_KERNEL,
- ios->bio->bi_max_vecs);
+ master_dev->bio->bi_max_vecs);
if (unlikely(!bio)) {
+ EXOFS_DBGMSG(
+ "Faild to allocate BIO size=%u\n",
+ master_dev->bio->bi_max_vecs);
ret = -ENOMEM;
goto out;
}
- __bio_clone(bio, ios->bio);
+ __bio_clone(bio, master_dev->bio);
bio->bi_bdev = NULL;
bio->bi_next = NULL;
- ios->per_dev[i].bio = bio;
+ per_dev->length = master_dev->length;
+ per_dev->bio = bio;
+ per_dev->dev = dev;
} else {
- bio = ios->bio;
+ bio = master_dev->bio;
+ /* FIXME: bio_set_dir() */
+ bio->bi_rw |= (1 << BIO_RW);
}
- osd_req_write(or, &ios->obj, ios->offset, bio,
- ios->length);
-/* EXOFS_DBGMSG("write sync=%d\n", sync);*/
+ osd_req_write(or, &ios->obj, per_dev->offset, bio,
+ per_dev->length);
+ EXOFS_DBGMSG("write(0x%llx) offset=0x%llx "
+ "length=0x%llx dev=%d\n",
+ _LLU(ios->obj.id), _LLU(per_dev->offset),
+ _LLU(per_dev->length), dev);
} else if (ios->kern_buff) {
- osd_req_write_kern(or, &ios->obj, ios->offset,
+ ret = osd_req_write_kern(or, &ios->obj, per_dev->offset,
ios->kern_buff, ios->length);
-/* EXOFS_DBGMSG("write_kern sync=%d\n", sync);*/
+ if (unlikely(ret))
+ goto out;
+ EXOFS_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
+ "length=0x%llx dev=%d\n",
+ _LLU(ios->obj.id), _LLU(per_dev->offset),
+ _LLU(ios->length), dev);
} else {
osd_req_set_attributes(or, &ios->obj);
-/* EXOFS_DBGMSG("set_attributes sync=%d\n", sync);*/
+ EXOFS_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
+ _LLU(ios->obj.id), ios->out_attr_len, dev);
}
if (ios->out_attr)
@@ -308,54 +630,93 @@ int exofs_sbi_write(struct exofs_io_state *ios)
osd_req_add_get_attr_list(or, ios->in_attr,
ios->in_attr_len);
}
- ret = exofs_io_execute(ios);
out:
return ret;
}
-int exofs_sbi_read(struct exofs_io_state *ios)
+int exofs_sbi_write(struct exofs_io_state *ios)
{
- int i, ret;
+ int i;
+ int ret;
- for (i = 0; i < 1; i++) {
- struct osd_request *or;
- unsigned first_dev = (unsigned)ios->obj.id;
+ ret = _prepare_for_striping(ios);
+ if (unlikely(ret))
+ return ret;
- first_dev %= ios->sbi->s_numdevs;
- or = osd_start_request(ios->sbi->s_ods[first_dev], GFP_KERNEL);
- if (unlikely(!or)) {
- EXOFS_ERR("%s: osd_start_request failed\n", __func__);
- ret = -ENOMEM;
- goto out;
- }
- ios->per_dev[i].or = or;
- ios->numdevs++;
+ for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
+ ret = _sbi_write_mirror(ios, i);
+ if (unlikely(ret))
+ return ret;
+ }
- if (ios->bio) {
- osd_req_read(or, &ios->obj, ios->offset, ios->bio,
- ios->length);
-/* EXOFS_DBGMSG("read sync=%d\n", sync);*/
- } else if (ios->kern_buff) {
- osd_req_read_kern(or, &ios->obj, ios->offset,
- ios->kern_buff, ios->length);
-/* EXOFS_DBGMSG("read_kern sync=%d\n", sync);*/
- } else {
- osd_req_get_attributes(or, &ios->obj);
-/* EXOFS_DBGMSG("get_attributes sync=%d\n", sync);*/
- }
+ ret = exofs_io_execute(ios);
+ return ret;
+}
- if (ios->out_attr)
- osd_req_add_set_attr_list(or, ios->out_attr,
- ios->out_attr_len);
+static int _sbi_read_mirror(struct exofs_io_state *ios, unsigned cur_comp)
+{
+ struct osd_request *or;
+ struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
+ unsigned first_dev = (unsigned)ios->obj.id;
- if (ios->in_attr)
- osd_req_add_get_attr_list(or, ios->in_attr,
- ios->in_attr_len);
+ if (ios->pages && !per_dev->length)
+ return 0; /* Just an empty slot */
+
+ first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1;
+ or = osd_start_request(exofs_ios_od(ios, first_dev), GFP_KERNEL);
+ if (unlikely(!or)) {
+ EXOFS_ERR("%s: osd_start_request failed\n", __func__);
+ return -ENOMEM;
}
- ret = exofs_io_execute(ios);
+ per_dev->or = or;
+
+ if (ios->pages) {
+ osd_req_read(or, &ios->obj, per_dev->offset,
+ per_dev->bio, per_dev->length);
+ EXOFS_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
+ " dev=%d\n", _LLU(ios->obj.id),
+ _LLU(per_dev->offset), _LLU(per_dev->length),
+ first_dev);
+ } else if (ios->kern_buff) {
+ int ret = osd_req_read_kern(or, &ios->obj, per_dev->offset,
+ ios->kern_buff, ios->length);
+ EXOFS_DBGMSG2("read_kern(0x%llx) offset=0x%llx "
+ "length=0x%llx dev=%d ret=>%d\n",
+ _LLU(ios->obj.id), _LLU(per_dev->offset),
+ _LLU(ios->length), first_dev, ret);
+ if (unlikely(ret))
+ return ret;
+ } else {
+ osd_req_get_attributes(or, &ios->obj);
+ EXOFS_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
+ _LLU(ios->obj.id), ios->in_attr_len, first_dev);
+ }
+ if (ios->out_attr)
+ osd_req_add_set_attr_list(or, ios->out_attr, ios->out_attr_len);
-out:
+ if (ios->in_attr)
+ osd_req_add_get_attr_list(or, ios->in_attr, ios->in_attr_len);
+
+ return 0;
+}
+
+int exofs_sbi_read(struct exofs_io_state *ios)
+{
+ int i;
+ int ret;
+
+ ret = _prepare_for_striping(ios);
+ if (unlikely(ret))
+ return ret;
+
+ for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
+ ret = _sbi_read_mirror(ios, i);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = exofs_io_execute(ios);
return ret;
}
@@ -380,42 +741,82 @@ int extract_attr_from_ios(struct exofs_io_state *ios, struct osd_attr *attr)
return -EIO;
}
+static int _truncate_mirrors(struct exofs_io_state *ios, unsigned cur_comp,
+ struct osd_attr *attr)
+{
+ int last_comp = cur_comp + ios->layout->mirrors_p1;
+
+ for (; cur_comp < last_comp; ++cur_comp) {
+ struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
+ struct osd_request *or;
+
+ or = osd_start_request(exofs_ios_od(ios, cur_comp), GFP_KERNEL);
+ if (unlikely(!or)) {
+ EXOFS_ERR("%s: osd_start_request failed\n", __func__);
+ return -ENOMEM;
+ }
+ per_dev->or = or;
+
+ osd_req_set_attributes(or, &ios->obj);
+ osd_req_add_set_attr_list(or, attr, 1);
+ }
+
+ return 0;
+}
+
int exofs_oi_truncate(struct exofs_i_info *oi, u64 size)
{
struct exofs_sb_info *sbi = oi->vfs_inode.i_sb->s_fs_info;
struct exofs_io_state *ios;
- struct osd_attr attr;
- __be64 newsize;
+ struct exofs_trunc_attr {
+ struct osd_attr attr;
+ __be64 newsize;
+ } *size_attrs;
+ struct _striping_info si;
int i, ret;
- if (exofs_get_io_state(sbi, &ios))
- return -ENOMEM;
+ ret = exofs_get_io_state(&sbi->layout, &ios);
+ if (unlikely(ret))
+ return ret;
+
+ size_attrs = kcalloc(ios->layout->group_width, sizeof(*size_attrs),
+ GFP_KERNEL);
+ if (unlikely(!size_attrs)) {
+ ret = -ENOMEM;
+ goto out;
+ }
ios->obj.id = exofs_oi_objno(oi);
ios->cred = oi->i_cred;
- newsize = cpu_to_be64(size);
- attr = g_attr_logical_length;
- attr.val_ptr = &newsize;
+ ios->numdevs = ios->layout->s_numdevs;
+ _calc_stripe_info(ios, size, &si);
- for (i = 0; i < sbi->s_numdevs; i++) {
- struct osd_request *or;
+ for (i = 0; i < ios->layout->group_width; ++i) {
+ struct exofs_trunc_attr *size_attr = &size_attrs[i];
+ u64 obj_size;
- or = osd_start_request(sbi->s_ods[i], GFP_KERNEL);
- if (unlikely(!or)) {
- EXOFS_ERR("%s: osd_start_request failed\n", __func__);
- ret = -ENOMEM;
- goto out;
- }
- ios->per_dev[i].or = or;
- ios->numdevs++;
+ if (i < si.dev)
+ obj_size = si.obj_offset +
+ ios->layout->stripe_unit - si.unit_off;
+ else if (i == si.dev)
+ obj_size = si.obj_offset;
+ else /* i > si.dev */
+ obj_size = si.obj_offset - si.unit_off;
- osd_req_set_attributes(or, &ios->obj);
- osd_req_add_set_attr_list(or, &attr, 1);
+ size_attr->newsize = cpu_to_be64(obj_size);
+ size_attr->attr = g_attr_logical_length;
+ size_attr->attr.val_ptr = &size_attr->newsize;
+
+ ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
+ &size_attr->attr);
+ if (unlikely(ret))
+ goto out;
}
ret = exofs_io_execute(ios);
out:
+ kfree(size_attrs);
exofs_put_io_state(ios);
return ret;
}
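The three obj_size cases above are easiest to see with numbers (illustrative, not from the patch):

/* group_width = 4, stripe_unit = 64K, mirrors_p1 = 1, new size = 136K.
 * _calc_stripe_info() yields si.dev = 2, si.unit_off = 8K, si.obj_offset = 8K:
 *   i = 0, 1 (i < si.dev):  obj_size = 8K + 64K - 8K = 64K
 *   i = 2    (i == si.dev): obj_size = 8K
 *   i = 3    (i > si.dev):  obj_size = 8K - 8K = 0
 * 64K + 64K + 8K + 0 = 136K, so the per-component logical lengths add
 * back up to the truncated file size.
 */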
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index a1d1e77..6cf5e4e 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -210,7 +210,7 @@ int exofs_sync_fs(struct super_block *sb, int wait)
sbi = sb->s_fs_info;
fscb = &sbi->s_fscb;
- ret = exofs_get_io_state(sbi, &ios);
+ ret = exofs_get_io_state(&sbi->layout, &ios);
if (ret)
goto out;
@@ -264,12 +264,12 @@ static void _exofs_print_device(const char *msg, const char *dev_path,
void exofs_free_sbi(struct exofs_sb_info *sbi)
{
- while (sbi->s_numdevs) {
- int i = --sbi->s_numdevs;
- struct osd_dev *od = sbi->s_ods[i];
+ while (sbi->layout.s_numdevs) {
+ int i = --sbi->layout.s_numdevs;
+ struct osd_dev *od = sbi->layout.s_ods[i];
if (od) {
- sbi->s_ods[i] = NULL;
+ sbi->layout.s_ods[i] = NULL;
osduld_put_device(od);
}
}
@@ -298,7 +298,8 @@ static void exofs_put_super(struct super_block *sb)
msecs_to_jiffies(100));
}
- _exofs_print_device("Unmounting", NULL, sbi->s_ods[0], sbi->s_pid);
+ _exofs_print_device("Unmounting", NULL, sbi->layout.s_ods[0],
+ sbi->layout.s_pid);
exofs_free_sbi(sbi);
sb->s_fs_info = NULL;
@@ -307,6 +308,8 @@ static void exofs_put_super(struct super_block *sb)
static int _read_and_match_data_map(struct exofs_sb_info *sbi, unsigned numdevs,
struct exofs_device_table *dt)
{
+ u64 stripe_length;
+
sbi->data_map.odm_num_comps =
le32_to_cpu(dt->dt_data_map.cb_num_comps);
sbi->data_map.odm_stripe_unit =
@@ -320,14 +323,63 @@ static int _read_and_match_data_map(struct exofs_sb_info *sbi, unsigned numdevs,
sbi->data_map.odm_raid_algorithm =
le32_to_cpu(dt->dt_data_map.cb_raid_algorithm);
-/* FIXME: Hard coded mirror only for now. if not so do not mount */
- if ((sbi->data_map.odm_num_comps != numdevs) ||
- (sbi->data_map.odm_stripe_unit != EXOFS_BLKSIZE) ||
- (sbi->data_map.odm_raid_algorithm != PNFS_OSD_RAID_0) ||
- (sbi->data_map.odm_mirror_cnt != (numdevs - 1)))
+/* FIXME: Only RAID0 for now; if not so, do not mount */
+ if (sbi->data_map.odm_num_comps != numdevs) {
+ EXOFS_ERR("odm_num_comps(%u) != numdevs(%u)\n",
+ sbi->data_map.odm_num_comps, numdevs);
return -EINVAL;
- else
- return 0;
+ }
+ if (sbi->data_map.odm_raid_algorithm != PNFS_OSD_RAID_0) {
+ EXOFS_ERR("Only RAID_0 for now\n");
+ return -EINVAL;
+ }
+ if (0 != (numdevs % (sbi->data_map.odm_mirror_cnt + 1))) {
+ EXOFS_ERR("Data Map wrong, numdevs=%d mirrors=%d\n",
+ numdevs, sbi->data_map.odm_mirror_cnt);
+ return -EINVAL;
+ }
+
+ if (0 != (sbi->data_map.odm_stripe_unit & ~PAGE_MASK)) {
+ EXOFS_ERR("Stripe Unit(0x%llx)"
+ " must be Multples of PAGE_SIZE(0x%lx)\n",
+ _LLU(sbi->data_map.odm_stripe_unit), PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ sbi->layout.stripe_unit = sbi->data_map.odm_stripe_unit;
+ sbi->layout.mirrors_p1 = sbi->data_map.odm_mirror_cnt + 1;
+
+ if (sbi->data_map.odm_group_width) {
+ sbi->layout.group_width = sbi->data_map.odm_group_width;
+ sbi->layout.group_depth = sbi->data_map.odm_group_depth;
+ if (!sbi->layout.group_depth) {
+ EXOFS_ERR("group_depth == 0 && group_width != 0\n");
+ return -EINVAL;
+ }
+ sbi->layout.group_count = sbi->data_map.odm_num_comps /
+ sbi->layout.mirrors_p1 /
+ sbi->data_map.odm_group_width;
+ } else {
+ if (sbi->data_map.odm_group_depth) {
+ printk(KERN_NOTICE "Warning: group_depth ignored "
+ "group_width == 0 && group_depth == %d\n",
+ sbi->data_map.odm_group_depth);
+ sbi->data_map.odm_group_depth = 0;
+ }
+ sbi->layout.group_width = sbi->data_map.odm_num_comps /
+ sbi->layout.mirrors_p1;
+ sbi->layout.group_depth = -1;
+ sbi->layout.group_count = 1;
+ }
+
+ stripe_length = (u64)sbi->layout.group_width * sbi->layout.stripe_unit;
+ if (stripe_length >= (1ULL << 32)) {
+ EXOFS_ERR("Total Stripe length(0x%llx)"
+ " >= 32bit is not supported\n", _LLU(stripe_length));
+ return -EINVAL;
+ }
+
+ return 0;
}
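For example, with an illustrative device table (not one taken from the patch):

/* numdevs = 12, odm_mirror_cnt = 1 (so mirrors_p1 = 2), group_width = 3:
 *   group_count = 12 / 2 / 3 = 2    (two groups of three mirrored pairs)
 * With group_width == 0 the whole array forms a single group:
 *   group_width = 12 / 2 = 6, group_depth = -1, group_count = 1
 */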
/* @odi is valid only as long as @fscb_dev is valid */
@@ -361,7 +413,7 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
{
struct exofs_sb_info *sbi = *psbi;
struct osd_dev *fscb_od;
- struct osd_obj_id obj = {.partition = sbi->s_pid,
+ struct osd_obj_id obj = {.partition = sbi->layout.s_pid,
.id = EXOFS_DEVTABLE_ID};
struct exofs_device_table *dt;
unsigned table_bytes = table_count * sizeof(dt->dt_dev_table[0]) +
@@ -376,9 +428,9 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
return -ENOMEM;
}
- fscb_od = sbi->s_ods[0];
- sbi->s_ods[0] = NULL;
- sbi->s_numdevs = 0;
+ fscb_od = sbi->layout.s_ods[0];
+ sbi->layout.s_ods[0] = NULL;
+ sbi->layout.s_numdevs = 0;
ret = exofs_read_kern(fscb_od, sbi->s_cred, &obj, 0, dt, table_bytes);
if (unlikely(ret)) {
EXOFS_ERR("ERROR: reading device table\n");
@@ -397,14 +449,15 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
goto out;
if (likely(numdevs > 1)) {
- unsigned size = numdevs * sizeof(sbi->s_ods[0]);
+ unsigned size = numdevs * sizeof(sbi->layout.s_ods[0]);
sbi = krealloc(sbi, sizeof(*sbi) + size, GFP_KERNEL);
if (unlikely(!sbi)) {
ret = -ENOMEM;
goto out;
}
- memset(&sbi->s_ods[1], 0, size - sizeof(sbi->s_ods[0]));
+ memset(&sbi->layout.s_ods[1], 0,
+ size - sizeof(sbi->layout.s_ods[0]));
*psbi = sbi;
}
@@ -427,8 +480,8 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
* line. We always keep them in device-table order.
*/
if (fscb_od && osduld_device_same(fscb_od, &odi)) {
- sbi->s_ods[i] = fscb_od;
- ++sbi->s_numdevs;
+ sbi->layout.s_ods[i] = fscb_od;
+ ++sbi->layout.s_numdevs;
fscb_od = NULL;
continue;
}
@@ -441,8 +494,8 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
goto out;
}
- sbi->s_ods[i] = od;
- ++sbi->s_numdevs;
+ sbi->layout.s_ods[i] = od;
+ ++sbi->layout.s_numdevs;
/* Read the fscb of the other devices to make sure the FS
* partition is there.
@@ -499,9 +552,15 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
}
- sbi->s_ods[0] = od;
- sbi->s_numdevs = 1;
- sbi->s_pid = opts->pid;
+ /* Default layout in case we do not have a device-table */
+ sbi->layout.stripe_unit = PAGE_SIZE;
+ sbi->layout.mirrors_p1 = 1;
+ sbi->layout.group_width = 1;
+ sbi->layout.group_depth = -1;
+ sbi->layout.group_count = 1;
+ sbi->layout.s_ods[0] = od;
+ sbi->layout.s_numdevs = 1;
+ sbi->layout.s_pid = opts->pid;
sbi->s_timeout = opts->timeout;
/* fill in some other data by hand */
@@ -514,7 +573,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_bdev = NULL;
sb->s_dev = 0;
- obj.partition = sbi->s_pid;
+ obj.partition = sbi->layout.s_pid;
obj.id = EXOFS_SUPER_ID;
exofs_make_credential(sbi->s_cred, &obj);
@@ -578,13 +637,13 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
}
- _exofs_print_device("Mounting", opts->dev_name, sbi->s_ods[0],
- sbi->s_pid);
+ _exofs_print_device("Mounting", opts->dev_name, sbi->layout.s_ods[0],
+ sbi->layout.s_pid);
return 0;
free_sbi:
EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n",
- opts->dev_name, sbi->s_pid, ret);
+ opts->dev_name, sbi->layout.s_pid, ret);
exofs_free_sbi(sbi);
return ret;
}
@@ -627,7 +686,7 @@ static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf)
uint8_t cred_a[OSD_CAP_LEN];
int ret;
- ret = exofs_get_io_state(sbi, &ios);
+ ret = exofs_get_io_state(&sbi->layout, &ios);
if (ret) {
EXOFS_DBGMSG("exofs_get_io_state failed.\n");
return ret;
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 061914a..0b038e4 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -118,7 +118,7 @@ extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
/* inode.c */
extern struct inode *ext2_iget (struct super_block *, unsigned long);
-extern int ext2_write_inode (struct inode *, int);
+extern int ext2_write_inode (struct inode *, struct writeback_control *);
extern void ext2_delete_inode (struct inode *);
extern int ext2_sync_inode (struct inode *);
extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 45ff49f..fc13cc1 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -41,6 +41,8 @@ MODULE_AUTHOR("Remy Card and others");
MODULE_DESCRIPTION("Second Extended Filesystem");
MODULE_LICENSE("GPL");
+static int __ext2_write_inode(struct inode *inode, int do_sync);
+
/*
* Test whether an inode is a fast symlink.
*/
@@ -66,7 +68,7 @@ void ext2_delete_inode (struct inode * inode)
goto no_delete;
EXT2_I(inode)->i_dtime = get_seconds();
mark_inode_dirty(inode);
- ext2_write_inode(inode, inode_needs_sync(inode));
+ __ext2_write_inode(inode, inode_needs_sync(inode));
inode->i_size = 0;
if (inode->i_blocks)
@@ -1337,7 +1339,7 @@ bad_inode:
return ERR_PTR(ret);
}
-int ext2_write_inode(struct inode *inode, int do_sync)
+static int __ext2_write_inode(struct inode *inode, int do_sync)
{
struct ext2_inode_info *ei = EXT2_I(inode);
struct super_block *sb = inode->i_sb;
@@ -1442,6 +1444,11 @@ int ext2_write_inode(struct inode *inode, int do_sync)
return err;
}
+int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+}
+
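The same mechanical conversion recurs for every filesystem touched by this series; in sketch form (names generic, not from the patch):

/* old: int foo_write_inode(struct inode *inode, int wait);
 * new: int foo_write_inode(struct inode *inode,
 *                          struct writeback_control *wbc);
 * where the old boolean is recovered as wbc->sync_mode == WB_SYNC_ALL.
 */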
int ext2_sync_inode(struct inode *inode)
{
struct writeback_control wbc = {
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index ffbbc65..7f920b7 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3107,7 +3107,7 @@ out_brelse:
* `stuff()' is running, and the new i_size will be lost. Plus the inode
* will no longer be on the superblock's dirty inode list.
*/
-int ext3_write_inode(struct inode *inode, int wait)
+int ext3_write_inode(struct inode *inode, struct writeback_control *wbc)
{
if (current->flags & PF_MEMALLOC)
return 0;
@@ -3118,7 +3118,7 @@ int ext3_write_inode(struct inode *inode, int wait)
return -EIO;
}
- if (!wait)
+ if (wbc->sync_mode != WB_SYNC_ALL)
return 0;
return ext3_force_commit(inode->i_sb);
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 22bc743..d2f37a5 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -97,8 +97,8 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
/* If checksum is bad mark all blocks used to prevent allocation
* essentially implementing a per-group read-only flag. */
if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
- ext4_error(sb, __func__,
- "Checksum bad for group %u", block_group);
+ ext4_error(sb, "Checksum bad for group %u",
+ block_group);
ext4_free_blks_set(sb, gdp, 0);
ext4_free_inodes_set(sb, gdp, 0);
ext4_itable_unused_set(sb, gdp, 0);
@@ -130,8 +130,7 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
* to make sure we calculate the right free blocks
*/
group_blocks = ext4_blocks_count(sbi->s_es) -
- le32_to_cpu(sbi->s_es->s_first_data_block) -
- (EXT4_BLOCKS_PER_GROUP(sb) * (ngroups - 1));
+ ext4_group_first_block_no(sb, ngroups - 1);
} else {
group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
}
@@ -189,9 +188,6 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
* when a file system is mounted (see ext4_fill_super).
*/
-
-#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
-
/**
* ext4_get_group_desc() -- load group descriptor from disk
* @sb: super block
@@ -210,10 +206,8 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
if (block_group >= ngroups) {
- ext4_error(sb, "ext4_get_group_desc",
- "block_group >= groups_count - "
- "block_group = %u, groups_count = %u",
- block_group, ngroups);
+ ext4_error(sb, "block_group >= groups_count - block_group = %u,"
+ " groups_count = %u", block_group, ngroups);
return NULL;
}
@@ -221,8 +215,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
if (!sbi->s_group_desc[group_desc]) {
- ext4_error(sb, "ext4_get_group_desc",
- "Group descriptor not loaded - "
+ ext4_error(sb, "Group descriptor not loaded - "
"block_group = %u, group_desc = %u, desc = %u",
block_group, group_desc, offset);
return NULL;
@@ -282,9 +275,7 @@ static int ext4_valid_block_bitmap(struct super_block *sb,
return 1;
err_out:
- ext4_error(sb, __func__,
- "Invalid block bitmap - "
- "block_group = %d, block = %llu",
+ ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
block_group, bitmap_blk);
return 0;
}
@@ -311,8 +302,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
bitmap_blk = ext4_block_bitmap(sb, desc);
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
- ext4_error(sb, __func__,
- "Cannot read block bitmap - "
+ ext4_error(sb, "Cannot read block bitmap - "
"block_group = %u, block_bitmap = %llu",
block_group, bitmap_blk);
return NULL;
@@ -354,8 +344,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
set_bitmap_uptodate(bh);
if (bh_submit_read(bh) < 0) {
put_bh(bh);
- ext4_error(sb, __func__,
- "Cannot read block bitmap - "
+ ext4_error(sb, "Cannot read block bitmap - "
"block_group = %u, block_bitmap = %llu",
block_group, bitmap_blk);
return NULL;
@@ -419,8 +408,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
in_range(block + count - 1, ext4_inode_table(sb, desc),
sbi->s_itb_per_group)) {
- ext4_error(sb, __func__,
- "Adding blocks in system zones - "
+ ext4_error(sb, "Adding blocks in system zones - "
"Block = %llu, count = %lu",
block, count);
goto error_return;
@@ -453,8 +441,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
BUFFER_TRACE(bitmap_bh, "clear bit");
if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
bit + i, bitmap_bh->b_data)) {
- ext4_error(sb, __func__,
- "bit already cleared for block %llu",
+ ext4_error(sb, "bit already cleared for block %llu",
(ext4_fsblk_t)(block + i));
BUFFER_TRACE(bitmap_bh, "bit already cleared");
} else {
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index a60ab9a..983f0e1 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -205,14 +205,14 @@ void ext4_release_system_zone(struct super_block *sb)
entry = rb_entry(n, struct ext4_system_zone, node);
kmem_cache_free(ext4_system_zone_cachep, entry);
if (!parent)
- EXT4_SB(sb)->system_blks.rb_node = NULL;
+ EXT4_SB(sb)->system_blks = RB_ROOT;
else if (parent->rb_left == n)
parent->rb_left = NULL;
else if (parent->rb_right == n)
parent->rb_right = NULL;
n = parent;
}
- EXT4_SB(sb)->system_blks.rb_node = NULL;
+ EXT4_SB(sb)->system_blks = RB_ROOT;
}
/*
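The reset via RB_ROOT rather than NULLing rb_node by hand is a small robustness fix: RB_ROOT is the canonical empty-root initializer, so the code stays correct even if struct rb_root ever grows more fields. A runnable toy of the idiom, using simplified stand-in types rather than the kernel's rbtree definitions:

#include <stddef.h>

/* Simplified stand-ins for the kernel rbtree types (illustration only). */
struct rb_node { struct rb_node *rb_left, *rb_right; };
struct rb_root { struct rb_node *rb_node; };
#define RB_ROOT ((struct rb_root) { NULL, })

int main(void)
{
	struct rb_root tree = RB_ROOT;	/* an empty tree */

	/* After freeing every node, reset the root wholesale; unlike
	 * writing tree.rb_node = NULL, this stays correct even if the
	 * root type ever gains additional fields. */
	tree = RB_ROOT;
	return tree.rb_node == NULL ? 0 : 1;
}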
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 9dc93168..86cb6d8 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -83,10 +83,12 @@ int ext4_check_dir_entry(const char *function, struct inode *dir,
error_msg = "inode out of bounds";
if (error_msg != NULL)
- ext4_error(dir->i_sb, function,
- "bad entry in directory #%lu: %s - "
- "offset=%u, inode=%u, rec_len=%d, name_len=%d",
- dir->i_ino, error_msg, offset,
+ __ext4_error(dir->i_sb, function,
+ "bad entry in directory #%lu: %s - block=%llu"
+ "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
+ dir->i_ino, error_msg,
+ (unsigned long long) bh->b_blocknr,
+ (unsigned) (offset%bh->b_size), offset,
le32_to_cpu(de->inode),
rlen, de->name_len);
return error_msg == NULL ? 1 : 0;
@@ -150,7 +152,7 @@ static int ext4_readdir(struct file *filp,
*/
if (!bh) {
if (!dir_has_error) {
- ext4_error(sb, __func__, "directory #%lu "
+ ext4_error(sb, "directory #%lu "
"contains a hole at offset %Lu",
inode->i_ino,
(unsigned long long) filp->f_pos);
@@ -303,7 +305,7 @@ static void free_rb_tree_fname(struct rb_root *root)
kfree(old);
}
if (!parent)
- root->rb_node = NULL;
+ *root = RB_ROOT;
else if (parent->rb_left == n)
parent->rb_left = NULL;
else if (parent->rb_right == n)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 874d169..bf938cf 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -53,6 +53,12 @@
#define ext4_debug(f, a...) do {} while (0)
#endif
+#define EXT4_ERROR_INODE(inode, fmt, a...) \
+ ext4_error_inode(__func__, (inode), (fmt), ## a)
+
+#define EXT4_ERROR_FILE(file, fmt, a...) \
+ ext4_error_file(__func__, (file), (fmt), ## a)
+
/* data type for block offset of block group */
typedef int ext4_grpblk_t;
@@ -133,14 +139,14 @@ struct mpage_da_data {
int pages_written;
int retval;
};
-#define DIO_AIO_UNWRITTEN 0x1
+#define EXT4_IO_UNWRITTEN 0x1
typedef struct ext4_io_end {
struct list_head list; /* per-file finished AIO list */
struct inode *inode; /* file being written to */
unsigned int flag; /* unwritten or not */
- int error; /* I/O error code */
- ext4_lblk_t offset; /* offset in the file */
- size_t size; /* size of the extent */
+ struct page *page; /* page struct for buffer write */
+ loff_t offset; /* offset in the file */
+ ssize_t size; /* size of the extent */
struct work_struct work; /* data work queue */
} ext4_io_end_t;
@@ -284,10 +290,12 @@ struct flex_groups {
#define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
#define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */
#define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */
+#define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */
+#define EXT4_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */
#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
-#define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
-#define EXT4_FL_USER_MODIFIABLE 0x000B80FF /* User modifiable flags */
+#define EXT4_FL_USER_VISIBLE 0x004BDFFF /* User visible flags */
+#define EXT4_FL_USER_MODIFIABLE 0x004B80FF /* User modifiable flags */
/* Flags that should be inherited by new inodes from their parent. */
#define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\
@@ -313,17 +321,6 @@ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
return flags & EXT4_OTHER_FLMASK;
}
-/*
- * Inode dynamic state flags
- */
-#define EXT4_STATE_JDATA 0x00000001 /* journaled data exists */
-#define EXT4_STATE_NEW 0x00000002 /* inode is newly created */
-#define EXT4_STATE_XATTR 0x00000004 /* has in-inode xattrs */
-#define EXT4_STATE_NO_EXPAND 0x00000008 /* No space for expansion */
-#define EXT4_STATE_DA_ALLOC_CLOSE 0x00000010 /* Alloc DA blks on close */
-#define EXT4_STATE_EXT_MIGRATE 0x00000020 /* Inode is migrating */
-#define EXT4_STATE_DIO_UNWRITTEN 0x00000040 /* need convert on dio done*/
-
/* Used to pass group descriptor data when online resize is done */
struct ext4_new_group_input {
__u32 group; /* Group number for this data */
@@ -364,19 +361,20 @@ struct ext4_new_group_data {
/* caller is from the direct IO path; request the creation of an
uninitialized extent if not allocated, and split the uninitialized
extent if blocks have been preallocated already */
-#define EXT4_GET_BLOCKS_DIO 0x0008
+#define EXT4_GET_BLOCKS_PRE_IO 0x0008
#define EXT4_GET_BLOCKS_CONVERT 0x0010
-#define EXT4_GET_BLOCKS_DIO_CREATE_EXT (EXT4_GET_BLOCKS_DIO|\
+#define EXT4_GET_BLOCKS_IO_CREATE_EXT (EXT4_GET_BLOCKS_PRE_IO|\
+ EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
+ /* Convert extent to initialized after IO completes */
+#define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\
EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
- /* Convert extent to initialized after direct IO complete */
-#define EXT4_GET_BLOCKS_DIO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\
- EXT4_GET_BLOCKS_DIO_CREATE_EXT)
/*
* Flags used by ext4_free_blocks
*/
#define EXT4_FREE_BLOCKS_METADATA 0x0001
#define EXT4_FREE_BLOCKS_FORGET 0x0002
+#define EXT4_FREE_BLOCKS_VALIDATED 0x0004
/*
* ioctl commands
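One consequence of the rename worth spelling out: later hunks replace equality tests such as flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT with mask tests like flags & EXT4_GET_BLOCKS_PRE_IO, because composed flag words now carry extra bits. A runnable toy demonstrating why (the CREATE_UNINIT_EXT value is assumed for the demo, not taken from this header):

#include <stdio.h>

#define EXT4_GET_BLOCKS_CREATE_UNINIT_EXT	0x0002	/* assumed value */
#define EXT4_GET_BLOCKS_PRE_IO			0x0008
#define EXT4_GET_BLOCKS_IO_CREATE_EXT	(EXT4_GET_BLOCKS_PRE_IO|\
					 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)

int main(void)
{
	int flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;

	/* An equality test misses the composed value (prints 0)... */
	printf("equality: %d\n", flags == EXT4_GET_BLOCKS_PRE_IO);
	/* ...while a mask test still matches (prints 1), which is why
	 * the patch switches the call sites to mask tests. */
	printf("mask:     %d\n", !!(flags & EXT4_GET_BLOCKS_PRE_IO));
	return 0;
}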
@@ -630,7 +628,7 @@ struct ext4_inode_info {
* near to their parent directory's inode.
*/
ext4_group_t i_block_group;
- __u32 i_state; /* Dynamic state flags for ext4 */
+ unsigned long i_state_flags; /* Dynamic state flags */
ext4_lblk_t i_dir_start_lookup;
#ifdef CONFIG_EXT4_FS_XATTR
@@ -708,8 +706,9 @@ struct ext4_inode_info {
qsize_t i_reserved_quota;
#endif
- /* completed async DIOs that might need unwritten extents handling */
- struct list_head i_aio_dio_complete_list;
+ /* completed IOs that might need unwritten extents handling */
+ struct list_head i_completed_io_list;
+ spinlock_t i_completed_io_lock;
/* current io_end structure for async DIO write*/
ext4_io_end_t *cur_aio_dio;
@@ -760,6 +759,7 @@ struct ext4_inode_info {
#define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */
#define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
#define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
+#define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable dio reads without locking */
#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */
#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */
#define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */
@@ -1014,7 +1014,7 @@ struct ext4_sb_info {
atomic_t s_lock_busy;
/* locality groups */
- struct ext4_locality_group *s_locality_groups;
+ struct ext4_locality_group __percpu *s_locality_groups;
/* for write statistics */
unsigned long s_sectors_written_start;
@@ -1050,6 +1050,34 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
(ino >= EXT4_FIRST_INO(sb) &&
ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
}
+
+/*
+ * Inode dynamic state flags
+ */
+enum {
+ EXT4_STATE_JDATA, /* journaled data exists */
+ EXT4_STATE_NEW, /* inode is newly created */
+ EXT4_STATE_XATTR, /* has in-inode xattrs */
+ EXT4_STATE_NO_EXPAND, /* No space for expansion */
+ EXT4_STATE_DA_ALLOC_CLOSE, /* Alloc DA blks on close */
+ EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */
+ EXT4_STATE_DIO_UNWRITTEN, /* needs conversion when dio done */
+};
+
+static inline int ext4_test_inode_state(struct inode *inode, int bit)
+{
+ return test_bit(bit, &EXT4_I(inode)->i_state_flags);
+}
+
+static inline void ext4_set_inode_state(struct inode *inode, int bit)
+{
+ set_bit(bit, &EXT4_I(inode)->i_state_flags);
+}
+
+static inline void ext4_clear_inode_state(struct inode *inode, int bit)
+{
+ clear_bit(bit, &EXT4_I(inode)->i_state_flags);
+}
#else
/* Assume that user mode programs are passing in an ext4fs superblock, not
* a kernel struct super_block. This will allow us to call the feature-test
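The move from the old mask-style i_state to bit numbers matters: test_bit()/set_bit() take a bit index rather than a mask, and the set/clear operations are atomic read-modify-write, which the old i_state |= ... updates were not. A sketch of the intended call pattern, assuming only the helpers declared above (not compilable outside the kernel tree):

/* Sketch: converting an open-coded flag update to the new helpers. */
static void example_state_usage(struct inode *inode)
{
	/* Old, non-atomic style (bit masks on i_state):
	 *	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
	 *	EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
	 */

	/* New style: the enum values are bit numbers, and the helpers
	 * wrap the atomic set_bit()/clear_bit() and test_bit() primitives
	 * on i_state_flags. */
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	if (ext4_test_inode_state(inode, EXT4_STATE_JDATA))
		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
}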
@@ -1126,6 +1154,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
#define EXT4_FEATURE_INCOMPAT_64BIT 0x0080
#define EXT4_FEATURE_INCOMPAT_MMP 0x0100
#define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200
+#define EXT4_FEATURE_INCOMPAT_EA_INODE 0x0400 /* EA in inode */
+#define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000 /* data in dirent */
#define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
#define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
@@ -1416,7 +1446,7 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
extern struct inode *ext4_iget(struct super_block *, unsigned long);
-extern int ext4_write_inode(struct inode *, int);
+extern int ext4_write_inode(struct inode *, struct writeback_control *);
extern int ext4_setattr(struct dentry *, struct iattr *);
extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat);
@@ -1439,7 +1469,7 @@ extern int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
-extern int flush_aio_dio_completed_IO(struct inode *inode);
+extern int flush_completed_IO(struct inode *inode);
extern void ext4_da_update_reserve_space(struct inode *inode,
int used, int quota_claim);
/* ioctl.c */
@@ -1465,13 +1495,20 @@ extern int ext4_group_extend(struct super_block *sb,
ext4_fsblk_t n_blocks_count);
/* super.c */
-extern void ext4_error(struct super_block *, const char *, const char *, ...)
+extern void __ext4_error(struct super_block *, const char *, const char *, ...)
+ __attribute__ ((format (printf, 3, 4)));
+#define ext4_error(sb, message...) __ext4_error(sb, __func__, ## message)
+extern void ext4_error_inode(const char *, struct inode *, const char *, ...)
+ __attribute__ ((format (printf, 3, 4)));
+extern void ext4_error_file(const char *, struct file *, const char *, ...)
__attribute__ ((format (printf, 3, 4)));
extern void __ext4_std_error(struct super_block *, const char *, int);
extern void ext4_abort(struct super_block *, const char *, const char *, ...)
__attribute__ ((format (printf, 3, 4)));
-extern void ext4_warning(struct super_block *, const char *, const char *, ...)
+extern void __ext4_warning(struct super_block *, const char *,
+ const char *, ...)
__attribute__ ((format (printf, 3, 4)));
+#define ext4_warning(sb, message...) __ext4_warning(sb, __func__, ## message)
extern void ext4_msg(struct super_block *, const char *, const char *, ...)
__attribute__ ((format (printf, 3, 4)));
extern void ext4_grp_locked_error(struct super_block *, ext4_group_t,
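Taken together, the reworked reporting API has the macros splice in __func__ so callers no longer pass a function-name string, plus inode- and file-scoped variants. A sketch of hypothetical call sites (the messages and values below are invented for illustration, not from this patch):

/* Hypothetical call sites for the new reporting helpers (sketch only). */
static void example_reporting(struct super_block *sb, struct inode *inode)
{
	/* The macro supplies __func__; callers pass just the format. */
	ext4_error(sb, "bad group descriptor, group = %u", 0);
	ext4_warning(sb, "checksum mismatch, group = %u", 0);

	/* Inode-scoped variant, built on ext4_error_inode(). */
	EXT4_ERROR_INODE(inode, "unexpected hole at block %u", 42);
}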
@@ -1744,7 +1781,7 @@ extern void ext4_ext_release(struct super_block *);
extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
loff_t len);
extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
- loff_t len);
+ ssize_t len);
extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
sector_t block, unsigned int max_blocks,
struct buffer_head *bh, int flags);
@@ -1756,6 +1793,15 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
__u64 len, __u64 *moved_len);
+/* BH_Uninit flag: blocks are allocated but uninitialized on disk */
+enum ext4_state_bits {
+ BH_Uninit = BH_JBDPrivateStart,
+};
+
+BUFFER_FNS(Uninit, uninit)
+TAS_BUFFER_FNS(Uninit, uninit)
+
/*
* Add new method to test whether block and inode bitmaps are properly
* initialized. With uninit_bg reading the block from disk is not enough
@@ -1773,6 +1819,8 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh)
set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
}
+#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
+
#endif /* __KERNEL__ */
#endif /* _EXT4_H */
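in_range() moves here (under __KERNEL__) from balloc.c so other ext4 files can share it; note that both endpoints are inclusive. A runnable check of the boundary behavior:

#include <assert.h>

#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)

int main(void)
{
	/* The range is [first, first + len - 1], inclusive on both ends. */
	assert(in_range(100, 100, 8));	/* first block of the range */
	assert(in_range(107, 100, 8));	/* last block of the range */
	assert(!in_range(99, 100, 8));	/* one before the start */
	assert(!in_range(108, 100, 8));	/* one past the end */
	return 0;
}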
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index b57e5c7..53d2764 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -125,14 +125,14 @@ int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
ext4_journal_abort_handle(where, __func__, bh,
handle, err);
} else {
- if (inode && bh)
+ if (inode)
mark_buffer_dirty_inode(bh, inode);
else
mark_buffer_dirty(bh);
if (inode && inode_needs_sync(inode)) {
sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh)) {
- ext4_error(inode->i_sb, __func__,
+ ext4_error(inode->i_sb,
"IO error syncing inode, "
"inode=%lu, block=%llu",
inode->i_ino,
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 05eca81..b79ad51 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -304,4 +304,28 @@ static inline int ext4_should_writeback_data(struct inode *inode)
return 0;
}
+/*
+ * This function controls whether or not we should try to go down the
+ * dioread_nolock code paths, which makes it safe to avoid taking
+ * i_mutex for direct I/O reads. This only works for extent-based
+ * files, and it doesn't work for nobh or if data journaling is
+ * enabled, since the dioread_nolock code uses b_private to pass
+ * information back to the I/O completion handler, and this conflicts
+ * with the jbd's use of b_private.
+ */
+static inline int ext4_should_dioread_nolock(struct inode *inode)
+{
+ if (!test_opt(inode->i_sb, DIOREAD_NOLOCK))
+ return 0;
+ if (test_opt(inode->i_sb, NOBH))
+ return 0;
+ if (!S_ISREG(inode->i_mode))
+ return 0;
+ if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
+ return 0;
+ if (ext4_should_journal_data(inode))
+ return 0;
+ return 1;
+}
+
#endif /* _EXT4_JBD2_H */
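For orientation, a sketch of how later hunks consult this predicate (modeled on the ext4_write_begin change in inode.c below; not standalone code):

/* Sketch: callers pick the get_block variant off this predicate. */
static int example_pick_get_block(struct inode *inode)
{
	if (ext4_should_dioread_nolock(inode))
		/* allocate uninitialized extents; convert at end_io */
		return 1;	/* would use ext4_get_block_write */
	return 0;		/* would use plain ext4_get_block */
}

The NOBH and data-journaling exclusions both come down to the same scarce resource: b_private is the only per-buffer hook available for the io_end, and jbd2 already claims it.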
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 765a482..94c8ee8 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -195,8 +195,7 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
if (S_ISREG(inode->i_mode))
block_group++;
}
- bg_start = (block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
+ bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
/*
@@ -440,7 +439,7 @@ static int __ext4_ext_check(const char *function, struct inode *inode,
return 0;
corrupted:
- ext4_error(inode->i_sb, function,
+ __ext4_error(inode->i_sb, function,
"bad header/extent in inode #%lu: %s - magic %x, "
"entries %u, max %u(%u), depth %u(%u)",
inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
@@ -703,7 +702,12 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
}
eh = ext_block_hdr(bh);
ppos++;
- BUG_ON(ppos > depth);
+ if (unlikely(ppos > depth)) {
+ put_bh(bh);
+ EXT4_ERROR_INODE(inode,
+ "ppos %d > depth %d", ppos, depth);
+ goto err;
+ }
path[ppos].p_bh = bh;
path[ppos].p_hdr = eh;
i--;
@@ -749,7 +753,12 @@ int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
if (err)
return err;
- BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
+ if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
+ EXT4_ERROR_INODE(inode,
+ "logical %d == ei_block %d!",
+ logical, le32_to_cpu(curp->p_idx->ei_block));
+ return -EIO;
+ }
len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
/* insert after */
@@ -779,9 +788,17 @@ int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
ext4_idx_store_pblock(ix, ptr);
le16_add_cpu(&curp->p_hdr->eh_entries, 1);
- BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
- > le16_to_cpu(curp->p_hdr->eh_max));
- BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));
+ if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
+ > le16_to_cpu(curp->p_hdr->eh_max))) {
+ EXT4_ERROR_INODE(inode,
+ "eh_entries %d > eh_max %d!",
+ le16_to_cpu(curp->p_hdr->eh_entries),
+ le16_to_cpu(curp->p_hdr->eh_max));
+ return -EIO;
+ }
+ if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
+ EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
+ return -EIO;
+ }
err = ext4_ext_dirty(handle, inode, curp);
ext4_std_error(inode->i_sb, err);
@@ -819,7 +836,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
/* if current leaf will be split, then we should use
* border from split point */
- BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
+ if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
+ EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
+ return -EIO;
+ }
if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
border = path[depth].p_ext[1].ee_block;
ext_debug("leaf will be split."
@@ -860,7 +880,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
/* initialize new leaf */
newblock = ablocks[--a];
- BUG_ON(newblock == 0);
+ if (unlikely(newblock == 0)) {
+ EXT4_ERROR_INODE(inode, "newblock == 0!");
+ err = -EIO;
+ goto cleanup;
+ }
bh = sb_getblk(inode->i_sb, newblock);
if (!bh) {
err = -EIO;
@@ -880,7 +904,14 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
ex = EXT_FIRST_EXTENT(neh);
/* move remainder of path[depth] to the new leaf */
- BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
+ if (unlikely(path[depth].p_hdr->eh_entries !=
+ path[depth].p_hdr->eh_max)) {
+ EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
+ path[depth].p_hdr->eh_entries,
+ path[depth].p_hdr->eh_max);
+ err = -EIO;
+ goto cleanup;
+ }
/* start copy from next extent */
/* TODO: we could do it by single memmove */
m = 0;
@@ -927,7 +958,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
/* create intermediate indexes */
k = depth - at - 1;
- BUG_ON(k < 0);
+ if (unlikely(k < 0)) {
+ EXT4_ERROR_INODE(inode, "k %d < 0!", k);
+ err = -EIO;
+ goto cleanup;
+ }
if (k)
ext_debug("create %d intermediate indices\n", k);
/* insert new index into current index block */
@@ -964,8 +999,14 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
EXT_MAX_INDEX(path[i].p_hdr));
- BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
- EXT_LAST_INDEX(path[i].p_hdr));
+ if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
+ EXT_LAST_INDEX(path[i].p_hdr))) {
+ EXT4_ERROR_INODE(inode,
+ "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
+ le32_to_cpu(path[i].p_ext->ee_block));
+ err = -EIO;
+ goto cleanup;
+ }
while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
ext_debug("%d: move %d:%llu in new index %llu\n", i,
le32_to_cpu(path[i].p_idx->ei_block),
@@ -1203,7 +1244,10 @@ ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
struct ext4_extent *ex;
int depth, ee_len;
- BUG_ON(path == NULL);
+ if (unlikely(path == NULL)) {
+ EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
+ return -EIO;
+ }
depth = path->p_depth;
*phys = 0;
@@ -1217,15 +1261,33 @@ ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
ex = path[depth].p_ext;
ee_len = ext4_ext_get_actual_len(ex);
if (*logical < le32_to_cpu(ex->ee_block)) {
- BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
+ if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
+ EXT4_ERROR_INODE(inode,
+ "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
+ *logical, le32_to_cpu(ex->ee_block));
+ return -EIO;
+ }
while (--depth >= 0) {
ix = path[depth].p_idx;
- BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
+ if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
+ EXT4_ERROR_INODE(inode,
+ "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
+ ix != NULL ? ix->ei_block : 0,
+ EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
+ EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
+ depth);
+ return -EIO;
+ }
}
return 0;
}
- BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));
+ if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
+ EXT4_ERROR_INODE(inode,
+ "logical %d < ee_block %d + ee_len %d!",
+ *logical, le32_to_cpu(ex->ee_block), ee_len);
+ return -EIO;
+ }
*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
*phys = ext_pblock(ex) + ee_len - 1;
@@ -1251,7 +1313,10 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
int depth; /* Note, NOT eh_depth; depth from top of tree */
int ee_len;
- BUG_ON(path == NULL);
+ if (unlikely(path == NULL)) {
+ EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
+ return -EIO;
+ }
depth = path->p_depth;
*phys = 0;
@@ -1265,17 +1330,32 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
ex = path[depth].p_ext;
ee_len = ext4_ext_get_actual_len(ex);
if (*logical < le32_to_cpu(ex->ee_block)) {
- BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
+ if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
+ EXT4_ERROR_INODE(inode,
+ "first_extent(path[%d].p_hdr) != ex",
+ depth);
+ return -EIO;
+ }
while (--depth >= 0) {
ix = path[depth].p_idx;
- BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
+ if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
+ EXT4_ERROR_INODE(inode,
+ "ix != EXT_FIRST_INDEX *logical %d!",
+ *logical);
+ return -EIO;
+ }
}
*logical = le32_to_cpu(ex->ee_block);
*phys = ext_pblock(ex);
return 0;
}
- BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));
+ if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
+ EXT4_ERROR_INODE(inode,
+ "logical %d < ee_block %d + ee_len %d!",
+ *logical, le32_to_cpu(ex->ee_block), ee_len);
+ return -EIO;
+ }
if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
/* next allocated block in this leaf */
@@ -1414,8 +1494,12 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
eh = path[depth].p_hdr;
ex = path[depth].p_ext;
- BUG_ON(ex == NULL);
- BUG_ON(eh == NULL);
+
+ if (unlikely(ex == NULL || eh == NULL)) {
+ EXT4_ERROR_INODE(inode,
+ "ex %p == NULL or eh %p == NULL", ex, eh);
+ return -EIO;
+ }
if (depth == 0) {
/* there is no tree at all */
@@ -1538,8 +1622,9 @@ int ext4_ext_try_to_merge(struct inode *inode,
merge_done = 1;
WARN_ON(eh->eh_entries == 0);
if (!eh->eh_entries)
- ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
- "inode#%lu, eh->eh_entries = 0!", inode->i_ino);
+ ext4_error(inode->i_sb,
+ "inode#%lu, eh->eh_entries = 0!",
+ inode->i_ino);
}
return merge_done;
@@ -1612,13 +1697,19 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
ext4_lblk_t next;
unsigned uninitialized = 0;
- BUG_ON(ext4_ext_get_actual_len(newext) == 0);
+ if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
+ EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
+ return -EIO;
+ }
depth = ext_depth(inode);
ex = path[depth].p_ext;
- BUG_ON(path[depth].p_hdr == NULL);
+ if (unlikely(path[depth].p_hdr == NULL)) {
+ EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
+ return -EIO;
+ }
/* try to insert block into found extent and return */
- if (ex && (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT)
+ if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
&& ext4_can_extents_be_merged(inode, ex, newext)) {
ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
ext4_ext_is_uninitialized(newext),
@@ -1739,7 +1830,7 @@ has_space:
merge:
/* try to merge extents to the right */
- if (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT)
+ if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
ext4_ext_try_to_merge(inode, path, nearex);
/* try to merge extents to the left */
@@ -1787,7 +1878,11 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
}
depth = ext_depth(inode);
- BUG_ON(path[depth].p_hdr == NULL);
+ if (unlikely(path[depth].p_hdr == NULL)) {
+ EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
+ err = -EIO;
+ break;
+ }
ex = path[depth].p_ext;
next = ext4_ext_next_allocated_block(path);
@@ -1838,7 +1933,11 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
}
- BUG_ON(cbex.ec_len == 0);
+ if (unlikely(cbex.ec_len == 0)) {
+ EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
+ err = -EIO;
+ break;
+ }
err = func(inode, path, &cbex, ex, cbdata);
ext4_ext_drop_refs(path);
@@ -1952,7 +2051,7 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
cex->ec_type != EXT4_EXT_CACHE_EXTENT);
- if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
+ if (in_range(block, cex->ec_block, cex->ec_len)) {
ex->ee_block = cpu_to_le32(cex->ec_block);
ext4_ext_store_pblock(ex, cex->ec_start);
ex->ee_len = cpu_to_le16(cex->ec_len);
@@ -1981,7 +2080,10 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
/* free index block */
path--;
leaf = idx_pblock(path->p_idx);
- BUG_ON(path->p_hdr->eh_entries == 0);
+ if (unlikely(path->p_hdr->eh_entries == 0)) {
+ EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
+ return -EIO;
+ }
err = ext4_ext_get_access(handle, inode, path);
if (err)
return err;
@@ -2119,8 +2221,10 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
if (!path[depth].p_hdr)
path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
eh = path[depth].p_hdr;
- BUG_ON(eh == NULL);
-
+ if (unlikely(path[depth].p_hdr == NULL)) {
+ EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
+ return -EIO;
+ }
/* find where to start removing */
ex = EXT_LAST_EXTENT(eh);
@@ -2983,7 +3087,7 @@ fix_extent_len:
ext4_ext_dirty(handle, inode, path + depth);
return err;
}
-static int ext4_convert_unwritten_extents_dio(handle_t *handle,
+static int ext4_convert_unwritten_extents_endio(handle_t *handle,
struct inode *inode,
struct ext4_ext_path *path)
{
@@ -3063,8 +3167,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
flags, allocated);
ext4_ext_show_leaf(inode, path);
- /* DIO get_block() before submit the IO, split the extent */
- if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
+ /* get_block() before submit the IO, split the extent */
+ if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
ret = ext4_split_unwritten_extents(handle,
inode, path, iblock,
max_blocks, flags);
@@ -3074,14 +3178,16 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
* completed
*/
if (io)
- io->flag = DIO_AIO_UNWRITTEN;
+ io->flag = EXT4_IO_UNWRITTEN;
else
- EXT4_I(inode)->i_state |= EXT4_STATE_DIO_UNWRITTEN;
+ ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
+ if (ext4_should_dioread_nolock(inode))
+ set_buffer_uninit(bh_result);
goto out;
}
- /* async DIO end_io complete, convert the filled extent to written */
- if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) {
- ret = ext4_convert_unwritten_extents_dio(handle, inode,
+ /* IO end_io complete, convert the filled extent to written */
+ if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
+ ret = ext4_convert_unwritten_extents_endio(handle, inode,
path);
if (ret >= 0)
ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -3185,7 +3291,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
{
struct ext4_ext_path *path = NULL;
struct ext4_extent_header *eh;
- struct ext4_extent newex, *ex;
+ struct ext4_extent newex, *ex, *last_ex;
ext4_fsblk_t newblock;
int err = 0, depth, ret, cache_type;
unsigned int allocated = 0;
@@ -3237,10 +3343,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
* this situation is possible, though, _during_ tree modification;
* this is why assert can't be put in ext4_ext_find_extent()
*/
- if (path[depth].p_ext == NULL && depth != 0) {
- ext4_error(inode->i_sb, __func__, "bad extent address "
- "inode: %lu, iblock: %d, depth: %d",
- inode->i_ino, iblock, depth);
+ if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
+ EXT4_ERROR_INODE(inode, "bad extent address "
+ "iblock: %d, depth: %d pblock %lld",
+ iblock, depth, path[depth].p_block);
err = -EIO;
goto out2;
}
@@ -3258,7 +3364,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
*/
ee_len = ext4_ext_get_actual_len(ex);
/* if found extent covers block, simply return it */
- if (iblock >= ee_block && iblock < ee_block + ee_len) {
+ if (in_range(iblock, ee_block, ee_len)) {
newblock = iblock - ee_block + ee_start;
/* number of remaining blocks in the extent */
allocated = ee_len - (iblock - ee_block);
@@ -3350,21 +3456,35 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
ext4_ext_mark_uninitialized(&newex);
/*
- * io_end structure was created for every async
- * direct IO write to the middle of the file.
- * To avoid unecessary convertion for every aio dio rewrite
- * to the mid of file, here we flag the IO that is really
- * need the convertion.
+ * io_end structure was created for every IO write to an
+ * uninitialized extent. To avoid unnecessary conversion,
+ * here we flag the IO that really needs the conversion.
* For the non-async direct IO case, flag the inode state
* that we need to perform conversion when IO is done.
*/
- if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
+ if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
if (io)
- io->flag = DIO_AIO_UNWRITTEN;
+ io->flag = EXT4_IO_UNWRITTEN;
else
- EXT4_I(inode)->i_state |=
- EXT4_STATE_DIO_UNWRITTEN;;
+ ext4_set_inode_state(inode,
+ EXT4_STATE_DIO_UNWRITTEN);
+ }
+ if (ext4_should_dioread_nolock(inode))
+ set_buffer_uninit(bh_result);
+ }
+
+ if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) {
+ if (unlikely(!eh->eh_entries)) {
+ EXT4_ERROR_INODE(inode,
+ "eh->eh_entries == 0 ee_block %d",
+ ex->ee_block);
+ err = -EIO;
+ goto out2;
}
+ last_ex = EXT_LAST_EXTENT(eh);
+ if (iblock + ar.len > le32_to_cpu(last_ex->ee_block)
+ + ext4_ext_get_actual_len(last_ex))
+ EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL;
}
err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
if (err) {
@@ -3499,6 +3619,13 @@ static void ext4_falloc_update_inode(struct inode *inode,
i_size_write(inode, new_size);
if (new_size > EXT4_I(inode)->i_disksize)
ext4_update_i_disksize(inode, new_size);
+ } else {
+ /*
+ * Mark that we allocate beyond EOF so the subsequent truncate
+ * can proceed even if the new size is the same as i_size.
+ */
+ if (new_size > i_size_read(inode))
+ EXT4_I(inode)->i_flags |= EXT4_EOFBLOCKS_FL;
}
}
@@ -3603,7 +3730,7 @@ retry:
* Returns 0 on success.
*/
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
- loff_t len)
+ ssize_t len)
{
handle_t *handle;
ext4_lblk_t block;
@@ -3635,7 +3762,7 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
map_bh.b_state = 0;
ret = ext4_get_blocks(handle, inode, block,
max_blocks, &map_bh,
- EXT4_GET_BLOCKS_DIO_CONVERT_EXT);
+ EXT4_GET_BLOCKS_IO_CONVERT_EXT);
if (ret <= 0) {
WARN_ON(ret <= 0);
printk(KERN_ERR "%s: ext4_ext_get_blocks "
@@ -3739,7 +3866,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
int error = 0;
/* in-inode? */
- if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
+ if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
struct ext4_iloc iloc;
int offset; /* offset of xattr in inode */
@@ -3767,7 +3894,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
ext4_lblk_t start_blk;
- ext4_lblk_t len_blks;
int error = 0;
/* fallback to generic here if not in extents fmt */
@@ -3781,8 +3907,14 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
error = ext4_xattr_fiemap(inode, fieinfo);
} else {
+ ext4_lblk_t len_blks;
+ __u64 last_blk;
+
start_blk = start >> inode->i_sb->s_blocksize_bits;
- len_blks = len >> inode->i_sb->s_blocksize_bits;
+ last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
+ if (last_blk >= EXT_MAX_BLOCK)
+ last_blk = EXT_MAX_BLOCK-1;
+ len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
/*
* Walk the extent tree gathering extent information.
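The clamp above fixes an overflow: a FIEMAP request with len near ~0ULL would shift into a block count that no longer fits ext4_lblk_t. Clamping the last block to EXT_MAX_BLOCK - 1 first keeps len_blks in range. A runnable rendition of the arithmetic (EXT_MAX_BLOCK taken as 0xffffffff, the extent format's logical-block limit, as an assumption for the demo):

#include <stdio.h>
#include <stdint.h>

#define EXT_MAX_BLOCK 0xffffffffU	/* assumed logical-block limit */

int main(void)
{
	unsigned blkbits = 12;			/* 4 KiB blocks */
	uint64_t start = 0, len = ~0ULL;	/* "map the whole file" */

	uint32_t start_blk = (uint32_t)(start >> blkbits);
	uint64_t last_blk = (start + len - 1) >> blkbits;
	if (last_blk >= EXT_MAX_BLOCK)
		last_blk = EXT_MAX_BLOCK - 1;
	/* The old len >> blkbits would have truncated; going through the
	 * clamped last block keeps the count within 32 bits. */
	uint32_t len_blks = (uint32_t)last_blk - start_blk + 1;

	printf("len_blks = %u\n", len_blks);
	return 0;
}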
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index a08a129..d0776e4 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -36,9 +36,9 @@
*/
static int ext4_release_file(struct inode *inode, struct file *filp)
{
- if (EXT4_I(inode)->i_state & EXT4_STATE_DA_ALLOC_CLOSE) {
+ if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
ext4_alloc_da_blocks(inode);
- EXT4_I(inode)->i_state &= ~EXT4_STATE_DA_ALLOC_CLOSE;
+ ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
}
/* if we are the last writer on the inode, drop the block reservation */
if ((filp->f_mode & FMODE_WRITE) &&
@@ -117,11 +117,9 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
* devices or filesystem images.
*/
memset(buf, 0, sizeof(buf));
- path.mnt = mnt->mnt_parent;
- path.dentry = mnt->mnt_mountpoint;
- path_get(&path);
+ path.mnt = mnt;
+ path.dentry = mnt->mnt_root;
cp = d_path(&path, buf, sizeof(buf));
- path_put(&path);
if (!IS_ERR(cp)) {
memcpy(sbi->s_es->s_last_mounted, cp,
sizeof(sbi->s_es->s_last_mounted));
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 98bd140..0d0c323 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -63,7 +63,7 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
if (inode->i_sb->s_flags & MS_RDONLY)
return 0;
- ret = flush_aio_dio_completed_IO(inode);
+ ret = flush_completed_IO(inode);
if (ret < 0)
return ret;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 9bb2bb9..361c0b9 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -76,8 +76,7 @@ unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
/* If checksum is bad mark all blocks and inodes used to prevent
* allocation, essentially implementing a per-group read-only flag. */
if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
- ext4_error(sb, __func__, "Checksum bad for group %u",
- block_group);
+ ext4_error(sb, "Checksum bad for group %u", block_group);
ext4_free_blks_set(sb, gdp, 0);
ext4_free_inodes_set(sb, gdp, 0);
ext4_itable_unused_set(sb, gdp, 0);
@@ -111,8 +110,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
bitmap_blk = ext4_inode_bitmap(sb, desc);
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
- ext4_error(sb, __func__,
- "Cannot read inode bitmap - "
+ ext4_error(sb, "Cannot read inode bitmap - "
"block_group = %u, inode_bitmap = %llu",
block_group, bitmap_blk);
return NULL;
@@ -153,8 +151,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
set_bitmap_uptodate(bh);
if (bh_submit_read(bh) < 0) {
put_bh(bh);
- ext4_error(sb, __func__,
- "Cannot read inode bitmap - "
+ ext4_error(sb, "Cannot read inode bitmap - "
"block_group = %u, inode_bitmap = %llu",
block_group, bitmap_blk);
return NULL;
@@ -229,8 +226,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
es = EXT4_SB(sb)->s_es;
if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
- ext4_error(sb, "ext4_free_inode",
- "reserved or nonexistent inode %lu", ino);
+ ext4_error(sb, "reserved or nonexistent inode %lu", ino);
goto error_return;
}
block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
@@ -248,8 +244,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
bit, bitmap_bh->b_data);
if (!cleared)
- ext4_error(sb, "ext4_free_inode",
- "bit already cleared for inode %lu", ino);
+ ext4_error(sb, "bit already cleared for inode %lu", ino);
else {
gdp = ext4_get_group_desc(sb, block_group, &bh2);
@@ -736,8 +731,7 @@ static int ext4_claim_inode(struct super_block *sb,
if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
ino > EXT4_INODES_PER_GROUP(sb)) {
ext4_unlock_group(sb, group);
- ext4_error(sb, __func__,
- "reserved inode or inode > inodes count - "
+ ext4_error(sb, "reserved inode or inode > inodes count - "
"block_group = %u, inode=%lu", group,
ino + group * EXT4_INODES_PER_GROUP(sb));
return 1;
@@ -904,7 +898,7 @@ repeat_in_this_group:
BUFFER_TRACE(inode_bitmap_bh,
"call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle,
- inode,
+ NULL,
inode_bitmap_bh);
if (err)
goto fail;
@@ -1029,7 +1023,8 @@ got:
inode->i_generation = sbi->s_next_generation++;
spin_unlock(&sbi->s_next_gen_lock);
- ei->i_state = EXT4_STATE_NEW;
+ ei->i_state_flags = 0;
+ ext4_set_inode_state(inode, EXT4_STATE_NEW);
ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
@@ -1098,8 +1093,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
/* Error cases - e2fsck has already cleaned up for us */
if (ino > max_ino) {
- ext4_warning(sb, __func__,
- "bad orphan ino %lu! e2fsck was run?", ino);
+ ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino);
goto error;
}
@@ -1107,8 +1101,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
if (!bitmap_bh) {
- ext4_warning(sb, __func__,
- "inode bitmap error for orphan %lu", ino);
+ ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
goto error;
}
@@ -1140,8 +1133,7 @@ iget_failed:
err = PTR_ERR(inode);
inode = NULL;
bad_orphan:
- ext4_warning(sb, __func__,
- "bad orphan inode %lu! e2fsck was run?", ino);
+ ext4_warning(sb, "bad orphan inode %lu! e2fsck was run?", ino);
printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
bit, (unsigned long long)bitmap_bh->b_blocknr,
ext4_test_bit(bit, bitmap_bh->b_data));
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index bec222c..986120f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -38,6 +38,7 @@
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
+#include <linux/kernel.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -197,7 +198,7 @@ void ext4_delete_inode(struct inode *inode)
inode->i_size = 0;
err = ext4_mark_inode_dirty(handle, inode);
if (err) {
- ext4_warning(inode->i_sb, __func__,
+ ext4_warning(inode->i_sb,
"couldn't mark inode dirty (err %d)", err);
goto stop_handle;
}
@@ -215,7 +216,7 @@ void ext4_delete_inode(struct inode *inode)
if (err > 0)
err = ext4_journal_restart(handle, 3);
if (err != 0) {
- ext4_warning(inode->i_sb, __func__,
+ ext4_warning(inode->i_sb,
"couldn't extend journal (err %d)", err);
stop_handle:
ext4_journal_stop(handle);
@@ -326,8 +327,7 @@ static int ext4_block_to_path(struct inode *inode,
offsets[n++] = i_block & (ptrs - 1);
final = ptrs;
} else {
- ext4_warning(inode->i_sb, "ext4_block_to_path",
- "block %lu > max in inode %lu",
+ ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
i_block + direct_blocks +
indirect_blocks + double_blocks, inode->i_ino);
}
@@ -347,7 +347,7 @@ static int __ext4_check_blockref(const char *function, struct inode *inode,
if (blk &&
unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
blk, 1))) {
- ext4_error(inode->i_sb, function,
+ __ext4_error(inode->i_sb, function,
"invalid block reference %u "
"in inode #%lu", blk, inode->i_ino);
return -EIO;
@@ -610,7 +610,14 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
if (*err)
goto failed_out;
- BUG_ON(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS);
+ if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
+ EXT4_ERROR_INODE(inode,
+ "current_block %llu + count %lu > %d!",
+ current_block, count,
+ EXT4_MAX_BLOCK_FILE_PHYS);
+ *err = -EIO;
+ goto failed_out;
+ }
target -= count;
/* allocate blocks for indirect blocks */
@@ -646,7 +653,14 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
ar.flags = EXT4_MB_HINT_DATA;
current_block = ext4_mb_new_blocks(handle, &ar, err);
- BUG_ON(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS);
+ if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
+ EXT4_ERROR_INODE(inode,
+ "current_block %llu + ar.len %d > %d!",
+ current_block, ar.len,
+ EXT4_MAX_BLOCK_FILE_PHYS);
+ *err = -EIO;
+ goto failed_out;
+ }
if (*err && (target == blks)) {
/*
@@ -1064,6 +1078,7 @@ void ext4_da_update_reserve_space(struct inode *inode,
int mdb_free = 0, allocated_meta_blocks = 0;
spin_lock(&ei->i_block_reservation_lock);
+ trace_ext4_da_update_reserve_space(inode, used);
if (unlikely(used > ei->i_reserved_data_blocks)) {
ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
"with only %d reserved data blocks\n",
@@ -1127,7 +1142,7 @@ static int check_block_validity(struct inode *inode, const char *msg,
sector_t logical, sector_t phys, int len)
{
if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) {
- ext4_error(inode->i_sb, msg,
+ __ext4_error(inode->i_sb, msg,
"inode #%lu logical block %llu mapped to %llu "
"(size %d)", inode->i_ino,
(unsigned long long) logical,
@@ -1309,7 +1324,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
* i_data's format changing. Force the migrate
* to fail by clearing migrate flags
*/
- EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
+ ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
}
/*
@@ -1537,6 +1552,8 @@ static void ext4_truncate_failed_write(struct inode *inode)
ext4_truncate(inode);
}
+static int ext4_get_block_write(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -1578,8 +1595,12 @@ retry:
}
*pagep = page;
- ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- ext4_get_block);
+ if (ext4_should_dioread_nolock(inode))
+ ret = block_write_begin(file, mapping, pos, len, flags, pagep,
+ fsdata, ext4_get_block_write);
+ else
+ ret = block_write_begin(file, mapping, pos, len, flags, pagep,
+ fsdata, ext4_get_block);
if (!ret && ext4_should_journal_data(inode)) {
ret = walk_page_buffers(handle, page_buffers(page),
@@ -1796,7 +1817,7 @@ static int ext4_journalled_write_end(struct file *file,
new_i_size = pos + copied;
if (new_i_size > inode->i_size)
i_size_write(inode, pos+copied);
- EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
+ ext4_set_inode_state(inode, EXT4_STATE_JDATA);
if (new_i_size > EXT4_I(inode)->i_disksize) {
ext4_update_i_disksize(inode, new_i_size);
ret2 = ext4_mark_inode_dirty(handle, inode);
@@ -1850,6 +1871,7 @@ repeat:
spin_lock(&ei->i_block_reservation_lock);
md_reserved = ei->i_reserved_meta_blocks;
md_needed = ext4_calc_metadata_amount(inode, lblock);
+ trace_ext4_da_reserve_space(inode, md_needed);
spin_unlock(&ei->i_block_reservation_lock);
/*
@@ -2096,6 +2118,8 @@ static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
} else if (buffer_mapped(bh))
BUG_ON(bh->b_blocknr != pblock);
+ if (buffer_uninit(exbh))
+ set_buffer_uninit(bh);
cur_logical++;
pblock++;
} while ((bh = bh->b_this_page) != head);
@@ -2138,17 +2162,16 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- index = page->index;
- if (index > end)
+ if (page->index > end)
break;
- index++;
-
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
block_invalidatepage(page, 0);
ClearPageUptodate(page);
unlock_page(page);
}
+ index = pvec.pages[nr_pages - 1]->index + 1;
+ pagevec_release(&pvec);
}
return;
}
@@ -2225,6 +2248,8 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
*/
new.b_state = 0;
get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
+ if (ext4_should_dioread_nolock(mpd->inode))
+ get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
if (mpd->b_state & (1 << BH_Delay))
get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
@@ -2635,11 +2660,14 @@ static int __ext4_journalled_writepage(struct page *page,
ret = err;
walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
- EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
+ ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
return ret;
}
+static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
+static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
+
/*
* Note that we don't need to start a transaction unless we're journaling data
* because we should have holes filled from ext4_page_mkwrite(). We even don't
@@ -2687,7 +2715,7 @@ static int ext4_writepage(struct page *page,
int ret = 0;
loff_t size;
unsigned int len;
- struct buffer_head *page_bufs;
+ struct buffer_head *page_bufs = NULL;
struct inode *inode = page->mapping->host;
trace_ext4_writepage(inode, page);
@@ -2763,7 +2791,11 @@ static int ext4_writepage(struct page *page,
if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
ret = nobh_writepage(page, noalloc_get_block_write, wbc);
- else
+ else if (page_bufs && buffer_uninit(page_bufs)) {
+ ext4_set_bh_endio(page_bufs, inode);
+ ret = block_write_full_page_endio(page, noalloc_get_block_write,
+ wbc, ext4_end_io_buffer_write);
+ } else
ret = block_write_full_page(page, noalloc_get_block_write,
wbc);
@@ -3306,7 +3338,8 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
filemap_write_and_wait(mapping);
}
- if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
+ if (EXT4_JOURNAL(inode) &&
+ ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
/*
* This is a REALLY heavyweight approach, but the use of
* bmap on dirty files is expected to be extremely rare:
@@ -3325,7 +3358,7 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
* everything they get.
*/
- EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
+ ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
journal = EXT4_JOURNAL(inode);
jbd2_journal_lock_updates(journal);
err = jbd2_journal_flush(journal);
@@ -3350,11 +3383,45 @@ ext4_readpages(struct file *file, struct address_space *mapping,
return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
}
+static void ext4_free_io_end(ext4_io_end_t *io)
+{
+ BUG_ON(!io);
+ if (io->page)
+ put_page(io->page);
+ iput(io->inode);
+ kfree(io);
+}
+
+static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
+{
+ struct buffer_head *head, *bh;
+ unsigned int curr_off = 0;
+
+ if (!page_has_buffers(page))
+ return;
+ head = bh = page_buffers(page);
+ do {
+ if (offset <= curr_off && test_clear_buffer_uninit(bh)
+ && bh->b_private) {
+ ext4_free_io_end(bh->b_private);
+ bh->b_private = NULL;
+ bh->b_end_io = NULL;
+ }
+ curr_off = curr_off + bh->b_size;
+ bh = bh->b_this_page;
+ } while (bh != head);
+}
+
static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
journal_t *journal = EXT4_JOURNAL(page->mapping->host);
/*
+ * free any io_end structure allocated for buffers to be discarded
+ */
+ if (ext4_should_dioread_nolock(page->mapping->host))
+ ext4_invalidatepage_free_endio(page, offset);
+ /*
* If it's a full truncate we just forget about the pending dirtying
*/
if (offset == 0)
@@ -3425,7 +3492,14 @@ static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
}
retry:
- ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+ if (rw == READ && ext4_should_dioread_nolock(inode))
+ ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
+ inode->i_sb->s_bdev, iov,
+ offset, nr_segs,
+ ext4_get_block, NULL);
+ else
+ ret = blockdev_direct_IO(rw, iocb, inode,
+ inode->i_sb->s_bdev, iov,
offset, nr_segs,
ext4_get_block, NULL);
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -3441,6 +3515,9 @@ retry:
* but cannot extend i_size. Bail out and pretend
* the write failed... */
ret = PTR_ERR(handle);
+ if (inode->i_nlink)
+ ext4_orphan_del(NULL, inode);
+
goto out;
}
if (inode->i_nlink)
@@ -3468,75 +3545,63 @@ out:
return ret;
}
-static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock,
+static int ext4_get_block_write(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
- handle_t *handle = NULL;
+ handle_t *handle = ext4_journal_current_handle();
int ret = 0;
unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
int dio_credits;
+ int started = 0;
- ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n",
+ ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
inode->i_ino, create);
/*
- * DIO VFS code passes create = 0 flag for write to
- * the middle of file. It does this to avoid block
- * allocation for holes, to prevent expose stale data
- * out when there is parallel buffered read (which does
- * not hold the i_mutex lock) while direct IO write has
- * not completed. DIO request on holes finally falls back
- * to buffered IO for this reason.
- *
- * For ext4 extent based file, since we support fallocate,
- * new allocated extent as uninitialized, for holes, we
- * could fallocate blocks for holes, thus parallel
- * buffered IO read will zero out the page when read on
- * a hole while parallel DIO write to the hole has not completed.
- *
- * when we come here, we know it's a direct IO write to
- * to the middle of file (<i_size)
- * so it's safe to override the create flag from VFS.
+ * ext4_get_block called in preparation for a DIO write or buffer write.
+ * We allocate an uninitialized extent if blocks haven't been allocated.
+ * The extent will be converted to initialized after the IO completes.
*/
- create = EXT4_GET_BLOCKS_DIO_CREATE_EXT;
+ create = EXT4_GET_BLOCKS_IO_CREATE_EXT;
- if (max_blocks > DIO_MAX_BLOCKS)
- max_blocks = DIO_MAX_BLOCKS;
- dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
- handle = ext4_journal_start(inode, dio_credits);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out;
+ if (!handle) {
+ if (max_blocks > DIO_MAX_BLOCKS)
+ max_blocks = DIO_MAX_BLOCKS;
+ dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
+ handle = ext4_journal_start(inode, dio_credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ goto out;
+ }
+ started = 1;
}
+
ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
create);
if (ret > 0) {
bh_result->b_size = (ret << inode->i_blkbits);
ret = 0;
}
- ext4_journal_stop(handle);
+ if (started)
+ ext4_journal_stop(handle);
out:
return ret;
}
-static void ext4_free_io_end(ext4_io_end_t *io)
-{
- BUG_ON(!io);
- iput(io->inode);
- kfree(io);
-}
-static void dump_aio_dio_list(struct inode * inode)
+static void dump_completed_IO(struct inode *inode)
{
#ifdef EXT4_DEBUG
struct list_head *cur, *before, *after;
ext4_io_end_t *io, *io0, *io1;
+ unsigned long flags;
- if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){
- ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino);
+ if (list_empty(&EXT4_I(inode)->i_completed_io_list)){
+ ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino);
return;
}
- ext4_debug("Dump inode %lu aio_dio_completed_IO list \n", inode->i_ino);
- list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list){
+ ext4_debug("Dump inode %lu completed_io list \n", inode->i_ino);
+ spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
+ list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list){
cur = &io->list;
before = cur->prev;
io0 = container_of(before, ext4_io_end_t, list);
@@ -3546,32 +3611,31 @@ static void dump_aio_dio_list(struct inode * inode)
ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
io, inode->i_ino, io0, io1);
}
+ spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
#endif
}
/*
* check a range of space and convert unwritten extents to written.
*/
-static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
+static int ext4_end_io_nolock(ext4_io_end_t *io)
{
struct inode *inode = io->inode;
loff_t offset = io->offset;
- size_t size = io->size;
+ ssize_t size = io->size;
int ret = 0;
- ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p,"
+ ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
"list->prev 0x%p\n",
io, inode->i_ino, io->list.next, io->list.prev);
if (list_empty(&io->list))
return ret;
- if (io->flag != DIO_AIO_UNWRITTEN)
+ if (io->flag != EXT4_IO_UNWRITTEN)
return ret;
- if (offset + size <= i_size_read(inode))
- ret = ext4_convert_unwritten_extents(inode, offset, size);
-
+ ret = ext4_convert_unwritten_extents(inode, offset, size);
if (ret < 0) {
printk(KERN_EMERG "%s: failed to convert unwritten"
"extents to written extents, error is %d"
@@ -3584,50 +3648,64 @@ static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
io->flag = 0;
return ret;
}
+
/*
* work on completed IO, to convert unwritten extents to written extents
*/
-static void ext4_end_aio_dio_work(struct work_struct *work)
+static void ext4_end_io_work(struct work_struct *work)
{
- ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
- struct inode *inode = io->inode;
- int ret = 0;
+ ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
+ struct inode *inode = io->inode;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned long flags;
+ int ret;
mutex_lock(&inode->i_mutex);
- ret = ext4_end_aio_dio_nolock(io);
- if (ret >= 0) {
- if (!list_empty(&io->list))
- list_del_init(&io->list);
- ext4_free_io_end(io);
+ ret = ext4_end_io_nolock(io);
+ if (ret < 0) {
+ mutex_unlock(&inode->i_mutex);
+ return;
}
+
+ spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+ if (!list_empty(&io->list))
+ list_del_init(&io->list);
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
mutex_unlock(&inode->i_mutex);
+ ext4_free_io_end(io);
}
+
/*
* This function is called from ext4_sync_file().
*
- * When AIO DIO IO is completed, the work to convert unwritten
- * extents to written is queued on workqueue but may not get immediately
+ * When IO is completed, the work to convert unwritten extents to
+ * written is queued on a workqueue but may not get immediately
* scheduled. When fsync is called, we need to ensure the
* conversion is complete before fsync returns.
- * The inode keeps track of a list of completed AIO from DIO path
- * that might needs to do the conversion. This function walks through
- * the list and convert the related unwritten extents to written.
+ * The inode keeps track of a list of pending/completed IO that
+ * might need the conversion. This function walks through
+ * the list and converts the related unwritten extents for completed
+ * IO to written.
+ * The function returns 0 on success, or a negative error code.
*/
-int flush_aio_dio_completed_IO(struct inode *inode)
+int flush_completed_IO(struct inode *inode)
{
ext4_io_end_t *io;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned long flags;
int ret = 0;
int ret2 = 0;
- if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list))
+ if (list_empty(&ei->i_completed_io_list))
return ret;
- dump_aio_dio_list(inode);
- while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){
- io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next,
+ dump_completed_IO(inode);
+ spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+ while (!list_empty(&ei->i_completed_io_list)){
+ io = list_entry(ei->i_completed_io_list.next,
ext4_io_end_t, list);
/*
- * Calling ext4_end_aio_dio_nolock() to convert completed
+ * Calling ext4_end_io_nolock() to convert completed
* IO to written.
*
* When ext4_sync_file() is called, run_queue() may already
@@ -3640,20 +3718,23 @@ int flush_aio_dio_completed_IO(struct inode *inode)
* avoid double converting from both fsync and background work
* queue work.
*/
- ret = ext4_end_aio_dio_nolock(io);
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+ ret = ext4_end_io_nolock(io);
+ spin_lock_irqsave(&ei->i_completed_io_lock, flags);
if (ret < 0)
ret2 = ret;
else
list_del_init(&io->list);
}
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
return (ret2 < 0) ? ret2 : 0;
}
-static ext4_io_end_t *ext4_init_io_end (struct inode *inode)
+static ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
ext4_io_end_t *io = NULL;
- io = kmalloc(sizeof(*io), GFP_NOFS);
+ io = kmalloc(sizeof(*io), flags);
if (io) {
igrab(inode);
@@ -3661,8 +3742,8 @@ static ext4_io_end_t *ext4_init_io_end (struct inode *inode)
io->flag = 0;
io->offset = 0;
io->size = 0;
- io->error = 0;
- INIT_WORK(&io->work, ext4_end_aio_dio_work);
+ io->page = NULL;
+ INIT_WORK(&io->work, ext4_end_io_work);
INIT_LIST_HEAD(&io->list);
}
@@ -3674,6 +3755,8 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
{
ext4_io_end_t *io_end = iocb->private;
struct workqueue_struct *wq;
+ unsigned long flags;
+ struct ext4_inode_info *ei;
/* if not async direct IO or dio with 0 bytes write, just return */
if (!io_end || !size)
@@ -3685,7 +3768,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
size);
/* if not aio dio with unwritten extents, just free io and return */
- if (io_end->flag != DIO_AIO_UNWRITTEN){
+ if (io_end->flag != EXT4_IO_UNWRITTEN){
ext4_free_io_end(io_end);
iocb->private = NULL;
return;
@@ -3693,16 +3776,85 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
io_end->offset = offset;
io_end->size = size;
+ io_end->flag = EXT4_IO_UNWRITTEN;
wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
/* queue the work to convert unwritten extents to written */
queue_work(wq, &io_end->work);
/* Add the io_end to per-inode completed io list */
- list_add_tail(&io_end->list,
- &EXT4_I(io_end->inode)->i_aio_dio_complete_list);
+ ei = EXT4_I(io_end->inode);
+ spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+ list_add_tail(&io_end->list, &ei->i_completed_io_list);
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
iocb->private = NULL;
}
+
+static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
+{
+ ext4_io_end_t *io_end = bh->b_private;
+ struct workqueue_struct *wq;
+ struct inode *inode;
+ unsigned long flags;
+
+ if (!test_clear_buffer_uninit(bh) || !io_end)
+ goto out;
+
+ if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
+ printk("sb umounted, discard end_io request for inode %lu\n",
+ io_end->inode->i_ino);
+ ext4_free_io_end(io_end);
+ goto out;
+ }
+
+ io_end->flag = EXT4_IO_UNWRITTEN;
+ inode = io_end->inode;
+
+ /* Add the io_end to per-inode completed io list */
+ spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
+ list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
+ spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
+
+ wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
+ /* queue the work to convert unwritten extents to written */
+ queue_work(wq, &io_end->work);
+out:
+ bh->b_private = NULL;
+ bh->b_end_io = NULL;
+ clear_buffer_uninit(bh);
+ end_buffer_async_write(bh, uptodate);
+}
+
+static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
+{
+ ext4_io_end_t *io_end;
+ struct page *page = bh->b_page;
+ loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
+ size_t size = bh->b_size;
+
+retry:
+ io_end = ext4_init_io_end(inode, GFP_ATOMIC);
+ if (!io_end) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "%s: allocation failed\n", __func__);
+ schedule();
+ goto retry;
+ }
+ io_end->offset = offset;
+ io_end->size = size;
+ /*
+ * We need to hold a reference to the page to make sure it
+ * doesn't get evicted before ext4_end_io_work() has a chance
+ * to convert the extent from unwritten to written.
+ */
+ io_end->page = page;
+ get_page(io_end->page);
+
+ bh->b_private = io_end;
+ bh->b_end_io = ext4_end_io_buffer_write;
+ return 0;
+}
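ext4_set_bh_endio() above runs in a context that cannot sleep in the usual way and must not fail, so it allocates with GFP_ATOMIC and, on failure, yields and retries instead of returning an error. A rough userspace analogue of that retry shape, with sched_yield() standing in for schedule() (illustrative only):

#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate, yielding the CPU and retrying on failure instead of
 * failing the caller -- the shape of the retry loop above. */
static void *alloc_retry(size_t size)
{
	void *p;

	while ((p = malloc(size)) == NULL) {
		fprintf(stderr, "allocation failed, retrying\n");
		sched_yield();	/* stand-in for schedule() */
	}
	return p;
}

int main(void)
{
	char *buf = alloc_retry(64);

	free(buf);
	return 0;
}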
+
/*
* For ext4 extent files, ext4 will do direct-io write to holes,
* preallocated extents, and those write extend the file, no need to
@@ -3756,7 +3908,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
iocb->private = NULL;
EXT4_I(inode)->cur_aio_dio = NULL;
if (!is_sync_kiocb(iocb)) {
- iocb->private = ext4_init_io_end(inode);
+ iocb->private = ext4_init_io_end(inode, GFP_NOFS);
if (!iocb->private)
return -ENOMEM;
/*
@@ -3772,7 +3924,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
ret = blockdev_direct_IO(rw, iocb, inode,
inode->i_sb->s_bdev, iov,
offset, nr_segs,
- ext4_get_block_dio_write,
+ ext4_get_block_write,
ext4_end_io_dio);
if (iocb->private)
EXT4_I(inode)->cur_aio_dio = NULL;
@@ -3793,8 +3945,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
ext4_free_io_end(iocb->private);
iocb->private = NULL;
- } else if (ret > 0 && (EXT4_I(inode)->i_state &
- EXT4_STATE_DIO_UNWRITTEN)) {
+ } else if (ret > 0 && ext4_test_inode_state(inode,
+ EXT4_STATE_DIO_UNWRITTEN)) {
int err;
/*
* for non AIO case, since the IO is already
@@ -3804,7 +3956,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
offset, ret);
if (err < 0)
ret = err;
- EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN;
+ ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
}
return ret;
}
@@ -4135,18 +4287,27 @@ no_top:
* We release `count' blocks on disk, but (last - first) may be greater
* than `count' because there can be holes in there.
*/
-static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
- struct buffer_head *bh,
- ext4_fsblk_t block_to_free,
- unsigned long count, __le32 *first,
- __le32 *last)
+static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
+ struct buffer_head *bh,
+ ext4_fsblk_t block_to_free,
+ unsigned long count, __le32 *first,
+ __le32 *last)
{
__le32 *p;
- int flags = EXT4_FREE_BLOCKS_FORGET;
+ int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
flags |= EXT4_FREE_BLOCKS_METADATA;
+ if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
+ count)) {
+ ext4_error(inode->i_sb, "inode #%lu: "
+ "attempt to clear blocks %llu len %lu, invalid",
+ inode->i_ino, (unsigned long long) block_to_free,
+ count);
+ return 1;
+ }
+
if (try_to_extend_transaction(handle, inode)) {
if (bh) {
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
@@ -4165,6 +4326,7 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
*p = 0;
ext4_free_blocks(handle, inode, 0, block_to_free, count, flags);
+ return 0;
}
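The new check in ext4_clear_blocks() above refuses to free a block range that falls outside the data area rather than corrupting metadata. At its core this is simple interval arithmetic; a self-contained sketch with invented bounds follows (the real ext4_data_block_valid() also excludes the superblock/group-descriptor region, which this sketch omits):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative geometry; the real values come from the superblock. */
#define FIRST_DATA_BLOCK	1ULL
#define BLOCKS_COUNT		8192ULL

static bool data_block_valid(unsigned long long start,
			     unsigned long long count)
{
	/* The range must lie inside [FIRST_DATA_BLOCK, BLOCKS_COUNT)
	 * and must not wrap around. */
	if (start < FIRST_DATA_BLOCK)
		return false;
	if (start + count < start)		/* overflow */
		return false;
	if (start + count > BLOCKS_COUNT)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", data_block_valid(100, 16));	/* 1: ok */
	printf("%d\n", data_block_valid(0, 1));		/* 0: before data area */
	printf("%d\n", data_block_valid(8190, 16));	/* 0: past end */
	return 0;
}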
/**
@@ -4220,9 +4382,10 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
} else if (nr == block_to_free + count) {
count++;
} else {
- ext4_clear_blocks(handle, inode, this_bh,
- block_to_free,
- count, block_to_free_p, p);
+ if (ext4_clear_blocks(handle, inode, this_bh,
+ block_to_free, count,
+ block_to_free_p, p))
+ break;
block_to_free = nr;
block_to_free_p = p;
count = 1;
@@ -4246,7 +4409,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
ext4_handle_dirty_metadata(handle, inode, this_bh);
else
- ext4_error(inode->i_sb, __func__,
+ ext4_error(inode->i_sb,
"circular indirect block detected, "
"inode=%lu, block=%llu",
inode->i_ino,
@@ -4286,6 +4449,16 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
if (!nr)
continue; /* A hole */
+ if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
+ nr, 1)) {
+ ext4_error(inode->i_sb,
+ "indirect mapped block in inode "
+ "#%lu invalid (level %d, blk #%lu)",
+ inode->i_ino, depth,
+ (unsigned long) nr);
+ break;
+ }
+
/* Go read the buffer for the next level down */
bh = sb_bread(inode->i_sb, nr);
@@ -4294,7 +4467,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
* (should be rare).
*/
if (!bh) {
- ext4_error(inode->i_sb, "ext4_free_branches",
+ ext4_error(inode->i_sb,
"Read failure, inode=%lu, block=%llu",
inode->i_ino, nr);
continue;
@@ -4438,8 +4611,10 @@ void ext4_truncate(struct inode *inode)
if (!ext4_can_truncate(inode))
return;
+ EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL;
+
if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
- ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
+ ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
ext4_ext_truncate(inode);
@@ -4609,9 +4784,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
bh = sb_getblk(sb, block);
if (!bh) {
- ext4_error(sb, "ext4_get_inode_loc", "unable to read "
- "inode block - inode=%lu, block=%llu",
- inode->i_ino, block);
+ ext4_error(sb, "unable to read inode block - "
+ "inode=%lu, block=%llu", inode->i_ino, block);
return -EIO;
}
if (!buffer_uptodate(bh)) {
@@ -4709,9 +4883,8 @@ make_io:
submit_bh(READ_META, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
- ext4_error(sb, __func__,
- "unable to read inode block - inode=%lu, "
- "block=%llu", inode->i_ino, block);
+ ext4_error(sb, "unable to read inode block - inode=%lu,"
+ " block=%llu", inode->i_ino, block);
brelse(bh);
return -EIO;
}
@@ -4725,7 +4898,7 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
/* We have all inode data except xattrs in memory here. */
return __ext4_get_inode_loc(inode, iloc,
- !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
+ !ext4_test_inode_state(inode, EXT4_STATE_XATTR));
}
void ext4_set_inode_flags(struct inode *inode)
@@ -4819,7 +4992,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
}
inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
- ei->i_state = 0;
+ ei->i_state_flags = 0;
ei->i_dir_start_lookup = 0;
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
/* We now have enough fields to check if the inode was active or not.
@@ -4902,7 +5075,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
EXT4_GOOD_OLD_INODE_SIZE +
ei->i_extra_isize;
if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
- ei->i_state |= EXT4_STATE_XATTR;
+ ext4_set_inode_state(inode, EXT4_STATE_XATTR);
}
} else
ei->i_extra_isize = 0;
@@ -4922,8 +5095,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
ret = 0;
if (ei->i_file_acl &&
!ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
- ext4_error(sb, __func__,
- "bad extended attribute block %llu in inode #%lu",
+ ext4_error(sb, "bad extended attribute block %llu inode #%lu",
ei->i_file_acl, inode->i_ino);
ret = -EIO;
goto bad_inode;
@@ -4969,8 +5141,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
} else {
ret = -EIO;
- ext4_error(inode->i_sb, __func__,
- "bogus i_mode (%o) for inode=%lu",
+ ext4_error(inode->i_sb, "bogus i_mode (%o) for inode=%lu",
inode->i_mode, inode->i_ino);
goto bad_inode;
}
@@ -5042,7 +5213,7 @@ static int ext4_do_update_inode(handle_t *handle,
/* For fields not tracked in the in-memory inode,
* initialise them to zero for new inodes. */
- if (ei->i_state & EXT4_STATE_NEW)
+ if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
ext4_get_inode_flags(ei);
@@ -5106,7 +5277,7 @@ static int ext4_do_update_inode(handle_t *handle,
EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
sb->s_dirt = 1;
ext4_handle_sync(handle);
- err = ext4_handle_dirty_metadata(handle, inode,
+ err = ext4_handle_dirty_metadata(handle, NULL,
EXT4_SB(sb)->s_sbh);
}
}
@@ -5135,10 +5306,10 @@ static int ext4_do_update_inode(handle_t *handle,
}
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
- rc = ext4_handle_dirty_metadata(handle, inode, bh);
+ rc = ext4_handle_dirty_metadata(handle, NULL, bh);
if (!err)
err = rc;
- ei->i_state &= ~EXT4_STATE_NEW;
+ ext4_clear_inode_state(inode, EXT4_STATE_NEW);
ext4_update_inode_fsync_trans(handle, inode, 0);
out_brelse:
@@ -5182,7 +5353,7 @@ out_brelse:
* `stuff()' is running, and the new i_size will be lost. Plus the inode
* will no longer be on the superblock's dirty inode list.
*/
-int ext4_write_inode(struct inode *inode, int wait)
+int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int err;
@@ -5196,7 +5367,7 @@ int ext4_write_inode(struct inode *inode, int wait)
return -EIO;
}
- if (!wait)
+ if (wbc->sync_mode != WB_SYNC_ALL)
return 0;
err = ext4_force_commit(inode->i_sb);
@@ -5206,13 +5377,11 @@ int ext4_write_inode(struct inode *inode, int wait)
err = ext4_get_inode_loc(inode, &iloc);
if (err)
return err;
- if (wait)
+ if (wbc->sync_mode == WB_SYNC_ALL)
sync_dirty_buffer(iloc.bh);
if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
- ext4_error(inode->i_sb, __func__,
- "IO error syncing inode, "
- "inode=%lu, block=%llu",
- inode->i_ino,
+ ext4_error(inode->i_sb, "IO error syncing inode, "
+ "inode=%lu, block=%llu", inode->i_ino,
(unsigned long long)iloc.bh->b_blocknr);
err = -EIO;
}
@@ -5295,7 +5464,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
}
if (S_ISREG(inode->i_mode) &&
- attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
+ attr->ia_valid & ATTR_SIZE &&
+ (attr->ia_size < inode->i_size ||
+ (EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL))) {
handle_t *handle;
handle = ext4_journal_start(inode, 3);
@@ -5326,6 +5497,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
goto err_out;
}
}
+ /* ext4_truncate will clear the flag */
+ if ((EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL))
+ ext4_truncate(inode);
}
rc = inode_setattr(inode, attr);
@@ -5564,8 +5738,8 @@ static int ext4_expand_extra_isize(struct inode *inode,
entry = IFIRST(header);
/* No extended attributes present */
- if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
- header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
+ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
+ header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
new_extra_isize);
EXT4_I(inode)->i_extra_isize = new_extra_isize;
@@ -5609,7 +5783,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (ext4_handle_valid(handle) &&
EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
- !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
+ !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
/*
* We need extra buffer credits since we may write into EA block
* with this same handle. If journal_extend fails, then it will
@@ -5623,10 +5797,11 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
sbi->s_want_extra_isize,
iloc, handle);
if (ret) {
- EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
+ ext4_set_inode_state(inode,
+ EXT4_STATE_NO_EXPAND);
if (mnt_count !=
le16_to_cpu(sbi->s_es->s_mnt_count)) {
- ext4_warning(inode->i_sb, __func__,
+ ext4_warning(inode->i_sb,
"Unable to expand inode %lu. Delete"
" some EAs or run e2fsck.",
inode->i_ino);
@@ -5690,7 +5865,7 @@ static int ext4_pin_inode(handle_t *handle, struct inode *inode)
err = jbd2_journal_get_write_access(handle, iloc.bh);
if (!err)
err = ext4_handle_dirty_metadata(handle,
- inode,
+ NULL,
iloc.bh);
brelse(iloc.bh);
}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index b63d193..016d024 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -92,6 +92,15 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
flags &= ~EXT4_EXTENTS_FL;
}
+ if (flags & EXT4_EOFBLOCKS_FL) {
+ /* we don't support adding EOFBLOCKS flag */
+ if (!(oldflags & EXT4_EOFBLOCKS_FL)) {
+ err = -EOPNOTSUPP;
+ goto flags_out;
+ }
+ } else if (oldflags & EXT4_EOFBLOCKS_FL)
+ ext4_truncate(inode);
+
handle = ext4_journal_start(inode, 1);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
@@ -249,7 +258,8 @@ setversion_out:
if (me.moved_len > 0)
file_remove_suid(donor_filp);
- if (copy_to_user((struct move_extent *)arg, &me, sizeof(me)))
+ if (copy_to_user((struct move_extent __user *)arg,
+ &me, sizeof(me)))
err = -EFAULT;
mext_out:
fput(donor_filp);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 0b90578..506713a 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -441,10 +441,9 @@ static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
for (i = 0; i < count; i++) {
if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
ext4_fsblk_t blocknr;
- blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
+
+ blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
blocknr += first + i;
- blocknr +=
- le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
ext4_grp_locked_error(sb, e4b->bd_group,
__func__, "double-free of inode"
" %lu's block %llu(bit %u in group %u)",
@@ -1255,10 +1254,9 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
ext4_fsblk_t blocknr;
- blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
+
+ blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
blocknr += block;
- blocknr +=
- le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
ext4_grp_locked_error(sb, e4b->bd_group,
__func__, "double-free of inode"
" %lu's block %llu(bit %u in group %u)",
@@ -1631,7 +1629,6 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
int max;
int err;
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- struct ext4_super_block *es = sbi->s_es;
struct ext4_free_extent ex;
if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
@@ -1648,8 +1645,8 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
ext4_fsblk_t start;
- start = (e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) +
- ex.fe_start + le32_to_cpu(es->s_first_data_block);
+ start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
+ ex.fe_start;
/* use do_div to get remainder (would be 64-bit modulo) */
if (do_div(start, sbi->s_stripe) == 0) {
ac->ac_found++;
@@ -1803,8 +1800,8 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
BUG_ON(sbi->s_stripe == 0);
/* find first stripe-aligned block in group */
- first_group_block = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb)
- + le32_to_cpu(sbi->s_es->s_first_data_block);
+ first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
+
a = first_group_block + sbi->s_stripe - 1;
do_div(a, sbi->s_stripe);
i = (a * sbi->s_stripe) - first_group_block;
@@ -2256,7 +2253,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
init_rwsem(&meta_group_info[i]->alloc_sem);
- meta_group_info[i]->bb_free_root.rb_node = NULL;
+ meta_group_info[i]->bb_free_root = RB_ROOT;
#ifdef DOUBLE_CHECK
{
@@ -2560,12 +2557,9 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
ext4_unlock_group(sb, entry->group);
if (test_opt(sb, DISCARD)) {
ext4_fsblk_t discard_block;
- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
- discard_block = (ext4_fsblk_t)entry->group *
- EXT4_BLOCKS_PER_GROUP(sb)
- + entry->start_blk
- + le32_to_cpu(es->s_first_data_block);
+ discard_block = entry->start_blk +
+ ext4_group_first_block_no(sb, entry->group);
trace_ext4_discard_blocks(sb,
(unsigned long long)discard_block,
entry->count);
@@ -2703,14 +2697,11 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
if (err)
goto out_err;
- block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb)
- + ac->ac_b_ex.fe_start
- + le32_to_cpu(es->s_first_data_block);
+ block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
len = ac->ac_b_ex.fe_len;
if (!ext4_data_block_valid(sbi, block, len)) {
- ext4_error(sb, __func__,
- "Allocating blocks %llu-%llu which overlap "
+ ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
"fs metadata\n", block, block+len);
/* File system mounted not to panic on error
* Fix the bitmap and repeat the block allocation
@@ -3161,9 +3152,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
/* The max size of hash table is PREALLOC_TB_SIZE */
order = PREALLOC_TB_SIZE - 1;
- goal_block = ac->ac_g_ex.fe_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb) +
- ac->ac_g_ex.fe_start +
- le32_to_cpu(EXT4_SB(ac->ac_sb)->s_es->s_first_data_block);
+ goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
/*
* search for the prealloc space that is having
* minimal distance from the goal block.
@@ -3526,8 +3515,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
if (bit >= end)
break;
next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
- start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit +
- le32_to_cpu(sbi->s_es->s_first_data_block);
+ start = ext4_group_first_block_no(sb, group) + bit;
mb_debug(1, " free preallocated %u/%u in group %u\n",
(unsigned) start, (unsigned) next - bit,
(unsigned) group);
@@ -3623,15 +3611,13 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
bitmap_bh = ext4_read_block_bitmap(sb, group);
if (bitmap_bh == NULL) {
- ext4_error(sb, __func__, "Error in reading block "
- "bitmap for %u", group);
+ ext4_error(sb, "Error reading block bitmap for %u", group);
return 0;
}
err = ext4_mb_load_buddy(sb, group, &e4b);
if (err) {
- ext4_error(sb, __func__, "Error in loading buddy "
- "information for %u", group);
+ ext4_error(sb, "Error loading buddy information for %u", group);
put_bh(bitmap_bh);
return 0;
}
@@ -3804,15 +3790,15 @@ repeat:
err = ext4_mb_load_buddy(sb, group, &e4b);
if (err) {
- ext4_error(sb, __func__, "Error in loading buddy "
- "information for %u", group);
+ ext4_error(sb, "Error loading buddy information for %u",
+ group);
continue;
}
bitmap_bh = ext4_read_block_bitmap(sb, group);
if (bitmap_bh == NULL) {
- ext4_error(sb, __func__, "Error in reading block "
- "bitmap for %u", group);
+ ext4_error(sb, "Error reading block bitmap for %u",
+ group);
ext4_mb_release_desc(&e4b);
continue;
}
@@ -3938,7 +3924,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
/* don't use group allocation for large files */
size = max(size, isize);
- if (size >= sbi->s_mb_stream_request) {
+ if (size > sbi->s_mb_stream_request) {
ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
return;
}
@@ -4077,8 +4063,8 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
if (ext4_mb_load_buddy(sb, group, &e4b)) {
- ext4_error(sb, __func__, "Error in loading buddy "
- "information for %u", group);
+ ext4_error(sb, "Error loading buddy information for %u",
+ group);
continue;
}
ext4_lock_group(sb, group);
@@ -4476,10 +4462,10 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
sbi = EXT4_SB(sb);
es = EXT4_SB(sb)->s_es;
- if (!ext4_data_block_valid(sbi, block, count)) {
- ext4_error(sb, __func__,
- "Freeing blocks not in datazone - "
- "block = %llu, count = %lu", block, count);
+ if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
+ !ext4_data_block_valid(sbi, block, count)) {
+ ext4_error(sb, "Freeing blocks not in datazone - "
+ "block = %llu, count = %lu", block, count);
goto error_return;
}
@@ -4547,8 +4533,7 @@ do_more:
in_range(block + count - 1, ext4_inode_table(sb, gdp),
EXT4_SB(sb)->s_itb_per_group)) {
- ext4_error(sb, __func__,
- "Freeing blocks in system zone - "
+ ext4_error(sb, "Freeing blocks in system zone - "
"Block = %llu, count = %lu", block, count);
/* err = 0. ext4_std_error should be a no op */
goto error_return;
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 436521c..b619322 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -220,16 +220,9 @@ struct ext4_buddy {
#define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap)
#define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy)
-#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
-
static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
struct ext4_free_extent *fex)
{
- ext4_fsblk_t block;
-
- block = (ext4_fsblk_t) fex->fe_group * EXT4_BLOCKS_PER_GROUP(sb)
- + fex->fe_start
- + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
- return block;
+ return ext4_group_first_block_no(sb, fex->fe_group) + fex->fe_start;
}
#endif
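The repeated open-coded sum that the mballoc.c and mballoc.h hunks above consolidate is just "first data block + group index x blocks per group". A tiny sketch of the helper's arithmetic, using made-up geometry in place of the on-disk superblock fields:

#include <stdio.h>

/* Illustrative geometry; the real values live in the superblock. */
#define FIRST_DATA_BLOCK	1ULL
#define BLOCKS_PER_GROUP	32768ULL

/* What ext4_group_first_block_no() computes for a group index. */
static unsigned long long group_first_block_no(unsigned int group)
{
	return FIRST_DATA_BLOCK +
	       (unsigned long long)group * BLOCKS_PER_GROUP;
}

int main(void)
{
	/* A (group, offset) pair maps to an absolute block number,
	 * which is what ext4_grp_offs_to_block() now returns. */
	unsigned int group = 3, offset = 100;

	printf("block %llu\n", group_first_block_no(group) + offset);
	return 0;
}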
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 8141581..8b87bd0 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -365,12 +365,12 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
* happened after we started the migrate. We need to
* fail the migrate
*/
- if (!(EXT4_I(inode)->i_state & EXT4_STATE_EXT_MIGRATE)) {
+ if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
retval = -EAGAIN;
up_write(&EXT4_I(inode)->i_data_sem);
goto err_out;
} else
- EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
+ ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
/*
* We have the extent map built with the tmp inode.
* Now copy the i_data across
@@ -503,14 +503,10 @@ int ext4_ext_migrate(struct inode *inode)
}
i_size_write(tmp_inode, i_size_read(inode));
/*
- * We don't want the inode to be reclaimed
- * if we got interrupted in between. We have
- * this tmp inode carrying reference to the
- * data blocks of the original file. We set
- * the i_nlink to zero at the last stage after
- * switching the original file to extent format
+ * Set the i_nlink to zero so it will be deleted later
+ * when we drop the inode reference.
*/
- tmp_inode->i_nlink = 1;
+ tmp_inode->i_nlink = 0;
ext4_ext_tree_init(handle, tmp_inode);
ext4_orphan_add(handle, tmp_inode);
@@ -533,10 +529,20 @@ int ext4_ext_migrate(struct inode *inode)
* allocation.
*/
down_read((&EXT4_I(inode)->i_data_sem));
- EXT4_I(inode)->i_state |= EXT4_STATE_EXT_MIGRATE;
+ ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
up_read((&EXT4_I(inode)->i_data_sem));
handle = ext4_journal_start(inode, 1);
+ if (IS_ERR(handle)) {
+ /*
+ * It is impossible to update on-disk structures without
+ * a handle, so just roll back the in-core changes and leave the
+ * remaining work to orphan_list_cleanup()
+ */
+ ext4_orphan_del(NULL, tmp_inode);
+ retval = PTR_ERR(handle);
+ goto out;
+ }
ei = EXT4_I(inode);
i_data = ei->i_data;
@@ -618,15 +624,8 @@ err_out:
/* Reset the extent details */
ext4_ext_tree_init(handle, tmp_inode);
-
- /*
- * Set the i_nlink to zero so that
- * generic_drop_inode really deletes the
- * inode
- */
- tmp_inode->i_nlink = 0;
-
ext4_journal_stop(handle);
+out:
unlock_new_inode(tmp_inode);
iput(tmp_inode);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 82c415b..aa5fe28 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -152,12 +152,12 @@ mext_check_null_inode(struct inode *inode1, struct inode *inode2,
int ret = 0;
if (inode1 == NULL) {
- ext4_error(inode2->i_sb, function,
+ __ext4_error(inode2->i_sb, function,
"Both inodes should not be NULL: "
"inode1 NULL inode2 %lu", inode2->i_ino);
ret = -EIO;
} else if (inode2 == NULL) {
- ext4_error(inode1->i_sb, function,
+ __ext4_error(inode1->i_sb, function,
"Both inodes should not be NULL: "
"inode1 %lu inode2 NULL", inode1->i_ino);
ret = -EIO;
@@ -252,6 +252,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
}
o_start->ee_len = start_ext->ee_len;
+ eblock = le32_to_cpu(start_ext->ee_block);
new_flag = 1;
} else if (start_ext->ee_len && new_ext->ee_len &&
@@ -262,6 +263,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
* orig |------------------------------|
*/
o_start->ee_len = start_ext->ee_len;
+ eblock = le32_to_cpu(start_ext->ee_block);
new_flag = 1;
} else if (!start_ext->ee_len && new_ext->ee_len &&
@@ -475,7 +477,6 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
struct ext4_extent *oext, *o_start, *o_end, *prev_ext;
struct ext4_extent new_ext, start_ext, end_ext;
ext4_lblk_t new_ext_end;
- ext4_fsblk_t new_phys_end;
int oext_alen, new_ext_alen, end_ext_alen;
int depth = ext_depth(orig_inode);
int ret;
@@ -489,7 +490,6 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
new_ext.ee_len = dext->ee_len;
new_ext_alen = ext4_ext_get_actual_len(&new_ext);
new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1;
- new_phys_end = ext_pblock(&new_ext) + new_ext_alen - 1;
/*
* Case: original extent is first
@@ -502,6 +502,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
le32_to_cpu(oext->ee_block) + oext_alen) {
start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) -
le32_to_cpu(oext->ee_block));
+ start_ext.ee_block = oext->ee_block;
copy_extent_status(oext, &start_ext);
} else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) {
prev_ext = oext - 1;
@@ -515,6 +516,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
start_ext.ee_len = cpu_to_le16(
ext4_ext_get_actual_len(prev_ext) +
new_ext_alen);
+ start_ext.ee_block = oext->ee_block;
copy_extent_status(prev_ext, &start_ext);
new_ext.ee_len = 0;
}
@@ -526,7 +528,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
* new_ext |-------|
*/
if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) {
- ext4_error(orig_inode->i_sb, __func__,
+ ext4_error(orig_inode->i_sb,
"new_ext_end(%u) should be less than or equal to "
"oext->ee_block(%u) + oext_alen(%d) - 1",
new_ext_end, le32_to_cpu(oext->ee_block),
@@ -689,12 +691,12 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
while (1) {
/* The extent for donor must be found. */
if (!dext) {
- ext4_error(donor_inode->i_sb, __func__,
+ ext4_error(donor_inode->i_sb,
"The extent for donor must be found");
*err = -EIO;
goto out;
} else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) {
- ext4_error(donor_inode->i_sb, __func__,
+ ext4_error(donor_inode->i_sb,
"Donor offset(%u) and the first block of donor "
"extent(%u) should be equal",
donor_off,
@@ -928,7 +930,7 @@ out2:
}
/**
- * mext_check_argumants - Check whether move extent can be done
+ * mext_check_arguments - Check whether move extent can be done
*
* @orig_inode: original inode
* @donor_inode: donor inode
@@ -949,14 +951,6 @@ mext_check_arguments(struct inode *orig_inode,
unsigned int blkbits = orig_inode->i_blkbits;
unsigned int blocksize = 1 << blkbits;
- /* Regular file check */
- if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
- ext4_debug("ext4 move extent: The argument files should be "
- "regular file [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
- return -EINVAL;
- }
-
if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
ext4_debug("ext4 move extent: suid or sgid is set"
" to donor file [ino:orig %lu, donor %lu]\n",
@@ -1204,6 +1198,14 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
return -EINVAL;
}
+ /* Regular file check */
+ if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
+ ext4_debug("ext4 move extent: The argument files should be "
+ "regular file [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
+
/* Protect orig and donor inodes against a truncate */
ret1 = mext_inode_double_lock(orig_inode, donor_inode);
if (ret1 < 0)
@@ -1351,7 +1353,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
if (ret1 < 0)
break;
if (*moved_len > len) {
- ext4_error(orig_inode->i_sb, __func__,
+ ext4_error(orig_inode->i_sb,
"We replaced blocks too much! "
"sum of replaced: %llu requested: %llu",
*moved_len, len);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 7f3d2d7..0c070fa 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -383,8 +383,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
if (root->info.hash_version != DX_HASH_TEA &&
root->info.hash_version != DX_HASH_HALF_MD4 &&
root->info.hash_version != DX_HASH_LEGACY) {
- ext4_warning(dir->i_sb, __func__,
- "Unrecognised inode hash code %d",
+ ext4_warning(dir->i_sb, "Unrecognised inode hash code %d",
root->info.hash_version);
brelse(bh);
*err = ERR_BAD_DX_DIR;
@@ -399,8 +398,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
hash = hinfo->hash;
if (root->info.unused_flags & 1) {
- ext4_warning(dir->i_sb, __func__,
- "Unimplemented inode hash flags: %#06x",
+ ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x",
root->info.unused_flags);
brelse(bh);
*err = ERR_BAD_DX_DIR;
@@ -408,8 +406,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
}
if ((indirect = root->info.indirect_levels) > 1) {
- ext4_warning(dir->i_sb, __func__,
- "Unimplemented inode hash depth: %#06x",
+ ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
root->info.indirect_levels);
brelse(bh);
*err = ERR_BAD_DX_DIR;
@@ -421,8 +418,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
if (dx_get_limit(entries) != dx_root_limit(dir,
root->info.info_length)) {
- ext4_warning(dir->i_sb, __func__,
- "dx entry: limit != root limit");
+ ext4_warning(dir->i_sb, "dx entry: limit != root limit");
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto fail;
@@ -433,7 +429,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
{
count = dx_get_count(entries);
if (!count || count > dx_get_limit(entries)) {
- ext4_warning(dir->i_sb, __func__,
+ ext4_warning(dir->i_sb,
"dx entry: no count or count > limit");
brelse(bh);
*err = ERR_BAD_DX_DIR;
@@ -478,7 +474,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
goto fail2;
at = entries = ((struct dx_node *) bh->b_data)->entries;
if (dx_get_limit(entries) != dx_node_limit (dir)) {
- ext4_warning(dir->i_sb, __func__,
+ ext4_warning(dir->i_sb,
"dx entry: limit != node limit");
brelse(bh);
*err = ERR_BAD_DX_DIR;
@@ -494,7 +490,7 @@ fail2:
}
fail:
if (*err == ERR_BAD_DX_DIR)
- ext4_warning(dir->i_sb, __func__,
+ ext4_warning(dir->i_sb,
"Corrupt dir inode %ld, running e2fsck is "
"recommended.", dir->i_ino);
return NULL;
@@ -947,9 +943,8 @@ restart:
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
/* read error, skip block & hope for the best */
- ext4_error(sb, __func__, "reading directory #%lu "
- "offset %lu", dir->i_ino,
- (unsigned long)block);
+ ext4_error(sb, "reading directory #%lu offset %lu",
+ dir->i_ino, (unsigned long)block);
brelse(bh);
goto next;
}
@@ -1041,7 +1036,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
retval = ext4_htree_next_block(dir, hash, frame,
frames, NULL);
if (retval < 0) {
- ext4_warning(sb, __func__,
+ ext4_warning(sb,
"error reading index page in directory #%lu",
dir->i_ino);
*err = retval;
@@ -1071,14 +1066,13 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
__u32 ino = le32_to_cpu(de->inode);
brelse(bh);
if (!ext4_valid_inum(dir->i_sb, ino)) {
- ext4_error(dir->i_sb, "ext4_lookup",
- "bad inode number: %u", ino);
+ ext4_error(dir->i_sb, "bad inode number: %u", ino);
return ERR_PTR(-EIO);
}
inode = ext4_iget(dir->i_sb, ino);
if (unlikely(IS_ERR(inode))) {
if (PTR_ERR(inode) == -ESTALE) {
- ext4_error(dir->i_sb, __func__,
+ ext4_error(dir->i_sb,
"deleted inode referenced: %u",
ino);
return ERR_PTR(-EIO);
@@ -1110,7 +1104,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
brelse(bh);
if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
- ext4_error(child->d_inode->i_sb, "ext4_get_parent",
+ ext4_error(child->d_inode->i_sb,
"bad inode number: %u", ino);
return ERR_PTR(-EIO);
}
@@ -1410,7 +1404,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
de = (struct ext4_dir_entry_2 *)((char *)fde +
ext4_rec_len_from_disk(fde->rec_len, blocksize));
if ((char *) de >= (((char *) root) + blocksize)) {
- ext4_error(dir->i_sb, __func__,
+ ext4_error(dir->i_sb,
"invalid rec_len for '..' in inode %lu",
dir->i_ino);
brelse(bh);
@@ -1575,8 +1569,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
if (levels && (dx_get_count(frames->entries) ==
dx_get_limit(frames->entries))) {
- ext4_warning(sb, __func__,
- "Directory index full!");
+ ext4_warning(sb, "Directory index full!");
err = -ENOSPC;
goto cleanup;
}
@@ -1922,11 +1915,11 @@ static int empty_dir(struct inode *inode)
if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
!(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
if (err)
- ext4_error(inode->i_sb, __func__,
+ ext4_error(inode->i_sb,
"error %d reading directory #%lu offset 0",
err, inode->i_ino);
else
- ext4_warning(inode->i_sb, __func__,
+ ext4_warning(inode->i_sb,
"bad directory (dir #%lu) - no data block",
inode->i_ino);
return 1;
@@ -1937,7 +1930,7 @@ static int empty_dir(struct inode *inode)
!le32_to_cpu(de1->inode) ||
strcmp(".", de->name) ||
strcmp("..", de1->name)) {
- ext4_warning(inode->i_sb, "empty_dir",
+ ext4_warning(inode->i_sb,
"bad directory (dir #%lu) - no `.' or `..'",
inode->i_ino);
brelse(bh);
@@ -1955,7 +1948,7 @@ static int empty_dir(struct inode *inode)
offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err);
if (!bh) {
if (err)
- ext4_error(sb, __func__,
+ ext4_error(sb,
"error %d reading directory"
" #%lu offset %u",
err, inode->i_ino, offset);
@@ -2026,11 +2019,18 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err)
goto out_unlock;
+ /*
+ * Due to previous errors the inode may already be a part of the
+ * on-disk orphan list. If so, skip the on-disk list modification.
+ */
+ if (NEXT_ORPHAN(inode) && NEXT_ORPHAN(inode) <=
+ (le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)))
+ goto mem_insert;
/* Insert this inode at the head of the on-disk orphan list... */
NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
- err = ext4_handle_dirty_metadata(handle, inode, EXT4_SB(sb)->s_sbh);
+ err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
if (!err)
err = rc;
@@ -2043,6 +2043,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
*
* This is safe: on error we're going to ignore the orphan list
* anyway on the next recovery. */
+mem_insert:
if (!err)
list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
@@ -2102,7 +2103,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
if (err)
goto out_brelse;
sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
- err = ext4_handle_dirty_metadata(handle, inode, sbi->s_sbh);
+ err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
} else {
struct ext4_iloc iloc2;
struct inode *i_prev =
@@ -2171,7 +2172,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
if (retval)
goto end_rmdir;
if (!EXT4_DIR_LINK_EMPTY(inode))
- ext4_warning(inode->i_sb, "ext4_rmdir",
+ ext4_warning(inode->i_sb,
"empty directory has too many links (%d)",
inode->i_nlink);
inode->i_version++;
@@ -2225,7 +2226,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
goto end_unlink;
if (!inode->i_nlink) {
- ext4_warning(inode->i_sb, "ext4_unlink",
+ ext4_warning(inode->i_sb,
"Deleting nonexistent file (%lu), %d",
inode->i_ino, inode->i_nlink);
inode->i_nlink = 1;
@@ -2479,7 +2480,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
if (retval) {
- ext4_warning(old_dir->i_sb, "ext4_rename",
+ ext4_warning(old_dir->i_sb,
"Deleting old file (%lu), %d, error=%d",
old_dir->i_ino, old_dir->i_nlink, retval);
}
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 3b2c554..5692c48 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -48,65 +48,54 @@ static int verify_group_input(struct super_block *sb,
ext4_get_group_no_and_offset(sb, start, NULL, &offset);
if (group != sbi->s_groups_count)
- ext4_warning(sb, __func__,
- "Cannot add at group %u (only %u groups)",
+ ext4_warning(sb, "Cannot add at group %u (only %u groups)",
input->group, sbi->s_groups_count);
else if (offset != 0)
- ext4_warning(sb, __func__, "Last group not full");
+ ext4_warning(sb, "Last group not full");
else if (input->reserved_blocks > input->blocks_count / 5)
- ext4_warning(sb, __func__, "Reserved blocks too high (%u)",
+ ext4_warning(sb, "Reserved blocks too high (%u)",
input->reserved_blocks);
else if (free_blocks_count < 0)
- ext4_warning(sb, __func__, "Bad blocks count %u",
+ ext4_warning(sb, "Bad blocks count %u",
input->blocks_count);
else if (!(bh = sb_bread(sb, end - 1)))
- ext4_warning(sb, __func__,
- "Cannot read last block (%llu)",
+ ext4_warning(sb, "Cannot read last block (%llu)",
end - 1);
else if (outside(input->block_bitmap, start, end))
- ext4_warning(sb, __func__,
- "Block bitmap not in group (block %llu)",
+ ext4_warning(sb, "Block bitmap not in group (block %llu)",
(unsigned long long)input->block_bitmap);
else if (outside(input->inode_bitmap, start, end))
- ext4_warning(sb, __func__,
- "Inode bitmap not in group (block %llu)",
+ ext4_warning(sb, "Inode bitmap not in group (block %llu)",
(unsigned long long)input->inode_bitmap);
else if (outside(input->inode_table, start, end) ||
outside(itend - 1, start, end))
- ext4_warning(sb, __func__,
- "Inode table not in group (blocks %llu-%llu)",
+ ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
(unsigned long long)input->inode_table, itend - 1);
else if (input->inode_bitmap == input->block_bitmap)
- ext4_warning(sb, __func__,
- "Block bitmap same as inode bitmap (%llu)",
+ ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
(unsigned long long)input->block_bitmap);
else if (inside(input->block_bitmap, input->inode_table, itend))
- ext4_warning(sb, __func__,
- "Block bitmap (%llu) in inode table (%llu-%llu)",
+ ext4_warning(sb, "Block bitmap (%llu) in inode table "
+ "(%llu-%llu)",
(unsigned long long)input->block_bitmap,
(unsigned long long)input->inode_table, itend - 1);
else if (inside(input->inode_bitmap, input->inode_table, itend))
- ext4_warning(sb, __func__,
- "Inode bitmap (%llu) in inode table (%llu-%llu)",
+ ext4_warning(sb, "Inode bitmap (%llu) in inode table "
+ "(%llu-%llu)",
(unsigned long long)input->inode_bitmap,
(unsigned long long)input->inode_table, itend - 1);
else if (inside(input->block_bitmap, start, metaend))
- ext4_warning(sb, __func__,
- "Block bitmap (%llu) in GDT table"
- " (%llu-%llu)",
+ ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
(unsigned long long)input->block_bitmap,
start, metaend - 1);
else if (inside(input->inode_bitmap, start, metaend))
- ext4_warning(sb, __func__,
- "Inode bitmap (%llu) in GDT table"
- " (%llu-%llu)",
+ ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
(unsigned long long)input->inode_bitmap,
start, metaend - 1);
else if (inside(input->inode_table, start, metaend) ||
inside(itend - 1, start, metaend))
- ext4_warning(sb, __func__,
- "Inode table (%llu-%llu) overlaps"
- "GDT table (%llu-%llu)",
+ ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
+ "(%llu-%llu)",
(unsigned long long)input->inode_table,
itend - 1, start, metaend - 1);
else
@@ -364,8 +353,7 @@ static int verify_reserved_gdb(struct super_block *sb,
while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
if (le32_to_cpu(*p++) !=
grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
- ext4_warning(sb, __func__,
- "reserved GDT %llu"
+ ext4_warning(sb, "reserved GDT %llu"
" missing grp %d (%llu)",
blk, grp,
grp *
@@ -420,8 +408,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
*/
if (EXT4_SB(sb)->s_sbh->b_blocknr !=
le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
- ext4_warning(sb, __func__,
- "won't resize using backup superblock at %llu",
+ ext4_warning(sb, "won't resize using backup superblock at %llu",
(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
return -EPERM;
}
@@ -444,8 +431,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
data = (__le32 *)dind->b_data;
if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
- ext4_warning(sb, __func__,
- "new group %u GDT block %llu not reserved",
+ ext4_warning(sb, "new group %u GDT block %llu not reserved",
input->group, gdblock);
err = -EINVAL;
goto exit_dind;
@@ -468,7 +454,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
GFP_NOFS);
if (!n_group_desc) {
err = -ENOMEM;
- ext4_warning(sb, __func__,
+ ext4_warning(sb,
"not enough memory for %lu groups", gdb_num + 1);
goto exit_inode;
}
@@ -567,8 +553,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
/* Get each reserved primary GDT block and verify it holds backups */
for (res = 0; res < reserved_gdb; res++, blk++) {
if (le32_to_cpu(*data) != blk) {
- ext4_warning(sb, __func__,
- "reserved block %llu"
+ ext4_warning(sb, "reserved block %llu"
" not at offset %ld",
blk,
(long)(data - (__le32 *)dind->b_data));
@@ -713,8 +698,7 @@ static void update_backups(struct super_block *sb,
*/
exit_err:
if (err) {
- ext4_warning(sb, __func__,
- "can't update backup for group %u (err %d), "
+ ext4_warning(sb, "can't update backup for group %u (err %d), "
"forcing fsck on next reboot", group, err);
sbi->s_mount_state &= ~EXT4_VALID_FS;
sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
@@ -753,20 +737,19 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
- ext4_warning(sb, __func__,
- "Can't resize non-sparse filesystem further");
+ ext4_warning(sb, "Can't resize non-sparse filesystem further");
return -EPERM;
}
if (ext4_blocks_count(es) + input->blocks_count <
ext4_blocks_count(es)) {
- ext4_warning(sb, __func__, "blocks_count overflow");
+ ext4_warning(sb, "blocks_count overflow");
return -EINVAL;
}
if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
le32_to_cpu(es->s_inodes_count)) {
- ext4_warning(sb, __func__, "inodes_count overflow");
+ ext4_warning(sb, "inodes_count overflow");
return -EINVAL;
}
@@ -774,14 +757,13 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
if (!EXT4_HAS_COMPAT_FEATURE(sb,
EXT4_FEATURE_COMPAT_RESIZE_INODE)
|| !le16_to_cpu(es->s_reserved_gdt_blocks)) {
- ext4_warning(sb, __func__,
+ ext4_warning(sb,
"No reserved GDT blocks, can't resize");
return -EPERM;
}
inode = ext4_iget(sb, EXT4_RESIZE_INO);
if (IS_ERR(inode)) {
- ext4_warning(sb, __func__,
- "Error opening resize inode");
+ ext4_warning(sb, "Error opening resize inode");
return PTR_ERR(inode);
}
}
@@ -810,8 +792,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
mutex_lock(&sbi->s_resize_lock);
if (input->group != sbi->s_groups_count) {
- ext4_warning(sb, __func__,
- "multiple resizers run on filesystem!");
+ ext4_warning(sb, "multiple resizers run on filesystem!");
err = -EBUSY;
goto exit_journal;
}
@@ -997,13 +978,12 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
" too large to resize to %llu blocks safely\n",
sb->s_id, n_blocks_count);
if (sizeof(sector_t) < 8)
- ext4_warning(sb, __func__, "CONFIG_LBDAF not enabled");
+ ext4_warning(sb, "CONFIG_LBDAF not enabled");
return -EINVAL;
}
if (n_blocks_count < o_blocks_count) {
- ext4_warning(sb, __func__,
- "can't shrink FS - resize aborted");
+ ext4_warning(sb, "can't shrink FS - resize aborted");
return -EBUSY;
}
@@ -1011,15 +991,14 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
if (last == 0) {
- ext4_warning(sb, __func__,
- "need to use ext2online to resize further");
+ ext4_warning(sb, "need to use ext2online to resize further");
return -EPERM;
}
add = EXT4_BLOCKS_PER_GROUP(sb) - last;
if (o_blocks_count + add < o_blocks_count) {
- ext4_warning(sb, __func__, "blocks_count overflow");
+ ext4_warning(sb, "blocks_count overflow");
return -EINVAL;
}
@@ -1027,16 +1006,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
add = n_blocks_count - o_blocks_count;
if (o_blocks_count + add < n_blocks_count)
- ext4_warning(sb, __func__,
- "will only finish group (%llu"
- " blocks, %u new)",
+ ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
o_blocks_count + add, add);
/* See if the device is actually as big as what was requested */
bh = sb_bread(sb, o_blocks_count + add - 1);
if (!bh) {
- ext4_warning(sb, __func__,
- "can't read last block, resize aborted");
+ ext4_warning(sb, "can't read last block, resize aborted");
return -ENOSPC;
}
brelse(bh);
@@ -1047,14 +1023,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
handle = ext4_journal_start_sb(sb, 3);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
- ext4_warning(sb, __func__, "error %d on journal start", err);
+ ext4_warning(sb, "error %d on journal start", err);
goto exit_put;
}
mutex_lock(&EXT4_SB(sb)->s_resize_lock);
if (o_blocks_count != ext4_blocks_count(es)) {
- ext4_warning(sb, __func__,
- "multiple resizers run on filesystem!");
+ ext4_warning(sb, "multiple resizers run on filesystem!");
mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
ext4_journal_stop(handle);
err = -EBUSY;
@@ -1063,8 +1038,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
if ((err = ext4_journal_get_write_access(handle,
EXT4_SB(sb)->s_sbh))) {
- ext4_warning(sb, __func__,
- "error %d on journal write access", err);
+ ext4_warning(sb, "error %d on journal write access", err);
mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
ext4_journal_stop(handle);
goto exit_put;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index edcf3b0..2b83b96 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -333,7 +333,7 @@ static void ext4_handle_error(struct super_block *sb)
sb->s_id);
}
-void ext4_error(struct super_block *sb, const char *function,
+void __ext4_error(struct super_block *sb, const char *function,
const char *fmt, ...)
{
va_list args;
@@ -347,6 +347,42 @@ void ext4_error(struct super_block *sb, const char *function,
ext4_handle_error(sb);
}
+void ext4_error_inode(const char *function, struct inode *inode,
+ const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ printk(KERN_CRIT "EXT4-fs error (device %s): %s: inode #%lu: (comm %s) ",
+ inode->i_sb->s_id, function, inode->i_ino, current->comm);
+ vprintk(fmt, args);
+ printk("\n");
+ va_end(args);
+
+ ext4_handle_error(inode->i_sb);
+}
+
+void ext4_error_file(const char *function, struct file *file,
+ const char *fmt, ...)
+{
+ va_list args;
+ struct inode *inode = file->f_dentry->d_inode;
+ char pathname[80], *path;
+
+ va_start(args, fmt);
+ path = d_path(&(file->f_path), pathname, sizeof(pathname));
+ if (!path)
+ path = "(unknown)";
+ printk(KERN_CRIT
+ "EXT4-fs error (device %s): %s: inode #%lu (comm %s path %s): ",
+ inode->i_sb->s_id, function, inode->i_ino, current->comm, path);
+ vprintk(fmt, args);
+ printk("\n");
+ va_end(args);
+
+ ext4_handle_error(inode->i_sb);
+}
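The two helpers added above follow the usual va_list pattern: the fixed arguments carry the context (device, function, inode), and the caller's format string plus varargs are forwarded to vprintk(). A userspace sketch of the same shape, with vfprintf() standing in for vprintk() and all names invented:

#include <stdarg.h>
#include <stdio.h>

/* Same shape as ext4_error_inode() above: print fixed context,
 * then forward the caller's format string and arguments. */
static void report_error(const char *function, unsigned long ino,
			 const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	fprintf(stderr, "fs error: %s: inode #%lu: ", function, ino);
	vfprintf(stderr, fmt, args);
	fputc('\n', stderr);
	va_end(args);
}

int main(void)
{
	report_error(__func__, 12UL, "bad block %llu", 345ULL);
	return 0;
}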
+
static const char *ext4_decode_error(struct super_block *sb, int errno,
char nbuf[16])
{
@@ -450,7 +486,7 @@ void ext4_msg (struct super_block * sb, const char *prefix,
va_end(args);
}
-void ext4_warning(struct super_block *sb, const char *function,
+void __ext4_warning(struct super_block *sb, const char *function,
const char *fmt, ...)
{
va_list args;
@@ -507,7 +543,7 @@ void ext4_update_dynamic_rev(struct super_block *sb)
if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
return;
- ext4_warning(sb, __func__,
+ ext4_warning(sb,
"updating to rev %d because of new feature flag, "
"running e2fsck is recommended",
EXT4_DYNAMIC_REV);
@@ -708,7 +744,8 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
#endif
- INIT_LIST_HEAD(&ei->i_aio_dio_complete_list);
+ INIT_LIST_HEAD(&ei->i_completed_io_list);
+ spin_lock_init(&ei->i_completed_io_lock);
ei->cur_aio_dio = NULL;
ei->i_sync_tid = 0;
ei->i_datasync_tid = 0;
@@ -797,10 +834,10 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
if (sbi->s_qf_names[GRPQUOTA])
seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
- if (sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA)
+ if (test_opt(sb, USRQUOTA))
seq_puts(seq, ",usrquota");
- if (sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA)
+ if (test_opt(sb, GRPQUOTA))
seq_puts(seq, ",grpquota");
#endif
}
@@ -927,6 +964,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (test_opt(sb, NOLOAD))
seq_puts(seq, ",norecovery");
+ if (test_opt(sb, DIOREAD_NOLOCK))
+ seq_puts(seq, ",dioread_nolock");
+
ext4_show_quota_options(seq, sb);
return 0;
@@ -1100,6 +1140,7 @@ enum {
Opt_stripe, Opt_delalloc, Opt_nodelalloc,
Opt_block_validity, Opt_noblock_validity,
Opt_inode_readahead_blks, Opt_journal_ioprio,
+ Opt_dioread_nolock, Opt_dioread_lock,
Opt_discard, Opt_nodiscard,
};
@@ -1167,6 +1208,8 @@ static const match_table_t tokens = {
{Opt_auto_da_alloc, "auto_da_alloc=%u"},
{Opt_auto_da_alloc, "auto_da_alloc"},
{Opt_noauto_da_alloc, "noauto_da_alloc"},
+ {Opt_dioread_nolock, "dioread_nolock"},
+ {Opt_dioread_lock, "dioread_lock"},
{Opt_discard, "discard"},
{Opt_nodiscard, "nodiscard"},
{Opt_err, NULL},
@@ -1196,6 +1239,66 @@ static ext4_fsblk_t get_sb_block(void **data)
}
#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
+static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
+ "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
+
+#ifdef CONFIG_QUOTA
+static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ char *qname;
+
+ if (sb_any_quota_loaded(sb) &&
+ !sbi->s_qf_names[qtype]) {
+ ext4_msg(sb, KERN_ERR,
+ "Cannot change journaled "
+ "quota options when quota turned on");
+ return 0;
+ }
+ qname = match_strdup(args);
+ if (!qname) {
+ ext4_msg(sb, KERN_ERR,
+ "Not enough memory for storing quotafile name");
+ return 0;
+ }
+ if (sbi->s_qf_names[qtype] &&
+ strcmp(sbi->s_qf_names[qtype], qname)) {
+ ext4_msg(sb, KERN_ERR,
+ "%s quota file already specified", QTYPE2NAME(qtype));
+ kfree(qname);
+ return 0;
+ }
+ sbi->s_qf_names[qtype] = qname;
+ if (strchr(sbi->s_qf_names[qtype], '/')) {
+ ext4_msg(sb, KERN_ERR,
+ "quotafile must be on filesystem root");
+ kfree(sbi->s_qf_names[qtype]);
+ sbi->s_qf_names[qtype] = NULL;
+ return 0;
+ }
+ set_opt(sbi->s_mount_opt, QUOTA);
+ return 1;
+}
+
+static int clear_qf_name(struct super_block *sb, int qtype)
+{
+
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ if (sb_any_quota_loaded(sb) &&
+ sbi->s_qf_names[qtype]) {
+ ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
+ " when quota turned on");
+ return 0;
+ }
+ /*
+ * The space will be released later when all options are confirmed
+ * to be correct
+ */
+ sbi->s_qf_names[qtype] = NULL;
+ return 1;
+}
+#endif
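Beyond factoring the shared goto labels into set_qf_name()/clear_qf_name() above, later hunks in this file let options such as barrier and auto_da_alloc take an optional argument: if match_token() filled args[0] the value is parsed, otherwise it defaults to 1. A stripped-down userspace sketch of that "argument present?" test, with invented parsing in place of the kernel's match-table machinery:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "name" or "name=N": with no argument the option defaults
 * to 1, mirroring the barrier/auto_da_alloc handling below. */
static int parse_flag_option(const char *opt, const char *name,
			     int *value)
{
	size_t len = strlen(name);

	if (strncmp(opt, name, len) != 0)
		return 0;			/* not this option */
	if (opt[len] == '\0') {
		*value = 1;			/* no argument: default to 1 */
		return 1;
	}
	if (opt[len] != '=')
		return 0;
	*value = atoi(opt + len + 1);
	return 1;
}

int main(void)
{
	int v;

	if (parse_flag_option("barrier", "barrier", &v))
		printf("barrier=%d\n", v);	/* barrier=1 */
	if (parse_flag_option("barrier=0", "barrier", &v))
		printf("barrier=%d\n", v);	/* barrier=0 */
	return 0;
}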
static int parse_options(char *options, struct super_block *sb,
unsigned long *journal_devnum,
@@ -1208,8 +1311,7 @@ static int parse_options(char *options, struct super_block *sb,
int data_opt = 0;
int option;
#ifdef CONFIG_QUOTA
- int qtype, qfmt;
- char *qname;
+ int qfmt;
#endif
if (!options)
@@ -1220,19 +1322,31 @@ static int parse_options(char *options, struct super_block *sb,
if (!*p)
continue;
+ /*
+ * Initialize args struct so we know whether arg was
+ * found; some options take optional arguments.
+ */
+ args[0].to = args[0].from = 0;
token = match_token(p, tokens, args);
switch (token) {
case Opt_bsd_df:
+ ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
clear_opt(sbi->s_mount_opt, MINIX_DF);
break;
case Opt_minix_df:
+ ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
set_opt(sbi->s_mount_opt, MINIX_DF);
+
break;
case Opt_grpid:
+ ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
set_opt(sbi->s_mount_opt, GRPID);
+
break;
case Opt_nogrpid:
+ ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
clear_opt(sbi->s_mount_opt, GRPID);
+
break;
case Opt_resuid:
if (match_int(&args[0], &option))
@@ -1369,14 +1483,13 @@ static int parse_options(char *options, struct super_block *sb,
data_opt = EXT4_MOUNT_WRITEBACK_DATA;
datacheck:
if (is_remount) {
- if ((sbi->s_mount_opt & EXT4_MOUNT_DATA_FLAGS)
- != data_opt) {
+ if (test_opt(sb, DATA_FLAGS) != data_opt) {
ext4_msg(sb, KERN_ERR,
"Cannot change data mode on remount");
return 0;
}
} else {
- sbi->s_mount_opt &= ~EXT4_MOUNT_DATA_FLAGS;
+ clear_opt(sbi->s_mount_opt, DATA_FLAGS);
sbi->s_mount_opt |= data_opt;
}
break;
@@ -1388,63 +1501,22 @@ static int parse_options(char *options, struct super_block *sb,
break;
#ifdef CONFIG_QUOTA
case Opt_usrjquota:
- qtype = USRQUOTA;
- goto set_qf_name;
- case Opt_grpjquota:
- qtype = GRPQUOTA;
-set_qf_name:
- if (sb_any_quota_loaded(sb) &&
- !sbi->s_qf_names[qtype]) {
- ext4_msg(sb, KERN_ERR,
- "Cannot change journaled "
- "quota options when quota turned on");
+ if (!set_qf_name(sb, USRQUOTA, &args[0]))
return 0;
- }
- qname = match_strdup(&args[0]);
- if (!qname) {
- ext4_msg(sb, KERN_ERR,
- "Not enough memory for "
- "storing quotafile name");
- return 0;
- }
- if (sbi->s_qf_names[qtype] &&
- strcmp(sbi->s_qf_names[qtype], qname)) {
- ext4_msg(sb, KERN_ERR,
- "%s quota file already "
- "specified", QTYPE2NAME(qtype));
- kfree(qname);
- return 0;
- }
- sbi->s_qf_names[qtype] = qname;
- if (strchr(sbi->s_qf_names[qtype], '/')) {
- ext4_msg(sb, KERN_ERR,
- "quotafile must be on "
- "filesystem root");
- kfree(sbi->s_qf_names[qtype]);
- sbi->s_qf_names[qtype] = NULL;
+ break;
+ case Opt_grpjquota:
+ if (!set_qf_name(sb, GRPQUOTA, &args[0]))
return 0;
- }
- set_opt(sbi->s_mount_opt, QUOTA);
break;
case Opt_offusrjquota:
- qtype = USRQUOTA;
- goto clear_qf_name;
+ if (!clear_qf_name(sb, USRQUOTA))
+ return 0;
+ break;
case Opt_offgrpjquota:
- qtype = GRPQUOTA;
-clear_qf_name:
- if (sb_any_quota_loaded(sb) &&
- sbi->s_qf_names[qtype]) {
- ext4_msg(sb, KERN_ERR, "Cannot change "
- "journaled quota options when "
- "quota turned on");
+ if (!clear_qf_name(sb, GRPQUOTA))
return 0;
- }
- /*
- * The space will be released later when all options
- * are confirmed to be correct
- */
- sbi->s_qf_names[qtype] = NULL;
break;
+
case Opt_jqfmt_vfsold:
qfmt = QFMT_VFS_OLD;
goto set_qf_format;
@@ -1509,10 +1581,11 @@ set_qf_format:
clear_opt(sbi->s_mount_opt, BARRIER);
break;
case Opt_barrier:
- if (match_int(&args[0], &option)) {
- set_opt(sbi->s_mount_opt, BARRIER);
- break;
- }
+ if (args[0].from) {
+ if (match_int(&args[0], &option))
+ return 0;
+ } else
+ option = 1; /* No argument, default to 1 */
if (option)
set_opt(sbi->s_mount_opt, BARRIER);
else
@@ -1585,10 +1658,11 @@ set_qf_format:
set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
break;
case Opt_auto_da_alloc:
- if (match_int(&args[0], &option)) {
- clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
- break;
- }
+ if (args[0].from) {
+ if (match_int(&args[0], &option))
+ return 0;
+ } else
+ option = 1; /* No argument, default to 1 */
if (option)
clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
else
@@ -1600,6 +1674,12 @@ set_qf_format:
case Opt_nodiscard:
clear_opt(sbi->s_mount_opt, DISCARD);
break;
+ case Opt_dioread_nolock:
+ set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+ break;
+ case Opt_dioread_lock:
+ clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+ break;
default:
ext4_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" "
@@ -1609,18 +1689,13 @@ set_qf_format:
}
#ifdef CONFIG_QUOTA
if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
- if ((sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA) &&
- sbi->s_qf_names[USRQUOTA])
+ if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sbi->s_mount_opt, USRQUOTA);
- if ((sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA) &&
- sbi->s_qf_names[GRPQUOTA])
+ if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
clear_opt(sbi->s_mount_opt, GRPQUOTA);
- if ((sbi->s_qf_names[USRQUOTA] &&
- (sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA)) ||
- (sbi->s_qf_names[GRPQUOTA] &&
- (sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA))) {
+ if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
ext4_msg(sb, KERN_ERR, "old and new quota "
"format mixing");
return 0;
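
The conversions from open-coded sbi->s_mount_opt & EXT4_MOUNT_* tests to test_opt()/set_opt()/clear_opt() throughout this patch rely on the token-pasting macros in fs/ext4/ext4.h, which in this era read roughly as follows (paraphrased; exact parenthesization may differ):

        #define clear_opt(o, opt)       ((o) &= ~EXT4_MOUNT_##opt)
        #define set_opt(o, opt)         ((o) |= EXT4_MOUNT_##opt)
        #define test_opt(sb, opt)       (EXT4_SB(sb)->s_mount_opt & EXT4_MOUNT_##opt)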
@@ -2423,8 +2498,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
if (def_mount_opts & EXT4_DEFM_DEBUG)
set_opt(sbi->s_mount_opt, DEBUG);
- if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
+ if (def_mount_opts & EXT4_DEFM_BSDGROUPS) {
+ ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups",
+ "2.6.38");
set_opt(sbi->s_mount_opt, GRPID);
+ }
if (def_mount_opts & EXT4_DEFM_UID16)
set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT4_FS_XATTR
@@ -2436,11 +2514,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
- sbi->s_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
+ set_opt(sbi->s_mount_opt, JOURNAL_DATA);
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
- sbi->s_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
+ set_opt(sbi->s_mount_opt, ORDERED_DATA);
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
- sbi->s_mount_opt |= EXT4_MOUNT_WRITEBACK_DATA;
+ set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
set_opt(sbi->s_mount_opt, ERRORS_PANIC);
@@ -2468,7 +2546,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- ((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+ (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
(EXT4_HAS_COMPAT_FEATURE(sb, ~0U) ||
@@ -2757,7 +2835,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
ext4_msg(sb, KERN_ERR, "required journal recovery "
"suppressed and not mounted read-only");
- goto failed_mount4;
+ goto failed_mount_wq;
} else {
clear_opt(sbi->s_mount_opt, DATA_FLAGS);
set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
@@ -2770,7 +2848,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
!jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
JBD2_FEATURE_INCOMPAT_64BIT)) {
ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
- goto failed_mount4;
+ goto failed_mount_wq;
}
if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
@@ -2809,7 +2887,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
ext4_msg(sb, KERN_ERR, "Journal does not support "
"requested data journaling mode");
- goto failed_mount4;
+ goto failed_mount_wq;
}
default:
break;
@@ -2817,13 +2895,17 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
no_journal:
-
if (test_opt(sb, NOBH)) {
if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - "
"its supported only with writeback mode");
clear_opt(sbi->s_mount_opt, NOBH);
}
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+ ext4_msg(sb, KERN_WARNING, "dioread_nolock option is "
+ "not supported with nobh mode");
+ goto failed_mount_wq;
+ }
}
EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
if (!EXT4_SB(sb)->dio_unwritten_wq) {
@@ -2888,6 +2970,18 @@ no_journal:
"requested data journaling mode");
clear_opt(sbi->s_mount_opt, DELALLOC);
}
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
+ ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
+ "option - requested data journaling mode");
+ clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+ }
+ if (sb->s_blocksize < PAGE_SIZE) {
+ ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
+ "option - block size is too small");
+ clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+ }
+ }
err = ext4_setup_system_zone(sb);
if (err) {
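
dioread_nolock works by writing data into unwritten extents and converting them after I/O completion, which is incompatible with full data journaling, and it needs one block per page so a page can be covered by a single extent conversion; hence the two downgrade checks above. Condensed into a predicate (illustrative, not a helper in the patch):

        static bool dioread_nolock_compatible(struct super_block *sb)
        {
                return test_opt(sb, DATA_FLAGS) != EXT4_MOUNT_JOURNAL_DATA &&
                       sb->s_blocksize >= PAGE_SIZE;
        }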
@@ -3351,10 +3445,9 @@ static void ext4_clear_journal_err(struct super_block *sb,
char nbuf[16];
errstr = ext4_decode_error(sb, j_errno, nbuf);
- ext4_warning(sb, __func__, "Filesystem error recorded "
+ ext4_warning(sb, "Filesystem error recorded "
"from previous mount: %s", errstr);
- ext4_warning(sb, __func__, "Marking fs in need of "
- "filesystem check.");
+ ext4_warning(sb, "Marking fs in need of filesystem check.");
EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
@@ -3505,7 +3598,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
ext4_abort(sb, __func__, "Abort forced by user");
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- ((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+ (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
es = sbi->s_es;
@@ -3908,9 +4001,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
int err = 0;
int offset = off & (sb->s_blocksize - 1);
- int tocopy;
int journal_quota = EXT4_SB(sb)->s_qf_names[type] != NULL;
- size_t towrite = len;
struct buffer_head *bh;
handle_t *handle = journal_current_handle();
@@ -3920,52 +4011,53 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
(unsigned long long)off, (unsigned long long)len);
return -EIO;
}
+ /*
+ * Since we account for only one data block in transaction credits,
+ * it is impossible to cross a block boundary.
+ */
+ if (sb->s_blocksize - offset < len) {
+ ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
+ " cancelled because not block aligned",
+ (unsigned long long)off, (unsigned long long)len);
+ return -EIO;
+ }
+
mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
- while (towrite > 0) {
- tocopy = sb->s_blocksize - offset < towrite ?
- sb->s_blocksize - offset : towrite;
- bh = ext4_bread(handle, inode, blk, 1, &err);
- if (!bh)
+ bh = ext4_bread(handle, inode, blk, 1, &err);
+ if (!bh)
+ goto out;
+ if (journal_quota) {
+ err = ext4_journal_get_write_access(handle, bh);
+ if (err) {
+ brelse(bh);
goto out;
- if (journal_quota) {
- err = ext4_journal_get_write_access(handle, bh);
- if (err) {
- brelse(bh);
- goto out;
- }
}
- lock_buffer(bh);
- memcpy(bh->b_data+offset, data, tocopy);
- flush_dcache_page(bh->b_page);
- unlock_buffer(bh);
- if (journal_quota)
- err = ext4_handle_dirty_metadata(handle, NULL, bh);
- else {
- /* Always do at least ordered writes for quotas */
- err = ext4_jbd2_file_inode(handle, inode);
- mark_buffer_dirty(bh);
- }
- brelse(bh);
- if (err)
- goto out;
- offset = 0;
- towrite -= tocopy;
- data += tocopy;
- blk++;
}
+ lock_buffer(bh);
+ memcpy(bh->b_data+offset, data, len);
+ flush_dcache_page(bh->b_page);
+ unlock_buffer(bh);
+ if (journal_quota)
+ err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ else {
+ /* Always do at least ordered writes for quotas */
+ err = ext4_jbd2_file_inode(handle, inode);
+ mark_buffer_dirty(bh);
+ }
+ brelse(bh);
out:
- if (len == towrite) {
+ if (err) {
mutex_unlock(&inode->i_mutex);
return err;
}
- if (inode->i_size < off+len-towrite) {
- i_size_write(inode, off+len-towrite);
+ if (inode->i_size < off + len) {
+ i_size_write(inode, off + len);
EXT4_I(inode)->i_disksize = inode->i_size;
}
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
ext4_mark_inode_dirty(handle, inode);
mutex_unlock(&inode->i_mutex);
- return len - towrite;
+ return len;
}
#endif
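
The block-at-a-time loop in ext4_quota_write() is gone: the transaction only carries credits for a single data block, so a write spanning a block boundary would be a caller bug, and the function now either writes the whole request or fails. The new precondition, isolated (illustrative helper, not part of the patch):

        static inline int quota_write_fits_one_block(struct super_block *sb,
                                                     loff_t off, size_t len)
        {
                int offset = off & (sb->s_blocksize - 1);

                return len <= sb->s_blocksize - offset;
        }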
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index ab3a95e..b4c5aa8 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -227,7 +227,8 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
if (ext4_xattr_check_block(bh)) {
-bad_block: ext4_error(inode->i_sb, __func__,
+bad_block:
+ ext4_error(inode->i_sb,
"inode %lu: bad block %llu", inode->i_ino,
EXT4_I(inode)->i_file_acl);
error = -EIO;
@@ -267,7 +268,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
void *end;
int error;
- if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR))
+ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
return -ENODATA;
error = ext4_get_inode_loc(inode, &iloc);
if (error)
@@ -371,7 +372,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
if (ext4_xattr_check_block(bh)) {
- ext4_error(inode->i_sb, __func__,
+ ext4_error(inode->i_sb,
"inode %lu: bad block %llu", inode->i_ino,
EXT4_I(inode)->i_file_acl);
error = -EIO;
@@ -396,7 +397,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
void *end;
int error;
- if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR))
+ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
return 0;
error = ext4_get_inode_loc(inode, &iloc);
if (error)
@@ -665,9 +666,8 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
atomic_read(&(bs->bh->b_count)),
le32_to_cpu(BHDR(bs->bh)->h_refcount));
if (ext4_xattr_check_block(bs->bh)) {
- ext4_error(sb, __func__,
- "inode %lu: bad block %llu", inode->i_ino,
- EXT4_I(inode)->i_file_acl);
+ ext4_error(sb, "inode %lu: bad block %llu",
+ inode->i_ino, EXT4_I(inode)->i_file_acl);
error = -EIO;
goto cleanup;
}
@@ -880,9 +880,8 @@ cleanup_dquot:
goto cleanup;
bad_block:
- ext4_error(inode->i_sb, __func__,
- "inode %lu: bad block %llu", inode->i_ino,
- EXT4_I(inode)->i_file_acl);
+ ext4_error(inode->i_sb, "inode %lu: bad block %llu",
+ inode->i_ino, EXT4_I(inode)->i_file_acl);
goto cleanup;
#undef header
@@ -908,7 +907,7 @@ ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
is->s.base = is->s.first = IFIRST(header);
is->s.here = is->s.first;
is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
- if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
+ if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
error = ext4_xattr_check_names(IFIRST(header), is->s.end);
if (error)
return error;
@@ -940,10 +939,10 @@ ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
header = IHDR(inode, ext4_raw_inode(&is->iloc));
if (!IS_LAST_ENTRY(s->first)) {
header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
- EXT4_I(inode)->i_state |= EXT4_STATE_XATTR;
+ ext4_set_inode_state(inode, EXT4_STATE_XATTR);
} else {
header->h_magic = cpu_to_le32(0);
- EXT4_I(inode)->i_state &= ~EXT4_STATE_XATTR;
+ ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
}
return 0;
}
@@ -986,8 +985,8 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
if (strlen(name) > 255)
return -ERANGE;
down_write(&EXT4_I(inode)->xattr_sem);
- no_expand = EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND;
- EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
+ no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
+ ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
error = ext4_get_inode_loc(inode, &is.iloc);
if (error)
@@ -997,10 +996,10 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
if (error)
goto cleanup;
- if (EXT4_I(inode)->i_state & EXT4_STATE_NEW) {
+ if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
- EXT4_I(inode)->i_state &= ~EXT4_STATE_NEW;
+ ext4_clear_inode_state(inode, EXT4_STATE_NEW);
}
error = ext4_xattr_ibody_find(inode, &i, &is);
@@ -1052,7 +1051,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
ext4_xattr_update_super_block(handle, inode->i_sb);
inode->i_ctime = ext4_current_time(inode);
if (!value)
- EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND;
+ ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
/*
* The bh is consumed by ext4_mark_iloc_dirty, even with
@@ -1067,7 +1066,7 @@ cleanup:
brelse(is.iloc.bh);
brelse(bs.bh);
if (no_expand == 0)
- EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND;
+ ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
up_write(&EXT4_I(inode)->xattr_sem);
return error;
}
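
The EXT4_STATE_* conversions above go through small inline helpers rather than open-coded |=/&= on i_state; the companion ext4 patch that introduces them (assuming it also widens i_state to an unsigned long) defines them roughly as:

        static inline int ext4_test_inode_state(struct inode *inode, int bit)
        {
                return test_bit(bit, &EXT4_I(inode)->i_state);
        }

        static inline void ext4_set_inode_state(struct inode *inode, int bit)
        {
                set_bit(bit, &EXT4_I(inode)->i_state);
        }

        static inline void ext4_clear_inode_state(struct inode *inode, int bit)
        {
                clear_bit(bit, &EXT4_I(inode)->i_state);
        }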
@@ -1195,9 +1194,8 @@ retry:
if (!bh)
goto cleanup;
if (ext4_xattr_check_block(bh)) {
- ext4_error(inode->i_sb, __func__,
- "inode %lu: bad block %llu", inode->i_ino,
- EXT4_I(inode)->i_file_acl);
+ ext4_error(inode->i_sb, "inode %lu: bad block %llu",
+ inode->i_ino, EXT4_I(inode)->i_file_acl);
error = -EIO;
goto cleanup;
}
@@ -1302,6 +1300,8 @@ retry:
/* Remove the chosen entry from the inode */
error = ext4_xattr_ibody_set(handle, inode, &i, is);
+ if (error)
+ goto cleanup;
entry = IFIRST(header);
if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
@@ -1372,16 +1372,14 @@ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
goto cleanup;
bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
if (!bh) {
- ext4_error(inode->i_sb, __func__,
- "inode %lu: block %llu read error", inode->i_ino,
- EXT4_I(inode)->i_file_acl);
+ ext4_error(inode->i_sb, "inode %lu: block %llu read error",
+ inode->i_ino, EXT4_I(inode)->i_file_acl);
goto cleanup;
}
if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
BHDR(bh)->h_blocks != cpu_to_le32(1)) {
- ext4_error(inode->i_sb, __func__,
- "inode %lu: bad block %llu", inode->i_ino,
- EXT4_I(inode)->i_file_acl);
+ ext4_error(inode->i_sb, "inode %lu: bad block %llu",
+ inode->i_ino, EXT4_I(inode)->i_file_acl);
goto cleanup;
}
ext4_xattr_release_block(handle, inode, bh);
@@ -1506,7 +1504,7 @@ again:
}
bh = sb_bread(inode->i_sb, ce->e_block);
if (!bh) {
- ext4_error(inode->i_sb, __func__,
+ ext4_error(inode->i_sb,
"inode %lu: block %lu read error",
inode->i_ino, (unsigned long) ce->e_block);
} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 14da530..fbeecdc 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -577,7 +577,7 @@ static inline loff_t fat_i_pos_read(struct msdos_sb_info *sbi,
return i_pos;
}
-static int fat_write_inode(struct inode *inode, int wait)
+static int __fat_write_inode(struct inode *inode, int wait)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
@@ -634,9 +634,14 @@ retry:
return err;
}
+static int fat_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ return __fat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+}
+
int fat_sync_inode(struct inode *inode)
{
- return fat_write_inode(inode, 1);
+ return __fat_write_inode(inode, 1);
}
EXPORT_SYMBOL_GPL(fat_sync_inode);
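
FAT illustrates the adaptation every filesystem makes for the new ->write_inode signature in this series: keep the old int-based body as a double-underscore helper for internal callers such as fat_sync_inode(), and derive the boolean from the wbc in the method itself. The pattern in miniature (the demo_* names are hypothetical):

        static int __demo_write_inode(struct inode *inode, int wait)
        {
                return 0;       /* stand-in for the filesystem's original body */
        }

        static int demo_write_inode(struct inode *inode,
                                    struct writeback_control *wbc)
        {
                return __demo_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
        }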
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 1a7c42c..76fc4d5 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -381,10 +381,10 @@ static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}
-static int write_inode(struct inode *inode, int sync)
+static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
- return inode->i_sb->s_op->write_inode(inode, sync);
+ return inode->i_sb->s_op->write_inode(inode, wbc);
return 0;
}
@@ -421,7 +421,6 @@ static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
struct address_space *mapping = inode->i_mapping;
- int wait = wbc->sync_mode == WB_SYNC_ALL;
unsigned dirty;
int ret;
@@ -439,7 +438,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* We'll have another go at writing back this inode when we
* completed a full scan of b_io.
*/
- if (!wait) {
+ if (wbc->sync_mode != WB_SYNC_ALL) {
requeue_io(inode);
return 0;
}
@@ -461,15 +460,20 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
ret = do_writepages(mapping, wbc);
- /* Don't write the inode if only I_DIRTY_PAGES was set */
- if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
- int err = write_inode(inode, wait);
+ /*
+ * Make sure to wait on the data before writing out the metadata.
+ * This is important for filesystems that modify metadata on data
+ * I/O completion.
+ */
+ if (wbc->sync_mode == WB_SYNC_ALL) {
+ int err = filemap_fdatawait(mapping);
if (ret == 0)
ret = err;
}
- if (wait) {
- int err = filemap_fdatawait(mapping);
+ /* Don't write the inode if only I_DIRTY_PAGES was set */
+ if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
+ int err = write_inode(inode, wbc);
if (ret == 0)
ret = err;
}
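
The reordering above matters for filesystems that update metadata from I/O completion handlers (ext4's unwritten-extent conversion, introduced alongside dioread_nolock, is the motivating case): for WB_SYNC_ALL the data is now waited on before ->write_inode runs, so the inode written out reflects the completed I/O. The resulting order, condensed (this sketch drops the I_DIRTY bookkeeping of the real function):

        static int wb_order_sketch(struct inode *inode, struct writeback_control *wbc)
        {
                struct address_space *mapping = inode->i_mapping;
                int ret, err;

                ret = do_writepages(mapping, wbc);      /* 1: start data I/O */

                if (wbc->sync_mode == WB_SYNC_ALL) {
                        /* 2: wait; end_io handlers may dirty metadata here */
                        err = filemap_fdatawait(mapping);
                        if (ret == 0)
                                ret = err;
                }

                err = write_inode(inode, wbc);          /* 3: metadata last */
                if (ret == 0)
                        ret = err;
                return ret;
        }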
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 51d9e33..eb7e942 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -865,13 +865,10 @@ static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
down_read(&fc->killsb);
err = -ENOENT;
- if (!fc->sb)
- goto err_unlock;
-
- err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
- outarg.off, outarg.len);
-
-err_unlock:
+ if (fc->sb) {
+ err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
+ outarg.off, outarg.len);
+ }
up_read(&fc->killsb);
return err;
@@ -884,10 +881,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_inval_entry_out outarg;
- int err = -EINVAL;
- char buf[FUSE_NAME_MAX+1];
+ int err = -ENOMEM;
+ char *buf;
struct qstr name;
+ buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
+ if (!buf)
+ goto err;
+
+ err = -EINVAL;
if (size < sizeof(outarg))
goto err;
@@ -910,16 +912,14 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
down_read(&fc->killsb);
err = -ENOENT;
- if (!fc->sb)
- goto err_unlock;
-
- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
-
-err_unlock:
+ if (fc->sb)
+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
up_read(&fc->killsb);
+ kfree(buf);
return err;
err:
+ kfree(buf);
fuse_copy_finish(cs);
return err;
}
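
The fuse change replaces a FUSE_NAME_MAX+1 byte on-stack buffer (roughly 1 KiB) with a heap allocation: kernel stacks are only 4-8 KiB, so an array that size in a single frame risks overflow. Every exit path, including the early error path, now frees the buffer. The shape of the fix (illustrative):

        static int name_buf_demo(void)
        {
                char *buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);

                if (!buf)
                        return -ENOMEM;
                /* ... copy the name from the request into buf ... */
                kfree(buf);
                return 0;
        }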
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 7b8da94..0c1d0b8 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -1061,8 +1061,8 @@ out:
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
- struct inode *aspace = page->mapping->host;
- struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
+ struct address_space *mapping = page->mapping;
+ struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct buffer_head *bh, *head;
struct gfs2_bufdata *bd;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f426633..454d4b4 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -19,7 +19,6 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
-#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
@@ -60,7 +59,6 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
-static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
@@ -154,12 +152,14 @@ static unsigned int gl_hash(const struct gfs2_sbd *sdp,
static void glock_free(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
- struct inode *aspace = gl->gl_aspace;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
+ struct kmem_cache *cachep = gfs2_glock_cachep;
- if (aspace)
- gfs2_aspace_put(aspace);
+ GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
trace_gfs2_glock_put(gl);
- sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl);
+ if (mapping)
+ cachep = gfs2_glock_aspace_cachep;
+ sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
}
/**
@@ -712,7 +712,6 @@ static void glock_work_func(struct work_struct *work)
finish_xmote(gl, gl->gl_reply);
drop_ref = 1;
}
- down_read(&gfs2_umount_flush_sem);
spin_lock(&gl->gl_spin);
if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
@@ -725,7 +724,6 @@ static void glock_work_func(struct work_struct *work)
}
run_queue(gl, 0);
spin_unlock(&gl->gl_spin);
- up_read(&gfs2_umount_flush_sem);
if (!delay ||
queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
gfs2_glock_put(gl);
@@ -750,10 +748,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
const struct gfs2_glock_operations *glops, int create,
struct gfs2_glock **glp)
{
+ struct super_block *s = sdp->sd_vfs;
struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
struct gfs2_glock *gl, *tmp;
unsigned int hash = gl_hash(sdp, &name);
- int error;
+ struct address_space *mapping;
read_lock(gl_lock_addr(hash));
gl = search_bucket(hash, sdp, &name);
@@ -765,7 +764,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
if (!create)
return -ENOENT;
- gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
+ if (glops->go_flags & GLOF_ASPACE)
+ gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
+ else
+ gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
if (!gl)
return -ENOMEM;
@@ -784,18 +786,18 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_tchange = jiffies;
gl->gl_object = NULL;
gl->gl_sbd = sdp;
- gl->gl_aspace = NULL;
INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
INIT_WORK(&gl->gl_delete, delete_work_func);
- /* If this glock protects actual on-disk data or metadata blocks,
- create a VFS inode to manage the pages/buffers holding them. */
- if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
- gl->gl_aspace = gfs2_aspace_get(sdp);
- if (!gl->gl_aspace) {
- error = -ENOMEM;
- goto fail;
- }
+ mapping = gfs2_glock2aspace(gl);
+ if (mapping) {
+ mapping->a_ops = &gfs2_meta_aops;
+ mapping->host = s->s_bdev->bd_inode;
+ mapping->flags = 0;
+ mapping_set_gfp_mask(mapping, GFP_NOFS);
+ mapping->assoc_mapping = NULL;
+ mapping->backing_dev_info = s->s_bdi;
+ mapping->writeback_index = 0;
}
write_lock(gl_lock_addr(hash));
@@ -812,10 +814,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
*glp = gl;
return 0;
-
-fail:
- kmem_cache_free(gfs2_glock_cachep, gl);
- return error;
}
/**
@@ -1510,35 +1508,10 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp)
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
- unsigned long t;
unsigned int x;
- int cont;
- t = jiffies;
-
- for (;;) {
- cont = 0;
- for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
- if (examine_bucket(clear_glock, sdp, x))
- cont = 1;
- }
-
- if (!cont)
- break;
-
- if (time_after_eq(jiffies,
- t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
- fs_warn(sdp, "Unmount seems to be stalled. "
- "Dumping lock state...\n");
- gfs2_dump_lockstate(sdp);
- t = jiffies;
- }
-
- down_write(&gfs2_umount_flush_sem);
- invalidate_inodes(sdp->sd_vfs);
- up_write(&gfs2_umount_flush_sem);
- msleep(10);
- }
+ for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
+ examine_bucket(clear_glock, sdp, x);
flush_workqueue(glock_workqueue);
wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
gfs2_dump_lockstate(sdp);
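
The polling/watchdog teardown (stall_secs, the umount flush semaphore) is replaced by reference counting: every glock freed via the DLM callback decrements sd_glock_disposal and wakes the waiter, and unmount simply sleeps until the count reaches zero. The handshake, extracted from the wait_event() above and the gdlm_ast() hunk later in this diff:

        static void glock_freed(struct gfs2_sbd *sdp)
        {
                if (atomic_dec_and_test(&sdp->sd_glock_disposal))
                        wake_up(&sdp->sd_glock_wait);
        }

        static void wait_for_glock_disposal(struct gfs2_sbd *sdp)
        {
                wait_event(sdp->sd_glock_wait,
                           atomic_read(&sdp->sd_glock_disposal) == 0);
        }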
@@ -1685,7 +1658,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
dtime *= 1000000/HZ; /* demote time in uSec */
if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
dtime = 0;
- gfs2_print_dbg(seq, "G: s:%s n:%u/%llu f:%s t:%s d:%s/%llu a:%d r:%d\n",
+ gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
state2str(gl->gl_state),
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number,
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index c0262fa..2bda191 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -180,6 +180,13 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
return gl->gl_state == LM_ST_SHARED;
}
+static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
+{
+ if (gl->gl_ops->go_flags & GLOF_ASPACE)
+ return (struct address_space *)(gl + 1);
+ return NULL;
+}
+
int gfs2_glock_get(struct gfs2_sbd *sdp,
u64 number, const struct gfs2_glock_operations *glops,
int create, struct gfs2_glock **glp);
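
gfs2_glock2aspace() depends on a co-allocation trick: glocks whose type needs a page cache come from a cache whose objects are a struct gfs2_glock immediately followed by a struct address_space, so gl + 1 points at the mapping. The idiom in miniature (illustrative types, not part of the patch):

        struct obj  { long state; };
        struct tail { long pages; };

        static struct tail *tail_of(struct obj *o, int has_tail)
        {
                /* valid only for objects allocated with
                 * size == sizeof(struct obj) + sizeof(struct tail) */
                return has_tail ? (struct tail *)(o + 1) : NULL;
        }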
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 78554ac..38e3749 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -87,7 +87,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
static void rgrp_go_sync(struct gfs2_glock *gl)
{
- struct address_space *metamapping = gl->gl_aspace->i_mapping;
+ struct address_space *metamapping = gfs2_glock2aspace(gl);
int error;
if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
@@ -113,7 +113,7 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
BUG_ON(!(flags & DIO_METADATA));
gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
@@ -134,7 +134,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
static void inode_go_sync(struct gfs2_glock *gl)
{
struct gfs2_inode *ip = gl->gl_object;
- struct address_space *metamapping = gl->gl_aspace->i_mapping;
+ struct address_space *metamapping = gfs2_glock2aspace(gl);
int error;
if (ip && !S_ISREG(ip->i_inode.i_mode))
@@ -183,7 +183,7 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
if (flags & DIO_METADATA) {
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
truncate_inode_pages(mapping, 0);
if (ip) {
set_bit(GIF_INVALID, &ip->i_flags);
@@ -282,7 +282,8 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
static int rgrp_go_demote_ok(const struct gfs2_glock *gl)
{
- return !gl->gl_aspace->i_mapping->nrpages;
+ const struct address_space *mapping = (const struct address_space *)(gl + 1);
+ return !mapping->nrpages;
}
/**
@@ -387,8 +388,7 @@ static void iopen_go_callback(struct gfs2_glock *gl)
struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
- gl->gl_state == LM_ST_SHARED &&
- ip && test_bit(GIF_USER, &ip->i_flags)) {
+ gl->gl_state == LM_ST_SHARED && ip) {
gfs2_glock_hold(gl);
if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
gfs2_glock_put_nolock(gl);
@@ -407,6 +407,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
.go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE,
.go_min_hold_time = HZ / 5,
+ .go_flags = GLOF_ASPACE,
};
const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -418,6 +419,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
.go_dump = gfs2_rgrp_dump,
.go_type = LM_TYPE_RGRP,
.go_min_hold_time = HZ / 5,
+ .go_flags = GLOF_ASPACE,
};
const struct gfs2_glock_operations gfs2_trans_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index bc0ad15..b8025e5 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -162,6 +162,8 @@ struct gfs2_glock_operations {
void (*go_callback) (struct gfs2_glock *gl);
const int go_type;
const unsigned long go_min_hold_time;
+ const unsigned long go_flags;
+#define GLOF_ASPACE 1
};
enum {
@@ -225,7 +227,6 @@ struct gfs2_glock {
struct gfs2_sbd *gl_sbd;
- struct inode *gl_aspace;
struct list_head gl_ail_list;
atomic_t gl_ail_count;
struct delayed_work gl_work;
@@ -258,7 +259,6 @@ enum {
GIF_INVALID = 0,
GIF_QD_LOCKED = 1,
GIF_SW_PAGED = 3,
- GIF_USER = 4, /* user inode, not metadata addr space */
};
@@ -451,7 +451,6 @@ struct gfs2_tune {
unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
unsigned int gt_new_files_jdata;
unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
- unsigned int gt_stall_secs; /* Detects trouble! */
unsigned int gt_complain_secs;
unsigned int gt_statfs_quantum;
unsigned int gt_statfs_slow;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 6e220f4..b1bf269 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -45,7 +45,7 @@ static int iget_test(struct inode *inode, void *opaque)
struct gfs2_inode *ip = GFS2_I(inode);
u64 *no_addr = opaque;
- if (ip->i_no_addr == *no_addr && test_bit(GIF_USER, &ip->i_flags))
+ if (ip->i_no_addr == *no_addr)
return 1;
return 0;
@@ -58,7 +58,6 @@ static int iget_set(struct inode *inode, void *opaque)
inode->i_ino = (unsigned long)*no_addr;
ip->i_no_addr = *no_addr;
- set_bit(GIF_USER, &ip->i_flags);
return 0;
}
@@ -84,7 +83,7 @@ static int iget_skip_test(struct inode *inode, void *opaque)
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_skip_data *data = opaque;
- if (ip->i_no_addr == data->no_addr && test_bit(GIF_USER, &ip->i_flags)){
+ if (ip->i_no_addr == data->no_addr) {
if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)){
data->skipped = 1;
return 0;
@@ -103,7 +102,6 @@ static int iget_skip_set(struct inode *inode, void *opaque)
return 1;
inode->i_ino = (unsigned long)(data->no_addr);
ip->i_no_addr = data->no_addr;
- set_bit(GIF_USER, &ip->i_flags);
return 0;
}
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 0e5e0e7..569b462 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -30,7 +30,10 @@ static void gdlm_ast(void *arg)
switch (gl->gl_lksb.sb_status) {
case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
- kmem_cache_free(gfs2_glock_cachep, gl);
+ if (gl->gl_ops->go_flags & GLOF_ASPACE)
+ kmem_cache_free(gfs2_glock_aspace_cachep, gl);
+ else
+ kmem_cache_free(gfs2_glock_cachep, gl);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_glock_wait);
return;
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index de97632..adc260f 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -528,9 +528,9 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
gfs2_pin(sdp, bd->bd_bh);
tr->tr_num_databuf_new++;
sdp->sd_log_num_databuf++;
- list_add(&le->le_list, &sdp->sd_log_le_databuf);
+ list_add_tail(&le->le_list, &sdp->sd_log_le_databuf);
} else {
- list_add(&le->le_list, &sdp->sd_log_le_ordered);
+ list_add_tail(&le->le_list, &sdp->sd_log_le_ordered);
}
out:
gfs2_log_unlock(sdp);
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 5b31f77..a88fadc 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -52,6 +52,22 @@ static void gfs2_init_glock_once(void *foo)
atomic_set(&gl->gl_ail_count, 0);
}
+static void gfs2_init_gl_aspace_once(void *foo)
+{
+ struct gfs2_glock *gl = foo;
+ struct address_space *mapping = (struct address_space *)(gl + 1);
+
+ gfs2_init_glock_once(gl);
+ memset(mapping, 0, sizeof(*mapping));
+ INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+ spin_lock_init(&mapping->tree_lock);
+ spin_lock_init(&mapping->i_mmap_lock);
+ INIT_LIST_HEAD(&mapping->private_list);
+ spin_lock_init(&mapping->private_lock);
+ INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+ INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+}
+
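
Slab constructors run once per object when a slab page is populated, not on every kmem_cache_alloc(), so only state that every user restores before freeing (locks, list heads, the radix tree root) may be initialized there; that is why the new aspace cache gets its own init-once that also constructs the embedded mapping. A minimal model of the pattern:

        struct demo {
                spinlock_t lock;
                struct list_head list;
        };

        static void demo_init_once(void *foo)
        {
                struct demo *d = foo;

                spin_lock_init(&d->lock);
                INIT_LIST_HEAD(&d->list);
        }

        static struct kmem_cache *demo_cachep;

        static int __init demo_cache_init(void)
        {
                demo_cachep = kmem_cache_create("demo", sizeof(struct demo),
                                                0, 0, demo_init_once);
                return demo_cachep ? 0 : -ENOMEM;
        }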
/**
* init_gfs2_fs - Register GFS2 as a filesystem
*
@@ -78,6 +94,14 @@ static int __init init_gfs2_fs(void)
if (!gfs2_glock_cachep)
goto fail;
+ gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock (aspace)",
+ sizeof(struct gfs2_glock) +
+ sizeof(struct address_space),
+ 0, 0, gfs2_init_gl_aspace_once);
+
+ if (!gfs2_glock_aspace_cachep)
+ goto fail;
+
gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
sizeof(struct gfs2_inode),
0, SLAB_RECLAIM_ACCOUNT|
@@ -144,6 +168,9 @@ fail:
if (gfs2_inode_cachep)
kmem_cache_destroy(gfs2_inode_cachep);
+ if (gfs2_glock_aspace_cachep)
+ kmem_cache_destroy(gfs2_glock_aspace_cachep);
+
if (gfs2_glock_cachep)
kmem_cache_destroy(gfs2_glock_cachep);
@@ -169,6 +196,7 @@ static void __exit exit_gfs2_fs(void)
kmem_cache_destroy(gfs2_rgrpd_cachep);
kmem_cache_destroy(gfs2_bufdata_cachep);
kmem_cache_destroy(gfs2_inode_cachep);
+ kmem_cache_destroy(gfs2_glock_aspace_cachep);
kmem_cache_destroy(gfs2_glock_cachep);
gfs2_sys_uninit();
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 6f68a5f..0bb12c8 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -93,49 +93,13 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
return err;
}
-static const struct address_space_operations aspace_aops = {
+const struct address_space_operations gfs2_meta_aops = {
.writepage = gfs2_aspace_writepage,
.releasepage = gfs2_releasepage,
.sync_page = block_sync_page,
};
/**
- * gfs2_aspace_get - Create and initialize a struct inode structure
- * @sdp: the filesystem the aspace is in
- *
- * Right now a struct inode is just a struct inode. Maybe Linux
- * will supply a more lightweight address space construct (that works)
- * in the future.
- *
- * Make sure pages/buffers in this aspace aren't in high memory.
- *
- * Returns: the aspace
- */
-
-struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
-{
- struct inode *aspace;
- struct gfs2_inode *ip;
-
- aspace = new_inode(sdp->sd_vfs);
- if (aspace) {
- mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
- aspace->i_mapping->a_ops = &aspace_aops;
- aspace->i_size = MAX_LFS_FILESIZE;
- ip = GFS2_I(aspace);
- clear_bit(GIF_USER, &ip->i_flags);
- insert_inode_hash(aspace);
- }
- return aspace;
-}
-
-void gfs2_aspace_put(struct inode *aspace)
-{
- remove_inode_hash(aspace);
- iput(aspace);
-}
-
-/**
* gfs2_meta_sync - Sync all buffers associated with a glock
* @gl: The glock
*
@@ -143,7 +107,7 @@ void gfs2_aspace_put(struct inode *aspace)
void gfs2_meta_sync(struct gfs2_glock *gl)
{
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
int error;
filemap_fdatawrite(mapping);
@@ -164,7 +128,7 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
struct gfs2_sbd *sdp = gl->gl_sbd;
struct page *page;
struct buffer_head *bh;
@@ -344,8 +308,10 @@ void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
{
- struct gfs2_sbd *sdp = GFS2_SB(bh->b_page->mapping->host);
+ struct address_space *mapping = bh->b_page->mapping;
+ struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct gfs2_bufdata *bd = bh->b_private;
+
if (test_clear_buffer_pinned(bh)) {
list_del_init(&bd->bd_le.le_list);
if (meta) {
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index de270c2..6a1d9ba 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -37,8 +37,16 @@ static inline void gfs2_buffer_copy_tail(struct buffer_head *to_bh,
0, from_head - to_head);
}
-struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp);
-void gfs2_aspace_put(struct inode *aspace);
+extern const struct address_space_operations gfs2_meta_aops;
+
+static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
+{
+ struct inode *inode = mapping->host;
+ if (mapping->a_ops == &gfs2_meta_aops)
+ return (((struct gfs2_glock *)mapping) - 1)->gl_sbd;
+ else
+ return inode->i_sb->s_fs_info;
+}
void gfs2_meta_sync(struct gfs2_glock *gl);
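
gfs2_mapping2sbd() uses the a_ops pointer as a type tag: only embedded metadata mappings carry gfs2_meta_aops, and for those the owning glock sits one object back. Written as its own helper (not part of the patch, just the inverse of gfs2_glock2aspace()):

        static inline struct gfs2_glock *gfs2_mapping2glock(struct address_space *m)
        {
                /* the mapping is the (gl + 1) trailing object */
                return ((struct gfs2_glock *)m) - 1;
        }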
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index a86ed63..a054b52 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -65,7 +65,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
gt->gt_quota_scale_den = 1;
gt->gt_new_files_jdata = 0;
gt->gt_max_readahead = 1 << 18;
- gt->gt_stall_secs = 600;
gt->gt_complain_secs = 10;
}
@@ -1241,10 +1240,9 @@ fail_sb:
fail_locking:
init_locking(sdp, &mount_gh, UNDO);
fail_lm:
+ invalidate_inodes(sb);
gfs2_gl_hash_clear(sdp);
gfs2_lm_unmount(sdp);
- while (invalidate_inodes(sb))
- yield();
fail_sys:
gfs2_sys_fs_del(sdp);
fail:
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 84350e1..4e64352 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -976,122 +976,62 @@ out:
}
/**
- * gfs2_readlinki - return the contents of a symlink
- * @ip: the symlink's inode
- * @buf: a pointer to the buffer to be filled
- * @len: a pointer to the length of @buf
+ * gfs2_follow_link - Follow a symbolic link
+ * @dentry: The dentry of the link
+ * @nd: Data that we pass to vfs_follow_link()
*
- * If @buf is too small, a piece of memory is kmalloc()ed and needs
- * to be freed by the caller.
+ * This can handle symlinks of any size.
*
- * Returns: errno
+ * Returns: 0 on success or error code
*/
-static int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
+static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
{
+ struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
struct gfs2_holder i_gh;
struct buffer_head *dibh;
unsigned int x;
+ char *buf;
int error;
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
error = gfs2_glock_nq(&i_gh);
if (error) {
gfs2_holder_uninit(&i_gh);
- return error;
+ nd_set_link(nd, ERR_PTR(error));
+ return NULL;
}
if (!ip->i_disksize) {
gfs2_consist_inode(ip);
- error = -EIO;
+ buf = ERR_PTR(-EIO);
goto out;
}
error = gfs2_meta_inode_buffer(ip, &dibh);
- if (error)
+ if (error) {
+ buf = ERR_PTR(error);
goto out;
-
- x = ip->i_disksize + 1;
- if (x > *len) {
- *buf = kmalloc(x, GFP_NOFS);
- if (!*buf) {
- error = -ENOMEM;
- goto out_brelse;
- }
}
- memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
- *len = x;
-
-out_brelse:
+ x = ip->i_disksize + 1;
+ buf = kmalloc(x, GFP_NOFS);
+ if (!buf)
+ buf = ERR_PTR(-ENOMEM);
+ else
+ memcpy(buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
brelse(dibh);
out:
gfs2_glock_dq_uninit(&i_gh);
- return error;
-}
-
-/**
- * gfs2_readlink - Read the value of a symlink
- * @dentry: the symlink
- * @buf: the buffer to read the symlink data into
- * @size: the size of the buffer
- *
- * Returns: errno
- */
-
-static int gfs2_readlink(struct dentry *dentry, char __user *user_buf,
- int user_size)
-{
- struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
- char array[GFS2_FAST_NAME_SIZE], *buf = array;
- unsigned int len = GFS2_FAST_NAME_SIZE;
- int error;
-
- error = gfs2_readlinki(ip, &buf, &len);
- if (error)
- return error;
-
- if (user_size > len - 1)
- user_size = len - 1;
-
- if (copy_to_user(user_buf, buf, user_size))
- error = -EFAULT;
- else
- error = user_size;
-
- if (buf != array)
- kfree(buf);
-
- return error;
+ nd_set_link(nd, buf);
+ return NULL;
}
-/**
- * gfs2_follow_link - Follow a symbolic link
- * @dentry: The dentry of the link
- * @nd: Data that we pass to vfs_follow_link()
- *
- * This can handle symlinks of any size. It is optimised for symlinks
- * under GFS2_FAST_NAME_SIZE.
- *
- * Returns: 0 on success or error code
- */
-
-static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
{
- struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
- char array[GFS2_FAST_NAME_SIZE], *buf = array;
- unsigned int len = GFS2_FAST_NAME_SIZE;
- int error;
-
- error = gfs2_readlinki(ip, &buf, &len);
- if (!error) {
- error = vfs_follow_link(nd, buf);
- if (buf != array)
- kfree(buf);
- } else
- path_put(&nd->path);
-
- return ERR_PTR(error);
+ char *s = nd_get_link(nd);
+ if (!IS_ERR(s))
+ kfree(s);
}
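
The symlink rework adopts the standard VFS contract: ->follow_link publishes a NUL-terminated buffer (or an ERR_PTR) via nd_set_link(), generic_readlink() reuses that path for readlink(2), and ->put_link frees whatever was published. The contract in miniature (demo_* names hypothetical):

        static void *demo_follow_link(struct dentry *dentry, struct nameidata *nd)
        {
                char *s = kstrdup("target/path", GFP_KERNEL);   /* illustrative */

                nd_set_link(nd, s ? s : ERR_PTR(-ENOMEM));
                return NULL;            /* cookie later passed to ->put_link() */
        }

        static void demo_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
        {
                char *s = nd_get_link(nd);

                if (!IS_ERR(s))
                        kfree(s);
        }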
/**
@@ -1426,8 +1366,9 @@ const struct inode_operations gfs2_dir_iops = {
};
const struct inode_operations gfs2_symlink_iops = {
- .readlink = gfs2_readlink,
+ .readlink = generic_readlink,
.follow_link = gfs2_follow_link,
+ .put_link = gfs2_put_link,
.permission = gfs2_permission,
.setattr = gfs2_setattr,
.getattr = gfs2_getattr,
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index a8c2bcd..50aac60 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -22,6 +22,7 @@
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
+#include <linux/writeback.h>
#include "gfs2.h"
#include "incore.h"
@@ -711,7 +712,7 @@ void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
* Returns: errno
*/
-static int gfs2_write_inode(struct inode *inode, int sync)
+static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -722,8 +723,7 @@ static int gfs2_write_inode(struct inode *inode, int sync)
int ret = 0;
/* Check this is a "normal" inode, etc */
- if (!test_bit(GIF_USER, &ip->i_flags) ||
- (current->flags & PF_MEMALLOC))
+ if (current->flags & PF_MEMALLOC)
return 0;
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (ret)
@@ -746,7 +746,7 @@ static int gfs2_write_inode(struct inode *inode, int sync)
do_unlock:
gfs2_glock_dq_uninit(&gh);
do_flush:
- if (sync != 0)
+ if (wbc->sync_mode == WB_SYNC_ALL)
gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
return ret;
}
@@ -860,6 +860,7 @@ restart:
gfs2_clear_rgrpd(sdp);
gfs2_jindex_free(sdp);
/* Take apart glock structures and buffer lists */
+ invalidate_inodes(sdp->sd_vfs);
gfs2_gl_hash_clear(sdp);
/* Unmount the locking protocol */
gfs2_lm_unmount(sdp);
@@ -1194,7 +1195,7 @@ static void gfs2_drop_inode(struct inode *inode)
{
struct gfs2_inode *ip = GFS2_I(inode);
- if (test_bit(GIF_USER, &ip->i_flags) && inode->i_nlink) {
+ if (inode->i_nlink) {
struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
clear_nlink(inode);
@@ -1212,18 +1213,12 @@ static void gfs2_clear_inode(struct inode *inode)
{
struct gfs2_inode *ip = GFS2_I(inode);
- /* This tells us its a "real" inode and not one which only
- * serves to contain an address space (see rgrp.c, meta_io.c)
- * which therefore doesn't have its own glocks.
- */
- if (test_bit(GIF_USER, &ip->i_flags)) {
- ip->i_gl->gl_object = NULL;
- gfs2_glock_put(ip->i_gl);
- ip->i_gl = NULL;
- if (ip->i_iopen_gh.gh_gl) {
- ip->i_iopen_gh.gh_gl->gl_object = NULL;
- gfs2_glock_dq_uninit(&ip->i_iopen_gh);
- }
+ ip->i_gl->gl_object = NULL;
+ gfs2_glock_put(ip->i_gl);
+ ip->i_gl = NULL;
+ if (ip->i_iopen_gh.gh_gl) {
+ ip->i_iopen_gh.gh_gl->gl_object = NULL;
+ gfs2_glock_dq_uninit(&ip->i_iopen_gh);
}
}
@@ -1358,9 +1353,6 @@ static void gfs2_delete_inode(struct inode *inode)
struct gfs2_holder gh;
int error;
- if (!test_bit(GIF_USER, &ip->i_flags))
- goto out;
-
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (unlikely(error)) {
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 4496cc3..b5f1a46 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -478,7 +478,6 @@ TUNE_ATTR(complain_secs, 0);
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(quota_simul_sync, 1);
-TUNE_ATTR(stall_secs, 1);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
@@ -491,7 +490,6 @@ static struct attribute *tune_attrs[] = {
&tune_attr_complain_secs.attr,
&tune_attr_statfs_slow.attr,
&tune_attr_quota_simul_sync.attr,
- &tune_attr_stall_secs.attr,
&tune_attr_statfs_quantum.attr,
&tune_attr_quota_scale.attr,
&tune_attr_new_files_jdata.attr,
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index f6a7efa..226f2bf 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -21,6 +21,7 @@
#include "util.h"
struct kmem_cache *gfs2_glock_cachep __read_mostly;
+struct kmem_cache *gfs2_glock_aspace_cachep __read_mostly;
struct kmem_cache *gfs2_inode_cachep __read_mostly;
struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
struct kmem_cache *gfs2_rgrpd_cachep __read_mostly;
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 33e96b0..b432e04 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -145,6 +145,7 @@ gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__);
extern struct kmem_cache *gfs2_glock_cachep;
+extern struct kmem_cache *gfs2_glock_aspace_cachep;
extern struct kmem_cache *gfs2_inode_cachep;
extern struct kmem_cache *gfs2_bufdata_cachep;
extern struct kmem_cache *gfs2_rgrpd_cachep;
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 052387e..fe35e3b 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -188,7 +188,7 @@ extern const struct address_space_operations hfs_btree_aops;
extern struct inode *hfs_new_inode(struct inode *, struct qstr *, int);
extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
-extern int hfs_write_inode(struct inode *, int);
+extern int hfs_write_inode(struct inode *, struct writeback_control *);
extern int hfs_inode_setattr(struct dentry *, struct iattr *);
extern void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
__be32 log_size, __be32 phys_size, u32 clump_size);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index a1cbff2..14f5cb1 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -381,7 +381,7 @@ void hfs_inode_write_fork(struct inode *inode, struct hfs_extent *ext,
HFS_SB(inode->i_sb)->alloc_blksz);
}
-int hfs_write_inode(struct inode *inode, int unused)
+int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct inode *main_inode = inode;
struct hfs_find_data fd;
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 43022f3..74b473a 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -87,7 +87,8 @@ bad_inode:
return ERR_PTR(err);
}
-static int hfsplus_write_inode(struct inode *inode, int unused)
+static int hfsplus_write_inode(struct inode *inode,
+ struct writeback_control *wbc)
{
struct hfsplus_vh *vhdr;
int ret = 0;
diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c
index 1aa88c4..6a2f04b 100644
--- a/fs/hpfs/anode.c
+++ b/fs/hpfs/anode.c
@@ -353,7 +353,7 @@ int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
}
int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos,
- unsigned len, char *buf)
+ unsigned len, const char *buf)
{
struct buffer_head *bh;
char *data;
diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c
index 940d6d1..67d9d36 100644
--- a/fs/hpfs/dentry.c
+++ b/fs/hpfs/dentry.c
@@ -20,8 +20,8 @@ static int hpfs_hash_dentry(struct dentry *dentry, struct qstr *qstr)
if (l == 1) if (qstr->name[0]=='.') goto x;
if (l == 2) if (qstr->name[0]=='.' || qstr->name[1]=='.') goto x;
- hpfs_adjust_length((char *)qstr->name, &l);
- /*if (hpfs_chk_name((char *)qstr->name,&l))*/
+ hpfs_adjust_length(qstr->name, &l);
+ /*if (hpfs_chk_name(qstr->name,&l))*/
/*return -ENAMETOOLONG;*/
/*return -ENOENT;*/
x:
@@ -38,14 +38,16 @@ static int hpfs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qst
{
unsigned al=a->len;
unsigned bl=b->len;
- hpfs_adjust_length((char *)a->name, &al);
- /*hpfs_adjust_length((char *)b->name, &bl);*/
+ hpfs_adjust_length(a->name, &al);
+ /*hpfs_adjust_length(b->name, &bl);*/
/* 'a' is the qstr of an already existing dentry, so the name
* must be valid. 'b' must be validated first.
*/
- if (hpfs_chk_name((char *)b->name, &bl)) return 1;
- if (hpfs_compare_names(dentry->d_sb, (char *)a->name, al, (char *)b->name, bl, 0)) return 1;
+ if (hpfs_chk_name(b->name, &bl))
+ return 1;
+ if (hpfs_compare_names(dentry->d_sb, a->name, al, b->name, bl, 0))
+ return 1;
return 0;
}
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 8865c94..26e3964 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -59,7 +59,7 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
struct hpfs_dirent *de;
int lc;
long old_pos;
- char *tempname;
+ unsigned char *tempname;
int c1, c2 = 0;
int ret = 0;
@@ -158,11 +158,11 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3);
if (filldir(dirent, tempname, de->namelen, old_pos, de->fnode, DT_UNKNOWN) < 0) {
filp->f_pos = old_pos;
- if (tempname != (char *)de->name) kfree(tempname);
+ if (tempname != de->name) kfree(tempname);
hpfs_brelse4(&qbh);
goto out;
}
- if (tempname != (char *)de->name) kfree(tempname);
+ if (tempname != de->name) kfree(tempname);
hpfs_brelse4(&qbh);
}
out:
@@ -187,7 +187,7 @@ out:
struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
- const char *name = dentry->d_name.name;
+ const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
struct quad_buffer_head qbh;
struct hpfs_dirent *de;
@@ -197,7 +197,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
struct hpfs_inode_info *hpfs_result;
lock_kernel();
- if ((err = hpfs_chk_name((char *)name, &len))) {
+ if ((err = hpfs_chk_name(name, &len))) {
if (err == -ENAMETOOLONG) {
unlock_kernel();
return ERR_PTR(-ENAMETOOLONG);
@@ -209,7 +209,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
* '.' and '..' will never be passed here.
*/
- de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *) name, len, NULL, &qbh);
+ de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, NULL, &qbh);
/*
* This is not really a bailout, just means file not found.
@@ -250,7 +250,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
hpfs_result = hpfs_i(result);
if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino;
- hpfs_decide_conv(result, (char *)name, len);
+ hpfs_decide_conv(result, name, len);
if (de->has_acl || de->has_xtd_perm) if (!(dir->i_sb->s_flags & MS_RDONLY)) {
hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures");
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c
index fe83c2b..9b2ffad 100644
--- a/fs/hpfs/dnode.c
+++ b/fs/hpfs/dnode.c
@@ -158,7 +158,8 @@ static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno
/* Add an entry to dnode and don't care if it grows over 2048 bytes */
-struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, unsigned char *name,
+struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d,
+ const unsigned char *name,
unsigned namelen, secno down_ptr)
{
struct hpfs_dirent *de;
@@ -223,7 +224,7 @@ static void fix_up_ptrs(struct super_block *s, struct dnode *d)
/* Add an entry to dnode and do dnode splitting if required */
static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
- unsigned char *name, unsigned namelen,
+ const unsigned char *name, unsigned namelen,
struct hpfs_dirent *new_de, dnode_secno down_ptr)
{
struct quad_buffer_head qbh, qbh1, qbh2;
@@ -231,7 +232,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
dnode_secno adno, rdno;
struct hpfs_dirent *de;
struct hpfs_dirent nde;
- char *nname;
+ unsigned char *nname;
int h;
int pos;
struct buffer_head *bh;
@@ -305,7 +306,9 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
pos++;
}
copy_de(new_de = &nde, de);
- memcpy(name = nname, de->name, namelen = de->namelen);
+ memcpy(nname, de->name, de->namelen);
+ name = nname;
+ namelen = de->namelen;
for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, 4);
down_ptr = adno;
set_last_pointer(i->i_sb, ad, de->down ? de_down_pointer(de) : 0);
@@ -368,7 +371,8 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
* I hope, now it's finally bug-free.
*/
-int hpfs_add_dirent(struct inode *i, unsigned char *name, unsigned namelen,
+int hpfs_add_dirent(struct inode *i,
+ const unsigned char *name, unsigned namelen,
struct hpfs_dirent *new_de, int cdepth)
{
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
@@ -897,7 +901,8 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp,
/* Find a dirent in tree */
-struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno, char *name, unsigned len,
+struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno,
+ const unsigned char *name, unsigned len,
dnode_secno *dd, struct quad_buffer_head *qbh)
{
struct dnode *dnode;
@@ -988,8 +993,8 @@ void hpfs_remove_dtree(struct super_block *s, dnode_secno dno)
struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
struct fnode *f, struct quad_buffer_head *qbh)
{
- char *name1;
- char *name2;
+ unsigned char *name1;
+ unsigned char *name2;
int name1len, name2len;
struct dnode *d;
dnode_secno dno, downd;
diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c
index 547a838..45e53d9 100644
--- a/fs/hpfs/ea.c
+++ b/fs/hpfs/ea.c
@@ -62,8 +62,8 @@ static char *get_indirect_ea(struct super_block *s, int ano, secno a, int size)
return ret;
}
-static void set_indirect_ea(struct super_block *s, int ano, secno a, char *data,
- int size)
+static void set_indirect_ea(struct super_block *s, int ano, secno a,
+ const char *data, int size)
{
hpfs_ea_write(s, a, ano, 0, size, data);
}
@@ -186,7 +186,8 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
* This driver can't change sizes of eas ('cause I just don't need it).
*/
-void hpfs_set_ea(struct inode *inode, struct fnode *fnode, char *key, char *data, int size)
+void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
+ const char *data, int size)
{
fnode_secno fno = inode->i_ino;
struct super_block *s = inode->i_sb;
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 701ca54..97bf738 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -215,7 +215,7 @@ secno hpfs_bplus_lookup(struct super_block *, struct inode *, struct bplus_heade
secno hpfs_add_sector_to_btree(struct super_block *, secno, int, unsigned);
void hpfs_remove_btree(struct super_block *, struct bplus_header *);
int hpfs_ea_read(struct super_block *, secno, int, unsigned, unsigned, char *);
-int hpfs_ea_write(struct super_block *, secno, int, unsigned, unsigned, char *);
+int hpfs_ea_write(struct super_block *, secno, int, unsigned, unsigned, const char *);
void hpfs_ea_remove(struct super_block *, secno, int, unsigned);
void hpfs_truncate_btree(struct super_block *, secno, int, unsigned);
void hpfs_remove_fnode(struct super_block *, fnode_secno fno);
@@ -244,13 +244,17 @@ extern const struct file_operations hpfs_dir_ops;
void hpfs_add_pos(struct inode *, loff_t *);
void hpfs_del_pos(struct inode *, loff_t *);
-struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *, unsigned char *, unsigned, secno);
-int hpfs_add_dirent(struct inode *, unsigned char *, unsigned, struct hpfs_dirent *, int);
+struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *,
+ const unsigned char *, unsigned, secno);
+int hpfs_add_dirent(struct inode *, const unsigned char *, unsigned,
+ struct hpfs_dirent *, int);
int hpfs_remove_dirent(struct inode *, dnode_secno, struct hpfs_dirent *, struct quad_buffer_head *, int);
void hpfs_count_dnodes(struct super_block *, dnode_secno, int *, int *, int *);
dnode_secno hpfs_de_as_down_as_possible(struct super_block *, dnode_secno dno);
struct hpfs_dirent *map_pos_dirent(struct inode *, loff_t *, struct quad_buffer_head *);
-struct hpfs_dirent *map_dirent(struct inode *, dnode_secno, char *, unsigned, dnode_secno *, struct quad_buffer_head *);
+struct hpfs_dirent *map_dirent(struct inode *, dnode_secno,
+ const unsigned char *, unsigned, dnode_secno *,
+ struct quad_buffer_head *);
void hpfs_remove_dtree(struct super_block *, dnode_secno);
struct hpfs_dirent *map_fnode_dirent(struct super_block *, fnode_secno, struct fnode *, struct quad_buffer_head *);
@@ -259,7 +263,8 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *, fnode_secno, struct f
void hpfs_ea_ext_remove(struct super_block *, secno, int, unsigned);
int hpfs_read_ea(struct super_block *, struct fnode *, char *, char *, int);
char *hpfs_get_ea(struct super_block *, struct fnode *, char *, int *);
-void hpfs_set_ea(struct inode *, struct fnode *, char *, char *, int);
+void hpfs_set_ea(struct inode *, struct fnode *, const char *,
+ const char *, int);
/* file.c */
@@ -282,7 +287,7 @@ void hpfs_delete_inode(struct inode *);
unsigned *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *);
unsigned *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *);
-char *hpfs_load_code_page(struct super_block *, secno);
+unsigned char *hpfs_load_code_page(struct super_block *, secno);
secno *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **);
struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **);
@@ -292,12 +297,13 @@ dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino);
/* name.c */
unsigned char hpfs_upcase(unsigned char *, unsigned char);
-int hpfs_chk_name(unsigned char *, unsigned *);
-char *hpfs_translate_name(struct super_block *, unsigned char *, unsigned, int, int);
-int hpfs_compare_names(struct super_block *, unsigned char *, unsigned, unsigned char *, unsigned, int);
-int hpfs_is_name_long(unsigned char *, unsigned);
-void hpfs_adjust_length(unsigned char *, unsigned *);
-void hpfs_decide_conv(struct inode *, unsigned char *, unsigned);
+int hpfs_chk_name(const unsigned char *, unsigned *);
+unsigned char *hpfs_translate_name(struct super_block *, unsigned char *, unsigned, int, int);
+int hpfs_compare_names(struct super_block *, const unsigned char *, unsigned,
+ const unsigned char *, unsigned, int);
+int hpfs_is_name_long(const unsigned char *, unsigned);
+void hpfs_adjust_length(const unsigned char *, unsigned *);
+void hpfs_decide_conv(struct inode *, const unsigned char *, unsigned);
/* namei.c */
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index fe703ae..ff90aff 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -46,7 +46,7 @@ void hpfs_read_inode(struct inode *i)
struct fnode *fnode;
struct super_block *sb = i->i_sb;
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
- unsigned char *ea;
+ void *ea;
int ea_size;
if (!(fnode = hpfs_map_fnode(sb, i->i_ino, &bh))) {
@@ -112,7 +112,7 @@ void hpfs_read_inode(struct inode *i)
}
}
if (fnode->dirflag) {
- unsigned n_dnodes, n_subdirs;
+ int n_dnodes, n_subdirs;
i->i_mode |= S_IFDIR;
i->i_op = &hpfs_dir_iops;
i->i_fop = &hpfs_dir_ops;
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index c472458..840d033 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -35,7 +35,7 @@ unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
* lowercasing table
*/
-char *hpfs_load_code_page(struct super_block *s, secno cps)
+unsigned char *hpfs_load_code_page(struct super_block *s, secno cps)
{
struct buffer_head *bh;
secno cpds;
@@ -71,7 +71,7 @@ char *hpfs_load_code_page(struct super_block *s, secno cps)
brelse(bh);
return NULL;
}
- ptr = (char *)cpd + cpd->offs[cpi] + 6;
+ ptr = (unsigned char *)cpd + cpd->offs[cpi] + 6;
if (!(cp_table = kmalloc(256, GFP_KERNEL))) {
printk("HPFS: out of memory for code page table\n");
brelse(bh);
@@ -217,7 +217,7 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
if ((dnode = hpfs_map_4sectors(s, secno, qbh, DNODE_RD_AHEAD)))
if (hpfs_sb(s)->sb_chk) {
unsigned p, pp = 0;
- unsigned char *d = (char *)dnode;
+ unsigned char *d = (unsigned char *)dnode;
int b = 0;
if (dnode->magic != DNODE_MAGIC) {
hpfs_error(s, "bad magic on dnode %08x", secno);
diff --git a/fs/hpfs/name.c b/fs/hpfs/name.c
index 1f4a964..f24736d 100644
--- a/fs/hpfs/name.c
+++ b/fs/hpfs/name.c
@@ -8,16 +8,16 @@
#include "hpfs_fn.h"
-static char *text_postfix[]={
+static const char *text_postfix[]={
".ASM", ".BAS", ".BAT", ".C", ".CC", ".CFG", ".CMD", ".CON", ".CPP", ".DEF",
".DOC", ".DPR", ".ERX", ".H", ".HPP", ".HTM", ".HTML", ".JAVA", ".LOG", ".PAS",
".RC", ".TEX", ".TXT", ".Y", ""};
-static char *text_prefix[]={
+static const char *text_prefix[]={
"AUTOEXEC.", "CHANGES", "COPYING", "CONFIG.", "CREDITS", "FAQ", "FILE_ID.DIZ",
"MAKEFILE", "READ.ME", "README", "TERMCAP", ""};
-void hpfs_decide_conv(struct inode *inode, unsigned char *name, unsigned len)
+void hpfs_decide_conv(struct inode *inode, const unsigned char *name, unsigned len)
{
struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
int i;
@@ -71,7 +71,7 @@ static inline unsigned char locase(unsigned char *dir, unsigned char a)
return dir[a];
}
-int hpfs_chk_name(unsigned char *name, unsigned *len)
+int hpfs_chk_name(const unsigned char *name, unsigned *len)
{
int i;
if (*len > 254) return -ENAMETOOLONG;
@@ -83,10 +83,10 @@ int hpfs_chk_name(unsigned char *name, unsigned *len)
return 0;
}
-char *hpfs_translate_name(struct super_block *s, unsigned char *from,
+unsigned char *hpfs_translate_name(struct super_block *s, unsigned char *from,
unsigned len, int lc, int lng)
{
- char *to;
+ unsigned char *to;
int i;
if (hpfs_sb(s)->sb_chk >= 2) if (hpfs_is_name_long(from, len) != lng) {
printk("HPFS: Long name flag mismatch - name ");
@@ -103,8 +103,9 @@ char *hpfs_translate_name(struct super_block *s, unsigned char *from,
return to;
}
-int hpfs_compare_names(struct super_block *s, unsigned char *n1, unsigned l1,
- unsigned char *n2, unsigned l2, int last)
+int hpfs_compare_names(struct super_block *s,
+ const unsigned char *n1, unsigned l1,
+ const unsigned char *n2, unsigned l2, int last)
{
unsigned l = l1 < l2 ? l1 : l2;
unsigned i;
@@ -120,7 +121,7 @@ int hpfs_compare_names(struct super_block *s, unsigned char *n1, unsigned l1,
return 0;
}
-int hpfs_is_name_long(unsigned char *name, unsigned len)
+int hpfs_is_name_long(const unsigned char *name, unsigned len)
{
int i,j;
for (i = 0; i < len && name[i] != '.'; i++)
@@ -134,7 +135,7 @@ int hpfs_is_name_long(unsigned char *name, unsigned len)
/* OS/2 clears dots and spaces at the end of file name, so we have to do the same */
-void hpfs_adjust_length(unsigned char *name, unsigned *len)
+void hpfs_adjust_length(const unsigned char *name, unsigned *len)
{
if (!*len) return;
if (*len == 1 && name[0] == '.') return;
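
As an aside, the trimming that hpfs_adjust_length() performs is easy to model in isolation. A stand-alone user-space sketch (adjust_length() and the test string are invented for illustration; this mirrors, but is not, the kernel function):

	#include <assert.h>

	/* Drop the trailing dots and spaces OS/2 ignores, leaving the
	 * special names "." and ".." untouched. */
	static void adjust_length(const unsigned char *name, unsigned *len)
	{
		if (!*len)
			return;
		if (*len == 1 && name[0] == '.')
			return;
		if (*len == 2 && name[0] == '.' && name[1] == '.')
			return;
		while (*len && (name[*len - 1] == '.' || name[*len - 1] == ' '))
			(*len)--;
	}

	int main(void)
	{
		unsigned len = 9;

		adjust_length((const unsigned char *)"README.. ", &len);
		assert(len == 6);	/* trimmed to "README" */
		return 0;
	}
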
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 82b9c4b..11c2b40 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -11,7 +11,7 @@
static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
- const char *name = dentry->d_name.name;
+ const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
struct quad_buffer_head qbh0;
struct buffer_head *bh;
@@ -24,7 +24,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
int r;
struct hpfs_dirent dee;
int err;
- if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err;
+ if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
lock_kernel();
err = -ENOSPC;
fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
@@ -62,7 +62,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
result->i_mode &= ~0222;
mutex_lock(&hpfs_i(dir)->i_mutex);
- r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
+ r = hpfs_add_dirent(dir, name, len, &dee, 0);
if (r == 1)
goto bail3;
if (r == -1) {
@@ -121,7 +121,7 @@ bail:
static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
{
- const char *name = dentry->d_name.name;
+ const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
struct inode *result = NULL;
struct buffer_head *bh;
@@ -130,7 +130,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
int r;
struct hpfs_dirent dee;
int err;
- if ((err = hpfs_chk_name((char *)name, &len)))
+ if ((err = hpfs_chk_name(name, &len)))
return err==-ENOENT ? -EINVAL : err;
lock_kernel();
err = -ENOSPC;
@@ -155,7 +155,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
result->i_op = &hpfs_file_iops;
result->i_fop = &hpfs_file_ops;
result->i_nlink = 1;
- hpfs_decide_conv(result, (char *)name, len);
+ hpfs_decide_conv(result, name, len);
hpfs_i(result)->i_parent_dir = dir->i_ino;
result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date);
result->i_ctime.tv_nsec = 0;
@@ -170,7 +170,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
hpfs_i(result)->mmu_private = 0;
mutex_lock(&hpfs_i(dir)->i_mutex);
- r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
+ r = hpfs_add_dirent(dir, name, len, &dee, 0);
if (r == 1)
goto bail2;
if (r == -1) {
@@ -211,7 +211,7 @@ bail:
static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
{
- const char *name = dentry->d_name.name;
+ const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
struct buffer_head *bh;
struct fnode *fnode;
@@ -220,7 +220,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
struct hpfs_dirent dee;
struct inode *result = NULL;
int err;
- if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err;
+ if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
if (hpfs_sb(dir->i_sb)->sb_eas < 2) return -EPERM;
if (!new_valid_dev(rdev))
return -EINVAL;
@@ -256,7 +256,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
init_special_inode(result, mode, rdev);
mutex_lock(&hpfs_i(dir)->i_mutex);
- r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
+ r = hpfs_add_dirent(dir, name, len, &dee, 0);
if (r == 1)
goto bail2;
if (r == -1) {
@@ -289,7 +289,7 @@ bail:
static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *symlink)
{
- const char *name = dentry->d_name.name;
+ const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
struct buffer_head *bh;
struct fnode *fnode;
@@ -298,7 +298,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
struct hpfs_dirent dee;
struct inode *result;
int err;
- if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err;
+ if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
lock_kernel();
if (hpfs_sb(dir->i_sb)->sb_eas < 2) {
unlock_kernel();
@@ -335,7 +335,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
result->i_data.a_ops = &hpfs_symlink_aops;
mutex_lock(&hpfs_i(dir)->i_mutex);
- r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
+ r = hpfs_add_dirent(dir, name, len, &dee, 0);
if (r == 1)
goto bail2;
if (r == -1) {
@@ -345,7 +345,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
fnode->len = len;
memcpy(fnode->name, name, len > 15 ? 15 : len);
fnode->up = dir->i_ino;
- hpfs_set_ea(result, fnode, "SYMLINK", (char *)symlink, strlen(symlink));
+ hpfs_set_ea(result, fnode, "SYMLINK", symlink, strlen(symlink));
mark_buffer_dirty(bh);
brelse(bh);
@@ -369,7 +369,7 @@ bail:
static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
{
- const char *name = dentry->d_name.name;
+ const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
struct quad_buffer_head qbh;
struct hpfs_dirent *de;
@@ -381,12 +381,12 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
int err;
lock_kernel();
- hpfs_adjust_length((char *)name, &len);
+ hpfs_adjust_length(name, &len);
again:
mutex_lock(&hpfs_i(inode)->i_parent_mutex);
mutex_lock(&hpfs_i(dir)->i_mutex);
err = -ENOENT;
- de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh);
+ de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
if (!de)
goto out;
@@ -413,22 +413,25 @@ again:
mutex_unlock(&hpfs_i(dir)->i_mutex);
mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
- d_drop(dentry);
- spin_lock(&dentry->d_lock);
- if (atomic_read(&dentry->d_count) > 1 ||
- generic_permission(inode, MAY_WRITE, NULL) ||
+ dentry_unhash(dentry);
+ if (!d_unhashed(dentry)) {
+ dput(dentry);
+ unlock_kernel();
+ return -ENOSPC;
+ }
+ if (generic_permission(inode, MAY_WRITE, NULL) ||
!S_ISREG(inode->i_mode) ||
get_write_access(inode)) {
- spin_unlock(&dentry->d_lock);
d_rehash(dentry);
+ dput(dentry);
} else {
struct iattr newattrs;
- spin_unlock(&dentry->d_lock);
/*printk("HPFS: truncating file before delete.\n");*/
newattrs.ia_size = 0;
newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
err = notify_change(dentry, &newattrs);
put_write_access(inode);
+ dput(dentry);
if (!err)
goto again;
}
@@ -451,7 +454,7 @@ out:
static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
{
- const char *name = dentry->d_name.name;
+ const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
struct quad_buffer_head qbh;
struct hpfs_dirent *de;
@@ -462,12 +465,12 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
int err;
int r;
- hpfs_adjust_length((char *)name, &len);
+ hpfs_adjust_length(name, &len);
lock_kernel();
mutex_lock(&hpfs_i(inode)->i_parent_mutex);
mutex_lock(&hpfs_i(dir)->i_mutex);
err = -ENOENT;
- de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh);
+ de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
if (!de)
goto out;
@@ -546,10 +549,10 @@ const struct address_space_operations hpfs_symlink_aops = {
static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
- char *old_name = (char *)old_dentry->d_name.name;
- int old_len = old_dentry->d_name.len;
- char *new_name = (char *)new_dentry->d_name.name;
- int new_len = new_dentry->d_name.len;
+ const unsigned char *old_name = old_dentry->d_name.name;
+ unsigned old_len = old_dentry->d_name.len;
+ const unsigned char *new_name = new_dentry->d_name.name;
+ unsigned new_len = new_dentry->d_name.len;
struct inode *i = old_dentry->d_inode;
struct inode *new_inode = new_dentry->d_inode;
struct quad_buffer_head qbh, qbh1;
@@ -560,9 +563,9 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct buffer_head *bh;
struct fnode *fnode;
int err;
- if ((err = hpfs_chk_name((char *)new_name, &new_len))) return err;
+ if ((err = hpfs_chk_name(new_name, &new_len))) return err;
err = 0;
- hpfs_adjust_length((char *)old_name, &old_len);
+ hpfs_adjust_length(old_name, &old_len);
lock_kernel();
/* order doesn't matter, due to VFS exclusion */
@@ -579,7 +582,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto end1;
}
- if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, (char *)old_name, old_len, &dno, &qbh))) {
+ if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) {
hpfs_error(i->i_sb, "lookup succeeded but map dirent failed");
err = -ENOENT;
goto end1;
@@ -590,7 +593,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (new_inode) {
int r;
if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 1)) != 2) {
- if ((nde = map_dirent(new_dir, hpfs_i(new_dir)->i_dno, (char *)new_name, new_len, NULL, &qbh1))) {
+ if ((nde = map_dirent(new_dir, hpfs_i(new_dir)->i_dno, new_name, new_len, NULL, &qbh1))) {
clear_nlink(new_inode);
copy_de(nde, &de);
memcpy(nde->name, new_name, new_len);
@@ -618,7 +621,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
if (new_dir == old_dir)
- if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, (char *)old_name, old_len, &dno, &qbh))) {
+ if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) {
hpfs_unlock_creation(i->i_sb);
hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2");
err = -ENOENT;
@@ -648,7 +651,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
brelse(bh);
}
hpfs_i(i)->i_conv = hpfs_sb(i->i_sb)->sb_conv;
- hpfs_decide_conv(i, (char *)new_name, new_len);
+ hpfs_decide_conv(i, new_name, new_len);
end1:
if (old_dir != new_dir)
mutex_unlock(&hpfs_i(new_dir)->i_mutex);
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index 7239efc..2e4dfa8 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -718,7 +718,7 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent)
struct vfsmount *proc_mnt;
int err = -ENOENT;
- proc_mnt = do_kern_mount("proc", 0, "proc", NULL);
+ proc_mnt = mntget(current->nsproxy->pid_ns->proc_mnt);
if (IS_ERR(proc_mnt))
goto out;
diff --git a/fs/internal.h b/fs/internal.h
index e96a166..8a03a54 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -70,6 +70,8 @@ extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
extern void __init mnt_init(void);
+extern spinlock_t vfsmount_lock;
+
/*
* fs_struct.c
*/
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 8868493..30beb11 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -507,6 +507,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
if (blocknr < journal->j_tail)
freed = freed + journal->j_last - journal->j_first;
+ trace_jbd2_cleanup_journal_tail(journal, first_tid, blocknr, freed);
jbd_debug(1,
"Cleaning journal tail from %d to %d (offset %lu), "
"freeing %lu\n",
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 1bc74b6..671da7f 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -883,8 +883,7 @@ restart_loop:
spin_unlock(&journal->j_list_lock);
bh = jh2bh(jh);
jbd_lock_bh_state(bh);
- J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
- jh->b_transaction == journal->j_running_transaction);
+ J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
/*
* If there is undo-protected committed data against
@@ -930,12 +929,12 @@ restart_loop:
/* A buffer which has been freed while still being
* journaled by a previous transaction may end up still
* being dirty here, but we want to avoid writing back
- * that buffer in the future now that the last use has
- * been committed. That's not only a performance gain,
- * it also stops aliasing problems if the buffer is left
- * behind for writeback and gets reallocated for another
+ * that buffer in the future after the "add to orphan"
+ * operation has been committed. That's not only a performance
+ * gain, it also stops aliasing problems if the buffer is
+ * left behind for writeback and gets reallocated for another
* use in a different page. */
- if (buffer_freed(bh)) {
+ if (buffer_freed(bh) && !jh->b_next_transaction) {
clear_buffer_freed(bh);
clear_buffer_jbddirty(bh);
}
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index ac0d027..c03d4dc 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -39,6 +39,8 @@
#include <linux/seq_file.h>
#include <linux/math64.h>
#include <linux/hash.h>
+#include <linux/log2.h>
+#include <linux/vmalloc.h>
#define CREATE_TRACE_POINTS
#include <trace/events/jbd2.h>
@@ -93,6 +95,7 @@ EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
static void __journal_abort_soft (journal_t *journal, int errno);
+static int jbd2_journal_create_slab(size_t slab_size);
/*
* Helper function used to manage commit timeouts
@@ -1248,6 +1251,13 @@ int jbd2_journal_load(journal_t *journal)
}
}
+ /*
+ * Create a slab for this blocksize
+ */
+ err = jbd2_journal_create_slab(be32_to_cpu(sb->s_blocksize));
+ if (err)
+ return err;
+
/* Let the recovery code check whether it needs to recover any
* data from the journal. */
if (jbd2_journal_recover(journal))
@@ -1807,6 +1817,127 @@ size_t journal_tag_bytes(journal_t *journal)
}
/*
+ * JBD memory management
+ *
+ * These functions are used to allocate block-sized chunks of memory
+ * used for making copies of buffer_head data. Very often it will be
+ * page-sized chunks of data, but sometimes it will be in
+ * sub-page-size chunks. (For example, 16k pages on Power systems
+ * with a 4k block file system.) For blocks smaller than a page, we
+ * use a SLAB allocator. There are slab caches for each block size,
+ * which are allocated at mount time, if necessary, and we only free
+ * (all of) the slab caches when/if the jbd2 module is unloaded. For
+ * this reason we don't need a mutex to protect allocating from or
+ * releasing memory to the jbd2_slab[] caches; a mutex is only
+ * needed in jbd2_journal_create_slab(), which creates them.
+ */
+#define JBD2_MAX_SLABS 8
+static struct kmem_cache *jbd2_slab[JBD2_MAX_SLABS];
+static DECLARE_MUTEX(jbd2_slab_create_sem);
+
+static const char *jbd2_slab_names[JBD2_MAX_SLABS] = {
+ "jbd2_1k", "jbd2_2k", "jbd2_4k", "jbd2_8k",
+ "jbd2_16k", "jbd2_32k", "jbd2_64k", "jbd2_128k"
+};
+
+
+static void jbd2_journal_destroy_slabs(void)
+{
+ int i;
+
+ for (i = 0; i < JBD2_MAX_SLABS; i++) {
+ if (jbd2_slab[i])
+ kmem_cache_destroy(jbd2_slab[i]);
+ jbd2_slab[i] = NULL;
+ }
+}
+
+static int jbd2_journal_create_slab(size_t size)
+{
+ int i = order_base_2(size) - 10;
+ size_t slab_size;
+
+ if (size == PAGE_SIZE)
+ return 0;
+
+ if (i >= JBD2_MAX_SLABS)
+ return -EINVAL;
+
+ if (unlikely(i < 0))
+ i = 0;
+ down(&jbd2_slab_create_sem);
+ if (jbd2_slab[i]) {
+ up(&jbd2_slab_create_sem);
+ return 0; /* Already created */
+ }
+
+ slab_size = 1 << (i+10);
+ jbd2_slab[i] = kmem_cache_create(jbd2_slab_names[i], slab_size,
+ slab_size, 0, NULL);
+ up(&jbd2_slab_create_sem);
+ if (!jbd2_slab[i]) {
+ printk(KERN_EMERG "JBD2: no memory for jbd2_slab cache\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static struct kmem_cache *get_slab(size_t size)
+{
+ int i = order_base_2(size) - 10;
+
+ BUG_ON(i >= JBD2_MAX_SLABS);
+ if (unlikely(i < 0))
+ i = 0;
+ BUG_ON(jbd2_slab[i] == NULL);
+ return jbd2_slab[i];
+}
+
+void *jbd2_alloc(size_t size, gfp_t flags)
+{
+ void *ptr;
+
+ BUG_ON(size & (size-1)); /* Must be a power of 2 */
+
+ flags |= __GFP_REPEAT;
+ if (size == PAGE_SIZE)
+ ptr = (void *)__get_free_pages(flags, 0);
+ else if (size > PAGE_SIZE) {
+ int order = get_order(size);
+
+ if (order < 3)
+ ptr = (void *)__get_free_pages(flags, order);
+ else
+ ptr = vmalloc(size);
+ } else
+ ptr = kmem_cache_alloc(get_slab(size), flags);
+
+ /* Check alignment; SLUB has gotten this wrong in the past,
+ * and this can lead to user data corruption! */
+ BUG_ON(((unsigned long) ptr) & (size-1));
+
+ return ptr;
+}
+
+void jbd2_free(void *ptr, size_t size)
+{
+ if (size == PAGE_SIZE) {
+ free_pages((unsigned long)ptr, 0);
+ return;
+ }
+ if (size > PAGE_SIZE) {
+ int order = get_order(size);
+
+ if (order < 3)
+ free_pages((unsigned long)ptr, order);
+ else
+ vfree(ptr);
+ return;
+ }
+ kmem_cache_free(get_slab(size), ptr);
+}
+
+/*
* Journal_head storage management
*/
static struct kmem_cache *jbd2_journal_head_cache;
@@ -2204,6 +2335,7 @@ static void jbd2_journal_destroy_caches(void)
jbd2_journal_destroy_revoke_caches();
jbd2_journal_destroy_jbd2_journal_head_cache();
jbd2_journal_destroy_handle_cache();
+ jbd2_journal_destroy_slabs();
}
static int __init journal_init(void)
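
As a sanity check on the comment above, the size-to-index mapping used by jbd2_journal_create_slab() and get_slab() is small enough to model in user space. In this sketch jbd2_slab_index() is an invented stand-in for the kernel's order_base_2(size) - 10:

	#include <assert.h>

	/* 1k -> 0, 2k -> 1, 4k -> 2, ... 128k -> 7, matching the
	 * jbd2_slab_names[] table; sizes between powers of two round
	 * up, as order_base_2() does. */
	static int jbd2_slab_index(unsigned long size)
	{
		int i = 0;

		while ((1UL << (i + 10)) < size)
			i++;
		return i;
	}

	int main(void)
	{
		assert(jbd2_slab_index(1024) == 0);	/* "jbd2_1k" */
		assert(jbd2_slab_index(4096) == 2);	/* "jbd2_4k" */
		assert(jbd2_slab_index(131072) == 7);	/* "jbd2_128k" */
		return 0;
	}
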
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index a051270..bfc70f5 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1727,6 +1727,21 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
if (!jh)
goto zap_buffer_no_jh;
+ /*
+ * We cannot remove the buffer from checkpoint lists until the
+ * transaction adding the inode to the orphan list (let's call it
+ * T) is committed. Otherwise, if the transaction changing the
+ * buffer were cleaned from the journal before T is committed, a
+ * crash would lose the correct contents of the buffer. On the
+ * other hand, we have to clear the buffer dirty bit no later than
+ * the moment the transaction marking the buffer as freed in the
+ * filesystem structures is committed, because from that moment on
+ * the buffer can be reallocated and used by a different page.
+ * Since the block hasn't been freed yet but the inode has already
+ * been added to the orphan list, it is safe for us to add the
+ * buffer to the BJ_Forget list of the newest transaction.
+ */
transaction = jh->b_transaction;
if (transaction == NULL) {
/* First case: not on any transaction. If it
@@ -1783,16 +1798,15 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
} else if (transaction == journal->j_committing_transaction) {
JBUFFER_TRACE(jh, "on committing transaction");
/*
- * If it is committing, we simply cannot touch it. We
- * can remove it's next_transaction pointer from the
- * running transaction if that is set, but nothing
- * else. */
+ * The buffer is committing, so we simply cannot touch
+ * it. We just set b_next_transaction to the running
+ * transaction (if there is one) and mark the buffer as
+ * freed so that the commit code knows it should clear
+ * dirty bits when it is done with the buffer.
+ */
set_buffer_freed(bh);
- if (jh->b_next_transaction) {
- J_ASSERT(jh->b_next_transaction ==
- journal->j_running_transaction);
- jh->b_next_transaction = NULL;
- }
+ if (journal->j_running_transaction && buffer_jbddirty(bh))
+ jh->b_next_transaction = journal->j_running_transaction;
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
@@ -1969,7 +1983,7 @@ void jbd2_journal_file_buffer(struct journal_head *jh,
*/
void __jbd2_journal_refile_buffer(struct journal_head *jh)
{
- int was_dirty;
+ int was_dirty, jlist;
struct buffer_head *bh = jh2bh(jh);
J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
@@ -1991,8 +2005,13 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
__jbd2_journal_temp_unlink_buffer(jh);
jh->b_transaction = jh->b_next_transaction;
jh->b_next_transaction = NULL;
- __jbd2_journal_file_buffer(jh, jh->b_transaction,
- jh->b_modified ? BJ_Metadata : BJ_Reserved);
+ if (buffer_freed(bh))
+ jlist = BJ_Forget;
+ else if (jh->b_modified)
+ jlist = BJ_Metadata;
+ else
+ jlist = BJ_Reserved;
+ __jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
if (was_dirty)
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index c694a5f..9dd1262 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -22,6 +22,7 @@
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
+#include <linux/writeback.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
@@ -120,8 +121,10 @@ int jfs_commit_inode(struct inode *inode, int wait)
return rc;
}
-int jfs_write_inode(struct inode *inode, int wait)
+int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
+ int wait = wbc->sync_mode == WB_SYNC_ALL;
+
if (test_cflag(COMMIT_Nolink, inode))
return 0;
/*
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 4b91b27..79e2c79 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -26,7 +26,7 @@ extern long jfs_ioctl(struct file *, unsigned int, unsigned long);
extern long jfs_compat_ioctl(struct file *, unsigned int, unsigned long);
extern struct inode *jfs_iget(struct super_block *, unsigned long);
extern int jfs_commit_inode(struct inode *, int);
-extern int jfs_write_inode(struct inode*, int);
+extern int jfs_write_inode(struct inode *, struct writeback_control *);
extern void jfs_delete_inode(struct inode *);
extern void jfs_dirty_inode(struct inode *);
extern void jfs_truncate(struct inode *);
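
The ->write_inode conversion seen here for jfs recurs throughout this series (minix and nfs below follow the same recipe): the old int wait argument is recovered from the writeback_control. A hypothetical conversion, with all foofs_* names invented:

	/* before: static int foofs_write_inode(struct inode *inode, int wait) */
	static int foofs_write_inode(struct inode *inode,
				     struct writeback_control *wbc)
	{
		/* WB_SYNC_ALL now plays the role of the old "wait" flag */
		int wait = wbc->sync_mode == WB_SYNC_ALL;

		return foofs_commit_inode(inode, wait);	/* body unchanged */
	}
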
diff --git a/fs/libfs.c b/fs/libfs.c
index 6e8d17e..9e50bcf 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -338,28 +338,14 @@ int simple_readpage(struct file *file, struct page *page)
return 0;
}
-int simple_prepare_write(struct file *file, struct page *page,
- unsigned from, unsigned to)
-{
- if (!PageUptodate(page)) {
- if (to - from != PAGE_CACHE_SIZE)
- zero_user_segments(page,
- 0, from,
- to, PAGE_CACHE_SIZE);
- }
- return 0;
-}
-
int simple_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
struct page *page;
pgoff_t index;
- unsigned from;
index = pos >> PAGE_CACHE_SHIFT;
- from = pos & (PAGE_CACHE_SIZE - 1);
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
@@ -367,43 +353,59 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
*pagep = page;
- return simple_prepare_write(file, page, from, from+len);
-}
-
-static int simple_commit_write(struct file *file, struct page *page,
- unsigned from, unsigned to)
-{
- struct inode *inode = page->mapping->host;
- loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+ if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
- if (!PageUptodate(page))
- SetPageUptodate(page);
- /*
- * No need to use i_size_read() here, the i_size
- * cannot change under us because we hold the i_mutex.
- */
- if (pos > inode->i_size)
- i_size_write(inode, pos);
- set_page_dirty(page);
+ zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
+ }
return 0;
}
+/**
+ * simple_write_end - .write_end helper for non-block-device FSes
+ * @file: see .write_end of address_space_operations
+ * @mapping: "
+ * @pos: "
+ * @len: "
+ * @copied: "
+ * @page: "
+ * @fsdata: "
+ *
+ * simple_write_end does the minimum needed for updating a page after writing is
+ * done. It has the same API signature as the .write_end of
+ * address_space_operations, so it can simply be set as the .write_end hook for
+ * FSes that don't need any other processing. i_mutex is assumed to be held.
+ * Block based filesystems should use generic_write_end().
+ * NOTE: Even though i_size might get updated by this function, mark_inode_dirty
+ * is not called, so a filesystem that actually does store data in .write_inode
+ * should extend what's done here with a call to mark_inode_dirty() in case
+ * i_size has changed.
+ */
int simple_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ struct inode *inode = page->mapping->host;
+ loff_t last_pos = pos + copied;
/* zero the stale part of the page if we did a short copy */
if (copied < len) {
- void *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + from + copied, 0, len - copied);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+
+ zero_user(page, from + copied, len - copied);
}
- simple_commit_write(file, page, from, from+copied);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
+ /*
+ * No need to use i_size_read() here, the i_size
+ * cannot change under us because we hold the i_mutex.
+ */
+ if (last_pos > inode->i_size)
+ i_size_write(inode, last_pos);
+ set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
@@ -853,7 +855,6 @@ EXPORT_SYMBOL(simple_getattr);
EXPORT_SYMBOL(simple_link);
EXPORT_SYMBOL(simple_lookup);
EXPORT_SYMBOL(simple_pin_fs);
-EXPORT_UNUSED_SYMBOL(simple_prepare_write);
EXPORT_SYMBOL(simple_readpage);
EXPORT_SYMBOL(simple_release_fs);
EXPORT_SYMBOL(simple_rename);
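
The short-copy handling in the new simple_write_end() is worth spelling out: if the caller copied fewer bytes than it asked to write, the stale remainder of the write range is zeroed before the page can be marked up to date. A user-space model of just that step (write_end_model() is an invented name; zero_user() is modeled with memset()):

	#include <assert.h>
	#include <string.h>

	#define PAGE_CACHE_SIZE 4096UL

	static void write_end_model(unsigned char *page, unsigned long pos,
				    unsigned len, unsigned copied)
	{
		if (copied < len) {
			unsigned from = pos & (PAGE_CACHE_SIZE - 1);

			/* models zero_user(page, from + copied, len - copied) */
			memset(page + from + copied, 0, len - copied);
		}
	}

	int main(void)
	{
		unsigned char page[PAGE_CACHE_SIZE];

		memset(page, 0xaa, sizeof(page));
		write_end_model(page, 100, 50, 20);	/* short copy: 20 of 50 */
		assert(page[119] == 0xaa);	/* copied bytes untouched */
		assert(page[120] == 0x00);	/* stale tail zeroed... */
		assert(page[149] == 0x00);	/* ...through the write range */
		assert(page[150] == 0xaa);	/* beyond the range untouched */
		return 0;
	}
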
diff --git a/fs/locks.c b/fs/locks.c
index a8794f2..ae9ded0 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1182,8 +1182,9 @@ int __break_lease(struct inode *inode, unsigned int mode)
struct file_lock *fl;
unsigned long break_time;
int i_have_this_lease = 0;
+ int want_write = (mode & O_ACCMODE) != O_RDONLY;
- new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK);
+ new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
lock_kernel();
@@ -1197,7 +1198,7 @@ int __break_lease(struct inode *inode, unsigned int mode)
if (fl->fl_owner == current->files)
i_have_this_lease = 1;
- if (mode & FMODE_WRITE) {
+ if (want_write) {
/* If we want write access, we have to revoke any lease. */
future = F_UNLCK | F_INPROGRESS;
} else if (flock->fl_type & F_INPROGRESS) {
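
The switch from FMODE_WRITE to O_ACCMODE here matters because break_lease() callers now pass O_* flags: O_RDONLY, O_WRONLY and O_RDWR are enumerated values inside the O_ACCMODE mask (conventionally 0, 1 and 2), not independent bits, so testing an FMODE_* bit against them was wrong. A quick user-space check of the idiom:

	#include <assert.h>
	#include <fcntl.h>

	int main(void)
	{
		/* "(mode & O_ACCMODE) != O_RDONLY" is the write-intent
		 * test for O_* access modes */
		assert((O_RDONLY & O_ACCMODE) == O_RDONLY);
		assert((O_WRONLY & O_ACCMODE) != O_RDONLY);
		assert((O_RDWR & O_ACCMODE) != O_RDONLY);
		return 0;
	}
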
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 74ea82d..756f8c9 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -17,8 +17,10 @@
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/vfs.h>
+#include <linux/writeback.h>
-static int minix_write_inode(struct inode * inode, int wait);
+static int minix_write_inode(struct inode *inode,
+ struct writeback_control *wbc);
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf);
static int minix_remount (struct super_block * sb, int * flags, char * data);
@@ -552,7 +554,7 @@ static struct buffer_head * V2_minix_update_inode(struct inode * inode)
return bh;
}
-static int minix_write_inode(struct inode *inode, int wait)
+static int minix_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int err = 0;
struct buffer_head *bh;
@@ -563,7 +565,7 @@ static int minix_write_inode(struct inode *inode, int wait)
bh = V2_minix_update_inode(inode);
if (!bh)
return -EIO;
- if (wait && buffer_dirty(bh)) {
+ if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) {
sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh)) {
printk("IO error syncing minix inode [%s:%08lx]\n",
diff --git a/fs/namei.c b/fs/namei.c
index 06abd2b..3d9d2f9 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -497,8 +497,6 @@ static int link_path_walk(const char *, struct nameidata *);
static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link)
{
- int res = 0;
- char *name;
if (IS_ERR(link))
goto fail;
@@ -509,22 +507,7 @@ static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *l
path_get(&nd->root);
}
- res = link_path_walk(link, nd);
- if (nd->depth || res || nd->last_type!=LAST_NORM)
- return res;
- /*
- * If it is an iterative symlinks resolution in open_namei() we
- * have to copy the last component. And all that crap because of
- * bloody create() on broken symlinks. Furrfu...
- */
- name = __getname();
- if (unlikely(!name)) {
- path_put(&nd->path);
- return -ENOMEM;
- }
- strcpy(name, nd->last.name);
- nd->last.name = name;
- return 0;
+ return link_path_walk(link, nd);
fail:
path_put(&nd->path);
return PTR_ERR(link);
@@ -546,10 +529,10 @@ static inline void path_to_nameidata(struct path *path, struct nameidata *nd)
nd->path.dentry = path->dentry;
}
-static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd)
+static __always_inline int
+__do_follow_link(struct path *path, struct nameidata *nd, void **p)
{
int error;
- void *cookie;
struct dentry *dentry = path->dentry;
touch_atime(path->mnt, dentry);
@@ -561,9 +544,9 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
}
mntget(path->mnt);
nd->last_type = LAST_BIND;
- cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
- error = PTR_ERR(cookie);
- if (!IS_ERR(cookie)) {
+ *p = dentry->d_inode->i_op->follow_link(dentry, nd);
+ error = PTR_ERR(*p);
+ if (!IS_ERR(*p)) {
char *s = nd_get_link(nd);
error = 0;
if (s)
@@ -573,8 +556,6 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
if (error)
path_put(&nd->path);
}
- if (dentry->d_inode->i_op->put_link)
- dentry->d_inode->i_op->put_link(dentry, nd, cookie);
}
return error;
}
@@ -588,6 +569,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
*/
static inline int do_follow_link(struct path *path, struct nameidata *nd)
{
+ void *cookie;
int err = -ELOOP;
if (current->link_count >= MAX_NESTED_LINKS)
goto loop;
@@ -601,7 +583,9 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
current->link_count++;
current->total_link_count++;
nd->depth++;
- err = __do_follow_link(path, nd);
+ err = __do_follow_link(path, nd, &cookie);
+ if (!IS_ERR(cookie) && path->dentry->d_inode->i_op->put_link)
+ path->dentry->d_inode->i_op->put_link(path->dentry, nd, cookie);
path_put(path);
current->link_count--;
nd->depth--;
@@ -688,33 +672,20 @@ static __always_inline void follow_dotdot(struct nameidata *nd)
set_root(nd);
while(1) {
- struct vfsmount *parent;
struct dentry *old = nd->path.dentry;
if (nd->path.dentry == nd->root.dentry &&
nd->path.mnt == nd->root.mnt) {
break;
}
- spin_lock(&dcache_lock);
if (nd->path.dentry != nd->path.mnt->mnt_root) {
- nd->path.dentry = dget(nd->path.dentry->d_parent);
- spin_unlock(&dcache_lock);
+ /* rare case of legitimate dget_parent()... */
+ nd->path.dentry = dget_parent(nd->path.dentry);
dput(old);
break;
}
- spin_unlock(&dcache_lock);
- spin_lock(&vfsmount_lock);
- parent = nd->path.mnt->mnt_parent;
- if (parent == nd->path.mnt) {
- spin_unlock(&vfsmount_lock);
+ if (!follow_up(&nd->path))
break;
- }
- mntget(parent);
- nd->path.dentry = dget(nd->path.mnt->mnt_mountpoint);
- spin_unlock(&vfsmount_lock);
- dput(old);
- mntput(nd->path.mnt);
- nd->path.mnt = parent;
}
follow_mount(&nd->path);
}
@@ -1346,7 +1317,7 @@ static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
return -ENOENT;
BUG_ON(victim->d_parent->d_inode != dir);
- audit_inode_child(victim->d_name.name, victim, dir);
+ audit_inode_child(victim, dir);
error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
if (error)
@@ -1387,22 +1358,6 @@ static inline int may_create(struct inode *dir, struct dentry *child)
return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}
-/*
- * O_DIRECTORY translates into forcing a directory lookup.
- */
-static inline int lookup_flags(unsigned int f)
-{
- unsigned long retval = LOOKUP_FOLLOW;
-
- if (f & O_NOFOLLOW)
- retval &= ~LOOKUP_FOLLOW;
-
- if (f & O_DIRECTORY)
- retval |= LOOKUP_DIRECTORY;
-
- return retval;
-}
-
/*
* p1 and p2 should be directories on the same fs.
*/
@@ -1501,7 +1456,7 @@ int may_open(struct path *path, int acc_mode, int flag)
* An append-only file must be opened in append mode for writing.
*/
if (IS_APPEND(inode)) {
- if ((flag & FMODE_WRITE) && !(flag & O_APPEND))
+ if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
return -EPERM;
if (flag & O_TRUNC)
return -EPERM;
@@ -1545,7 +1500,7 @@ static int handle_truncate(struct path *path)
* what get passed to sys_open().
*/
static int __open_namei_create(struct nameidata *nd, struct path *path,
- int flag, int mode)
+ int open_flag, int mode)
{
int error;
struct dentry *dir = nd->path.dentry;
@@ -1563,7 +1518,7 @@ out_unlock:
if (error)
return error;
/* Don't check for write permission, don't truncate */
- return may_open(&nd->path, 0, flag & ~O_TRUNC);
+ return may_open(&nd->path, 0, open_flag & ~O_TRUNC);
}
/*
@@ -1601,129 +1556,132 @@ static int open_will_truncate(int flag, struct inode *inode)
return (flag & O_TRUNC);
}
-/*
- * Note that the low bits of the passed in "open_flag"
- * are not the same as in the local variable "flag". See
- * open_to_namei_flags() for more details.
- */
-struct file *do_filp_open(int dfd, const char *pathname,
- int open_flag, int mode, int acc_mode)
+static struct file *finish_open(struct nameidata *nd,
+ int open_flag, int acc_mode)
{
struct file *filp;
- struct nameidata nd;
- int error;
- struct path path;
- struct dentry *dir;
- int count = 0;
int will_truncate;
- int flag = open_to_namei_flags(open_flag);
- int force_reval = 0;
+ int error;
+ will_truncate = open_will_truncate(open_flag, nd->path.dentry->d_inode);
+ if (will_truncate) {
+ error = mnt_want_write(nd->path.mnt);
+ if (error)
+ goto exit;
+ }
+ error = may_open(&nd->path, acc_mode, open_flag);
+ if (error) {
+ if (will_truncate)
+ mnt_drop_write(nd->path.mnt);
+ goto exit;
+ }
+ filp = nameidata_to_filp(nd);
+ if (!IS_ERR(filp)) {
+ error = ima_file_check(filp, acc_mode);
+ if (error) {
+ fput(filp);
+ filp = ERR_PTR(error);
+ }
+ }
+ if (!IS_ERR(filp)) {
+ if (will_truncate) {
+ error = handle_truncate(&nd->path);
+ if (error) {
+ fput(filp);
+ filp = ERR_PTR(error);
+ }
+ }
+ }
/*
- * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only
- * check for O_DSYNC if they need any syncing at all, we enforce that
- * it is always set instead of having to deal with possibly weird
- * behaviour for malicious applications setting only __O_SYNC.
+ * It is now safe to drop the mnt write
+ * because the filp has had a write taken
+ * on its behalf.
*/
- if (open_flag & __O_SYNC)
- open_flag |= O_DSYNC;
-
- if (!acc_mode)
- acc_mode = MAY_OPEN | ACC_MODE(open_flag);
+ if (will_truncate)
+ mnt_drop_write(nd->path.mnt);
+ return filp;
- /* O_TRUNC implies we need access checks for write permissions */
- if (flag & O_TRUNC)
- acc_mode |= MAY_WRITE;
+exit:
+ if (!IS_ERR(nd->intent.open.file))
+ release_open_intent(nd);
+ path_put(&nd->path);
+ return ERR_PTR(error);
+}
- /* Allow the LSM permission hook to distinguish append
- access from general write access. */
- if (flag & O_APPEND)
- acc_mode |= MAY_APPEND;
+static struct file *do_last(struct nameidata *nd, struct path *path,
+ int open_flag, int acc_mode,
+ int mode, const char *pathname,
+ int *want_dir)
+{
+ struct dentry *dir = nd->path.dentry;
+ struct file *filp;
+ int error = -EISDIR;
- /*
- * The simplest case - just a plain lookup.
- */
- if (!(flag & O_CREAT)) {
- filp = get_empty_filp();
-
- if (filp == NULL)
- return ERR_PTR(-ENFILE);
- nd.intent.open.file = filp;
- filp->f_flags = open_flag;
- nd.intent.open.flags = flag;
- nd.intent.open.create_mode = 0;
- error = do_path_lookup(dfd, pathname,
- lookup_flags(flag)|LOOKUP_OPEN, &nd);
- if (IS_ERR(nd.intent.open.file)) {
- if (error == 0) {
- error = PTR_ERR(nd.intent.open.file);
- path_put(&nd.path);
+ switch (nd->last_type) {
+ case LAST_DOTDOT:
+ follow_dotdot(nd);
+ dir = nd->path.dentry;
+ if (nd->path.mnt->mnt_sb->s_type->fs_flags & FS_REVAL_DOT) {
+ if (!dir->d_op->d_revalidate(dir, nd)) {
+ error = -ESTALE;
+ goto exit;
}
- } else if (error)
- release_open_intent(&nd);
- if (error)
- return ERR_PTR(error);
+ }
+ /* fallthrough */
+ case LAST_DOT:
+ case LAST_ROOT:
+ if (open_flag & O_CREAT)
+ goto exit;
+ /* fallthrough */
+ case LAST_BIND:
+ audit_inode(pathname, dir);
goto ok;
}
- /*
- * Create - we need to know the parent.
- */
-reval:
- error = path_init(dfd, pathname, LOOKUP_PARENT, &nd);
- if (error)
- return ERR_PTR(error);
- if (force_reval)
- nd.flags |= LOOKUP_REVAL;
- error = path_walk(pathname, &nd);
- if (error) {
- if (nd.root.mnt)
- path_put(&nd.root);
- return ERR_PTR(error);
+ /* trailing slashes? */
+ if (nd->last.name[nd->last.len]) {
+ if (open_flag & O_CREAT)
+ goto exit;
+ *want_dir = 1;
}
- if (unlikely(!audit_dummy_context()))
- audit_inode(pathname, nd.path.dentry);
- /*
- * We have the parent and last component. First of all, check
- * that we are not asked to creat(2) an obvious directory - that
- * will not do.
- */
- error = -EISDIR;
- if (nd.last_type != LAST_NORM || nd.last.name[nd.last.len])
- goto exit_parent;
+ /* just plain open? */
+ if (!(open_flag & O_CREAT)) {
+ error = do_lookup(nd, &nd->last, path);
+ if (error)
+ goto exit;
+ error = -ENOENT;
+ if (!path->dentry->d_inode)
+ goto exit_dput;
+ if (path->dentry->d_inode->i_op->follow_link)
+ return NULL;
+ error = -ENOTDIR;
+ if (*want_dir && !path->dentry->d_inode->i_op->lookup)
+ goto exit_dput;
+ path_to_nameidata(path, nd);
+ audit_inode(pathname, nd->path.dentry);
+ goto ok;
+ }
- error = -ENFILE;
- filp = get_empty_filp();
- if (filp == NULL)
- goto exit_parent;
- nd.intent.open.file = filp;
- filp->f_flags = open_flag;
- nd.intent.open.flags = flag;
- nd.intent.open.create_mode = mode;
- dir = nd.path.dentry;
- nd.flags &= ~LOOKUP_PARENT;
- nd.flags |= LOOKUP_CREATE | LOOKUP_OPEN;
- if (flag & O_EXCL)
- nd.flags |= LOOKUP_EXCL;
+ /* OK, it's O_CREAT */
mutex_lock(&dir->d_inode->i_mutex);
- path.dentry = lookup_hash(&nd);
- path.mnt = nd.path.mnt;
-do_last:
- error = PTR_ERR(path.dentry);
- if (IS_ERR(path.dentry)) {
+ path->dentry = lookup_hash(nd);
+ path->mnt = nd->path.mnt;
+
+ error = PTR_ERR(path->dentry);
+ if (IS_ERR(path->dentry)) {
mutex_unlock(&dir->d_inode->i_mutex);
goto exit;
}
- if (IS_ERR(nd.intent.open.file)) {
- error = PTR_ERR(nd.intent.open.file);
+ if (IS_ERR(nd->intent.open.file)) {
+ error = PTR_ERR(nd->intent.open.file);
goto exit_mutex_unlock;
}
/* Negative dentry, just create the file */
- if (!path.dentry->d_inode) {
+ if (!path->dentry->d_inode) {
/*
* This write is needed to ensure that a
* ro->rw transition does not occur between
@@ -1731,18 +1689,16 @@ do_last:
* a permanent write count is taken through
* the 'struct file' in nameidata_to_filp().
*/
- error = mnt_want_write(nd.path.mnt);
+ error = mnt_want_write(nd->path.mnt);
if (error)
goto exit_mutex_unlock;
- error = __open_namei_create(&nd, &path, flag, mode);
+ error = __open_namei_create(nd, path, open_flag, mode);
if (error) {
- mnt_drop_write(nd.path.mnt);
+ mnt_drop_write(nd->path.mnt);
goto exit;
}
- filp = nameidata_to_filp(&nd);
- mnt_drop_write(nd.path.mnt);
- if (nd.root.mnt)
- path_put(&nd.root);
+ filp = nameidata_to_filp(nd);
+ mnt_drop_write(nd->path.mnt);
if (!IS_ERR(filp)) {
error = ima_file_check(filp, acc_mode);
if (error) {
@@ -1757,147 +1713,181 @@ do_last:
* It already exists.
*/
mutex_unlock(&dir->d_inode->i_mutex);
- audit_inode(pathname, path.dentry);
+ audit_inode(pathname, path->dentry);
error = -EEXIST;
- if (flag & O_EXCL)
+ if (open_flag & O_EXCL)
goto exit_dput;
- if (__follow_mount(&path)) {
+ if (__follow_mount(path)) {
error = -ELOOP;
- if (flag & O_NOFOLLOW)
+ if (open_flag & O_NOFOLLOW)
goto exit_dput;
}
error = -ENOENT;
- if (!path.dentry->d_inode)
+ if (!path->dentry->d_inode)
goto exit_dput;
- if (path.dentry->d_inode->i_op->follow_link)
- goto do_link;
- path_to_nameidata(&path, &nd);
+ if (path->dentry->d_inode->i_op->follow_link)
+ return NULL;
+
+ path_to_nameidata(path, nd);
error = -EISDIR;
- if (S_ISDIR(path.dentry->d_inode->i_mode))
+ if (S_ISDIR(path->dentry->d_inode->i_mode))
goto exit;
ok:
+ filp = finish_open(nd, open_flag, acc_mode);
+ return filp;
+
+exit_mutex_unlock:
+ mutex_unlock(&dir->d_inode->i_mutex);
+exit_dput:
+ path_put_conditional(path, nd);
+exit:
+ if (!IS_ERR(nd->intent.open.file))
+ release_open_intent(nd);
+ path_put(&nd->path);
+ return ERR_PTR(error);
+}
+
+/*
+ * Note that the low bits of the passed in "open_flag"
+ * are not the same as in the local variable "flag". See
+ * open_to_namei_flags() for more details.
+ */
+struct file *do_filp_open(int dfd, const char *pathname,
+ int open_flag, int mode, int acc_mode)
+{
+ struct file *filp;
+ struct nameidata nd;
+ int error;
+ struct path path;
+ int count = 0;
+ int flag = open_to_namei_flags(open_flag);
+ int force_reval = 0;
+ int want_dir = open_flag & O_DIRECTORY;
+
+ if (!(open_flag & O_CREAT))
+ mode = 0;
+
/*
- * Consider:
- * 1. may_open() truncates a file
- * 2. a rw->ro mount transition occurs
- * 3. nameidata_to_filp() fails due to
- * the ro mount.
- * That would be inconsistent, and should
- * be avoided. Taking this mnt write here
- * ensures that (2) can not occur.
+ * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only
+ * check for O_DSYNC if they need any syncing at all, we enforce that
+ * it is always set instead of having to deal with possibly weird
+ * behaviour for malicious applications setting only __O_SYNC.
*/
- will_truncate = open_will_truncate(flag, nd.path.dentry->d_inode);
- if (will_truncate) {
- error = mnt_want_write(nd.path.mnt);
- if (error)
- goto exit;
- }
- error = may_open(&nd.path, acc_mode, flag);
+ if (open_flag & __O_SYNC)
+ open_flag |= O_DSYNC;
+
+ if (!acc_mode)
+ acc_mode = MAY_OPEN | ACC_MODE(open_flag);
+
+ /* O_TRUNC implies we need access checks for write permissions */
+ if (open_flag & O_TRUNC)
+ acc_mode |= MAY_WRITE;
+
+ /* Allow the LSM permission hook to distinguish append
+ access from general write access. */
+ if (open_flag & O_APPEND)
+ acc_mode |= MAY_APPEND;
+
+ /* find the parent */
+reval:
+ error = path_init(dfd, pathname, LOOKUP_PARENT, &nd);
+ if (error)
+ return ERR_PTR(error);
+ if (force_reval)
+ nd.flags |= LOOKUP_REVAL;
+
+ current->total_link_count = 0;
+ error = link_path_walk(pathname, &nd);
if (error) {
- if (will_truncate)
- mnt_drop_write(nd.path.mnt);
- goto exit;
+ filp = ERR_PTR(error);
+ goto out;
}
- filp = nameidata_to_filp(&nd);
- if (!IS_ERR(filp)) {
- error = ima_file_check(filp, acc_mode);
- if (error) {
- fput(filp);
+ if (unlikely(!audit_dummy_context()) && (open_flag & O_CREAT))
+ audit_inode(pathname, nd.path.dentry);
+
+ /*
+ * We have the parent and last component.
+ */
+
+ error = -ENFILE;
+ filp = get_empty_filp();
+ if (filp == NULL)
+ goto exit_parent;
+ nd.intent.open.file = filp;
+ filp->f_flags = open_flag;
+ nd.intent.open.flags = flag;
+ nd.intent.open.create_mode = mode;
+ nd.flags &= ~LOOKUP_PARENT;
+ nd.flags |= LOOKUP_OPEN;
+ if (open_flag & O_CREAT) {
+ nd.flags |= LOOKUP_CREATE;
+ if (open_flag & O_EXCL)
+ nd.flags |= LOOKUP_EXCL;
+ }
+ filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname, &want_dir);
+ while (unlikely(!filp)) { /* trailing symlink */
+ struct path holder;
+ struct inode *inode = path.dentry->d_inode;
+ void *cookie;
+ error = -ELOOP;
+ /* S_ISDIR part is a temporary automount kludge */
+ if ((open_flag & O_NOFOLLOW) && !S_ISDIR(inode->i_mode))
+ goto exit_dput;
+ if (count++ == 32)
+ goto exit_dput;
+ /*
+ * This is subtle. Instead of calling do_follow_link() we do
+ * the thing by hand. The reason is that this way we have zero
+ * link_count and path_walk() (called from ->follow_link)
+ * honoring LOOKUP_PARENT. After that we have the parent and
+ * last component, i.e. we are in the same situation as after
+ * the first path_walk(). Well, almost - if the last component
+ * is normal we get its copy stored in nd->last.name and we will
+ * have to putname() it when we are done. Procfs-like symlinks
+ * just set LAST_BIND.
+ */
+ nd.flags |= LOOKUP_PARENT;
+ error = security_inode_follow_link(path.dentry, &nd);
+ if (error)
+ goto exit_dput;
+ error = __do_follow_link(&path, &nd, &cookie);
+ if (unlikely(error)) {
+ /* nd.path had been dropped */
+ if (!IS_ERR(cookie) && inode->i_op->put_link)
+ inode->i_op->put_link(path.dentry, &nd, cookie);
+ path_put(&path);
+ release_open_intent(&nd);
filp = ERR_PTR(error);
+ goto out;
}
+ holder = path;
+ nd.flags &= ~LOOKUP_PARENT;
+ filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname, &want_dir);
+ if (inode->i_op->put_link)
+ inode->i_op->put_link(holder.dentry, &nd, cookie);
+ path_put(&holder);
}
- if (!IS_ERR(filp)) {
- if (will_truncate) {
- error = handle_truncate(&nd.path);
- if (error) {
- fput(filp);
- filp = ERR_PTR(error);
- }
- }
- }
- /*
- * It is now safe to drop the mnt write
- * because the filp has had a write taken
- * on its behalf.
- */
- if (will_truncate)
- mnt_drop_write(nd.path.mnt);
+out:
if (nd.root.mnt)
path_put(&nd.root);
+ if (filp == ERR_PTR(-ESTALE) && !force_reval) {
+ force_reval = 1;
+ goto reval;
+ }
return filp;
-exit_mutex_unlock:
- mutex_unlock(&dir->d_inode->i_mutex);
exit_dput:
path_put_conditional(&path, &nd);
-exit:
if (!IS_ERR(nd.intent.open.file))
release_open_intent(&nd);
exit_parent:
- if (nd.root.mnt)
- path_put(&nd.root);
path_put(&nd.path);
- return ERR_PTR(error);
-
-do_link:
- error = -ELOOP;
- if (flag & O_NOFOLLOW)
- goto exit_dput;
- /*
- * This is subtle. Instead of calling do_follow_link() we do the
- * thing by hands. The reason is that this way we have zero link_count
- * and path_walk() (called from ->follow_link) honoring LOOKUP_PARENT.
- * After that we have the parent and last component, i.e.
- * we are in the same situation as after the first path_walk().
- * Well, almost - if the last component is normal we get its copy
- * stored in nd->last.name and we will have to putname() it when we
- * are done. Procfs-like symlinks just set LAST_BIND.
- */
- nd.flags |= LOOKUP_PARENT;
- error = security_inode_follow_link(path.dentry, &nd);
- if (error)
- goto exit_dput;
- error = __do_follow_link(&path, &nd);
- path_put(&path);
- if (error) {
- /* Does someone understand code flow here? Or it is only
- * me so stupid? Anathema to whoever designed this non-sense
- * with "intent.open".
- */
- release_open_intent(&nd);
- if (nd.root.mnt)
- path_put(&nd.root);
- if (error == -ESTALE && !force_reval) {
- force_reval = 1;
- goto reval;
- }
- return ERR_PTR(error);
- }
- nd.flags &= ~LOOKUP_PARENT;
- if (nd.last_type == LAST_BIND)
- goto ok;
- error = -EISDIR;
- if (nd.last_type != LAST_NORM)
- goto exit;
- if (nd.last.name[nd.last.len]) {
- __putname(nd.last.name);
- goto exit;
- }
- error = -ELOOP;
- if (count++==32) {
- __putname(nd.last.name);
- goto exit;
- }
- dir = nd.path.dentry;
- mutex_lock(&dir->d_inode->i_mutex);
- path.dentry = lookup_hash(&nd);
- path.mnt = nd.path.mnt;
- __putname(nd.last.name);
- goto do_last;
+ filp = ERR_PTR(error);
+ goto out;
}
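
Reduced to a skeleton, the rewritten open path reads as follows (an illustrative comment-level summary, not patch text; error handling omitted):

	/*
	 * do_filp_open(dfd, pathname, open_flag, mode, acc_mode)
	 *     link_path_walk(pathname, &nd);       // find the parent
	 *     filp = do_last(&nd, &path, ...);     // NULL means trailing symlink
	 *     while (filp == NULL) {               // bounded by count++ == 32
	 *         __do_follow_link(&path, &nd, &cookie);
	 *         filp = do_last(&nd, &path, ...);
	 *         // i_op->put_link() releases the cookie after each step
	 *     }
	 *     // an -ESTALE result retries the whole walk once with
	 *     // LOOKUP_REVAL (force_reval)
	 */
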
/**
@@ -2264,8 +2254,11 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
error = -EBUSY;
else {
error = security_inode_unlink(dir, dentry);
- if (!error)
+ if (!error) {
error = dir->i_op->unlink(dir, dentry);
+ if (!error)
+ dentry->d_inode->i_flags |= S_DEAD;
+ }
}
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -2616,6 +2609,8 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
else
error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
if (!error) {
+ if (target)
+ target->i_flags |= S_DEAD;
if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
d_move(old_dentry, new_dentry);
}
@@ -2655,11 +2650,9 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry);
else
error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
- if (!error) {
- const char *new_name = old_dentry->d_name.name;
- fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir,
+ if (!error)
+ fsnotify_move(old_dir, new_dir, old_name, is_dir,
new_dentry->d_inode, old_dentry);
- }
fsnotify_oldname_free(old_name);
return error;
diff --git a/fs/namespace.c b/fs/namespace.c
index c768f73..8174c8a 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -573,7 +573,7 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
mnt->mnt_master = old;
CLEAR_MNT_SHARED(mnt);
} else if (!(flag & CL_PRIVATE)) {
- if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
+ if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
list_add(&mnt->mnt_share, &old->mnt_share);
if (IS_MNT_SLAVE(old))
list_add(&mnt->mnt_slave, &old->mnt_slave);
@@ -737,6 +737,21 @@ static void m_stop(struct seq_file *m, void *v)
up_read(&namespace_sem);
}
+int mnt_had_events(struct proc_mounts *p)
+{
+ struct mnt_namespace *ns = p->ns;
+ int res = 0;
+
+ spin_lock(&vfsmount_lock);
+ if (p->event != ns->event) {
+ p->event = ns->event;
+ res = 1;
+ }
+ spin_unlock(&vfsmount_lock);
+
+ return res;
+}
+
struct proc_fs_info {
int flag;
const char *str;
@@ -1121,8 +1136,15 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
struct path path;
int retval;
+ int lookup_flags = 0;
- retval = user_path(name, &path);
+ if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
+ return -EINVAL;
+
+ if (!(flags & UMOUNT_NOFOLLOW))
+ lookup_flags |= LOOKUP_FOLLOW;
+
+ retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
if (retval)
goto out;
retval = -EINVAL;
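
From user space the effect is twofold: unknown flag bits to umount(2) now fail with EINVAL, and UMOUNT_NOFOLLOW (0x00000008 in this series) lets callers refuse to umount through a symlink. For instance (the mount point is illustrative):

	#include <stdio.h>
	#include <sys/mount.h>

	#ifndef UMOUNT_NOFOLLOW
	#define UMOUNT_NOFOLLOW 0x00000008	/* don't follow symlinks */
	#endif

	int main(void)
	{
		if (umount2("/mnt/test", UMOUNT_NOFOLLOW) != 0)
			perror("umount2");
		return 0;
	}
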
@@ -1246,6 +1268,21 @@ void drop_collected_mounts(struct vfsmount *mnt)
release_mounts(&umount_list);
}
+int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
+ struct vfsmount *root)
+{
+ struct vfsmount *mnt;
+ int res = f(root, arg);
+ if (res)
+ return res;
+ list_for_each_entry(mnt, &root->mnt_list, mnt_list) {
+ res = f(mnt, arg);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
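
A minimal usage sketch for the new iterate_mounts() helper (count_one() and the counting use are invented for illustration; the walk aborts early if the callback returns non-zero):

	/* callback: count every mount in a collected tree */
	static int count_one(struct vfsmount *mnt, void *arg)
	{
		(*(int *)arg)++;
		return 0;	/* non-zero would stop the iteration */
	}

	/* caller side:
	 *	int n = 0;
	 *	iterate_mounts(count_one, &n, collected_root);
	 */
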
static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
{
struct vfsmount *p;
@@ -1538,7 +1575,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
err = do_remount_sb(sb, flags, data, 0);
if (!err) {
spin_lock(&vfsmount_lock);
- mnt_flags |= path->mnt->mnt_flags & MNT_PNODE_MASK;
+ mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK;
path->mnt->mnt_flags = mnt_flags;
spin_unlock(&vfsmount_lock);
}
@@ -1671,7 +1708,7 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path,
{
int err;
- mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD);
+ mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL);
down_write(&namespace_sem);
/* Something was mounted here while we slept */
@@ -2314,17 +2351,13 @@ void __init mnt_init(void)
void put_mnt_ns(struct mnt_namespace *ns)
{
- struct vfsmount *root;
LIST_HEAD(umount_list);
- if (!atomic_dec_and_lock(&ns->count, &vfsmount_lock))
+ if (!atomic_dec_and_test(&ns->count))
return;
- root = ns->root;
- ns->root = NULL;
- spin_unlock(&vfsmount_lock);
down_write(&namespace_sem);
spin_lock(&vfsmount_lock);
- umount_tree(root, 0, &umount_list);
+ umount_tree(ns->root, 0, &umount_list);
spin_unlock(&vfsmount_lock);
up_write(&namespace_sem);
release_mounts(&umount_list);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index f141bde..7f9ecc4 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -97,16 +97,12 @@ u64 nfs_compat_user_ino64(u64 fileid)
return ino;
}
-int nfs_write_inode(struct inode *inode, int sync)
+int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int ret;
- if (sync) {
- ret = filemap_fdatawait(inode->i_mapping);
- if (ret == 0)
- ret = nfs_commit_inode(inode, FLUSH_SYNC);
- } else
- ret = nfs_commit_inode(inode, 0);
+ ret = nfs_commit_inode(inode,
+ wbc->sync_mode == WB_SYNC_ALL ? FLUSH_SYNC : 0);
if (ret >= 0)
return 0;
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
@@ -574,14 +570,14 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
nfs_revalidate_inode(server, inode);
}
-static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, struct dentry *dentry, struct rpc_cred *cred)
+static struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cred *cred)
{
struct nfs_open_context *ctx;
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx != NULL) {
- ctx->path.dentry = dget(dentry);
- ctx->path.mnt = mntget(mnt);
+ ctx->path = *path;
+ path_get(&ctx->path);
ctx->cred = get_rpccred(cred);
ctx->state = NULL;
ctx->lockowner = current->files;
@@ -686,7 +682,7 @@ int nfs_open(struct inode *inode, struct file *filp)
cred = rpc_lookup_cred();
if (IS_ERR(cred))
return PTR_ERR(cred);
- ctx = alloc_nfs_open_context(filp->f_path.mnt, filp->f_path.dentry, cred);
+ ctx = alloc_nfs_open_context(&filp->f_path, cred);
put_rpccred(cred);
if (ctx == NULL)
return -ENOMEM;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 29e464d..11f82f0 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -211,7 +211,7 @@ extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask);
extern struct workqueue_struct *nfsiod_workqueue;
extern struct inode *nfs_alloc_inode(struct super_block *sb);
extern void nfs_destroy_inode(struct inode *);
-extern int nfs_write_inode(struct inode *,int);
+extern int nfs_write_inode(struct inode *, struct writeback_control *);
extern void nfs_clear_inode(struct inode *);
#ifdef CONFIG_NFS_V4
extern void nfs4_clear_inode(struct inode *);
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h
index 46d779a..1d8d5c8 100644
--- a/fs/nfs/iostat.h
+++ b/fs/nfs/iostat.h
@@ -57,12 +57,12 @@ static inline void nfs_add_fscache_stats(struct inode *inode,
}
#endif
-static inline struct nfs_iostats *nfs_alloc_iostats(void)
+static inline struct nfs_iostats __percpu *nfs_alloc_iostats(void)
{
return alloc_percpu(struct nfs_iostats);
}
-static inline void nfs_free_iostats(struct nfs_iostats *stats)
+static inline void nfs_free_iostats(struct nfs_iostats __percpu *stats)
{
if (stats != NULL)
free_percpu(stats);
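
The __percpu annotation added here is purely for sparse: it marks the value as a per-cpu cookie that must go through the per-cpu accessors rather than being dereferenced directly. A minimal sketch of allocation, a preemption-safe update, and release, assuming the this_cpu accessors of this kernel generation (struct foo_stats is illustrative):

        #include <linux/percpu.h>

        struct foo_stats {
                unsigned long   events;
        };

        static struct foo_stats __percpu *foo_alloc_stats(void)
        {
                return alloc_percpu(struct foo_stats);  /* zeroed per cpu */
        }

        static void foo_count_event(struct foo_stats __percpu *stats)
        {
                this_cpu_inc(stats->events);            /* preemption-safe */
        }

        static void foo_free_stats(struct foo_stats __percpu *stats)
        {
                if (stats != NULL)
                        free_percpu(stats);
        }
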
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 375f0fa..84d83be 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -724,8 +724,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
if (p->o_arg.seqid == NULL)
goto err_free;
- p->path.mnt = mntget(path->mnt);
- p->path.dentry = dget(path->dentry);
+ path_get(path);
+ p->path = *path;
p->dir = parent;
p->owner = sp;
atomic_inc(&sp->so_count);
@@ -1944,8 +1944,8 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
calldata->res.seqid = calldata->arg.seqid;
calldata->res.server = server;
calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
- calldata->path.mnt = mntget(path->mnt);
- calldata->path.dentry = dget(path->dentry);
+ path_get(path);
+ calldata->path = *path;
msg.rpc_argp = &calldata->arg,
msg.rpc_resp = &calldata->res,
diff --git a/fs/nfsctl.c b/fs/nfsctl.c
index d3854d9..bf9cbd2 100644
--- a/fs/nfsctl.c
+++ b/fs/nfsctl.c
@@ -36,10 +36,9 @@ static struct file *do_open(char *name, int flags)
return ERR_PTR(error);
if (flags == O_RDWR)
- error = may_open(&nd.path, MAY_READ|MAY_WRITE,
- FMODE_READ|FMODE_WRITE);
+ error = may_open(&nd.path, MAY_READ|MAY_WRITE, flags);
else
- error = may_open(&nd.path, MAY_WRITE, FMODE_WRITE);
+ error = may_open(&nd.path, MAY_WRITE, flags);
if (!error)
return dentry_open(nd.path.dentry, nd.path.mnt, flags,
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index a8587e9..bbf72d8 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2121,9 +2121,15 @@ out_acl:
* and this is the root of a cross-mounted filesystem.
*/
if (ignore_crossmnt == 0 &&
- exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) {
- err = vfs_getattr(exp->ex_path.mnt->mnt_parent,
- exp->ex_path.mnt->mnt_mountpoint, &stat);
+ dentry == exp->ex_path.mnt->mnt_root) {
+ struct path path = exp->ex_path;
+ path_get(&path);
+ while (follow_up(&path)) {
+ if (path.dentry != path.mnt->mnt_root)
+ break;
+ }
+ err = vfs_getattr(path.mnt, path.dentry, &stat);
+ path_put(&path);
if (err)
goto out_nfserr;
}
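
The rewritten crossmnt case climbs upward instead of reaching into mnt_parent/mnt_mountpoint directly: follow_up() replaces the path with the mountpoint in the parent mount and returns 0 at the topmost mount, and the loop keeps climbing while it is still standing on a mount root, so mounts stacked on the same mountpoint are all stepped over. The same climb in isolation, as a hedged sketch with a hypothetical helper name:

        static void foo_climb_to_mountpoint(struct path *path)
        {
                while (follow_up(path)) {
                        if (path->dentry != path->mnt->mnt_root)
                                break;  /* reached an ordinary dentry */
                }
        }
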
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 09e9fc0..8eca17d 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -360,7 +360,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
* If we are changing the size of the file, then
* we need to break all leases.
*/
- host_err = break_lease(inode, FMODE_WRITE | O_NONBLOCK);
+ host_err = break_lease(inode, O_WRONLY | O_NONBLOCK);
if (host_err == -EWOULDBLOCK)
host_err = -ETIMEDOUT;
if (host_err) /* ENOMEM or EWOULDBLOCK */
@@ -732,7 +732,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
* Check to see if there are any leases on this file.
* This may block while leases are broken.
*/
- host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? FMODE_WRITE : 0));
+ host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
if (host_err == -EWOULDBLOCK)
host_err = -ETIMEDOUT;
if (host_err) /* NOMEM or WOULDBLOCK */
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 187dd07..9d1e5de 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -388,8 +388,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
ret = -ENOENT;
goto out;
}
- if (blocknrp != NULL)
- *blocknrp = blocknr;
+ *blocknrp = blocknr;
out:
kunmap_atomic(kaddr, KM_USER0);
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 76d803e..0092840 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -224,7 +224,7 @@ fail:
* len <= NILFS_NAME_LEN and de != NULL are guaranteed by caller.
*/
static int
-nilfs_match(int len, const char * const name, struct nilfs_dir_entry *de)
+nilfs_match(int len, const unsigned char *name, struct nilfs_dir_entry *de)
{
if (len != de->name_len)
return 0;
@@ -349,11 +349,11 @@ done:
* Entry is guaranteed to be valid.
*/
struct nilfs_dir_entry *
-nilfs_find_entry(struct inode *dir, struct dentry *dentry,
+nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
struct page **res_page)
{
- const char *name = dentry->d_name.name;
- int namelen = dentry->d_name.len;
+ const unsigned char *name = qstr->name;
+ int namelen = qstr->len;
unsigned reclen = NILFS_DIR_REC_LEN(namelen);
unsigned long start, n;
unsigned long npages = dir_pages(dir);
@@ -424,13 +424,13 @@ struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
return de;
}
-ino_t nilfs_inode_by_name(struct inode *dir, struct dentry *dentry)
+ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
{
ino_t res = 0;
struct nilfs_dir_entry *de;
struct page *page;
- de = nilfs_find_entry(dir, dentry, &page);
+ de = nilfs_find_entry(dir, qstr, &page);
if (de) {
res = le64_to_cpu(de->inode);
kunmap(page);
@@ -465,7 +465,7 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
int nilfs_add_link(struct dentry *dentry, struct inode *inode)
{
struct inode *dir = dentry->d_parent->d_inode;
- const char *name = dentry->d_name.name;
+ const unsigned char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
unsigned chunk_size = nilfs_chunk_size(dir);
unsigned reclen = NILFS_DIR_REC_LEN(namelen);
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index d6b2b83..313d0a2 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -26,6 +26,7 @@
#include <linux/capability.h> /* capable() */
#include <linux/uaccess.h> /* copy_from_user(), copy_to_user() */
#include <linux/vmalloc.h>
+#include <linux/mount.h> /* mnt_want_write(), mnt_drop_write() */
#include <linux/nilfs2_fs.h>
#include "nilfs.h"
#include "segment.h"
@@ -107,20 +108,28 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+
+ ret = mnt_want_write(filp->f_path.mnt);
+ if (ret)
+ return ret;
+
+ ret = -EFAULT;
if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
- return -EFAULT;
+ goto out;
mutex_lock(&nilfs->ns_mount_mutex);
+
nilfs_transaction_begin(inode->i_sb, &ti, 0);
ret = nilfs_cpfile_change_cpmode(
cpfile, cpmode.cm_cno, cpmode.cm_mode);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret < 0))
nilfs_transaction_abort(inode->i_sb);
- mutex_unlock(&nilfs->ns_mount_mutex);
- return ret;
- }
- nilfs_transaction_commit(inode->i_sb); /* never fails */
+ else
+ nilfs_transaction_commit(inode->i_sb); /* never fails */
+
mutex_unlock(&nilfs->ns_mount_mutex);
+out:
+ mnt_drop_write(filp->f_path.mnt);
return ret;
}
@@ -135,16 +144,23 @@ nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+
+ ret = mnt_want_write(filp->f_path.mnt);
+ if (ret)
+ return ret;
+
+ ret = -EFAULT;
if (copy_from_user(&cno, argp, sizeof(cno)))
- return -EFAULT;
+ goto out;
nilfs_transaction_begin(inode->i_sb, &ti, 0);
ret = nilfs_cpfile_delete_checkpoint(cpfile, cno);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret < 0))
nilfs_transaction_abort(inode->i_sb);
- return ret;
- }
- nilfs_transaction_commit(inode->i_sb); /* never fails */
+ else
+ nilfs_transaction_commit(inode->i_sb); /* never fails */
+out:
+ mnt_drop_write(filp->f_path.mnt);
return ret;
}
@@ -496,12 +512,19 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ ret = mnt_want_write(filp->f_path.mnt);
+ if (ret)
+ return ret;
+
+ ret = -EFAULT;
if (copy_from_user(argv, argp, sizeof(argv)))
- return -EFAULT;
+ goto out;
+ ret = -EINVAL;
nsegs = argv[4].v_nmembs;
if (argv[4].v_size != argsz[4])
- return -EINVAL;
+ goto out;
+
/*
* argv[4] points to segment numbers this ioctl cleans. We
* use kmalloc() for its buffer because memory used for the
@@ -509,9 +532,10 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
*/
kbufs[4] = memdup_user((void __user *)(unsigned long)argv[4].v_base,
nsegs * sizeof(__u64));
- if (IS_ERR(kbufs[4]))
- return PTR_ERR(kbufs[4]);
-
+ if (IS_ERR(kbufs[4])) {
+ ret = PTR_ERR(kbufs[4]);
+ goto out;
+ }
nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
for (n = 0; n < 4; n++) {
@@ -563,10 +587,12 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
nilfs_remove_all_gcinode(nilfs);
clear_nilfs_gc_running(nilfs);
- out_free:
+out_free:
while (--n >= 0)
vfree(kbufs[n]);
kfree(kbufs[4]);
+out:
+ mnt_drop_write(filp->f_path.mnt);
return ret;
}
@@ -575,13 +601,17 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
{
__u64 cno;
int ret;
+ struct the_nilfs *nilfs;
ret = nilfs_construct_segment(inode->i_sb);
if (ret < 0)
return ret;
if (argp != NULL) {
- cno = NILFS_SB(inode->i_sb)->s_nilfs->ns_cno - 1;
+ nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
+ down_read(&nilfs->ns_segctor_sem);
+ cno = nilfs->ns_cno - 1;
+ up_read(&nilfs->ns_segctor_sem);
if (copy_to_user(argp, &cno, sizeof(cno)))
return -EFAULT;
}
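
All three ioctls above gain the same bracketing: mnt_want_write() before doing any work, mnt_drop_write() on every exit path, with the error code staged in ret so a single out: label unwinds cleanly. A condensed sketch of the shape (foo_apply() is a stand-in for the real transaction body):

        static int foo_ioctl_modify(struct file *filp, void __user *argp)
        {
                __u64 arg;
                int ret;

                ret = mnt_want_write(filp->f_path.mnt); /* fails on r/o mount */
                if (ret)
                        return ret;

                ret = -EFAULT;
                if (copy_from_user(&arg, argp, sizeof(arg)))
                        goto out;

                ret = foo_apply(filp->f_path.dentry->d_inode, arg);
        out:
                mnt_drop_write(filp->f_path.mnt);       /* balanced on all paths */
                return ret;
        }
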
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 07ba838..ad6ed2c 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -67,7 +67,7 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
if (dentry->d_name.len > NILFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
- ino = nilfs_inode_by_name(dir, dentry);
+ ino = nilfs_inode_by_name(dir, &dentry->d_name);
inode = NULL;
if (ino) {
inode = nilfs_iget(dir->i_sb, ino);
@@ -81,10 +81,7 @@ struct dentry *nilfs_get_parent(struct dentry *child)
{
unsigned long ino;
struct inode *inode;
- struct dentry dotdot;
-
- dotdot.d_name.name = "..";
- dotdot.d_name.len = 2;
+ struct qstr dotdot = {.name = "..", .len = 2};
ino = nilfs_inode_by_name(child->d_inode, &dotdot);
if (!ino)
@@ -296,7 +293,7 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
int err;
err = -ENOENT;
- de = nilfs_find_entry(dir, dentry, &page);
+ de = nilfs_find_entry(dir, &dentry->d_name, &page);
if (!de)
goto out;
@@ -389,7 +386,7 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
return err;
err = -ENOENT;
- old_de = nilfs_find_entry(old_dir, old_dentry, &old_page);
+ old_de = nilfs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_de)
goto out;
@@ -409,7 +406,7 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out_dir;
err = -ENOENT;
- new_de = nilfs_find_entry(new_dir, new_dentry, &new_page);
+ new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
if (!new_de)
goto out_dir;
inc_nlink(old_inode);
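
Taking a const struct qstr rather than a dentry is what lets nilfs_get_parent() above drop its fake on-stack dentry: a lookup needs only the name and its length. Generic sketch of the resulting caller (foo_inode_by_name() is a hypothetical stand-in for the real lookup):

        static ino_t foo_parent_ino(struct inode *dir)
        {
                struct qstr dotdot = { .name = "..", .len = 2 };

                return foo_inode_by_name(dir, &dotdot);
        }
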
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 4da6f67..8723e5b 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -217,10 +217,10 @@ static inline int nilfs_init_acl(struct inode *inode, struct inode *dir)
/* dir.c */
extern int nilfs_add_link(struct dentry *, struct inode *);
-extern ino_t nilfs_inode_by_name(struct inode *, struct dentry *);
+extern ino_t nilfs_inode_by_name(struct inode *, const struct qstr *);
extern int nilfs_make_empty(struct inode *, struct inode *);
extern struct nilfs_dir_entry *
-nilfs_find_entry(struct inode *, struct dentry *, struct page **);
+nilfs_find_entry(struct inode *, const struct qstr *, struct page **);
extern int nilfs_delete_entry(struct nilfs_dir_entry *, struct page *);
extern int nilfs_empty_dir(struct inode *);
extern struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct page **);
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index c9c96c7..017bedc 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -39,7 +39,6 @@ enum {
NILFS_SEG_FAIL_IO,
NILFS_SEG_FAIL_MAGIC,
NILFS_SEG_FAIL_SEQ,
- NILFS_SEG_FAIL_CHECKSUM_SEGSUM,
NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,
NILFS_SEG_FAIL_CHECKSUM_FULL,
NILFS_SEG_FAIL_CONSISTENCY,
@@ -71,10 +70,6 @@ static int nilfs_warn_segment_error(int err)
printk(KERN_WARNING
"NILFS warning: Sequence number mismatch\n");
break;
- case NILFS_SEG_FAIL_CHECKSUM_SEGSUM:
- printk(KERN_WARNING
- "NILFS warning: Checksum error in segment summary\n");
- break;
case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
printk(KERN_WARNING
"NILFS warning: Checksum error in super root\n");
@@ -206,19 +201,15 @@ int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block,
* @pseg_start: start disk block number of partial segment
* @seg_seq: sequence number requested
* @ssi: pointer to nilfs_segsum_info struct to store information
- * @full_check: full check flag
- * (0: only checks segment summary CRC, 1: data CRC)
*/
static int
load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start,
- u64 seg_seq, struct nilfs_segsum_info *ssi,
- int full_check)
+ u64 seg_seq, struct nilfs_segsum_info *ssi)
{
struct buffer_head *bh_sum;
struct nilfs_segment_summary *sum;
- unsigned long offset, nblock;
- u64 check_bytes;
- u32 crc, crc_sum;
+ unsigned long nblock;
+ u32 crc;
int ret = NILFS_SEG_FAIL_IO;
bh_sum = sb_bread(sbi->s_super, pseg_start);
@@ -237,34 +228,24 @@ load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start,
ret = NILFS_SEG_FAIL_SEQ;
goto failed;
}
- if (full_check) {
- offset = sizeof(sum->ss_datasum);
- check_bytes =
- ((u64)ssi->nblocks << sbi->s_super->s_blocksize_bits);
- nblock = ssi->nblocks;
- crc_sum = le32_to_cpu(sum->ss_datasum);
- ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
- } else { /* only checks segment summary */
- offset = sizeof(sum->ss_datasum) + sizeof(sum->ss_sumsum);
- check_bytes = ssi->sumbytes;
- nblock = ssi->nsumblk;
- crc_sum = le32_to_cpu(sum->ss_sumsum);
- ret = NILFS_SEG_FAIL_CHECKSUM_SEGSUM;
- }
+ nblock = ssi->nblocks;
if (unlikely(nblock == 0 ||
nblock > sbi->s_nilfs->ns_blocks_per_segment)) {
/* This limits the number of blocks read in the CRC check */
ret = NILFS_SEG_FAIL_CONSISTENCY;
goto failed;
}
- if (calc_crc_cont(sbi, bh_sum, &crc, offset, check_bytes,
+ if (calc_crc_cont(sbi, bh_sum, &crc, sizeof(sum->ss_datasum),
+ ((u64)nblock << sbi->s_super->s_blocksize_bits),
pseg_start, nblock)) {
ret = NILFS_SEG_FAIL_IO;
goto failed;
}
- if (crc == crc_sum)
+ if (crc == le32_to_cpu(sum->ss_datasum))
ret = 0;
+ else
+ ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
failed:
brelse(bh_sum);
out:
@@ -598,7 +579,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {
- ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1);
+ ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
if (ret) {
if (ret == NILFS_SEG_FAIL_IO) {
err = -EIO;
@@ -821,7 +802,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
for (;;) {
/* Load segment summary */
- ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1);
+ ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
if (ret) {
if (ret == NILFS_SEG_FAIL_IO)
goto failed;
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 645c786..ab56fe4 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -40,6 +40,11 @@ struct nilfs_write_info {
};
+static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
+ struct the_nilfs *nilfs);
+static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
+
+
static struct kmem_cache *nilfs_segbuf_cachep;
static void nilfs_segbuf_init_once(void *obj)
@@ -302,6 +307,19 @@ void nilfs_truncate_logs(struct list_head *logs,
}
}
+int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs)
+{
+ struct nilfs_segment_buffer *segbuf;
+ int ret = 0;
+
+ list_for_each_entry(segbuf, logs, sb_list) {
+ ret = nilfs_segbuf_write(segbuf, nilfs);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
int nilfs_wait_on_logs(struct list_head *logs)
{
struct nilfs_segment_buffer *segbuf;
diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h
index 6af1630..94dfd35 100644
--- a/fs/nilfs2/segbuf.h
+++ b/fs/nilfs2/segbuf.h
@@ -166,13 +166,10 @@ nilfs_segbuf_add_file_buffer(struct nilfs_segment_buffer *segbuf,
segbuf->sb_sum.nfileblk++;
}
-int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
- struct the_nilfs *nilfs);
-int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
-
void nilfs_clear_logs(struct list_head *logs);
void nilfs_truncate_logs(struct list_head *logs,
struct nilfs_segment_buffer *last);
+int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs);
int nilfs_wait_on_logs(struct list_head *logs);
static inline void nilfs_destroy_logs(struct list_head *logs)
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 105b508..ada2f1b 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1764,14 +1764,9 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
static int nilfs_segctor_write(struct nilfs_sc_info *sci,
struct the_nilfs *nilfs)
{
- struct nilfs_segment_buffer *segbuf;
- int ret = 0;
+ int ret;
- list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
- ret = nilfs_segbuf_write(segbuf, nilfs);
- if (ret)
- break;
- }
+ ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
return ret;
}
@@ -1937,8 +1932,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
struct nilfs_segment_buffer *segbuf;
struct page *bd_page = NULL, *fs_page = NULL;
- struct nilfs_sb_info *sbi = sci->sc_sbi;
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
int update_sr = (sci->sc_super_root != NULL);
list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
@@ -2020,7 +2014,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
if (update_sr) {
nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
- sbi->s_super->s_dirt = 1;
+ set_nilfs_sb_dirty(nilfs);
clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
@@ -2425,43 +2419,43 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
return err;
}
-struct nilfs_segctor_req {
- int mode;
- __u32 seq_accepted;
- int sc_err; /* construction failure */
- int sb_err; /* super block writeback failure */
-};
-
#define FLUSH_FILE_BIT (0x1) /* data file only */
#define FLUSH_DAT_BIT (1 << NILFS_DAT_INO) /* DAT only */
-static void nilfs_segctor_accept(struct nilfs_sc_info *sci,
- struct nilfs_segctor_req *req)
+/**
+ * nilfs_segctor_accept - record accepted sequence count of log-write requests
+ * @sci: segment constructor object
+ */
+static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
- req->sc_err = req->sb_err = 0;
spin_lock(&sci->sc_state_lock);
- req->seq_accepted = sci->sc_seq_request;
+ sci->sc_seq_accepted = sci->sc_seq_request;
spin_unlock(&sci->sc_state_lock);
if (sci->sc_timer)
del_timer_sync(sci->sc_timer);
}
-static void nilfs_segctor_notify(struct nilfs_sc_info *sci,
- struct nilfs_segctor_req *req)
+/**
+ * nilfs_segctor_notify - notify the result of request to caller threads
+ * @sci: segment constructor object
+ * @mode: mode of log forming
+ * @err: error code to be notified
+ */
+static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
/* Clear requests (even when the construction failed) */
spin_lock(&sci->sc_state_lock);
- if (req->mode == SC_LSEG_SR) {
+ if (mode == SC_LSEG_SR) {
sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
- sci->sc_seq_done = req->seq_accepted;
- nilfs_segctor_wakeup(sci, req->sc_err ? : req->sb_err);
+ sci->sc_seq_done = sci->sc_seq_accepted;
+ nilfs_segctor_wakeup(sci, err);
sci->sc_flush_request = 0;
} else {
- if (req->mode == SC_FLUSH_FILE)
+ if (mode == SC_FLUSH_FILE)
sci->sc_flush_request &= ~FLUSH_FILE_BIT;
- else if (req->mode == SC_FLUSH_DAT)
+ else if (mode == SC_FLUSH_DAT)
sci->sc_flush_request &= ~FLUSH_DAT_BIT;
/* re-enable timer if checkpoint creation was not done */
@@ -2472,30 +2466,37 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci,
spin_unlock(&sci->sc_state_lock);
}
-static int nilfs_segctor_construct(struct nilfs_sc_info *sci,
- struct nilfs_segctor_req *req)
+/**
+ * nilfs_segctor_construct - form logs and write them to disk
+ * @sci: segment constructor object
+ * @mode: mode of log forming
+ */
+static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
struct nilfs_sb_info *sbi = sci->sc_sbi;
struct the_nilfs *nilfs = sbi->s_nilfs;
int err = 0;
+ nilfs_segctor_accept(sci);
+
if (nilfs_discontinued(nilfs))
- req->mode = SC_LSEG_SR;
- if (!nilfs_segctor_confirm(sci)) {
- err = nilfs_segctor_do_construct(sci, req->mode);
- req->sc_err = err;
- }
+ mode = SC_LSEG_SR;
+ if (!nilfs_segctor_confirm(sci))
+ err = nilfs_segctor_do_construct(sci, mode);
+
if (likely(!err)) {
- if (req->mode != SC_FLUSH_DAT)
+ if (mode != SC_FLUSH_DAT)
atomic_set(&nilfs->ns_ndirtyblks, 0);
if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
nilfs_discontinued(nilfs)) {
down_write(&nilfs->ns_sem);
- req->sb_err = nilfs_commit_super(sbi,
- nilfs_altsb_need_update(nilfs));
+ err = nilfs_commit_super(
+ sbi, nilfs_altsb_need_update(nilfs));
up_write(&nilfs->ns_sem);
}
}
+
+ nilfs_segctor_notify(sci, mode, err);
return err;
}
@@ -2526,7 +2527,6 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
struct nilfs_sc_info *sci = NILFS_SC(sbi);
struct the_nilfs *nilfs = sbi->s_nilfs;
struct nilfs_transaction_info ti;
- struct nilfs_segctor_req req = { .mode = SC_LSEG_SR };
int err;
if (unlikely(!sci))
@@ -2547,10 +2547,8 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
for (;;) {
- nilfs_segctor_accept(sci, &req);
- err = nilfs_segctor_construct(sci, &req);
+ err = nilfs_segctor_construct(sci, SC_LSEG_SR);
nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
- nilfs_segctor_notify(sci, &req);
if (likely(!err))
break;
@@ -2560,6 +2558,16 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(sci->sc_interval);
}
+ if (nilfs_test_opt(sbi, DISCARD)) {
+ int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
+ sci->sc_nfreesegs);
+ if (ret) {
+ printk(KERN_WARNING
+ "NILFS warning: error %d on discard request, "
+ "turning discards off for the device\n", ret);
+ nilfs_clear_opt(sbi, DISCARD);
+ }
+ }
out_unlock:
sci->sc_freesegs = NULL;
@@ -2573,13 +2581,9 @@ static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
struct nilfs_sb_info *sbi = sci->sc_sbi;
struct nilfs_transaction_info ti;
- struct nilfs_segctor_req req = { .mode = mode };
nilfs_transaction_lock(sbi, &ti, 0);
-
- nilfs_segctor_accept(sci, &req);
- nilfs_segctor_construct(sci, &req);
- nilfs_segctor_notify(sci, &req);
+ nilfs_segctor_construct(sci, mode);
/*
* Unclosed segment should be retried. We do this using sc_timer.
@@ -2635,6 +2639,7 @@ static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
static int nilfs_segctor_thread(void *arg)
{
struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
+ struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
struct timer_list timer;
int timeout = 0;
@@ -2680,7 +2685,6 @@ static int nilfs_segctor_thread(void *arg)
} else {
DEFINE_WAIT(wait);
int should_sleep = 1;
- struct the_nilfs *nilfs;
prepare_to_wait(&sci->sc_wait_daemon, &wait,
TASK_INTERRUPTIBLE);
@@ -2701,8 +2705,8 @@ static int nilfs_segctor_thread(void *arg)
finish_wait(&sci->sc_wait_daemon, &wait);
timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
time_after_eq(jiffies, sci->sc_timer->expires));
- nilfs = sci->sc_sbi->s_nilfs;
- if (sci->sc_super->s_dirt && nilfs_sb_need_update(nilfs))
+
+ if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
set_nilfs_discontinued(nilfs);
}
goto loop;
@@ -2797,12 +2801,9 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
do {
struct nilfs_sb_info *sbi = sci->sc_sbi;
struct nilfs_transaction_info ti;
- struct nilfs_segctor_req req = { .mode = SC_LSEG_SR };
nilfs_transaction_lock(sbi, &ti, 0);
- nilfs_segctor_accept(sci, &req);
- ret = nilfs_segctor_construct(sci, &req);
- nilfs_segctor_notify(sci, &req);
+ ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
nilfs_transaction_unlock(sbi);
} while (ret && retrycount-- > 0);
@@ -2865,8 +2866,15 @@ int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi)
struct the_nilfs *nilfs = sbi->s_nilfs;
int err;
- /* Each field of nilfs_segctor is cleared through the initialization
- of super-block info */
+ if (NILFS_SC(sbi)) {
+ /*
+ * This happens if the filesystem was remounted
+ * read/write after nilfs_error degenerated it into a
+ * read-only mount.
+ */
+ nilfs_detach_segment_constructor(sbi);
+ }
+
sbi->s_sc_info = nilfs_segctor_new(sbi);
if (!sbi->s_sc_info)
return -ENOMEM;
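
The segment.c rework above retires struct nilfs_segctor_req: the accepted sequence number moves into nilfs_sc_info as sc_seq_accepted, and every accept/construct/notify triple at the call sites collapses into a single nilfs_segctor_construct(sci, mode) call. The resulting call shape, sketched generically with hypothetical foo_* helpers:

        static int foo_construct(struct foo_sc_info *sci, int mode)
        {
                int err;

                foo_accept(sci);                /* latch the request sequence */
                err = foo_do_construct(sci, mode);
                foo_notify(sci, mode, err);     /* wake waiters, re-arm timer */
                return err;
        }
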
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 3d3ab2f..3155e0c 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -116,6 +116,7 @@ struct nilfs_segsum_pointer {
* @sc_wait_daemon: Daemon wait queue
* @sc_wait_task: Start/end wait queue to control segctord task
* @sc_seq_request: Request counter
+ * @sc_seq_accepted: Accepted request count
* @sc_seq_done: Completion counter
* @sc_sync: Request of explicit sync operation
* @sc_interval: Timeout value of background construction
@@ -169,6 +170,7 @@ struct nilfs_sc_info {
wait_queue_head_t sc_wait_task;
__u32 sc_seq_request;
+ __u32 sc_seq_accepted;
__u32 sc_seq_done;
int sc_sync;
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 8173fae..92579cc 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -96,9 +96,6 @@ void nilfs_error(struct super_block *sb, const char *function,
if (!(sb->s_flags & MS_RDONLY)) {
struct the_nilfs *nilfs = sbi->s_nilfs;
- if (!nilfs_test_opt(sbi, ERRORS_CONT))
- nilfs_detach_segment_constructor(sbi);
-
down_write(&nilfs->ns_sem);
if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) {
nilfs->ns_mount_state |= NILFS_ERROR_FS;
@@ -301,7 +298,7 @@ int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb)
memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
nilfs->ns_sbwtime[1] = t;
}
- sbi->s_super->s_dirt = 0;
+ clear_nilfs_sb_dirty(nilfs);
return nilfs_sync_super(sbi, dupsb);
}
@@ -345,7 +342,7 @@ static int nilfs_sync_fs(struct super_block *sb, int wait)
err = nilfs_construct_segment(sb);
down_write(&nilfs->ns_sem);
- if (sb->s_dirt)
+ if (nilfs_sb_dirty(nilfs))
nilfs_commit_super(sbi, 1);
up_write(&nilfs->ns_sem);
@@ -481,6 +478,8 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_printf(seq, ",order=strict");
if (nilfs_test_opt(sbi, NORECOVERY))
seq_printf(seq, ",norecovery");
+ if (nilfs_test_opt(sbi, DISCARD))
+ seq_printf(seq, ",discard");
return 0;
}
@@ -550,7 +549,7 @@ static const struct export_operations nilfs_export_ops = {
enum {
Opt_err_cont, Opt_err_panic, Opt_err_ro,
Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery,
- Opt_err,
+ Opt_discard, Opt_err,
};
static match_table_t tokens = {
@@ -561,6 +560,7 @@ static match_table_t tokens = {
{Opt_snapshot, "cp=%u"},
{Opt_order, "order=%s"},
{Opt_norecovery, "norecovery"},
+ {Opt_discard, "discard"},
{Opt_err, NULL}
};
@@ -614,6 +614,9 @@ static int parse_options(char *options, struct super_block *sb)
case Opt_norecovery:
nilfs_set_opt(sbi, NORECOVERY);
break;
+ case Opt_discard:
+ nilfs_set_opt(sbi, DISCARD);
+ break;
default:
printk(KERN_ERR
"NILFS: Unrecognized mount option \"%s\"\n", p);
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 6241e17..92733d5 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -646,6 +646,44 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
goto out;
}
+int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
+ size_t nsegs)
+{
+ sector_t seg_start, seg_end;
+ sector_t start = 0, nblocks = 0;
+ unsigned int sects_per_block;
+ __u64 *sn;
+ int ret = 0;
+
+ sects_per_block = (1 << nilfs->ns_blocksize_bits) /
+ bdev_logical_block_size(nilfs->ns_bdev);
+ for (sn = segnump; sn < segnump + nsegs; sn++) {
+ nilfs_get_segment_range(nilfs, *sn, &seg_start, &seg_end);
+
+ if (!nblocks) {
+ start = seg_start;
+ nblocks = seg_end - seg_start + 1;
+ } else if (start + nblocks == seg_start) {
+ nblocks += seg_end - seg_start + 1;
+ } else {
+ ret = blkdev_issue_discard(nilfs->ns_bdev,
+ start * sects_per_block,
+ nblocks * sects_per_block,
+ GFP_NOFS,
+ DISCARD_FL_BARRIER);
+ if (ret < 0)
+ return ret;
+ nblocks = 0;
+ }
+ }
+ if (nblocks)
+ ret = blkdev_issue_discard(nilfs->ns_bdev,
+ start * sects_per_block,
+ nblocks * sects_per_block,
+ GFP_NOFS, DISCARD_FL_BARRIER);
+ return ret;
+}
+
int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
{
struct inode *dat = nilfs_dat_inode(nilfs);
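
nilfs_discard_segments() batches its work: consecutive segments are merged into one extent so blkdev_issue_discard() fires once per contiguous run rather than once per segment, with block counts scaled to device sectors via sects_per_block. An illustrative user-space rendering of the run-merging idea, not the exact kernel loop (this version also opens a fresh run at each discontinuity; issue_discard() is a stand-in):

        #include <stdio.h>

        static void issue_discard(unsigned long start, unsigned long len)
        {
                printf("discard blocks [%lu, %lu)\n", start, start + len);
        }

        static void discard_ranges(const unsigned long (*seg)[2], int nsegs)
        {
                unsigned long start = 0, nblocks = 0;
                int i;

                for (i = 0; i < nsegs; i++) {
                        unsigned long s = seg[i][0], e = seg[i][1];

                        if (nblocks && start + nblocks == s) {
                                nblocks += e - s + 1;   /* extend current run */
                                continue;
                        }
                        if (nblocks)
                                issue_discard(start, nblocks);
                        start = s;
                        nblocks = e - s + 1;
                }
                if (nblocks)
                        issue_discard(start, nblocks);  /* trailing run */
        }
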
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 589786e..e9795f1 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -38,6 +38,7 @@ enum {
the latest checkpoint was loaded */
THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */
THE_NILFS_GC_RUNNING, /* gc process is running */
+ THE_NILFS_SB_DIRTY, /* super block is dirty */
};
/**
@@ -197,6 +198,7 @@ THE_NILFS_FNS(INIT, init)
THE_NILFS_FNS(LOADED, loaded)
THE_NILFS_FNS(DISCONTINUED, discontinued)
THE_NILFS_FNS(GC_RUNNING, gc_running)
+THE_NILFS_FNS(SB_DIRTY, sb_dirty)
/* Minimum interval of periodical update of superblocks (in seconds) */
#define NILFS_SB_FREQ 10
@@ -221,6 +223,7 @@ struct the_nilfs *find_or_create_nilfs(struct block_device *);
void put_nilfs(struct the_nilfs *);
int init_nilfs(struct the_nilfs *, struct nilfs_sb_info *, char *);
int load_nilfs(struct the_nilfs *, struct nilfs_sb_info *);
+int nilfs_discard_segments(struct the_nilfs *, __u64 *, size_t);
int nilfs_count_free_blocks(struct the_nilfs *, sector_t *);
struct nilfs_sb_info *nilfs_find_sbinfo(struct the_nilfs *, int, __u64);
int nilfs_checkpoint_is_mounted(struct the_nilfs *, __u64, int);
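
THE_NILFS_FNS(SB_DIRTY, sb_dirty) expands to the set_/clear_/test-style helpers used in the super.c hunks above (set_nilfs_sb_dirty(), nilfs_sb_dirty(), and so on). The generator pattern, sketched for a hypothetical struct foo:

        struct foo {
                unsigned long flags;
        };

        enum { FOO_SB_DIRTY };          /* bit numbers */

        #define FOO_FNS(bit, name)                                      \
        static inline void set_foo_##name(struct foo *f)                \
        {                                                               \
                set_bit(FOO_##bit, &f->flags);                          \
        }                                                               \
        static inline void clear_foo_##name(struct foo *f)              \
        {                                                               \
                clear_bit(FOO_##bit, &f->flags);                        \
        }                                                               \
        static inline int foo_##name(struct foo *f)                     \
        {                                                               \
                return test_bit(FOO_##bit, &f->flags);                  \
        }

        FOO_FNS(SB_DIRTY, sb_dirty)     /* yields foo_sb_dirty() etc. */
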
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index a94e8bd..472cdf2 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -29,14 +29,12 @@
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
-#include <linux/magic.h> /* superblock magic number */
-#include <linux/mount.h> /* mntget */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
-#include <linux/path.h> /* struct path */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
+#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
@@ -45,8 +43,6 @@
#include <asm/ioctls.h>
-static struct vfsmount *inotify_mnt __read_mostly;
-
/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
@@ -645,9 +641,7 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
{
struct fsnotify_group *group;
struct user_struct *user;
- struct file *filp;
- struct path path;
- int fd, ret;
+ int ret;
/* Check the IN_* constants for consistency. */
BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
@@ -656,10 +650,6 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
return -EINVAL;
- fd = get_unused_fd_flags(flags & O_CLOEXEC);
- if (fd < 0)
- return fd;
-
user = get_current_user();
if (unlikely(atomic_read(&user->inotify_devs) >=
inotify_max_user_instances)) {
@@ -676,27 +666,14 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
atomic_inc(&user->inotify_devs);
- path.mnt = inotify_mnt;
- path.dentry = inotify_mnt->mnt_root;
- path_get(&path);
- filp = alloc_file(&path, FMODE_READ, &inotify_fops);
- if (!filp)
- goto Enfile;
+ ret = anon_inode_getfd("inotify", &inotify_fops, group,
+ O_RDONLY | flags);
+ if (ret >= 0)
+ return ret;
- filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
- filp->private_data = group;
-
- fd_install(fd, filp);
-
- return fd;
-
-Enfile:
- ret = -ENFILE;
- path_put(&path);
atomic_dec(&user->inotify_devs);
out_free_uid:
free_uid(user);
- put_unused_fd(fd);
return ret;
}
@@ -783,20 +760,6 @@ out:
return ret;
}
-static int
-inotify_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data, struct vfsmount *mnt)
-{
- return get_sb_pseudo(fs_type, "inotify", NULL,
- INOTIFYFS_SUPER_MAGIC, mnt);
-}
-
-static struct file_system_type inotify_fs_type = {
- .name = "inotifyfs",
- .get_sb = inotify_get_sb,
- .kill_sb = kill_anon_super,
-};
-
/*
* inotify_user_setup - Our initialization function. Note that we cannot return
* error because we have compiled-in VFS hooks. So an (unlikely) failure here
@@ -804,16 +767,6 @@ static struct file_system_type inotify_fs_type = {
*/
static int __init inotify_user_setup(void)
{
- int ret;
-
- ret = register_filesystem(&inotify_fs_type);
- if (unlikely(ret))
- panic("inotify: register_filesystem returned %d!\n", ret);
-
- inotify_mnt = kern_mount(&inotify_fs_type);
- if (IS_ERR(inotify_mnt))
- panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));
-
inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
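
The inotify conversion replaces an entire private pseudo filesystem (inotify_fs_type, kern_mount, alloc_file, fd_install) with one anon_inode_getfd() call, which allocates the descriptor, the file, and the fd installation in a single step and leaves nothing to unwind on success. The shape of the call, as a hedged sketch (foo_fops and struct foo_group are stand-ins):

        #include <linux/anon_inodes.h>

        /* On failure no fd or file was created, so the caller only has
         * to undo its own state. */
        static int foo_create_fd(struct foo_group *group, int flags)
        {
                return anon_inode_getfd("foo", &foo_fops, group,
                                        O_RDONLY | flags);
        }
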
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index 5a9e344..9173e82 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -1545,7 +1545,7 @@ static int ntfs_dir_fsync(struct file *filp, struct dentry *dentry,
write_inode_now(bmp_vi, !datasync);
iput(bmp_vi);
}
- ret = ntfs_write_inode(vi, 1);
+ ret = __ntfs_write_inode(vi, 1);
write_inode_now(vi, !datasync);
err = sync_blockdev(vi->i_sb->s_bdev);
if (unlikely(err && !ret))
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 43179dd..b681c71 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -2182,7 +2182,7 @@ static int ntfs_file_fsync(struct file *filp, struct dentry *dentry,
ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
BUG_ON(S_ISDIR(vi->i_mode));
if (!datasync || !NInoNonResident(NTFS_I(vi)))
- ret = ntfs_write_inode(vi, 1);
+ ret = __ntfs_write_inode(vi, 1);
write_inode_now(vi, !datasync);
/*
* NOTE: If we were to use mapping->private_list (see ext2 and
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index dc2505a..4b57fb1 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -2957,7 +2957,7 @@ out:
*
* Return 0 on success and -errno on error.
*/
-int ntfs_write_inode(struct inode *vi, int sync)
+int __ntfs_write_inode(struct inode *vi, int sync)
{
sle64 nt;
ntfs_inode *ni = NTFS_I(vi);
diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h
index 117eaf8..9a11354 100644
--- a/fs/ntfs/inode.h
+++ b/fs/ntfs/inode.h
@@ -307,12 +307,12 @@ extern void ntfs_truncate_vfs(struct inode *vi);
extern int ntfs_setattr(struct dentry *dentry, struct iattr *attr);
-extern int ntfs_write_inode(struct inode *vi, int sync);
+extern int __ntfs_write_inode(struct inode *vi, int sync);
static inline void ntfs_commit_inode(struct inode *vi)
{
if (!is_bad_inode(vi))
- ntfs_write_inode(vi, 1);
+ __ntfs_write_inode(vi, 1);
return;
}
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 80b0477..1cf39df 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -39,6 +39,7 @@
#include "dir.h"
#include "debug.h"
#include "index.h"
+#include "inode.h"
#include "aops.h"
#include "layout.h"
#include "malloc.h"
@@ -2662,6 +2663,13 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
return 0;
}
+#ifdef NTFS_RW
+static int ntfs_write_inode(struct inode *vi, struct writeback_control *wbc)
+{
+ return __ntfs_write_inode(vi, wbc->sync_mode == WB_SYNC_ALL);
+}
+#endif
+
/**
* The complete super operations.
*/
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index 600d2d2..791c088 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -46,6 +46,7 @@ ocfs2_stackglue-objs := stackglue.o
ocfs2_stack_o2cb-objs := stack_o2cb.o
ocfs2_stack_user-objs := stack_user.o
+obj-$(CONFIG_OCFS2_FS) += dlmfs/
# cluster/ is always needed when OCFS2_FS for masklog support
obj-$(CONFIG_OCFS2_FS) += cluster/
obj-$(CONFIG_OCFS2_FS_O2CB) += dlm/
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 20538dd..9f8bd91 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1050,7 +1050,8 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle,
strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
eb->h_blkno = cpu_to_le64(first_blkno);
eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
- eb->h_suballoc_slot = cpu_to_le16(osb->slot_num);
+ eb->h_suballoc_slot =
+ cpu_to_le16(meta_ac->ac_alloc_slot);
eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
eb->h_list.l_count =
cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));
@@ -6037,7 +6038,7 @@ static void ocfs2_truncate_log_worker(struct work_struct *work)
if (status < 0)
mlog_errno(status);
else
- ocfs2_init_inode_steal_slot(osb);
+ ocfs2_init_steal_slots(osb);
mlog_exit(status);
}
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 7d04c17..21441dd 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -577,8 +577,9 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
goto bail;
}
- /* We should already CoW the refcounted extent. */
- BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
+ /* We should already CoW the refcounted extent in case of create. */
+ BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED));
+
/*
* get_more_blocks() expects us to describe a hole by clearing
* the mapped bit on bh_result().
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
index 1cd2934..b39da87 100644
--- a/fs/ocfs2/cluster/masklog.c
+++ b/fs/ocfs2/cluster/masklog.c
@@ -112,6 +112,7 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
define_mask(XATTR),
define_mask(QUOTA),
define_mask(REFCOUNT),
+ define_mask(BASTS),
define_mask(ERROR),
define_mask(NOTICE),
define_mask(KTHREAD),
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 9b4d117..3dfddbe 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -114,6 +114,7 @@
#define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */
#define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */
#define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */
+#define ML_BASTS 0x0000001000000000ULL /* dlmglue asts and basts */
/* bits that are infrequently given and frequently matched in the high word */
#define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */
#define ML_NOTICE 0x0000000200000000ULL /* sent to KERN_NOTICE */
@@ -194,9 +195,9 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;
* previous token if args expands to nothing.
*/
#define __mlog_printk(level, fmt, args...) \
- printk(level "(%u,%lu):%s:%d " fmt, task_pid_nr(current), \
- __mlog_cpu_guess, __PRETTY_FUNCTION__, __LINE__ , \
- ##args)
+ printk(level "(%s,%u,%lu):%s:%d " fmt, current->comm, \
+ task_pid_nr(current), __mlog_cpu_guess, \
+ __PRETTY_FUNCTION__, __LINE__ , ##args)
#define mlog(mask, fmt, args...) do { \
u64 __m = MLOG_MASK_PREFIX | (mask); \
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index a63ea4d..efd77d0 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2439,7 +2439,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
memset(dx_root, 0, osb->sb->s_blocksize);
strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
- dx_root->dr_suballoc_slot = cpu_to_le16(osb->slot_num);
+ dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation);
dx_root->dr_blkno = cpu_to_le64(dr_blkno);
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
index 1903613..dcebf0d 100644
--- a/fs/ocfs2/dlm/Makefile
+++ b/fs/ocfs2/dlm/Makefile
@@ -1,8 +1,7 @@
EXTRA_CFLAGS += -Ifs/ocfs2
-obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o ocfs2_dlmfs.o
+obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \
dlmmaster.o dlmast.o dlmconvert.o dlmlock.o dlmunlock.o dlmver.o
-ocfs2_dlmfs-objs := userdlm.o dlmfs.o dlmfsver.o
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 344bcf9..b4f99de 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -310,7 +310,7 @@ static int dlm_recovery_thread(void *data)
mlog(0, "dlm thread running for %s...\n", dlm->name);
while (!kthread_should_stop()) {
- if (dlm_joined(dlm)) {
+ if (dlm_domain_fully_joined(dlm)) {
status = dlm_do_recovery(dlm);
if (status == -EAGAIN) {
/* do not sleep, recheck immediately. */
diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile
new file mode 100644
index 0000000..df69b48
--- /dev/null
+++ b/fs/ocfs2/dlmfs/Makefile
@@ -0,0 +1,5 @@
+EXTRA_CFLAGS += -Ifs/ocfs2
+
+obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
+
+ocfs2_dlmfs-objs := userdlm.o dlmfs.o dlmfsver.o
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 02bf178..1b0de15 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -43,24 +43,17 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
+#include <linux/poll.h>
#include <asm/uaccess.h>
-
-#include "cluster/nodemanager.h"
-#include "cluster/heartbeat.h"
-#include "cluster/tcp.h"
-
-#include "dlmapi.h"
-
+#include "stackglue.h"
#include "userdlm.h"
-
#include "dlmfsver.h"
#define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h"
-#include "ocfs2_lockingver.h"
static const struct super_operations dlmfs_ops;
static const struct file_operations dlmfs_file_operations;
@@ -71,15 +64,46 @@ static struct kmem_cache *dlmfs_inode_cache;
struct workqueue_struct *user_dlm_worker;
+
+
/*
- * This is the userdlmfs locking protocol version.
+ * These are the ABI capabilities of dlmfs.
+ *
+ * Over time, dlmfs has added some features that were not part of the
+ * initial ABI. Unfortunately, some of these features are not detectable
+ * via standard usage. For example, Linux's default poll always returns
+ * POLLIN, so there is no way for a caller of poll(2) to know when dlmfs
+ * added poll support. Instead, we provide this list of new capabilities.
+ *
+ * Capabilities is a read-only attribute. We do it as a module parameter
+ * so we can discover it whether dlmfs is built in, loaded, or even not
+ * loaded.
*
- * See fs/ocfs2/dlmglue.c for more details on locking versions.
+ * The ABI features are local to this machine's dlmfs mount. This is
+ * distinct from the locking protocol, which is concerned with inter-node
+ * interaction.
+ *
+ * Capabilities:
+ * - bast : POLLIN against the file descriptor of a held lock
+ * signifies a bast fired on the lock.
*/
-static const struct dlm_protocol_version user_locking_protocol = {
- .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
- .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
-};
+#define DLMFS_CAPABILITIES "bast stackglue"
+extern int param_set_dlmfs_capabilities(const char *val,
+ struct kernel_param *kp)
+{
+ printk(KERN_ERR "%s: readonly parameter\n", kp->name);
+ return -EINVAL;
+}
+static int param_get_dlmfs_capabilities(char *buffer,
+ struct kernel_param *kp)
+{
+ return strlcpy(buffer, DLMFS_CAPABILITIES,
+ strlen(DLMFS_CAPABILITIES) + 1);
+}
+module_param_call(capabilities, param_set_dlmfs_capabilities,
+ param_get_dlmfs_capabilities, NULL, 0444);
+MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES);
+
/*
* decodes a set of open flags into a valid lock level and a set of flags.
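
The capabilities attribute is exported as a module parameter precisely so it is queryable under /sys/module/.../parameters whether or not anything is mounted; the setter unconditionally fails, making it read-only. The same trick in miniature (FOO_CAPS and the foo_caps_* names are illustrative):

        #include <linux/moduleparam.h>

        #define FOO_CAPS "bast stackglue"

        static int foo_caps_set(const char *val, struct kernel_param *kp)
        {
                return -EINVAL;         /* read-only */
        }

        static int foo_caps_get(char *buffer, struct kernel_param *kp)
        {
                return strlcpy(buffer, FOO_CAPS, strlen(FOO_CAPS) + 1);
        }

        module_param_call(capabilities, foo_caps_set, foo_caps_get,
                          NULL, 0444);
        MODULE_PARM_DESC(capabilities, FOO_CAPS);
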
@@ -179,13 +203,46 @@ static int dlmfs_file_release(struct inode *inode,
return 0;
}
+/*
+ * We do ->setattr() just to override size changes. Our size is the size
+ * of the LVB and nothing else.
+ */
+static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ int error;
+ struct inode *inode = dentry->d_inode;
+
+ attr->ia_valid &= ~ATTR_SIZE;
+ error = inode_change_ok(inode, attr);
+ if (!error)
+ error = inode_setattr(inode, attr);
+
+ return error;
+}
+
+static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
+{
+ int event = 0;
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct dlmfs_inode_private *ip = DLMFS_I(inode);
+
+ poll_wait(file, &ip->ip_lockres.l_event, wait);
+
+ spin_lock(&ip->ip_lockres.l_lock);
+ if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
+ event = POLLIN | POLLRDNORM;
+ spin_unlock(&ip->ip_lockres.l_lock);
+
+ return event;
+}
+
static ssize_t dlmfs_file_read(struct file *filp,
char __user *buf,
size_t count,
loff_t *ppos)
{
int bytes_left;
- ssize_t readlen;
+ ssize_t readlen, got;
char *lvb_buf;
struct inode *inode = filp->f_path.dentry->d_inode;
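
dlmfs_file_poll() above follows the standard ->poll() contract: register on the wait queue with poll_wait(), which never blocks, then sample the readiness condition under the lock and return a mask. The skeleton generically, with illustrative foo_* names:

        static unsigned int foo_poll(struct file *file, poll_table *wait)
        {
                struct foo_private *p = file->private_data;
                unsigned int mask = 0;

                poll_wait(file, &p->event_waitq, wait); /* registers only */

                spin_lock(&p->lock);
                if (p->flags & FOO_READABLE)
                        mask = POLLIN | POLLRDNORM;
                spin_unlock(&p->lock);

                return mask;
        }
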
@@ -211,9 +268,13 @@ static ssize_t dlmfs_file_read(struct file *filp,
if (!lvb_buf)
return -ENOMEM;
- user_dlm_read_lvb(inode, lvb_buf, readlen);
- bytes_left = __copy_to_user(buf, lvb_buf, readlen);
- readlen -= bytes_left;
+ got = user_dlm_read_lvb(inode, lvb_buf, readlen);
+ if (got) {
+ BUG_ON(got != readlen);
+ bytes_left = __copy_to_user(buf, lvb_buf, readlen);
+ readlen -= bytes_left;
+ } else
+ readlen = 0;
kfree(lvb_buf);
@@ -272,7 +333,7 @@ static void dlmfs_init_once(void *foo)
struct dlmfs_inode_private *ip =
(struct dlmfs_inode_private *) foo;
- ip->ip_dlm = NULL;
+ ip->ip_conn = NULL;
ip->ip_parent = NULL;
inode_init_once(&ip->ip_vfs_inode);
@@ -314,14 +375,14 @@ static void dlmfs_clear_inode(struct inode *inode)
goto clear_fields;
}
- mlog(0, "we're a directory, ip->ip_dlm = 0x%p\n", ip->ip_dlm);
+ mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);
/* we must be a directory. If required, lets unregister the
* dlm context now. */
- if (ip->ip_dlm)
- user_dlm_unregister_context(ip->ip_dlm);
+ if (ip->ip_conn)
+ user_dlm_unregister(ip->ip_conn);
clear_fields:
ip->ip_parent = NULL;
- ip->ip_dlm = NULL;
+ ip->ip_conn = NULL;
}
static struct backing_dev_info dlmfs_backing_dev_info = {
@@ -371,7 +432,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
ip = DLMFS_I(inode);
- ip->ip_dlm = DLMFS_I(parent)->ip_dlm;
+ ip->ip_conn = DLMFS_I(parent)->ip_conn;
switch (mode & S_IFMT) {
default:
@@ -425,13 +486,12 @@ static int dlmfs_mkdir(struct inode * dir,
struct inode *inode = NULL;
struct qstr *domain = &dentry->d_name;
struct dlmfs_inode_private *ip;
- struct dlm_ctxt *dlm;
- struct dlm_protocol_version proto = user_locking_protocol;
+ struct ocfs2_cluster_connection *conn;
mlog(0, "mkdir %.*s\n", domain->len, domain->name);
/* verify that we have a proper domain */
- if (domain->len >= O2NM_MAX_NAME_LEN) {
+ if (domain->len >= GROUP_NAME_MAX) {
status = -EINVAL;
mlog(ML_ERROR, "invalid domain name for directory.\n");
goto bail;
@@ -446,14 +506,14 @@ static int dlmfs_mkdir(struct inode * dir,
ip = DLMFS_I(inode);
- dlm = user_dlm_register_context(domain, &proto);
- if (IS_ERR(dlm)) {
- status = PTR_ERR(dlm);
+ conn = user_dlm_register(domain);
+ if (IS_ERR(conn)) {
+ status = PTR_ERR(conn);
mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
status, domain->len, domain->name);
goto bail;
}
- ip->ip_dlm = dlm;
+ ip->ip_conn = conn;
inc_nlink(dir);
d_instantiate(dentry, inode);
@@ -549,6 +609,7 @@ static int dlmfs_fill_super(struct super_block * sb,
static const struct file_operations dlmfs_file_operations = {
.open = dlmfs_file_open,
.release = dlmfs_file_release,
+ .poll = dlmfs_file_poll,
.read = dlmfs_file_read,
.write = dlmfs_file_write,
};
@@ -576,6 +637,7 @@ static const struct super_operations dlmfs_ops = {
static const struct inode_operations dlmfs_file_inode_operations = {
.getattr = simple_getattr,
+ .setattr = dlmfs_file_setattr,
};
static int dlmfs_get_sb(struct file_system_type *fs_type,
@@ -620,6 +682,7 @@ static int __init init_dlmfs_fs(void)
}
cleanup_worker = 1;
+ user_dlm_set_locking_protocol();
status = register_filesystem(&dlmfs_fs_type);
bail:
if (status) {
diff --git a/fs/ocfs2/dlm/dlmfsver.c b/fs/ocfs2/dlmfs/dlmfsver.c
index a733b33..a733b33 100644
--- a/fs/ocfs2/dlm/dlmfsver.c
+++ b/fs/ocfs2/dlmfs/dlmfsver.c
diff --git a/fs/ocfs2/dlm/dlmfsver.h b/fs/ocfs2/dlmfs/dlmfsver.h
index f35eadb..f35eadb 100644
--- a/fs/ocfs2/dlm/dlmfsver.h
+++ b/fs/ocfs2/dlmfs/dlmfsver.h
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c
index 4cb1d3d..0499e3f 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlmfs/userdlm.c
@@ -34,18 +34,19 @@
#include <linux/types.h>
#include <linux/crc32.h>
-
-#include "cluster/nodemanager.h"
-#include "cluster/heartbeat.h"
-#include "cluster/tcp.h"
-
-#include "dlmapi.h"
-
+#include "ocfs2_lockingver.h"
+#include "stackglue.h"
#include "userdlm.h"
#define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h"
+
+static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
+{
+ return container_of(lksb, struct user_lock_res, l_lksb);
+}
+
static inline int user_check_wait_flag(struct user_lock_res *lockres,
int flag)
{
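
With the stackglue conversion the ASTs receive the lksb itself rather than an opaque pointer, and user_lksb_to_lock_res() recovers the enclosing lock resource via container_of(); this works because the lksb is embedded in, not pointed to by, struct user_lock_res. The pattern in isolation (foo_* types hypothetical):

        struct foo_lksb {
                int     status;
        };

        struct foo_lockres {
                int             l_level;
                struct foo_lksb l_lksb;         /* embedded member */
        };

        static inline struct foo_lockres *foo_lksb_to_lockres(struct foo_lksb *lksb)
        {
                return container_of(lksb, struct foo_lockres, l_lksb);
        }
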
@@ -73,15 +74,15 @@ static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
}
/* I heart container_of... */
-static inline struct dlm_ctxt *
-dlm_ctxt_from_user_lockres(struct user_lock_res *lockres)
+static inline struct ocfs2_cluster_connection *
+cluster_connection_from_user_lockres(struct user_lock_res *lockres)
{
struct dlmfs_inode_private *ip;
ip = container_of(lockres,
struct dlmfs_inode_private,
ip_lockres);
- return ip->ip_dlm;
+ return ip->ip_conn;
}
static struct inode *
@@ -103,9 +104,9 @@ static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
}
#define user_log_dlm_error(_func, _stat, _lockres) do { \
- mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \
- "resource %.*s: %s\n", dlm_errname(_stat), _func, \
- _lockres->l_namelen, _lockres->l_name, dlm_errmsg(_stat)); \
+ mlog(ML_ERROR, "Dlm error %d while calling %s on " \
+ "resource %.*s\n", _stat, _func, \
+ _lockres->l_namelen, _lockres->l_name); \
} while (0)
/* WARNING: This function lives in a world where the only three lock
@@ -113,34 +114,35 @@ static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
* lock types are added. */
static inline int user_highest_compat_lock_level(int level)
{
- int new_level = LKM_EXMODE;
+ int new_level = DLM_LOCK_EX;
- if (level == LKM_EXMODE)
- new_level = LKM_NLMODE;
- else if (level == LKM_PRMODE)
- new_level = LKM_PRMODE;
+ if (level == DLM_LOCK_EX)
+ new_level = DLM_LOCK_NL;
+ else if (level == DLM_LOCK_PR)
+ new_level = DLM_LOCK_PR;
return new_level;
}
-static void user_ast(void *opaque)
+static void user_ast(struct ocfs2_dlm_lksb *lksb)
{
- struct user_lock_res *lockres = opaque;
- struct dlm_lockstatus *lksb;
+ struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
+ int status;
- mlog(0, "AST fired for lockres %.*s\n", lockres->l_namelen,
- lockres->l_name);
+ mlog(ML_BASTS, "AST fired for lockres %.*s, level %d => %d\n",
+ lockres->l_namelen, lockres->l_name, lockres->l_level,
+ lockres->l_requested);
spin_lock(&lockres->l_lock);
- lksb = &(lockres->l_lksb);
- if (lksb->status != DLM_NORMAL) {
+ status = ocfs2_dlm_lock_status(&lockres->l_lksb);
+ if (status) {
mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n",
- lksb->status, lockres->l_namelen, lockres->l_name);
+ status, lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
return;
}
- mlog_bug_on_msg(lockres->l_requested == LKM_IVMODE,
+ mlog_bug_on_msg(lockres->l_requested == DLM_LOCK_IV,
"Lockres %.*s, requested ivmode. flags 0x%x\n",
lockres->l_namelen, lockres->l_name, lockres->l_flags);
@@ -148,13 +150,13 @@ static void user_ast(void *opaque)
if (lockres->l_requested < lockres->l_level) {
if (lockres->l_requested <=
user_highest_compat_lock_level(lockres->l_blocking)) {
- lockres->l_blocking = LKM_NLMODE;
+ lockres->l_blocking = DLM_LOCK_NL;
lockres->l_flags &= ~USER_LOCK_BLOCKED;
}
}
lockres->l_level = lockres->l_requested;
- lockres->l_requested = LKM_IVMODE;
+ lockres->l_requested = DLM_LOCK_IV;
lockres->l_flags |= USER_LOCK_ATTACHED;
lockres->l_flags &= ~USER_LOCK_BUSY;
@@ -193,11 +195,11 @@ static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
return;
switch (lockres->l_blocking) {
- case LKM_EXMODE:
+ case DLM_LOCK_EX:
if (!lockres->l_ex_holders && !lockres->l_ro_holders)
queue = 1;
break;
- case LKM_PRMODE:
+ case DLM_LOCK_PR:
if (!lockres->l_ex_holders)
queue = 1;
break;
@@ -209,12 +211,12 @@ static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
__user_dlm_queue_lockres(lockres);
}
-static void user_bast(void *opaque, int level)
+static void user_bast(struct ocfs2_dlm_lksb *lksb, int level)
{
- struct user_lock_res *lockres = opaque;
+ struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
- mlog(0, "Blocking AST fired for lockres %.*s. Blocking level %d\n",
- lockres->l_namelen, lockres->l_name, level);
+ mlog(ML_BASTS, "BAST fired for lockres %.*s, blocking %d, level %d\n",
+ lockres->l_namelen, lockres->l_name, level, lockres->l_level);
spin_lock(&lockres->l_lock);
lockres->l_flags |= USER_LOCK_BLOCKED;
@@ -227,15 +229,15 @@ static void user_bast(void *opaque, int level)
wake_up(&lockres->l_event);
}
-static void user_unlock_ast(void *opaque, enum dlm_status status)
+static void user_unlock_ast(struct ocfs2_dlm_lksb *lksb, int status)
{
- struct user_lock_res *lockres = opaque;
+ struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
- mlog(0, "UNLOCK AST called on lock %.*s\n", lockres->l_namelen,
- lockres->l_name);
+ mlog(ML_BASTS, "UNLOCK AST fired for lockres %.*s, flags 0x%x\n",
+ lockres->l_namelen, lockres->l_name, lockres->l_flags);
- if (status != DLM_NORMAL && status != DLM_CANCELGRANT)
- mlog(ML_ERROR, "Dlm returns status %d\n", status);
+ if (status)
+ mlog(ML_ERROR, "dlm returns status %d\n", status);
spin_lock(&lockres->l_lock);
/* The teardown flag gets set early during the unlock process,
@@ -243,7 +245,7 @@ static void user_unlock_ast(void *opaque, enum dlm_status status)
* for a concurrent cancel. */
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN
&& !(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
- lockres->l_level = LKM_IVMODE;
+ lockres->l_level = DLM_LOCK_IV;
} else if (status == DLM_CANCELGRANT) {
/* We tried to cancel a convert request, but it was
* already granted. Don't clear the busy flag - the
@@ -254,7 +256,7 @@ static void user_unlock_ast(void *opaque, enum dlm_status status)
} else {
BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
/* Cancel succeeded, we want to re-queue */
- lockres->l_requested = LKM_IVMODE; /* cancel an
+ lockres->l_requested = DLM_LOCK_IV; /* cancel an
* upconvert
* request. */
lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
@@ -271,6 +273,21 @@ out_noclear:
wake_up(&lockres->l_event);
}
+/*
+ * This is the userdlmfs locking protocol version.
+ *
+ * See fs/ocfs2/dlmglue.c for more details on locking versions.
+ */
+static struct ocfs2_locking_protocol user_dlm_lproto = {
+ .lp_max_version = {
+ .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
+ .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
+ },
+ .lp_lock_ast = user_ast,
+ .lp_blocking_ast = user_bast,
+ .lp_unlock_ast = user_unlock_ast,
+};
+
static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
{
struct inode *inode;
@@ -283,10 +300,10 @@ static void user_dlm_unblock_lock(struct work_struct *work)
int new_level, status;
struct user_lock_res *lockres =
container_of(work, struct user_lock_res, l_work);
- struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+ struct ocfs2_cluster_connection *conn =
+ cluster_connection_from_user_lockres(lockres);
- mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
- lockres->l_name);
+ mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
spin_lock(&lockres->l_lock);
@@ -304,17 +321,23 @@ static void user_dlm_unblock_lock(struct work_struct *work)
* flag, and finally we might get another bast which re-queues
* us before our ast for the downconvert is called. */
if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
+ mlog(ML_BASTS, "lockres %.*s USER_LOCK_BLOCKED\n",
+ lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
goto drop_ref;
}
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
+ mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_TEARDOWN\n",
+ lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
goto drop_ref;
}
if (lockres->l_flags & USER_LOCK_BUSY) {
if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
+ mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_CANCEL\n",
+ lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
goto drop_ref;
}
@@ -322,32 +345,31 @@ static void user_dlm_unblock_lock(struct work_struct *work)
lockres->l_flags |= USER_LOCK_IN_CANCEL;
spin_unlock(&lockres->l_lock);
- status = dlmunlock(dlm,
- &lockres->l_lksb,
- LKM_CANCEL,
- user_unlock_ast,
- lockres);
- if (status != DLM_NORMAL)
- user_log_dlm_error("dlmunlock", status, lockres);
+ status = ocfs2_dlm_unlock(conn, &lockres->l_lksb,
+ DLM_LKF_CANCEL);
+ if (status)
+ user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
goto drop_ref;
}
/* If there are still incompat holders, we can exit safely
* without worrying about re-queueing this lock as that will
* happen on the last call to user_cluster_unlock. */
- if ((lockres->l_blocking == LKM_EXMODE)
+ if ((lockres->l_blocking == DLM_LOCK_EX)
&& (lockres->l_ex_holders || lockres->l_ro_holders)) {
spin_unlock(&lockres->l_lock);
- mlog(0, "can't downconvert for ex: ro = %u, ex = %u\n",
- lockres->l_ro_holders, lockres->l_ex_holders);
+ mlog(ML_BASTS, "lockres %.*s, EX/PR Holders %u,%u\n",
+ lockres->l_namelen, lockres->l_name,
+ lockres->l_ex_holders, lockres->l_ro_holders);
goto drop_ref;
}
- if ((lockres->l_blocking == LKM_PRMODE)
+ if ((lockres->l_blocking == DLM_LOCK_PR)
&& lockres->l_ex_holders) {
spin_unlock(&lockres->l_lock);
- mlog(0, "can't downconvert for pr: ex = %u\n",
- lockres->l_ex_holders);
+ mlog(ML_BASTS, "lockres %.*s, EX Holders %u\n",
+ lockres->l_namelen, lockres->l_name,
+ lockres->l_ex_holders);
goto drop_ref;
}
@@ -355,22 +377,17 @@ static void user_dlm_unblock_lock(struct work_struct *work)
new_level = user_highest_compat_lock_level(lockres->l_blocking);
lockres->l_requested = new_level;
lockres->l_flags |= USER_LOCK_BUSY;
- mlog(0, "Downconvert lock from %d to %d\n",
- lockres->l_level, new_level);
+ mlog(ML_BASTS, "lockres %.*s, downconvert %d => %d\n",
+ lockres->l_namelen, lockres->l_name, lockres->l_level, new_level);
spin_unlock(&lockres->l_lock);
/* need lock downconvert request now... */
- status = dlmlock(dlm,
- new_level,
- &lockres->l_lksb,
- LKM_CONVERT|LKM_VALBLK,
- lockres->l_name,
- lockres->l_namelen,
- user_ast,
- lockres,
- user_bast);
- if (status != DLM_NORMAL) {
- user_log_dlm_error("dlmlock", status, lockres);
+ status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb,
+ DLM_LKF_CONVERT|DLM_LKF_VALBLK,
+ lockres->l_name,
+ lockres->l_namelen);
+ if (status) {
+ user_log_dlm_error("ocfs2_dlm_lock", status, lockres);
user_recover_from_dlm_error(lockres);
}
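user_highest_compat_lock_level() is not shown in this hunk; it presumably mirrors dlmglue's ocfs2_highest_compat_lock_level(), mapping the blocking mode to the most permissive mode still compatible with it (sketch):

	static inline int user_highest_compat_lock_level(int level)
	{
		int new_level = DLM_LOCK_EX;

		if (level == DLM_LOCK_EX)
			new_level = DLM_LOCK_NL;	/* EX conflicts with everything */
		else if (level == DLM_LOCK_PR)
			new_level = DLM_LOCK_PR;	/* PR only conflicts with EX */
		return new_level;
	}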
@@ -382,10 +399,10 @@ static inline void user_dlm_inc_holders(struct user_lock_res *lockres,
int level)
{
switch(level) {
- case LKM_EXMODE:
+ case DLM_LOCK_EX:
lockres->l_ex_holders++;
break;
- case LKM_PRMODE:
+ case DLM_LOCK_PR:
lockres->l_ro_holders++;
break;
default:
@@ -410,20 +427,19 @@ int user_dlm_cluster_lock(struct user_lock_res *lockres,
int lkm_flags)
{
int status, local_flags;
- struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+ struct ocfs2_cluster_connection *conn =
+ cluster_connection_from_user_lockres(lockres);
- if (level != LKM_EXMODE &&
- level != LKM_PRMODE) {
+ if (level != DLM_LOCK_EX &&
+ level != DLM_LOCK_PR) {
mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
lockres->l_namelen, lockres->l_name);
status = -EINVAL;
goto bail;
}
- mlog(0, "lockres %.*s: asking for %s lock, passed flags = 0x%x\n",
- lockres->l_namelen, lockres->l_name,
- (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE",
- lkm_flags);
+ mlog(ML_BASTS, "lockres %.*s, level %d, flags = 0x%x\n",
+ lockres->l_namelen, lockres->l_name, level, lkm_flags);
again:
if (signal_pending(current)) {
@@ -457,35 +473,26 @@ again:
}
if (level > lockres->l_level) {
- local_flags = lkm_flags | LKM_VALBLK;
- if (lockres->l_level != LKM_IVMODE)
- local_flags |= LKM_CONVERT;
+ local_flags = lkm_flags | DLM_LKF_VALBLK;
+ if (lockres->l_level != DLM_LOCK_IV)
+ local_flags |= DLM_LKF_CONVERT;
lockres->l_requested = level;
lockres->l_flags |= USER_LOCK_BUSY;
spin_unlock(&lockres->l_lock);
- BUG_ON(level == LKM_IVMODE);
- BUG_ON(level == LKM_NLMODE);
+ BUG_ON(level == DLM_LOCK_IV);
+ BUG_ON(level == DLM_LOCK_NL);
/* call dlm_lock to upgrade lock now */
- status = dlmlock(dlm,
- level,
- &lockres->l_lksb,
- local_flags,
- lockres->l_name,
- lockres->l_namelen,
- user_ast,
- lockres,
- user_bast);
- if (status != DLM_NORMAL) {
- if ((lkm_flags & LKM_NOQUEUE) &&
- (status == DLM_NOTQUEUED))
- status = -EAGAIN;
- else {
- user_log_dlm_error("dlmlock", status, lockres);
- status = -EINVAL;
- }
+ status = ocfs2_dlm_lock(conn, level, &lockres->l_lksb,
+ local_flags, lockres->l_name,
+ lockres->l_namelen);
+ if (status) {
+ if ((lkm_flags & DLM_LKF_NOQUEUE) &&
+ (status != -EAGAIN))
+ user_log_dlm_error("ocfs2_dlm_lock",
+ status, lockres);
user_recover_from_dlm_error(lockres);
goto bail;
}
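The conversion path above boils down to a small flag table (illustrative summary, not code from the patch):

	first acquire (l_level == DLM_LOCK_IV):  lkm_flags | DLM_LKF_VALBLK
	upconvert     (l_level != DLM_LOCK_IV):  lkm_flags | DLM_LKF_VALBLK | DLM_LKF_CONVERT

The lock value block travels with the request either way; only an existing grant turns the call into a convert.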
@@ -506,11 +513,11 @@ static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
int level)
{
switch(level) {
- case LKM_EXMODE:
+ case DLM_LOCK_EX:
BUG_ON(!lockres->l_ex_holders);
lockres->l_ex_holders--;
break;
- case LKM_PRMODE:
+ case DLM_LOCK_PR:
BUG_ON(!lockres->l_ro_holders);
lockres->l_ro_holders--;
break;
@@ -522,8 +529,8 @@ static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
void user_dlm_cluster_unlock(struct user_lock_res *lockres,
int level)
{
- if (level != LKM_EXMODE &&
- level != LKM_PRMODE) {
+ if (level != DLM_LOCK_EX &&
+ level != DLM_LOCK_PR) {
mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
lockres->l_namelen, lockres->l_name);
return;
@@ -540,33 +547,40 @@ void user_dlm_write_lvb(struct inode *inode,
unsigned int len)
{
struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
- char *lvb = lockres->l_lksb.lvb;
+ char *lvb;
BUG_ON(len > DLM_LVB_LEN);
spin_lock(&lockres->l_lock);
- BUG_ON(lockres->l_level < LKM_EXMODE);
+ BUG_ON(lockres->l_level < DLM_LOCK_EX);
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
memcpy(lvb, val, len);
spin_unlock(&lockres->l_lock);
}
-void user_dlm_read_lvb(struct inode *inode,
- char *val,
- unsigned int len)
+ssize_t user_dlm_read_lvb(struct inode *inode,
+ char *val,
+ unsigned int len)
{
struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
- char *lvb = lockres->l_lksb.lvb;
+ char *lvb;
+ ssize_t ret = len;
BUG_ON(len > DLM_LVB_LEN);
spin_lock(&lockres->l_lock);
- BUG_ON(lockres->l_level < LKM_PRMODE);
- memcpy(val, lvb, len);
+ BUG_ON(lockres->l_level < DLM_LOCK_PR);
+ if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)) {
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ memcpy(val, lvb, len);
+ } else
+ ret = 0;
spin_unlock(&lockres->l_lock);
+ return ret;
}
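With the ssize_t return, a caller can treat an invalid LVB as a zero-length read rather than copying stale bytes; a hypothetical caller (not from the patch):

	static ssize_t example_read_lvb(struct inode *inode, char *buf)
	{
		ssize_t got = user_dlm_read_lvb(inode, buf, DLM_LVB_LEN);

		/* got == 0: the DLM marked the LVB invalid, nothing to show */
		return got;
	}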
void user_dlm_lock_res_init(struct user_lock_res *lockres,
@@ -576,9 +590,9 @@ void user_dlm_lock_res_init(struct user_lock_res *lockres,
spin_lock_init(&lockres->l_lock);
init_waitqueue_head(&lockres->l_event);
- lockres->l_level = LKM_IVMODE;
- lockres->l_requested = LKM_IVMODE;
- lockres->l_blocking = LKM_IVMODE;
+ lockres->l_level = DLM_LOCK_IV;
+ lockres->l_requested = DLM_LOCK_IV;
+ lockres->l_blocking = DLM_LOCK_IV;
/* should have been checked before getting here. */
BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN);
@@ -592,9 +606,10 @@ void user_dlm_lock_res_init(struct user_lock_res *lockres,
int user_dlm_destroy_lock(struct user_lock_res *lockres)
{
int status = -EBUSY;
- struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+ struct ocfs2_cluster_connection *conn =
+ cluster_connection_from_user_lockres(lockres);
- mlog(0, "asked to destroy %.*s\n", lockres->l_namelen, lockres->l_name);
+ mlog(ML_BASTS, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
spin_lock(&lockres->l_lock);
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
@@ -627,14 +642,9 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres)
lockres->l_flags |= USER_LOCK_BUSY;
spin_unlock(&lockres->l_lock);
- status = dlmunlock(dlm,
- &lockres->l_lksb,
- LKM_VALBLK,
- user_unlock_ast,
- lockres);
- if (status != DLM_NORMAL) {
- user_log_dlm_error("dlmunlock", status, lockres);
- status = -EINVAL;
+ status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK);
+ if (status) {
+ user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
goto bail;
}
@@ -645,32 +655,34 @@ bail:
return status;
}
-struct dlm_ctxt *user_dlm_register_context(struct qstr *name,
- struct dlm_protocol_version *proto)
+static void user_dlm_recovery_handler_noop(int node_num,
+ void *recovery_data)
{
- struct dlm_ctxt *dlm;
- u32 dlm_key;
- char *domain;
-
- domain = kmalloc(name->len + 1, GFP_NOFS);
- if (!domain) {
- mlog_errno(-ENOMEM);
- return ERR_PTR(-ENOMEM);
- }
+ /* We ignore recovery events */
+ return;
+}
- dlm_key = crc32_le(0, name->name, name->len);
+void user_dlm_set_locking_protocol(void)
+{
+ ocfs2_stack_glue_set_max_proto_version(&user_dlm_lproto.lp_max_version);
+}
- snprintf(domain, name->len + 1, "%.*s", name->len, name->name);
+struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name)
+{
+ int rc;
+ struct ocfs2_cluster_connection *conn;
- dlm = dlm_register_domain(domain, dlm_key, proto);
- if (IS_ERR(dlm))
- mlog_errno(PTR_ERR(dlm));
+ rc = ocfs2_cluster_connect_agnostic(name->name, name->len,
+ &user_dlm_lproto,
+ user_dlm_recovery_handler_noop,
+ NULL, &conn);
+ if (rc)
+ mlog_errno(rc);
- kfree(domain);
- return dlm;
+ return rc ? ERR_PTR(rc) : conn;
}
-void user_dlm_unregister_context(struct dlm_ctxt *dlm)
+void user_dlm_unregister(struct ocfs2_cluster_connection *conn)
{
- dlm_unregister_domain(dlm);
+ ocfs2_cluster_disconnect(conn, 0);
}
diff --git a/fs/ocfs2/dlm/userdlm.h b/fs/ocfs2/dlmfs/userdlm.h
index 0c3cc03..3b42d79 100644
--- a/fs/ocfs2/dlm/userdlm.h
+++ b/fs/ocfs2/dlmfs/userdlm.h
@@ -57,7 +57,7 @@ struct user_lock_res {
int l_level;
unsigned int l_ro_holders;
unsigned int l_ex_holders;
- struct dlm_lockstatus l_lksb;
+ struct ocfs2_dlm_lksb l_lksb;
int l_requested;
int l_blocking;
@@ -80,15 +80,15 @@ void user_dlm_cluster_unlock(struct user_lock_res *lockres,
void user_dlm_write_lvb(struct inode *inode,
const char *val,
unsigned int len);
-void user_dlm_read_lvb(struct inode *inode,
- char *val,
- unsigned int len);
-struct dlm_ctxt *user_dlm_register_context(struct qstr *name,
- struct dlm_protocol_version *proto);
-void user_dlm_unregister_context(struct dlm_ctxt *dlm);
+ssize_t user_dlm_read_lvb(struct inode *inode,
+ char *val,
+ unsigned int len);
+struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name);
+void user_dlm_unregister(struct ocfs2_cluster_connection *conn);
+void user_dlm_set_locking_protocol(void);
struct dlmfs_inode_private {
- struct dlm_ctxt *ip_dlm;
+ struct ocfs2_cluster_connection *ip_conn;
struct user_lock_res ip_lockres; /* unused for directories. */
struct inode *ip_parent;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e044019..8298608 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -297,6 +297,11 @@ static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
}
+static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
+{
+ return container_of(lksb, struct ocfs2_lock_res, l_lksb);
+}
+
static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
{
BUG_ON(!ocfs2_is_inode_lock(lockres));
@@ -927,6 +932,10 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
lockres->l_blocking = level;
}
+ mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
+ lockres->l_name, level, lockres->l_level, lockres->l_blocking,
+ needs_downconvert);
+
if (needs_downconvert)
lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
@@ -1040,18 +1049,17 @@ static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
return lockres->l_pending_gen;
}
-
-static void ocfs2_blocking_ast(void *opaque, int level)
+static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
{
- struct ocfs2_lock_res *lockres = opaque;
+ struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
int needs_downconvert;
unsigned long flags;
BUG_ON(level <= DLM_LOCK_NL);
- mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
- lockres->l_name, level, lockres->l_level,
+ mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
+ "type %s\n", lockres->l_name, level, lockres->l_level,
ocfs2_lock_type_string(lockres->l_type));
/*
@@ -1072,9 +1080,9 @@ static void ocfs2_blocking_ast(void *opaque, int level)
ocfs2_wake_downconvert_thread(osb);
}
-static void ocfs2_locking_ast(void *opaque)
+static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
{
- struct ocfs2_lock_res *lockres = opaque;
+ struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
unsigned long flags;
int status;
@@ -1095,6 +1103,10 @@ static void ocfs2_locking_ast(void *opaque)
return;
}
+ mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
+ "level %d => %d\n", lockres->l_name, lockres->l_action,
+ lockres->l_unlock_action, lockres->l_level, lockres->l_requested);
+
switch(lockres->l_action) {
case OCFS2_AST_ATTACH:
ocfs2_generic_handle_attach_action(lockres);
@@ -1107,8 +1119,8 @@ static void ocfs2_locking_ast(void *opaque)
ocfs2_generic_handle_downconvert_action(lockres);
break;
default:
- mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
- "lockres flags = 0x%lx, unlock action: %u\n",
+ mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
+ "flags 0x%lx, unlock: %u\n",
lockres->l_name, lockres->l_action, lockres->l_flags,
lockres->l_unlock_action);
BUG();
@@ -1134,6 +1146,88 @@ out:
spin_unlock_irqrestore(&lockres->l_lock, flags);
}
+static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
+{
+ struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
+ unsigned long flags;
+
+ mlog_entry_void();
+
+ mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
+ lockres->l_name, lockres->l_unlock_action);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if (error) {
+ mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
+ "unlock_action %d\n", error, lockres->l_name,
+ lockres->l_unlock_action);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ mlog_exit_void();
+ return;
+ }
+
+ switch(lockres->l_unlock_action) {
+ case OCFS2_UNLOCK_CANCEL_CONVERT:
+ mlog(0, "Cancel convert success for %s\n", lockres->l_name);
+ lockres->l_action = OCFS2_AST_INVALID;
+ /* Downconvert thread may have requeued this lock, we
+ * need to wake it. */
+ if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
+ ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
+ break;
+ case OCFS2_UNLOCK_DROP_LOCK:
+ lockres->l_level = DLM_LOCK_IV;
+ break;
+ default:
+ BUG();
+ }
+
+ lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+ lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
+ wake_up(&lockres->l_event);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ mlog_exit_void();
+}
+
+/*
+ * This is the filesystem locking protocol. It provides the lock handling
+ * hooks for the underlying DLM. It has a maximum version number.
+ * The version number allows interoperability with systems running at
+ * the same major number and an equal or smaller minor number.
+ *
+ * Whenever the filesystem does new things with locks (adds or removes a
+ * lock, orders them differently, does different things underneath a lock),
+ * the version must be changed. The protocol is negotiated when joining
+ * the dlm domain. A node may join the domain if its major version is
+ * identical to all other nodes and its minor version is greater than
+ * or equal to all other nodes. When its minor version is greater than
+ * the other nodes, it will run at the minor version specified by the
+ * other nodes.
+ *
+ * If a locking change is made that will not be compatible with older
+ * versions, the major number must be increased and the minor version set
+ * to zero. If a change merely adds a behavior that can be disabled when
+ * speaking to older versions, the minor version must be increased. If a
+ * change is fully backwards compatible (e.g., LVB changes that
+ * are just ignored by older versions), the version does not need to be
+ * updated.
+ */
+static struct ocfs2_locking_protocol lproto = {
+ .lp_max_version = {
+ .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
+ .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
+ },
+ .lp_lock_ast = ocfs2_locking_ast,
+ .lp_blocking_ast = ocfs2_blocking_ast,
+ .lp_unlock_ast = ocfs2_unlock_ast,
+};
+
+void ocfs2_set_locking_protocol(void)
+{
+ ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
+}
+
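A minimal sketch of the negotiation rule the comment above describes (illustrative names, not part of the patch): majors must match exactly, and the connection runs at the smaller of the two minors.

	static int proto_compatible(const struct ocfs2_protocol_version *ours,
				    const struct ocfs2_protocol_version *theirs,
				    struct ocfs2_protocol_version *running)
	{
		if (ours->pv_major != theirs->pv_major)
			return 0;	/* cannot join the domain */

		running->pv_major = ours->pv_major;
		running->pv_minor = min(ours->pv_minor, theirs->pv_minor);
		return 1;
	}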
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
int convert)
{
@@ -1189,8 +1283,7 @@ static int ocfs2_lock_create(struct ocfs2_super *osb,
&lockres->l_lksb,
dlm_flags,
lockres->l_name,
- OCFS2_LOCK_ID_MAX_LEN - 1,
- lockres);
+ OCFS2_LOCK_ID_MAX_LEN - 1);
lockres_clear_pending(lockres, gen, osb);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
@@ -1412,7 +1505,7 @@ again:
BUG_ON(level == DLM_LOCK_IV);
BUG_ON(level == DLM_LOCK_NL);
- mlog(0, "lock %s, convert from %d to level = %d\n",
+ mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
lockres->l_name, lockres->l_level, level);
/* call dlm_lock to upgrade lock now */
@@ -1421,8 +1514,7 @@ again:
&lockres->l_lksb,
lkm_flags,
lockres->l_name,
- OCFS2_LOCK_ID_MAX_LEN - 1,
- lockres);
+ OCFS2_LOCK_ID_MAX_LEN - 1);
lockres_clear_pending(lockres, gen, osb);
if (ret) {
if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
@@ -1859,8 +1951,7 @@ int ocfs2_file_lock(struct file *file, int ex, int trylock)
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
- lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1,
- lockres);
+ lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
if (ret) {
if (!trylock || (ret != -EAGAIN)) {
ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
@@ -2989,7 +3080,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
status = ocfs2_cluster_connect(osb->osb_cluster_stack,
osb->uuid_str,
strlen(osb->uuid_str),
- ocfs2_do_node_down, osb,
+ &lproto, ocfs2_do_node_down, osb,
&conn);
if (status) {
mlog_errno(status);
@@ -3056,50 +3147,6 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
mlog_exit_void();
}
-static void ocfs2_unlock_ast(void *opaque, int error)
-{
- struct ocfs2_lock_res *lockres = opaque;
- unsigned long flags;
-
- mlog_entry_void();
-
- mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
- lockres->l_unlock_action);
-
- spin_lock_irqsave(&lockres->l_lock, flags);
- if (error) {
- mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
- "unlock_action %d\n", error, lockres->l_name,
- lockres->l_unlock_action);
- spin_unlock_irqrestore(&lockres->l_lock, flags);
- mlog_exit_void();
- return;
- }
-
- switch(lockres->l_unlock_action) {
- case OCFS2_UNLOCK_CANCEL_CONVERT:
- mlog(0, "Cancel convert success for %s\n", lockres->l_name);
- lockres->l_action = OCFS2_AST_INVALID;
- /* Downconvert thread may have requeued this lock, we
- * need to wake it. */
- if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
- ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
- break;
- case OCFS2_UNLOCK_DROP_LOCK:
- lockres->l_level = DLM_LOCK_IV;
- break;
- default:
- BUG();
- }
-
- lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
- lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
- wake_up(&lockres->l_event);
- spin_unlock_irqrestore(&lockres->l_lock, flags);
-
- mlog_exit_void();
-}
-
static int ocfs2_drop_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
@@ -3167,8 +3214,7 @@ static int ocfs2_drop_lock(struct ocfs2_super *osb,
mlog(0, "lock %s\n", lockres->l_name);
- ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags,
- lockres);
+ ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
@@ -3276,13 +3322,20 @@ static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
if (lockres->l_level <= new_level) {
- mlog(ML_ERROR, "lockres->l_level (%d) <= new_level (%d)\n",
- lockres->l_level, new_level);
+ mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
+ "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
+ "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
+ new_level, list_empty(&lockres->l_blocked_list),
+ list_empty(&lockres->l_mask_waiters), lockres->l_type,
+ lockres->l_flags, lockres->l_ro_holders,
+ lockres->l_ex_holders, lockres->l_action,
+ lockres->l_unlock_action, lockres->l_requested,
+ lockres->l_blocking, lockres->l_pending_gen);
BUG();
}
- mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
- lockres->l_name, new_level, lockres->l_blocking);
+ mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
+ lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
lockres->l_action = OCFS2_AST_DOWNCONVERT;
lockres->l_requested = new_level;
@@ -3301,6 +3354,9 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
mlog_entry_void();
+ mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
+ lockres->l_level, new_level);
+
if (lvb)
dlm_flags |= DLM_LKF_VALBLK;
@@ -3309,8 +3365,7 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
&lockres->l_lksb,
dlm_flags,
lockres->l_name,
- OCFS2_LOCK_ID_MAX_LEN - 1,
- lockres);
+ OCFS2_LOCK_ID_MAX_LEN - 1);
lockres_clear_pending(lockres, generation, osb);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
@@ -3331,14 +3386,12 @@ static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
assert_spin_locked(&lockres->l_lock);
mlog_entry_void();
- mlog(0, "lock %s\n", lockres->l_name);
if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
/* If we're already trying to cancel a lock conversion
* then just drop the spinlock and allow the caller to
* requeue this lock. */
-
- mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
+ mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
return 0;
}
@@ -3353,6 +3406,8 @@ static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
"lock %s, invalid flags: 0x%lx\n",
lockres->l_name, lockres->l_flags);
+ mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
+
return 1;
}
@@ -3362,16 +3417,15 @@ static int ocfs2_cancel_convert(struct ocfs2_super *osb,
int ret;
mlog_entry_void();
- mlog(0, "lock %s\n", lockres->l_name);
ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
- DLM_LKF_CANCEL, lockres);
+ DLM_LKF_CANCEL);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
ocfs2_recover_from_dlm_error(lockres, 0);
}
- mlog(0, "lock %s return from ocfs2_dlm_unlock\n", lockres->l_name);
+ mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
mlog_exit(ret);
return ret;
@@ -3428,8 +3482,11 @@ recheck:
* at the same time they set OCFS2_DLM_BUSY. They must
* clear OCFS2_DLM_PENDING after dlm_lock() returns.
*/
- if (lockres->l_flags & OCFS2_LOCK_PENDING)
+ if (lockres->l_flags & OCFS2_LOCK_PENDING) {
+ mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
+ lockres->l_name);
goto leave_requeue;
+ }
ctl->requeue = 1;
ret = ocfs2_prepare_cancel_convert(osb, lockres);
@@ -3461,6 +3518,7 @@ recheck:
*/
if (lockres->l_level == DLM_LOCK_NL) {
BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
+ mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
lockres->l_blocking = DLM_LOCK_NL;
lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
spin_unlock_irqrestore(&lockres->l_lock, flags);
@@ -3470,28 +3528,41 @@ recheck:
/* if we're blocking an exclusive and we have *any* holders,
* then requeue. */
if ((lockres->l_blocking == DLM_LOCK_EX)
- && (lockres->l_ex_holders || lockres->l_ro_holders))
+ && (lockres->l_ex_holders || lockres->l_ro_holders)) {
+ mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
+ lockres->l_name, lockres->l_ex_holders,
+ lockres->l_ro_holders);
goto leave_requeue;
+ }
/* If it's a PR we're blocking, then only
* requeue if we've got any EX holders */
if (lockres->l_blocking == DLM_LOCK_PR &&
- lockres->l_ex_holders)
+ lockres->l_ex_holders) {
+ mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
+ lockres->l_name, lockres->l_ex_holders);
goto leave_requeue;
+ }
/*
* Can we get a lock in this state if the holder counts are
* zero? The meta data unblock code used to check this.
*/
if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
- && (lockres->l_flags & OCFS2_LOCK_REFRESHING))
+ && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
+ mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
+ lockres->l_name);
goto leave_requeue;
+ }
new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
if (lockres->l_ops->check_downconvert
- && !lockres->l_ops->check_downconvert(lockres, new_level))
+ && !lockres->l_ops->check_downconvert(lockres, new_level)) {
+ mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
+ lockres->l_name);
goto leave_requeue;
+ }
/* If we get here, then we know that there are no more
* incompatible holders (and anyone asking for an incompatible
@@ -3509,13 +3580,19 @@ recheck:
ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
- if (ctl->unblock_action == UNBLOCK_STOP_POST)
+ if (ctl->unblock_action == UNBLOCK_STOP_POST) {
+ mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
+ lockres->l_name);
goto leave;
+ }
spin_lock_irqsave(&lockres->l_lock, flags);
if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
/* If this changed underneath us, then we can't drop
* it just yet. */
+ mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
+ "Recheck\n", lockres->l_name, blocking,
+ lockres->l_blocking, level, lockres->l_level);
goto recheck;
}
@@ -3910,45 +3987,6 @@ void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
ocfs2_cluster_unlock(osb, lockres, level);
}
-/*
- * This is the filesystem locking protocol. It provides the lock handling
- * hooks for the underlying DLM. It has a maximum version number.
- * The version number allows interoperability with systems running at
- * the same major number and an equal or smaller minor number.
- *
- * Whenever the filesystem does new things with locks (adds or removes a
- * lock, orders them differently, does different things underneath a lock),
- * the version must be changed. The protocol is negotiated when joining
- * the dlm domain. A node may join the domain if its major version is
- * identical to all other nodes and its minor version is greater than
- * or equal to all other nodes. When its minor version is greater than
- * the other nodes, it will run at the minor version specified by the
- * other nodes.
- *
- * If a locking change is made that will not be compatible with older
- * versions, the major number must be increased and the minor version set
- * to zero. If a change merely adds a behavior that can be disabled when
- * speaking to older versions, the minor version must be increased. If a
- * change adds a fully backwards compatible change (eg, LVB changes that
- * are just ignored by older versions), the version does not need to be
- * updated.
- */
-static struct ocfs2_locking_protocol lproto = {
- .lp_max_version = {
- .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
- .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
- },
- .lp_lock_ast = ocfs2_locking_ast,
- .lp_blocking_ast = ocfs2_blocking_ast,
- .lp_unlock_ast = ocfs2_unlock_ast,
-};
-
-void ocfs2_set_locking_protocol(void)
-{
- ocfs2_stack_glue_set_locking_protocol(&lproto);
-}
-
-
static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
@@ -3965,7 +4003,7 @@ static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
BUG_ON(!lockres);
BUG_ON(!lockres->l_ops);
- mlog(0, "lockres %s blocked.\n", lockres->l_name);
+ mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);
/* Detect whether a lock has been marked as going away while
* the downconvert thread was processing other things. A lock can
@@ -3988,7 +4026,7 @@ unqueue:
} else
ocfs2_schedule_blocked_lock(osb, lockres);
- mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
+ mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
ctl.requeue ? "yes" : "no");
spin_unlock_irqrestore(&lockres->l_lock, flags);
@@ -4010,7 +4048,7 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
/* Do not schedule a lock for downconvert when it's on
* the way to destruction - any nodes wanting access
* to the resource will get it soon. */
- mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
+ mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
lockres->l_name, lockres->l_flags);
return;
}
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 3641052..17947dc 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -997,10 +997,9 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
}
if (size_change && attr->ia_size != i_size_read(inode)) {
- if (attr->ia_size > sb->s_maxbytes) {
- status = -EFBIG;
+ status = inode_newsize_ok(inode, attr->ia_size);
+ if (status)
goto bail_unlock;
- }
if (i_size_read(inode) > attr->ia_size) {
if (ocfs2_should_order_data(inode)) {
@@ -1840,6 +1839,8 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
&meta_level);
if (has_refcount)
*has_refcount = 1;
+ if (direct_io)
+ *direct_io = 0;
}
if (ret < 0) {
@@ -1863,10 +1864,6 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
break;
}
- if (has_refcount && *has_refcount == 1) {
- *direct_io = 0;
- break;
- }
/*
* Allowing concurrent direct writes means
* i_size changes wouldn't be synchronized, so
@@ -2047,7 +2044,7 @@ out_dio:
* async dio is going to do it in the future or an end_io after an
* error has already done it.
*/
- if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
+ if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
rw_level = -1;
have_alloc_sem = 0;
}
diff --git a/fs/ocfs2/ioctl.h b/fs/ocfs2/ioctl.h
index cf9a5ee..0cd5323 100644
--- a/fs/ocfs2/ioctl.h
+++ b/fs/ocfs2/ioctl.h
@@ -7,10 +7,10 @@
*
*/
-#ifndef OCFS2_IOCTL_H
-#define OCFS2_IOCTL_H
+#ifndef OCFS2_IOCTL_PROTO_H
+#define OCFS2_IOCTL_PROTO_H
long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg);
-#endif /* OCFS2_IOCTL_H */
+#endif /* OCFS2_IOCTL_PROTO_H */
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index ac10f83..ca992d9 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -476,7 +476,7 @@ out_mutex:
out:
if (!status)
- ocfs2_init_inode_steal_slot(osb);
+ ocfs2_init_steal_slots(osb);
mlog_exit(status);
return status;
}
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 740f4480..1238b49 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -42,6 +42,7 @@
#include "ocfs2_fs.h"
#include "ocfs2_lockid.h"
+#include "ocfs2_ioctl.h"
/* For struct ocfs2_blockcheck_stats */
#include "blockcheck.h"
@@ -159,7 +160,7 @@ struct ocfs2_lock_res {
int l_level;
unsigned int l_ro_holders;
unsigned int l_ex_holders;
- union ocfs2_dlm_lksb l_lksb;
+ struct ocfs2_dlm_lksb l_lksb;
/* used from AST/BAST funcs. */
enum ocfs2_ast_action l_action;
@@ -305,7 +306,9 @@ struct ocfs2_super
u32 s_next_generation;
unsigned long osb_flags;
s16 s_inode_steal_slot;
+ s16 s_meta_steal_slot;
atomic_t s_num_inodes_stolen;
+ atomic_t s_num_meta_stolen;
unsigned long s_mount_opt;
unsigned int s_atime_quantum;
@@ -760,33 +763,6 @@ static inline unsigned int ocfs2_megabytes_to_clusters(struct super_block *sb,
return megs << (20 - OCFS2_SB(sb)->s_clustersize_bits);
}
-static inline void ocfs2_init_inode_steal_slot(struct ocfs2_super *osb)
-{
- spin_lock(&osb->osb_lock);
- osb->s_inode_steal_slot = OCFS2_INVALID_SLOT;
- spin_unlock(&osb->osb_lock);
- atomic_set(&osb->s_num_inodes_stolen, 0);
-}
-
-static inline void ocfs2_set_inode_steal_slot(struct ocfs2_super *osb,
- s16 slot)
-{
- spin_lock(&osb->osb_lock);
- osb->s_inode_steal_slot = slot;
- spin_unlock(&osb->osb_lock);
-}
-
-static inline s16 ocfs2_get_inode_steal_slot(struct ocfs2_super *osb)
-{
- s16 slot;
-
- spin_lock(&osb->osb_lock);
- slot = osb->s_inode_steal_slot;
- spin_unlock(&osb->osb_lock);
-
- return slot;
-}
-
#define ocfs2_set_bit ext2_set_bit
#define ocfs2_clear_bit ext2_clear_bit
#define ocfs2_test_bit ext2_test_bit
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 7638a38..bb37218 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -254,63 +254,6 @@
* refcount tree */
/*
- * ioctl commands
- */
-#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long)
-#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long)
-#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int)
-#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int)
-
-/*
- * Space reservation / allocation / free ioctls and argument structure
- * are designed to be compatible with XFS.
- *
- * ALLOCSP* and FREESP* are not and will never be supported, but are
- * included here for completeness.
- */
-struct ocfs2_space_resv {
- __s16 l_type;
- __s16 l_whence;
- __s64 l_start;
- __s64 l_len; /* len == 0 means until end of file */
- __s32 l_sysid;
- __u32 l_pid;
- __s32 l_pad[4]; /* reserve area */
-};
-
-#define OCFS2_IOC_ALLOCSP _IOW ('X', 10, struct ocfs2_space_resv)
-#define OCFS2_IOC_FREESP _IOW ('X', 11, struct ocfs2_space_resv)
-#define OCFS2_IOC_RESVSP _IOW ('X', 40, struct ocfs2_space_resv)
-#define OCFS2_IOC_UNRESVSP _IOW ('X', 41, struct ocfs2_space_resv)
-#define OCFS2_IOC_ALLOCSP64 _IOW ('X', 36, struct ocfs2_space_resv)
-#define OCFS2_IOC_FREESP64 _IOW ('X', 37, struct ocfs2_space_resv)
-#define OCFS2_IOC_RESVSP64 _IOW ('X', 42, struct ocfs2_space_resv)
-#define OCFS2_IOC_UNRESVSP64 _IOW ('X', 43, struct ocfs2_space_resv)
-
-/* Used to pass group descriptor data when online resize is done */
-struct ocfs2_new_group_input {
- __u64 group; /* Group descriptor's blkno. */
- __u32 clusters; /* Total number of clusters in this group */
- __u32 frees; /* Total free clusters in this group */
- __u16 chain; /* Chain for this group */
- __u16 reserved1;
- __u32 reserved2;
-};
-
-#define OCFS2_IOC_GROUP_EXTEND _IOW('o', 1, int)
-#define OCFS2_IOC_GROUP_ADD _IOW('o', 2,struct ocfs2_new_group_input)
-#define OCFS2_IOC_GROUP_ADD64 _IOW('o', 3,struct ocfs2_new_group_input)
-
-/* Used to pass 2 file names to reflink. */
-struct reflink_arguments {
- __u64 old_path;
- __u64 new_path;
- __u64 preserve;
-};
-#define OCFS2_IOC_REFLINK _IOW('o', 4, struct reflink_arguments)
-
-
-/*
* Journal Flags (ocfs2_dinode.id1.journal1.i_flags)
*/
#define OCFS2_JOURNAL_DIRTY_FL (0x00000001) /* Journal needs recovery */
diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h
new file mode 100644
index 0000000..2d3420a
--- /dev/null
+++ b/fs/ocfs2/ocfs2_ioctl.h
@@ -0,0 +1,79 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ocfs2_ioctl.h
+ *
+ * Defines OCFS2 ioctls.
+ *
+ * Copyright (C) 2010 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License, version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef OCFS2_IOCTL_H
+#define OCFS2_IOCTL_H
+
+/*
+ * ioctl commands
+ */
+#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long)
+#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long)
+#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int)
+#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int)
+
+/*
+ * Space reservation / allocation / free ioctls and argument structure
+ * are designed to be compatible with XFS.
+ *
+ * ALLOCSP* and FREESP* are not and will never be supported, but are
+ * included here for completeness.
+ */
+struct ocfs2_space_resv {
+ __s16 l_type;
+ __s16 l_whence;
+ __s64 l_start;
+ __s64 l_len; /* len == 0 means until end of file */
+ __s32 l_sysid;
+ __u32 l_pid;
+ __s32 l_pad[4]; /* reserve area */
+};
+
+#define OCFS2_IOC_ALLOCSP _IOW ('X', 10, struct ocfs2_space_resv)
+#define OCFS2_IOC_FREESP _IOW ('X', 11, struct ocfs2_space_resv)
+#define OCFS2_IOC_RESVSP _IOW ('X', 40, struct ocfs2_space_resv)
+#define OCFS2_IOC_UNRESVSP _IOW ('X', 41, struct ocfs2_space_resv)
+#define OCFS2_IOC_ALLOCSP64 _IOW ('X', 36, struct ocfs2_space_resv)
+#define OCFS2_IOC_FREESP64 _IOW ('X', 37, struct ocfs2_space_resv)
+#define OCFS2_IOC_RESVSP64 _IOW ('X', 42, struct ocfs2_space_resv)
+#define OCFS2_IOC_UNRESVSP64 _IOW ('X', 43, struct ocfs2_space_resv)
+
+/* Used to pass group descriptor data when online resize is done */
+struct ocfs2_new_group_input {
+ __u64 group; /* Group descriptor's blkno. */
+ __u32 clusters; /* Total number of clusters in this group */
+ __u32 frees; /* Total free clusters in this group */
+ __u16 chain; /* Chain for this group */
+ __u16 reserved1;
+ __u32 reserved2;
+};
+
+#define OCFS2_IOC_GROUP_EXTEND _IOW('o', 1, int)
+#define OCFS2_IOC_GROUP_ADD _IOW('o', 2, struct ocfs2_new_group_input)
+#define OCFS2_IOC_GROUP_ADD64 _IOW('o', 3, struct ocfs2_new_group_input)
+
+/* Used to pass 2 file names to reflink. */
+struct reflink_arguments {
+ __u64 old_path;
+ __u64 new_path;
+ __u64 preserve;
+};
+#define OCFS2_IOC_REFLINK _IOW('o', 4, struct reflink_arguments)
+
+#endif /* OCFS2_IOCTL_H */
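A side effect of giving the ioctl definitions their own header is that userspace tools can consume them directly; a hypothetical userspace caller of OCFS2_IOC_REFLINK, assuming the paths are passed as user pointers cast to __u64:

	#include <sys/ioctl.h>
	#include <linux/types.h>

	static int do_reflink(int fd, const char *old_path,
			      const char *new_path, int preserve)
	{
		struct reflink_arguments args = {
			.old_path = (__u64)(unsigned long)old_path,
			.new_path = (__u64)(unsigned long)new_path,
			.preserve = (__u64)preserve,
		};

		return ioctl(fd, OCFS2_IOC_REFLINK, &args);
	}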
diff --git a/fs/ocfs2/ocfs2_lockingver.h b/fs/ocfs2/ocfs2_lockingver.h
index 82d5eea..2e45c8d 100644
--- a/fs/ocfs2/ocfs2_lockingver.h
+++ b/fs/ocfs2/ocfs2_lockingver.h
@@ -23,6 +23,8 @@
/*
* The protocol version for ocfs2 cluster locking. See dlmglue.c for
* more details.
+ *
+ * 1.0 - Initial locking version from ocfs2 1.4.
*/
#define OCFS2_LOCKING_PROTOCOL_MAJOR 1
#define OCFS2_LOCKING_PROTOCOL_MINOR 0
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index f3ae10c..9e96921 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -626,7 +626,7 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
rb = (struct ocfs2_refcount_block *)new_bh->b_data;
memset(rb, 0, inode->i_sb->s_blocksize);
strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
- rb->rf_suballoc_slot = cpu_to_le16(osb->slot_num);
+ rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
rb->rf_blkno = cpu_to_le64(first_blkno);
@@ -1330,7 +1330,7 @@ static int ocfs2_expand_inline_ref_root(handle_t *handle,
memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
- new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
+ new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
new_rb->rf_blkno = cpu_to_le64(blkno);
new_rb->rf_cpos = cpu_to_le32(0);
@@ -1576,7 +1576,7 @@ static int ocfs2_new_leaf_refcount_block(handle_t *handle,
new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
memset(new_rb, 0, sb->s_blocksize);
strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
- new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
+ new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
new_rb->rf_blkno = cpu_to_le64(blkno);
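Note on the three rf_suballoc_slot changes above: with metadata stealing (the s_meta_steal_slot machinery added elsewhere in this series), a refcount block may come from another node's suballocator, so the descriptor must record the slot the block was actually allocated from (meta_ac->ac_alloc_slot) rather than this node's own slot_num.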
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index 3038c92..7020e12 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -161,24 +161,23 @@ static int dlm_status_to_errno(enum dlm_status status)
static void o2dlm_lock_ast_wrapper(void *astarg)
{
- BUG_ON(o2cb_stack.sp_proto == NULL);
+ struct ocfs2_dlm_lksb *lksb = astarg;
- o2cb_stack.sp_proto->lp_lock_ast(astarg);
+ lksb->lksb_conn->cc_proto->lp_lock_ast(lksb);
}
static void o2dlm_blocking_ast_wrapper(void *astarg, int level)
{
- BUG_ON(o2cb_stack.sp_proto == NULL);
+ struct ocfs2_dlm_lksb *lksb = astarg;
- o2cb_stack.sp_proto->lp_blocking_ast(astarg, level);
+ lksb->lksb_conn->cc_proto->lp_blocking_ast(lksb, level);
}
static void o2dlm_unlock_ast_wrapper(void *astarg, enum dlm_status status)
{
+ struct ocfs2_dlm_lksb *lksb = astarg;
int error = dlm_status_to_errno(status);
- BUG_ON(o2cb_stack.sp_proto == NULL);
-
/*
* In o2dlm, you can get both the lock_ast() for the lock being
* granted and the unlock_ast() for the CANCEL failing. A
@@ -193,16 +192,15 @@ static void o2dlm_unlock_ast_wrapper(void *astarg, enum dlm_status status)
if (status == DLM_CANCELGRANT)
return;
- o2cb_stack.sp_proto->lp_unlock_ast(astarg, error);
+ lksb->lksb_conn->cc_proto->lp_unlock_ast(lksb, error);
}
static int o2cb_dlm_lock(struct ocfs2_cluster_connection *conn,
int mode,
- union ocfs2_dlm_lksb *lksb,
+ struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
- unsigned int namelen,
- void *astarg)
+ unsigned int namelen)
{
enum dlm_status status;
int o2dlm_mode = mode_to_o2dlm(mode);
@@ -211,28 +209,27 @@ static int o2cb_dlm_lock(struct ocfs2_cluster_connection *conn,
status = dlmlock(conn->cc_lockspace, o2dlm_mode, &lksb->lksb_o2dlm,
o2dlm_flags, name, namelen,
- o2dlm_lock_ast_wrapper, astarg,
+ o2dlm_lock_ast_wrapper, lksb,
o2dlm_blocking_ast_wrapper);
ret = dlm_status_to_errno(status);
return ret;
}
static int o2cb_dlm_unlock(struct ocfs2_cluster_connection *conn,
- union ocfs2_dlm_lksb *lksb,
- u32 flags,
- void *astarg)
+ struct ocfs2_dlm_lksb *lksb,
+ u32 flags)
{
enum dlm_status status;
int o2dlm_flags = flags_to_o2dlm(flags);
int ret;
status = dlmunlock(conn->cc_lockspace, &lksb->lksb_o2dlm,
- o2dlm_flags, o2dlm_unlock_ast_wrapper, astarg);
+ o2dlm_flags, o2dlm_unlock_ast_wrapper, lksb);
ret = dlm_status_to_errno(status);
return ret;
}
-static int o2cb_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
+static int o2cb_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
{
return dlm_status_to_errno(lksb->lksb_o2dlm.status);
}
@@ -242,17 +239,17 @@ static int o2cb_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
* contents, it will zero out the LVB. Thus the caller can always trust
* the contents.
*/
-static int o2cb_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
+static int o2cb_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb)
{
return 1;
}
-static void *o2cb_dlm_lvb(union ocfs2_dlm_lksb *lksb)
+static void *o2cb_dlm_lvb(struct ocfs2_dlm_lksb *lksb)
{
return (void *)(lksb->lksb_o2dlm.lvb);
}
-static void o2cb_dump_lksb(union ocfs2_dlm_lksb *lksb)
+static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb)
{
dlm_print_one_lock(lksb->lksb_o2dlm.lockid);
}
@@ -280,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
struct dlm_protocol_version fs_version;
BUG_ON(conn == NULL);
- BUG_ON(o2cb_stack.sp_proto == NULL);
+ BUG_ON(conn->cc_proto == NULL);
/* for now we only have one cluster/node, make sure we see it
* in the heartbeat universe */
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index da78a2a..5ae8812 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -25,7 +25,6 @@
#include <linux/reboot.h>
#include <asm/uaccess.h>
-#include "ocfs2.h" /* For struct ocfs2_lock_res */
#include "stackglue.h"
#include <linux/dlm_plock.h>
@@ -63,8 +62,8 @@
* negotiated by the client. The client negotiates based on the maximum
* version advertised in /sys/fs/ocfs2/max_locking_protocol. The major
* number from the "SETV" message must match
- * ocfs2_user_plugin.sp_proto->lp_max_version.pv_major, and the minor number
- * must be less than or equal to ...->lp_max_version.pv_minor.
+ * ocfs2_user_plugin.sp_max_proto.pv_major, and the minor number
+ * must be less than or equal to ...sp_max_proto.pv_minor.
*
* Once this information has been set, mounts will be allowed. From this
* point on, the "DOWN" message can be sent for node down notification.
@@ -401,7 +400,7 @@ static int ocfs2_control_do_setversion_msg(struct file *file,
char *ptr = NULL;
struct ocfs2_control_private *p = file->private_data;
struct ocfs2_protocol_version *max =
- &ocfs2_user_plugin.sp_proto->lp_max_version;
+ &ocfs2_user_plugin.sp_max_proto;
if (ocfs2_control_get_handshake_state(file) !=
OCFS2_CONTROL_HANDSHAKE_PROTOCOL)
@@ -664,18 +663,10 @@ static void ocfs2_control_exit(void)
-rc);
}
-static struct dlm_lksb *fsdlm_astarg_to_lksb(void *astarg)
-{
- struct ocfs2_lock_res *res = astarg;
- return &res->l_lksb.lksb_fsdlm;
-}
-
static void fsdlm_lock_ast_wrapper(void *astarg)
{
- struct dlm_lksb *lksb = fsdlm_astarg_to_lksb(astarg);
- int status = lksb->sb_status;
-
- BUG_ON(ocfs2_user_plugin.sp_proto == NULL);
+ struct ocfs2_dlm_lksb *lksb = astarg;
+ int status = lksb->lksb_fsdlm.sb_status;
/*
* For now we're punting on the issue of other non-standard errors
@@ -688,25 +679,24 @@ static void fsdlm_lock_ast_wrapper(void *astarg)
*/
if (status == -DLM_EUNLOCK || status == -DLM_ECANCEL)
- ocfs2_user_plugin.sp_proto->lp_unlock_ast(astarg, 0);
+ lksb->lksb_conn->cc_proto->lp_unlock_ast(lksb, 0);
else
- ocfs2_user_plugin.sp_proto->lp_lock_ast(astarg);
+ lksb->lksb_conn->cc_proto->lp_lock_ast(lksb);
}
static void fsdlm_blocking_ast_wrapper(void *astarg, int level)
{
- BUG_ON(ocfs2_user_plugin.sp_proto == NULL);
+ struct ocfs2_dlm_lksb *lksb = astarg;
- ocfs2_user_plugin.sp_proto->lp_blocking_ast(astarg, level);
+ lksb->lksb_conn->cc_proto->lp_blocking_ast(lksb, level);
}
static int user_dlm_lock(struct ocfs2_cluster_connection *conn,
int mode,
- union ocfs2_dlm_lksb *lksb,
+ struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
- unsigned int namelen,
- void *astarg)
+ unsigned int namelen)
{
int ret;
@@ -716,36 +706,35 @@ static int user_dlm_lock(struct ocfs2_cluster_connection *conn,
ret = dlm_lock(conn->cc_lockspace, mode, &lksb->lksb_fsdlm,
flags|DLM_LKF_NODLCKWT, name, namelen, 0,
- fsdlm_lock_ast_wrapper, astarg,
+ fsdlm_lock_ast_wrapper, lksb,
fsdlm_blocking_ast_wrapper);
return ret;
}
static int user_dlm_unlock(struct ocfs2_cluster_connection *conn,
- union ocfs2_dlm_lksb *lksb,
- u32 flags,
- void *astarg)
+ struct ocfs2_dlm_lksb *lksb,
+ u32 flags)
{
int ret;
ret = dlm_unlock(conn->cc_lockspace, lksb->lksb_fsdlm.sb_lkid,
- flags, &lksb->lksb_fsdlm, astarg);
+ flags, &lksb->lksb_fsdlm, lksb);
return ret;
}
-static int user_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
+static int user_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
{
return lksb->lksb_fsdlm.sb_status;
}
-static int user_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
+static int user_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb)
{
int invalid = lksb->lksb_fsdlm.sb_flags & DLM_SBF_VALNOTVALID;
return !invalid;
}
-static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb)
+static void *user_dlm_lvb(struct ocfs2_dlm_lksb *lksb)
{
if (!lksb->lksb_fsdlm.sb_lvbptr)
lksb->lksb_fsdlm.sb_lvbptr = (char *)lksb +
@@ -753,7 +742,7 @@ static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb)
return (void *)(lksb->lksb_fsdlm.sb_lvbptr);
}
-static void user_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb)
+static void user_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb)
{
}
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index f3df0ba..39abf89 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -36,7 +36,7 @@
#define OCFS2_STACK_PLUGIN_USER "user"
#define OCFS2_MAX_HB_CTL_PATH 256
-static struct ocfs2_locking_protocol *lproto;
+static struct ocfs2_protocol_version locking_max_version;
static DEFINE_SPINLOCK(ocfs2_stack_lock);
static LIST_HEAD(ocfs2_stack_list);
static char cluster_stack_name[OCFS2_STACK_LABEL_LEN + 1];
@@ -176,7 +176,7 @@ int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin)
spin_lock(&ocfs2_stack_lock);
if (!ocfs2_stack_lookup(plugin->sp_name)) {
plugin->sp_count = 0;
- plugin->sp_proto = lproto;
+ plugin->sp_max_proto = locking_max_version;
list_add(&plugin->sp_list, &ocfs2_stack_list);
printk(KERN_INFO "ocfs2: Registered cluster interface %s\n",
plugin->sp_name);
@@ -213,77 +213,76 @@ void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin)
}
EXPORT_SYMBOL_GPL(ocfs2_stack_glue_unregister);
-void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto)
+void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_proto)
{
struct ocfs2_stack_plugin *p;
- BUG_ON(proto == NULL);
-
spin_lock(&ocfs2_stack_lock);
- BUG_ON(active_stack != NULL);
+ if (memcmp(max_proto, &locking_max_version,
+ sizeof(struct ocfs2_protocol_version))) {
+ BUG_ON(locking_max_version.pv_major != 0);
- lproto = proto;
- list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
- p->sp_proto = lproto;
+ locking_max_version = *max_proto;
+ list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
+ p->sp_max_proto = locking_max_version;
+ }
}
-
spin_unlock(&ocfs2_stack_lock);
}
-EXPORT_SYMBOL_GPL(ocfs2_stack_glue_set_locking_protocol);
+EXPORT_SYMBOL_GPL(ocfs2_stack_glue_set_max_proto_version);
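Because of the memcmp() above, repeated calls with an identical maximum version are no-ops; this is presumably what lets both dlmglue and userdlmfs advertise the same version at init time, while a second, different version trips the BUG_ON().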
/*
- * The ocfs2_dlm_lock() and ocfs2_dlm_unlock() functions take
- * "struct ocfs2_lock_res *astarg" instead of "void *astarg" because the
- * underlying stack plugins need to pilfer the lksb off of the lock_res.
- * If some other structure needs to be passed as an astarg, the plugins
- * will need to be given a different avenue to the lksb.
+ * The ocfs2_dlm_lock() and ocfs2_dlm_unlock() functions take no argument
+ * for the ast and bast functions. They will pass the lksb to the ast
+ * and bast. The caller can wrap the lksb with their own structure to
+ * get more information.
*/
int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn,
int mode,
- union ocfs2_dlm_lksb *lksb,
+ struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
- unsigned int namelen,
- struct ocfs2_lock_res *astarg)
+ unsigned int namelen)
{
- BUG_ON(lproto == NULL);
-
+ if (!lksb->lksb_conn)
+ lksb->lksb_conn = conn;
+ else
+ BUG_ON(lksb->lksb_conn != conn);
return active_stack->sp_ops->dlm_lock(conn, mode, lksb, flags,
- name, namelen, astarg);
+ name, namelen);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lock);
int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn,
- union ocfs2_dlm_lksb *lksb,
- u32 flags,
- struct ocfs2_lock_res *astarg)
+ struct ocfs2_dlm_lksb *lksb,
+ u32 flags)
{
- BUG_ON(lproto == NULL);
+ BUG_ON(lksb->lksb_conn == NULL);
- return active_stack->sp_ops->dlm_unlock(conn, lksb, flags, astarg);
+ return active_stack->sp_ops->dlm_unlock(conn, lksb, flags);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_unlock);
-int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
+int ocfs2_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
{
return active_stack->sp_ops->lock_status(lksb);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lock_status);
-int ocfs2_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
+int ocfs2_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb)
{
return active_stack->sp_ops->lvb_valid(lksb);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb_valid);
-void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb)
+void *ocfs2_dlm_lvb(struct ocfs2_dlm_lksb *lksb)
{
return active_stack->sp_ops->lock_lvb(lksb);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb);
-void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb)
+void ocfs2_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb)
{
active_stack->sp_ops->dump_lksb(lksb);
}
@@ -312,6 +311,7 @@ EXPORT_SYMBOL_GPL(ocfs2_plock);
int ocfs2_cluster_connect(const char *stack_name,
const char *group,
int grouplen,
+ struct ocfs2_locking_protocol *lproto,
void (*recovery_handler)(int node_num,
void *recovery_data),
void *recovery_data,
@@ -329,6 +329,12 @@ int ocfs2_cluster_connect(const char *stack_name,
goto out;
}
+ if (memcmp(&lproto->lp_max_version, &locking_max_version,
+ sizeof(struct ocfs2_protocol_version))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
new_conn = kzalloc(sizeof(struct ocfs2_cluster_connection),
GFP_KERNEL);
if (!new_conn) {
@@ -341,6 +347,7 @@ int ocfs2_cluster_connect(const char *stack_name,
new_conn->cc_recovery_handler = recovery_handler;
new_conn->cc_recovery_data = recovery_data;
+ new_conn->cc_proto = lproto;
/* Start the new connection at our maximum compatibility level */
new_conn->cc_version = lproto->lp_max_version;
@@ -366,6 +373,24 @@ out:
}
EXPORT_SYMBOL_GPL(ocfs2_cluster_connect);
+/* The caller will ensure all nodes have the same cluster stack */
+int ocfs2_cluster_connect_agnostic(const char *group,
+ int grouplen,
+ struct ocfs2_locking_protocol *lproto,
+ void (*recovery_handler)(int node_num,
+ void *recovery_data),
+ void *recovery_data,
+ struct ocfs2_cluster_connection **conn)
+{
+ char *stack_name = NULL;
+
+ if (cluster_stack_name[0])
+ stack_name = cluster_stack_name;
+ return ocfs2_cluster_connect(stack_name, group, grouplen, lproto,
+ recovery_handler, recovery_data, conn);
+}
+EXPORT_SYMBOL_GPL(ocfs2_cluster_connect_agnostic);
+
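A hypothetical caller of the agnostic connect (names invented for illustration; compare user_dlm_register() earlier in this patch):

	static struct ocfs2_locking_protocol example_lproto;	/* filled in at init */

	static void example_recovery(int node_num, void *recovery_data)
	{
		/* this caller ignores recovery events */
	}

	static struct ocfs2_cluster_connection *example_connect(struct qstr *name)
	{
		struct ocfs2_cluster_connection *conn;
		int rc;

		rc = ocfs2_cluster_connect_agnostic(name->name, name->len,
						    &example_lproto,
						    example_recovery, NULL,
						    &conn);
		return rc ? ERR_PTR(rc) : conn;
	}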
/* If hangup_pending is 0, the stack driver will be dropped */
int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn,
int hangup_pending)
@@ -453,10 +478,10 @@ static ssize_t ocfs2_max_locking_protocol_show(struct kobject *kobj,
ssize_t ret = 0;
spin_lock(&ocfs2_stack_lock);
- if (lproto)
+ if (locking_max_version.pv_major)
ret = snprintf(buf, PAGE_SIZE, "%u.%u\n",
- lproto->lp_max_version.pv_major,
- lproto->lp_max_version.pv_minor);
+ locking_max_version.pv_major,
+ locking_max_version.pv_minor);
spin_unlock(&ocfs2_stack_lock);
return ret;
@@ -685,7 +710,10 @@ static int __init ocfs2_stack_glue_init(void)
static void __exit ocfs2_stack_glue_exit(void)
{
- lproto = NULL;
+ memset(&locking_max_version, 0,
+ sizeof(struct ocfs2_protocol_version));
+ locking_max_version.pv_major = 0;
+ locking_max_version.pv_minor = 0;
ocfs2_sysfs_exit();
if (ocfs2_table_header)
unregister_sysctl_table(ocfs2_table_header);
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index 03a44d6..8ce7398 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -56,17 +56,6 @@ struct ocfs2_protocol_version {
};
/*
- * The ocfs2_locking_protocol defines the handlers called on ocfs2's behalf.
- */
-struct ocfs2_locking_protocol {
- struct ocfs2_protocol_version lp_max_version;
- void (*lp_lock_ast)(void *astarg);
- void (*lp_blocking_ast)(void *astarg, int level);
- void (*lp_unlock_ast)(void *astarg, int error);
-};
-
-
-/*
* The dlm_lockstatus struct includes lvb space, but the dlm_lksb struct only
* has a pointer to separately allocated lvb space. This struct exists only to
* include in the lksb union to make space for a combined dlm_lksb and lvb.
@@ -81,12 +70,27 @@ struct fsdlm_lksb_plus_lvb {
* size of the union is known. Lock status structures are embedded in
* ocfs2 inodes.
*/
-union ocfs2_dlm_lksb {
- struct dlm_lockstatus lksb_o2dlm;
- struct dlm_lksb lksb_fsdlm;
- struct fsdlm_lksb_plus_lvb padding;
+struct ocfs2_cluster_connection;
+struct ocfs2_dlm_lksb {
+ union {
+ struct dlm_lockstatus lksb_o2dlm;
+ struct dlm_lksb lksb_fsdlm;
+ struct fsdlm_lksb_plus_lvb padding;
+ };
+ struct ocfs2_cluster_connection *lksb_conn;
+};
+
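How a caller finds its object from the lksb under the new scheme (hypothetical structure, assuming the lksb is embedded in the caller's own lock object):

	struct my_lock {			/* illustrative caller structure */
		struct ocfs2_dlm_lksb ml_lksb;	/* embedded, not a pointer */
		int ml_granted;
	};

	static void my_lock_ast(struct ocfs2_dlm_lksb *lksb)
	{
		struct my_lock *ml = container_of(lksb, struct my_lock, ml_lksb);

		ml->ml_granted = 1;
	}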
+/*
+ * The ocfs2_locking_protocol defines the handlers called on ocfs2's behalf.
+ */
+struct ocfs2_locking_protocol {
+ struct ocfs2_protocol_version lp_max_version;
+ void (*lp_lock_ast)(struct ocfs2_dlm_lksb *lksb);
+ void (*lp_blocking_ast)(struct ocfs2_dlm_lksb *lksb, int level);
+ void (*lp_unlock_ast)(struct ocfs2_dlm_lksb *lksb, int error);
};
+
/*
* A cluster connection. Mostly opaque to ocfs2, the connection holds
* state for the underlying stack. ocfs2 does use cc_version to determine
@@ -96,6 +100,7 @@ struct ocfs2_cluster_connection {
char cc_name[GROUP_NAME_MAX];
int cc_namelen;
struct ocfs2_protocol_version cc_version;
+ struct ocfs2_locking_protocol *cc_proto;
void (*cc_recovery_handler)(int node_num, void *recovery_data);
void *cc_recovery_data;
void *cc_lockspace;
@@ -155,27 +160,29 @@ struct ocfs2_stack_operations {
*
* ast and bast functions are not part of the call because the
* stack will likely want to wrap ast and bast calls before passing
- * them to stack->sp_proto.
+ * them to conn->cc_proto. There is no astarg. The lksb will
+ * be passed back to the ast and bast functions. The caller can
+ * use this to find their object.
*/
int (*dlm_lock)(struct ocfs2_cluster_connection *conn,
int mode,
- union ocfs2_dlm_lksb *lksb,
+ struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
- unsigned int namelen,
- void *astarg);
+ unsigned int namelen);
/*
* Call the underlying dlm unlock function. The ->dlm_unlock()
* function should convert the flags as appropriate.
*
* The unlock ast is not passed, as the stack will want to wrap
- * it before calling stack->sp_proto->lp_unlock_ast().
+ * it before calling conn->cc_proto->lp_unlock_ast(). There is
+ * no astarg. The lksb will be passed back to the unlock ast
+ * function. The caller can use this to find their object.
*/
int (*dlm_unlock)(struct ocfs2_cluster_connection *conn,
- union ocfs2_dlm_lksb *lksb,
- u32 flags,
- void *astarg);
+ struct ocfs2_dlm_lksb *lksb,
+ u32 flags);
/*
* Return the status of the current lock status block. The fs
@@ -183,17 +190,17 @@ struct ocfs2_stack_operations {
* callback pulls out the stack-specific lksb, converts the status
* to a proper errno, and returns it.
*/
- int (*lock_status)(union ocfs2_dlm_lksb *lksb);
+ int (*lock_status)(struct ocfs2_dlm_lksb *lksb);
/*
* Return non-zero if the LVB is valid.
*/
- int (*lvb_valid)(union ocfs2_dlm_lksb *lksb);
+ int (*lvb_valid)(struct ocfs2_dlm_lksb *lksb);
/*
* Pull the lvb pointer off of the stack-specific lksb.
*/
- void *(*lock_lvb)(union ocfs2_dlm_lksb *lksb);
+ void *(*lock_lvb)(struct ocfs2_dlm_lksb *lksb);
/*
* Cluster-aware posix locks
@@ -210,7 +217,7 @@ struct ocfs2_stack_operations {
* This is an optional debugging hook. If provided, the
* stack can dump debugging information about this lock.
*/
- void (*dump_lksb)(union ocfs2_dlm_lksb *lksb);
+ void (*dump_lksb)(struct ocfs2_dlm_lksb *lksb);
};
/*
@@ -226,7 +233,7 @@ struct ocfs2_stack_plugin {
/* These are managed by the stackglue code. */
struct list_head sp_list;
unsigned int sp_count;
- struct ocfs2_locking_protocol *sp_proto;
+ struct ocfs2_protocol_version sp_max_proto;
};
@@ -234,10 +241,22 @@ struct ocfs2_stack_plugin {
int ocfs2_cluster_connect(const char *stack_name,
const char *group,
int grouplen,
+ struct ocfs2_locking_protocol *lproto,
void (*recovery_handler)(int node_num,
void *recovery_data),
void *recovery_data,
struct ocfs2_cluster_connection **conn);
+/*
+ * Used by callers that don't store their stack name. They must ensure
+ * all nodes have the same stack.
+ */
+int ocfs2_cluster_connect_agnostic(const char *group,
+ int grouplen,
+ struct ocfs2_locking_protocol *lproto,
+ void (*recovery_handler)(int node_num,
+ void *recovery_data),
+ void *recovery_data,
+ struct ocfs2_cluster_connection **conn);
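A hedged usage sketch; the group name, locking protocol, and recovery handler are made-up placeholders, not symbols from this patch:

        struct ocfs2_cluster_connection *conn;
        int rc;

        rc = ocfs2_cluster_connect_agnostic("example-group", 13,
                                            &example_lproto,
                                            example_recovery_handler,
                                            NULL, &conn);
        if (rc)
                return rc;  /* no stack name checked; nodes must agree out of band */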
int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn,
int hangup_pending);
void ocfs2_cluster_hangup(const char *group, int grouplen);
@@ -246,26 +265,24 @@ int ocfs2_cluster_this_node(unsigned int *node);
struct ocfs2_lock_res;
int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn,
int mode,
- union ocfs2_dlm_lksb *lksb,
+ struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
- unsigned int namelen,
- struct ocfs2_lock_res *astarg);
+ unsigned int namelen);
int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn,
- union ocfs2_dlm_lksb *lksb,
- u32 flags,
- struct ocfs2_lock_res *astarg);
+ struct ocfs2_dlm_lksb *lksb,
+ u32 flags);
-int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb);
-int ocfs2_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb);
-void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb);
-void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb);
+int ocfs2_dlm_lock_status(struct ocfs2_dlm_lksb *lksb);
+int ocfs2_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb);
+void *ocfs2_dlm_lvb(struct ocfs2_dlm_lksb *lksb);
+void ocfs2_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb);
int ocfs2_stack_supports_plocks(void);
int ocfs2_plock(struct ocfs2_cluster_connection *conn, u64 ino,
struct file *file, int cmd, struct file_lock *fl);
-void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto);
+void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_proto);
/* Used by stack plugins */
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index c30b644..c3c60bc 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -51,7 +51,7 @@
#define ALLOC_NEW_GROUP 0x1
#define ALLOC_GROUPS_FROM_GLOBAL 0x2
-#define OCFS2_MAX_INODES_TO_STEAL 1024
+#define OCFS2_MAX_TO_STEAL 1024
static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
@@ -637,12 +637,113 @@ bail:
return status;
}
+static void ocfs2_init_inode_steal_slot(struct ocfs2_super *osb)
+{
+ spin_lock(&osb->osb_lock);
+ osb->s_inode_steal_slot = OCFS2_INVALID_SLOT;
+ spin_unlock(&osb->osb_lock);
+ atomic_set(&osb->s_num_inodes_stolen, 0);
+}
+
+static void ocfs2_init_meta_steal_slot(struct ocfs2_super *osb)
+{
+ spin_lock(&osb->osb_lock);
+ osb->s_meta_steal_slot = OCFS2_INVALID_SLOT;
+ spin_unlock(&osb->osb_lock);
+ atomic_set(&osb->s_num_meta_stolen, 0);
+}
+
+void ocfs2_init_steal_slots(struct ocfs2_super *osb)
+{
+ ocfs2_init_inode_steal_slot(osb);
+ ocfs2_init_meta_steal_slot(osb);
+}
+
+static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type)
+{
+ spin_lock(&osb->osb_lock);
+ if (type == INODE_ALLOC_SYSTEM_INODE)
+ osb->s_inode_steal_slot = slot;
+ else if (type == EXTENT_ALLOC_SYSTEM_INODE)
+ osb->s_meta_steal_slot = slot;
+ spin_unlock(&osb->osb_lock);
+}
+
+static int __ocfs2_get_steal_slot(struct ocfs2_super *osb, int type)
+{
+ int slot = OCFS2_INVALID_SLOT;
+
+ spin_lock(&osb->osb_lock);
+ if (type == INODE_ALLOC_SYSTEM_INODE)
+ slot = osb->s_inode_steal_slot;
+ else if (type == EXTENT_ALLOC_SYSTEM_INODE)
+ slot = osb->s_meta_steal_slot;
+ spin_unlock(&osb->osb_lock);
+
+ return slot;
+}
+
+static int ocfs2_get_inode_steal_slot(struct ocfs2_super *osb)
+{
+ return __ocfs2_get_steal_slot(osb, INODE_ALLOC_SYSTEM_INODE);
+}
+
+static int ocfs2_get_meta_steal_slot(struct ocfs2_super *osb)
+{
+ return __ocfs2_get_steal_slot(osb, EXTENT_ALLOC_SYSTEM_INODE);
+}
+
+static int ocfs2_steal_resource(struct ocfs2_super *osb,
+ struct ocfs2_alloc_context *ac,
+ int type)
+{
+ int i, status = -ENOSPC;
+ int slot = __ocfs2_get_steal_slot(osb, type);
+
+ /* Start to steal resource from the first slot after ours. */
+ if (slot == OCFS2_INVALID_SLOT)
+ slot = osb->slot_num + 1;
+
+ for (i = 0; i < osb->max_slots; i++, slot++) {
+ if (slot == osb->max_slots)
+ slot = 0;
+
+ if (slot == osb->slot_num)
+ continue;
+
+ status = ocfs2_reserve_suballoc_bits(osb, ac,
+ type,
+ (u32)slot, NULL,
+ NOT_ALLOC_NEW_GROUP);
+ if (status >= 0) {
+ __ocfs2_set_steal_slot(osb, slot, type);
+ break;
+ }
+
+ ocfs2_free_ac_resource(ac);
+ }
+
+ return status;
+}
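For intuition, a hedged walk of the rotation above: with max_slots = 4 and slot_num = 2, a fresh search (slot == OCFS2_INVALID_SLOT) starts at slot 3, wraps to 0, then 1, always skipping our own slot 2; the first slot that yields bits is cached via __ocfs2_set_steal_slot() so the next steal resumes there instead of rescanning.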
+
+static int ocfs2_steal_inode(struct ocfs2_super *osb,
+ struct ocfs2_alloc_context *ac)
+{
+ return ocfs2_steal_resource(osb, ac, INODE_ALLOC_SYSTEM_INODE);
+}
+
+static int ocfs2_steal_meta(struct ocfs2_super *osb,
+ struct ocfs2_alloc_context *ac)
+{
+ return ocfs2_steal_resource(osb, ac, EXTENT_ALLOC_SYSTEM_INODE);
+}
+
int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
int blocks,
struct ocfs2_alloc_context **ac)
{
int status;
- u32 slot;
+ int slot = ocfs2_get_meta_steal_slot(osb);
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
if (!(*ac)) {
@@ -653,12 +754,34 @@ int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
(*ac)->ac_bits_wanted = blocks;
(*ac)->ac_which = OCFS2_AC_USE_META;
- slot = osb->slot_num;
(*ac)->ac_group_search = ocfs2_block_group_search;
+ if (slot != OCFS2_INVALID_SLOT &&
+ atomic_read(&osb->s_num_meta_stolen) < OCFS2_MAX_TO_STEAL)
+ goto extent_steal;
+
+ atomic_set(&osb->s_num_meta_stolen, 0);
status = ocfs2_reserve_suballoc_bits(osb, (*ac),
EXTENT_ALLOC_SYSTEM_INODE,
- slot, NULL, ALLOC_NEW_GROUP);
+ (u32)osb->slot_num, NULL,
+ ALLOC_NEW_GROUP);
+
+ if (status >= 0) {
+ status = 0;
+ if (slot != OCFS2_INVALID_SLOT)
+ ocfs2_init_meta_steal_slot(osb);
+ goto bail;
+ } else if (status < 0 && status != -ENOSPC) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ocfs2_free_ac_resource(*ac);
+
+extent_steal:
+ status = ocfs2_steal_meta(osb, *ac);
+ atomic_inc(&osb->s_num_meta_stolen);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
@@ -685,43 +808,11 @@ int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
ac);
}
-static int ocfs2_steal_inode_from_other_nodes(struct ocfs2_super *osb,
- struct ocfs2_alloc_context *ac)
-{
- int i, status = -ENOSPC;
- s16 slot = ocfs2_get_inode_steal_slot(osb);
-
- /* Start to steal inodes from the first slot after ours. */
- if (slot == OCFS2_INVALID_SLOT)
- slot = osb->slot_num + 1;
-
- for (i = 0; i < osb->max_slots; i++, slot++) {
- if (slot == osb->max_slots)
- slot = 0;
-
- if (slot == osb->slot_num)
- continue;
-
- status = ocfs2_reserve_suballoc_bits(osb, ac,
- INODE_ALLOC_SYSTEM_INODE,
- slot, NULL,
- NOT_ALLOC_NEW_GROUP);
- if (status >= 0) {
- ocfs2_set_inode_steal_slot(osb, slot);
- break;
- }
-
- ocfs2_free_ac_resource(ac);
- }
-
- return status;
-}
-
int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
struct ocfs2_alloc_context **ac)
{
int status;
- s16 slot = ocfs2_get_inode_steal_slot(osb);
+ int slot = ocfs2_get_inode_steal_slot(osb);
u64 alloc_group;
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
@@ -754,14 +845,14 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
* need to check our slots to see whether there is some space for us.
*/
if (slot != OCFS2_INVALID_SLOT &&
- atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_INODES_TO_STEAL)
+ atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_TO_STEAL)
goto inode_steal;
atomic_set(&osb->s_num_inodes_stolen, 0);
alloc_group = osb->osb_inode_alloc_group;
status = ocfs2_reserve_suballoc_bits(osb, *ac,
INODE_ALLOC_SYSTEM_INODE,
- osb->slot_num,
+ (u32)osb->slot_num,
&alloc_group,
ALLOC_NEW_GROUP |
ALLOC_GROUPS_FROM_GLOBAL);
@@ -789,7 +880,7 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
ocfs2_free_ac_resource(*ac);
inode_steal:
- status = ocfs2_steal_inode_from_other_nodes(osb, *ac);
+ status = ocfs2_steal_inode(osb, *ac);
atomic_inc(&osb->s_num_inodes_stolen);
if (status < 0) {
if (status != -ENOSPC)
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index 8c9a78a..fa60723 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -56,6 +56,7 @@ struct ocfs2_alloc_context {
is the same as ~0 - unlimited */
};
+void ocfs2_init_steal_slots(struct ocfs2_super *osb);
void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac);
static inline int ocfs2_alloc_context_bits_left(struct ocfs2_alloc_context *ac)
{
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 755cd49..dee0319 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -69,6 +69,7 @@
#include "xattr.h"
#include "quota.h"
#include "refcounttree.h"
+#include "suballoc.h"
#include "buffer_head_io.h"
@@ -301,9 +302,12 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
spin_lock(&osb->osb_lock);
out += snprintf(buf + out, len - out,
- "%10s => Slot: %d NumStolen: %d\n", "Steal",
+ "%10s => InodeSlot: %d StolenInodes: %d, "
+ "MetaSlot: %d StolenMeta: %d\n", "Steal",
osb->s_inode_steal_slot,
- atomic_read(&osb->s_num_inodes_stolen));
+ atomic_read(&osb->s_num_inodes_stolen),
+ osb->s_meta_steal_slot,
+ atomic_read(&osb->s_num_meta_stolen));
spin_unlock(&osb->osb_lock);
out += snprintf(buf + out, len - out, "OrphanScan => ");
@@ -1997,7 +2001,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
osb->blocked_lock_count = 0;
spin_lock_init(&osb->osb_lock);
spin_lock_init(&osb->osb_xattr_lock);
- ocfs2_init_inode_steal_slot(osb);
+ ocfs2_init_steal_slots(osb);
atomic_set(&osb->alloc_stats.moves, 0);
atomic_set(&osb->alloc_stats.local_data, 0);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 8fc6fb0..d1b0d38 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -116,10 +116,11 @@ static struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
};
struct ocfs2_xattr_info {
- int name_index;
- const char *name;
- const void *value;
- size_t value_len;
+ int xi_name_index;
+ const char *xi_name;
+ int xi_name_len;
+ const void *xi_value;
+ size_t xi_value_len;
};
struct ocfs2_xattr_search {
@@ -137,6 +138,115 @@ struct ocfs2_xattr_search {
int not_found;
};
+/* Operations on struct ocfs2_xa_loc */
+struct ocfs2_xa_loc;
+struct ocfs2_xa_loc_operations {
+ /*
+ * Journal functions
+ */
+ int (*xlo_journal_access)(handle_t *handle, struct ocfs2_xa_loc *loc,
+ int type);
+ void (*xlo_journal_dirty)(handle_t *handle, struct ocfs2_xa_loc *loc);
+
+ /*
+ * Return a pointer to the appropriate buffer in loc->xl_storage
+ * at the given offset from loc->xl_header.
+ */
+ void *(*xlo_offset_pointer)(struct ocfs2_xa_loc *loc, int offset);
+
+ /* Can we reuse the existing entry for the new value? */
+ int (*xlo_can_reuse)(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi);
+
+ /* How much space is needed for the new value? */
+ int (*xlo_check_space)(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi);
+
+ /*
+ * Return the offset of the first name+value pair. This is
+ * the start of our downward-filling free space.
+ */
+ int (*xlo_get_free_start)(struct ocfs2_xa_loc *loc);
+
+ /*
+ * Remove the name+value at this location. Do whatever is
+ * appropriate with the remaining name+value pairs.
+ */
+ void (*xlo_wipe_namevalue)(struct ocfs2_xa_loc *loc);
+
+ /* Fill xl_entry with a new entry */
+ void (*xlo_add_entry)(struct ocfs2_xa_loc *loc, u32 name_hash);
+
+ /* Add name+value storage to an entry */
+ void (*xlo_add_namevalue)(struct ocfs2_xa_loc *loc, int size);
+
+ /*
+ * Initialize the value buf's access and bh fields for this entry.
+ * ocfs2_xa_fill_value_buf() will handle the xv pointer.
+ */
+ void (*xlo_fill_value_buf)(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_value_buf *vb);
+};
+
+/*
+ * Describes an xattr entry location. This is an in-memory structure
+ * tracking the on-disk structure.
+ */
+struct ocfs2_xa_loc {
+ /* This xattr belongs to this inode */
+ struct inode *xl_inode;
+
+ /* The ocfs2_xattr_header inside the on-disk storage. Not NULL. */
+ struct ocfs2_xattr_header *xl_header;
+
+ /* Bytes from xl_header to the end of the storage */
+ int xl_size;
+
+ /*
+ * The ocfs2_xattr_entry this location describes. If this is
+ * NULL, this location describes the on-disk structure where it
+ * would have been.
+ */
+ struct ocfs2_xattr_entry *xl_entry;
+
+ /*
+ * Internal housekeeping
+ */
+
+ /* Buffer(s) containing this entry */
+ void *xl_storage;
+
+ /* Operations on the storage backing this location */
+ const struct ocfs2_xa_loc_operations *xl_ops;
+};
+
+/*
+ * Convenience functions to calculate how much space is needed for a
+ * given name+value pair
+ */
+static int namevalue_size(int name_len, uint64_t value_len)
+{
+ if (value_len > OCFS2_XATTR_INLINE_SIZE)
+ return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
+ else
+ return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
+}
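A hedged worked example, assuming OCFS2_XATTR_SIZE() rounds up to a 4-byte multiple: a 7-byte name with a 13-byte inline value needs OCFS2_XATTR_SIZE(7) + OCFS2_XATTR_SIZE(13) = 8 + 16 = 24 bytes, while the same name with a value past OCFS2_XATTR_INLINE_SIZE needs 8 + OCFS2_XATTR_ROOT_SIZE, since only the tree root occupies the name+value region.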
+
+static int namevalue_size_xi(struct ocfs2_xattr_info *xi)
+{
+ return namevalue_size(xi->xi_name_len, xi->xi_value_len);
+}
+
+static int namevalue_size_xe(struct ocfs2_xattr_entry *xe)
+{
+ u64 value_len = le64_to_cpu(xe->xe_value_size);
+
+ BUG_ON((value_len > OCFS2_XATTR_INLINE_SIZE) &&
+ ocfs2_xattr_is_local(xe));
+ return namevalue_size(xe->xe_name_len, value_len);
+}
+
static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
struct ocfs2_xattr_header *xh,
int index,
@@ -212,14 +322,6 @@ static inline u16 ocfs2_blocks_per_xattr_bucket(struct super_block *sb)
return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits);
}
-static inline u16 ocfs2_xattr_max_xe_in_bucket(struct super_block *sb)
-{
- u16 len = sb->s_blocksize -
- offsetof(struct ocfs2_xattr_header, xh_entries);
-
- return len / sizeof(struct ocfs2_xattr_entry);
-}
-
#define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr)
#define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data)
#define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0))
@@ -463,35 +565,22 @@ static u32 ocfs2_xattr_name_hash(struct inode *inode,
return hash;
}
-/*
- * ocfs2_xattr_hash_entry()
- *
- * Compute the hash of an extended attribute.
- */
-static void ocfs2_xattr_hash_entry(struct inode *inode,
- struct ocfs2_xattr_header *header,
- struct ocfs2_xattr_entry *entry)
+static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len)
{
- u32 hash = 0;
- char *name = (char *)header + le16_to_cpu(entry->xe_name_offset);
-
- hash = ocfs2_xattr_name_hash(inode, name, entry->xe_name_len);
- entry->xe_name_hash = cpu_to_le32(hash);
-
- return;
+ return namevalue_size(name_len, value_len) +
+ sizeof(struct ocfs2_xattr_entry);
}
-static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len)
+static int ocfs2_xi_entry_usage(struct ocfs2_xattr_info *xi)
{
- int size = 0;
-
- if (value_len <= OCFS2_XATTR_INLINE_SIZE)
- size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
- else
- size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
- size += sizeof(struct ocfs2_xattr_entry);
+ return namevalue_size_xi(xi) +
+ sizeof(struct ocfs2_xattr_entry);
+}
- return size;
+static int ocfs2_xe_entry_usage(struct ocfs2_xattr_entry *xe)
+{
+ return namevalue_size_xe(xe) +
+ sizeof(struct ocfs2_xattr_entry);
}
int ocfs2_calc_security_init(struct inode *dir,
@@ -1308,452 +1397,897 @@ out:
return ret;
}
-static int ocfs2_xattr_cleanup(struct inode *inode,
- handle_t *handle,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_value_buf *vb,
- size_t offs)
+static int ocfs2_xa_check_space_helper(int needed_space, int free_start,
+ int num_entries)
{
- int ret = 0;
- size_t name_len = strlen(xi->name);
- void *val = xs->base + offs;
- size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
+ int free_space;
- ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
- /* Decrease xattr count */
- le16_add_cpu(&xs->header->xh_count, -1);
- /* Remove the xattr entry and tree root which has already be set*/
- memset((void *)xs->here, 0, sizeof(struct ocfs2_xattr_entry));
- memset(val, 0, size);
+ if (!needed_space)
+ return 0;
- ret = ocfs2_journal_dirty(handle, vb->vb_bh);
- if (ret < 0)
- mlog_errno(ret);
-out:
- return ret;
+ free_space = free_start -
+ sizeof(struct ocfs2_xattr_header) -
+ (num_entries * sizeof(struct ocfs2_xattr_entry)) -
+ OCFS2_XATTR_HEADER_GAP;
+ if (free_space < 0)
+ return -EIO;
+ if (free_space < needed_space)
+ return -ENOSPC;
+
+ return 0;
}
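To make the helper concrete (the struct sizes here are assumptions for the arithmetic, not taken from this patch): with sizeof(struct ocfs2_xattr_header) == 32, sizeof(struct ocfs2_xattr_entry) == 16, OCFS2_XATTR_HEADER_GAP == 4, free_start == 2048, and 10 entries, free_space = 2048 - 32 - 160 - 4 = 1852; any needed_space up to 1852 succeeds, anything larger gets -ENOSPC, and a negative free_space signals on-disk corruption with -EIO.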
-static int ocfs2_xattr_update_entry(struct inode *inode,
- handle_t *handle,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_value_buf *vb,
- size_t offs)
+static int ocfs2_xa_journal_access(handle_t *handle, struct ocfs2_xa_loc *loc,
+ int type)
{
- int ret;
+ return loc->xl_ops->xlo_journal_access(handle, loc, type);
+}
- ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
+static void ocfs2_xa_journal_dirty(handle_t *handle, struct ocfs2_xa_loc *loc)
+{
+ loc->xl_ops->xlo_journal_dirty(handle, loc);
+}
- xs->here->xe_name_offset = cpu_to_le16(offs);
- xs->here->xe_value_size = cpu_to_le64(xi->value_len);
- if (xi->value_len <= OCFS2_XATTR_INLINE_SIZE)
- ocfs2_xattr_set_local(xs->here, 1);
- else
- ocfs2_xattr_set_local(xs->here, 0);
- ocfs2_xattr_hash_entry(inode, xs->header, xs->here);
+/* Give a pointer into the storage for the given offset */
+static void *ocfs2_xa_offset_pointer(struct ocfs2_xa_loc *loc, int offset)
+{
+ BUG_ON(offset >= loc->xl_size);
+ return loc->xl_ops->xlo_offset_pointer(loc, offset);
+}
- ret = ocfs2_journal_dirty(handle, vb->vb_bh);
- if (ret < 0)
- mlog_errno(ret);
-out:
- return ret;
+/*
+ * Wipe the name+value pair and allow the storage to reclaim it. This
+ * must be followed by either removal of the entry or a call to
+ * ocfs2_xa_add_namevalue().
+ */
+static void ocfs2_xa_wipe_namevalue(struct ocfs2_xa_loc *loc)
+{
+ loc->xl_ops->xlo_wipe_namevalue(loc);
}
/*
- * ocfs2_xattr_set_value_outside()
- *
- * Set large size value in B tree.
+ * Find lowest offset to a name+value pair. This is the start of our
+ * downward-growing free space.
*/
-static int ocfs2_xattr_set_value_outside(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_set_ctxt *ctxt,
- struct ocfs2_xattr_value_buf *vb,
- size_t offs)
+static int ocfs2_xa_get_free_start(struct ocfs2_xa_loc *loc)
{
- size_t name_len = strlen(xi->name);
- void *val = xs->base + offs;
- struct ocfs2_xattr_value_root *xv = NULL;
- size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
- int ret = 0;
+ return loc->xl_ops->xlo_get_free_start(loc);
+}
- memset(val, 0, size);
- memcpy(val, xi->name, name_len);
- xv = (struct ocfs2_xattr_value_root *)
- (val + OCFS2_XATTR_SIZE(name_len));
- xv->xr_clusters = 0;
- xv->xr_last_eb_blk = 0;
- xv->xr_list.l_tree_depth = 0;
- xv->xr_list.l_count = cpu_to_le16(1);
- xv->xr_list.l_next_free_rec = 0;
- vb->vb_xv = xv;
-
- ret = ocfs2_xattr_value_truncate(inode, vb, xi->value_len, ctxt);
- if (ret < 0) {
- mlog_errno(ret);
- return ret;
+/* Can we reuse loc->xl_entry for xi? */
+static int ocfs2_xa_can_reuse_entry(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ return loc->xl_ops->xlo_can_reuse(loc, xi);
+}
+
+/* How much free space is needed to set the new value */
+static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ return loc->xl_ops->xlo_check_space(loc, xi);
+}
+
+static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
+{
+ loc->xl_ops->xlo_add_entry(loc, name_hash);
+ loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
+ /*
+ * We can't leave the new entry's xe_name_offset at zero or
+ * add_namevalue() will go nuts. We set it to the size of our
+ * storage so that it can never be less than any other entry.
+ */
+ loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
+}
+
+static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ int size = namevalue_size_xi(xi);
+ int nameval_offset;
+ char *nameval_buf;
+
+ loc->xl_ops->xlo_add_namevalue(loc, size);
+ loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
+ loc->xl_entry->xe_name_len = xi->xi_name_len;
+ ocfs2_xattr_set_type(loc->xl_entry, xi->xi_name_index);
+ ocfs2_xattr_set_local(loc->xl_entry,
+ xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE);
+
+ nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
+ nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
+ memset(nameval_buf, 0, size);
+ memcpy(nameval_buf, xi->xi_name, xi->xi_name_len);
+}
+
+static void ocfs2_xa_fill_value_buf(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_value_buf *vb)
+{
+ int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
+ int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);
+
+ /* Value bufs are for value trees */
+ BUG_ON(ocfs2_xattr_is_local(loc->xl_entry));
+ BUG_ON(namevalue_size_xe(loc->xl_entry) !=
+ (name_size + OCFS2_XATTR_ROOT_SIZE));
+
+ loc->xl_ops->xlo_fill_value_buf(loc, vb);
+ vb->vb_xv =
+ (struct ocfs2_xattr_value_root *)ocfs2_xa_offset_pointer(loc,
+ nameval_offset +
+ name_size);
+}
+
+static int ocfs2_xa_block_journal_access(handle_t *handle,
+ struct ocfs2_xa_loc *loc, int type)
+{
+ struct buffer_head *bh = loc->xl_storage;
+ ocfs2_journal_access_func access;
+
+ if (loc->xl_size == (bh->b_size -
+ offsetof(struct ocfs2_xattr_block,
+ xb_attrs.xb_header)))
+ access = ocfs2_journal_access_xb;
+ else
+ access = ocfs2_journal_access_di;
+ return access(handle, INODE_CACHE(loc->xl_inode), bh, type);
+}
+
+static void ocfs2_xa_block_journal_dirty(handle_t *handle,
+ struct ocfs2_xa_loc *loc)
+{
+ struct buffer_head *bh = loc->xl_storage;
+
+ ocfs2_journal_dirty(handle, bh);
+}
+
+static void *ocfs2_xa_block_offset_pointer(struct ocfs2_xa_loc *loc,
+ int offset)
+{
+ return (char *)loc->xl_header + offset;
+}
+
+static int ocfs2_xa_block_can_reuse(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ /*
+ * Block storage is strict. If the sizes aren't exact, we will
+ * remove the old one and reinsert the new.
+ */
+ return namevalue_size_xe(loc->xl_entry) ==
+ namevalue_size_xi(xi);
+}
+
+static int ocfs2_xa_block_get_free_start(struct ocfs2_xa_loc *loc)
+{
+ struct ocfs2_xattr_header *xh = loc->xl_header;
+ int i, count = le16_to_cpu(xh->xh_count);
+ int offset, free_start = loc->xl_size;
+
+ for (i = 0; i < count; i++) {
+ offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
+ if (offset < free_start)
+ free_start = offset;
}
- ret = ocfs2_xattr_update_entry(inode, ctxt->handle, xi, xs, vb, offs);
- if (ret < 0) {
- mlog_errno(ret);
- return ret;
+
+ return free_start;
+}
+
+static int ocfs2_xa_block_check_space(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ int count = le16_to_cpu(loc->xl_header->xh_count);
+ int free_start = ocfs2_xa_get_free_start(loc);
+ int needed_space = ocfs2_xi_entry_usage(xi);
+
+ /*
+ * Block storage will reclaim the original entry before inserting
+ * the new value, so we only need the difference. If the new
+ * entry is smaller than the old one, we don't need anything.
+ */
+ if (loc->xl_entry) {
+ /* Don't need space if we're reusing! */
+ if (ocfs2_xa_can_reuse_entry(loc, xi))
+ needed_space = 0;
+ else
+ needed_space -= ocfs2_xe_entry_usage(loc->xl_entry);
}
- ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, vb,
- xi->value, xi->value_len);
- if (ret < 0)
- mlog_errno(ret);
+ if (needed_space < 0)
+ needed_space = 0;
+ return ocfs2_xa_check_space_helper(needed_space, free_start, count);
+}
- return ret;
+/*
+ * Block storage for xattrs keeps the name+value pairs compacted. When
+ * we remove one, we have to shift any that preceded it towards the end.
+ */
+static void ocfs2_xa_block_wipe_namevalue(struct ocfs2_xa_loc *loc)
+{
+ int i, offset;
+ int namevalue_offset, first_namevalue_offset, namevalue_size;
+ struct ocfs2_xattr_entry *entry = loc->xl_entry;
+ struct ocfs2_xattr_header *xh = loc->xl_header;
+ int count = le16_to_cpu(xh->xh_count);
+
+ namevalue_offset = le16_to_cpu(entry->xe_name_offset);
+ namevalue_size = namevalue_size_xe(entry);
+ first_namevalue_offset = ocfs2_xa_get_free_start(loc);
+
+ /* Shift the name+value pairs */
+ memmove((char *)xh + first_namevalue_offset + namevalue_size,
+ (char *)xh + first_namevalue_offset,
+ namevalue_offset - first_namevalue_offset);
+ memset((char *)xh + first_namevalue_offset, 0, namevalue_size);
+
+ /* Now tell xh->xh_entries about it */
+ for (i = 0; i < count; i++) {
+ offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
+ if (offset < namevalue_offset)
+ le16_add_cpu(&xh->xh_entries[i].xe_name_offset,
+ namevalue_size);
+ }
+
+ /*
+ * Note that we don't update xh_free_start or xh_name_value_len
+ * because they're not used in block-stored xattrs.
+ */
+}
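A hedged worked example of the shift: take xl_size = 4096 with pairs at offsets 4000, 3900, and 3800 (so first_namevalue_offset = 3800). Wiping the middle pair (offset 3900, size 100) memmoves the 100 bytes at 3800 up to 3900, zeroes [3800, 3900), and bumps the one entry whose xe_name_offset was below 3900 from 3800 to 3900, keeping the free region contiguous.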
+
+static void ocfs2_xa_block_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
+{
+ int count = le16_to_cpu(loc->xl_header->xh_count);
+ loc->xl_entry = &(loc->xl_header->xh_entries[count]);
+ le16_add_cpu(&loc->xl_header->xh_count, 1);
+ memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
+}
+
+static void ocfs2_xa_block_add_namevalue(struct ocfs2_xa_loc *loc, int size)
+{
+ int free_start = ocfs2_xa_get_free_start(loc);
+
+ loc->xl_entry->xe_name_offset = cpu_to_le16(free_start - size);
+}
+
+static void ocfs2_xa_block_fill_value_buf(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_value_buf *vb)
+{
+ struct buffer_head *bh = loc->xl_storage;
+
+ if (loc->xl_size == (bh->b_size -
+ offsetof(struct ocfs2_xattr_block,
+ xb_attrs.xb_header)))
+ vb->vb_access = ocfs2_journal_access_xb;
+ else
+ vb->vb_access = ocfs2_journal_access_di;
+ vb->vb_bh = bh;
}
/*
- * ocfs2_xattr_set_entry_local()
- *
- * Set, replace or remove extended attribute in local.
+ * Operations for xattrs stored in blocks. This includes inline inode
+ * storage and unindexed ocfs2_xattr_blocks.
*/
-static void ocfs2_xattr_set_entry_local(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_entry *last,
- size_t min_offs)
+static const struct ocfs2_xa_loc_operations ocfs2_xa_block_loc_ops = {
+ .xlo_journal_access = ocfs2_xa_block_journal_access,
+ .xlo_journal_dirty = ocfs2_xa_block_journal_dirty,
+ .xlo_offset_pointer = ocfs2_xa_block_offset_pointer,
+ .xlo_check_space = ocfs2_xa_block_check_space,
+ .xlo_can_reuse = ocfs2_xa_block_can_reuse,
+ .xlo_get_free_start = ocfs2_xa_block_get_free_start,
+ .xlo_wipe_namevalue = ocfs2_xa_block_wipe_namevalue,
+ .xlo_add_entry = ocfs2_xa_block_add_entry,
+ .xlo_add_namevalue = ocfs2_xa_block_add_namevalue,
+ .xlo_fill_value_buf = ocfs2_xa_block_fill_value_buf,
+};
+
+static int ocfs2_xa_bucket_journal_access(handle_t *handle,
+ struct ocfs2_xa_loc *loc, int type)
{
- size_t name_len = strlen(xi->name);
- int i;
+ struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
- if (xi->value && xs->not_found) {
- /* Insert the new xattr entry. */
- le16_add_cpu(&xs->header->xh_count, 1);
- ocfs2_xattr_set_type(last, xi->name_index);
- ocfs2_xattr_set_local(last, 1);
- last->xe_name_len = name_len;
- } else {
- void *first_val;
- void *val;
- size_t offs, size;
-
- first_val = xs->base + min_offs;
- offs = le16_to_cpu(xs->here->xe_name_offset);
- val = xs->base + offs;
-
- if (le64_to_cpu(xs->here->xe_value_size) >
- OCFS2_XATTR_INLINE_SIZE)
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_ROOT_SIZE;
+ return ocfs2_xattr_bucket_journal_access(handle, bucket, type);
+}
+
+static void ocfs2_xa_bucket_journal_dirty(handle_t *handle,
+ struct ocfs2_xa_loc *loc)
+{
+ struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
+
+ ocfs2_xattr_bucket_journal_dirty(handle, bucket);
+}
+
+static void *ocfs2_xa_bucket_offset_pointer(struct ocfs2_xa_loc *loc,
+ int offset)
+{
+ struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
+ int block, block_offset;
+
+ /* The header is at the front of the bucket */
+ block = offset >> loc->xl_inode->i_sb->s_blocksize_bits;
+ block_offset = offset % loc->xl_inode->i_sb->s_blocksize;
+
+ return bucket_block(bucket, block) + block_offset;
+}
+
+static int ocfs2_xa_bucket_can_reuse(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ return namevalue_size_xe(loc->xl_entry) >=
+ namevalue_size_xi(xi);
+}
+
+static int ocfs2_xa_bucket_get_free_start(struct ocfs2_xa_loc *loc)
+{
+ struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
+ return le16_to_cpu(bucket_xh(bucket)->xh_free_start);
+}
+
+static int ocfs2_bucket_align_free_start(struct super_block *sb,
+ int free_start, int size)
+{
+ /*
+ * We need to make sure that the name+value pair fits within
+ * one block.
+ */
+ if (((free_start - size) >> sb->s_blocksize_bits) !=
+ ((free_start - 1) >> sb->s_blocksize_bits))
+ free_start -= free_start % sb->s_blocksize;
+
+ return free_start;
+}
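A hedged worked example with 4096-byte blocks: free_start = 4100 and size = 200 gives (4100 - 200) >> 12 == 0 but (4100 - 1) >> 12 == 1, so the pair would straddle a block boundary; free_start is pulled back by 4100 % 4096 = 4 to 4096, and the pair then occupies [3896, 4096), entirely inside the first block.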
+
+static int ocfs2_xa_bucket_check_space(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ int rc;
+ int count = le16_to_cpu(loc->xl_header->xh_count);
+ int free_start = ocfs2_xa_get_free_start(loc);
+ int needed_space = ocfs2_xi_entry_usage(xi);
+ int size = namevalue_size_xi(xi);
+ struct super_block *sb = loc->xl_inode->i_sb;
+
+ /*
+ * Bucket storage does not reclaim name+value pairs it cannot
+ * reuse. They live as holes until the bucket fills, and then
+ * the bucket is defragmented. However, the bucket can reclaim
+ * the ocfs2_xattr_entry.
+ */
+ if (loc->xl_entry) {
+ /* Don't need space if we're reusing! */
+ if (ocfs2_xa_can_reuse_entry(loc, xi))
+ needed_space = 0;
else
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));
-
- if (xi->value && size == OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(xi->value_len)) {
- /* The old and the new value have the
- same size. Just replace the value. */
- ocfs2_xattr_set_local(xs->here, 1);
- xs->here->xe_value_size = cpu_to_le64(xi->value_len);
- /* Clear value bytes. */
- memset(val + OCFS2_XATTR_SIZE(name_len),
- 0,
- OCFS2_XATTR_SIZE(xi->value_len));
- memcpy(val + OCFS2_XATTR_SIZE(name_len),
- xi->value,
- xi->value_len);
- return;
- }
- /* Remove the old name+value. */
- memmove(first_val + size, first_val, val - first_val);
- memset(first_val, 0, size);
- xs->here->xe_name_hash = 0;
- xs->here->xe_name_offset = 0;
- ocfs2_xattr_set_local(xs->here, 1);
- xs->here->xe_value_size = 0;
-
- min_offs += size;
-
- /* Adjust all value offsets. */
- last = xs->header->xh_entries;
- for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) {
- size_t o = le16_to_cpu(last->xe_name_offset);
-
- if (o < offs)
- last->xe_name_offset = cpu_to_le16(o + size);
- last += 1;
- }
+ needed_space -= sizeof(struct ocfs2_xattr_entry);
+ }
+ BUG_ON(needed_space < 0);
- if (!xi->value) {
- /* Remove the old entry. */
- last -= 1;
- memmove(xs->here, xs->here + 1,
- (void *)last - (void *)xs->here);
- memset(last, 0, sizeof(struct ocfs2_xattr_entry));
- le16_add_cpu(&xs->header->xh_count, -1);
- }
+ if (free_start < size) {
+ if (needed_space)
+ return -ENOSPC;
+ } else {
+ /*
+ * First we check if it would fit in the first place.
+ * Below, we align the free start to a block. This may
+ * slide us below the minimum gap. By checking unaligned
+ * first, we avoid that error.
+ */
+ rc = ocfs2_xa_check_space_helper(needed_space, free_start,
+ count);
+ if (rc)
+ return rc;
+ free_start = ocfs2_bucket_align_free_start(sb, free_start,
+ size);
}
- if (xi->value) {
- /* Insert the new name+value. */
- size_t size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(xi->value_len);
- void *val = xs->base + min_offs - size;
+ return ocfs2_xa_check_space_helper(needed_space, free_start, count);
+}
+
+static void ocfs2_xa_bucket_wipe_namevalue(struct ocfs2_xa_loc *loc)
+{
+ le16_add_cpu(&loc->xl_header->xh_name_value_len,
+ -namevalue_size_xe(loc->xl_entry));
+}
- xs->here->xe_name_offset = cpu_to_le16(min_offs - size);
- memset(val, 0, size);
- memcpy(val, xi->name, name_len);
- memcpy(val + OCFS2_XATTR_SIZE(name_len),
- xi->value,
- xi->value_len);
- xs->here->xe_value_size = cpu_to_le64(xi->value_len);
- ocfs2_xattr_set_local(xs->here, 1);
- ocfs2_xattr_hash_entry(inode, xs->header, xs->here);
+static void ocfs2_xa_bucket_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
+{
+ struct ocfs2_xattr_header *xh = loc->xl_header;
+ int count = le16_to_cpu(xh->xh_count);
+ int low = 0, high = count - 1, tmp;
+ struct ocfs2_xattr_entry *tmp_xe;
+
+ /*
+ * We keep buckets sorted by name_hash, so we need to find
+ * our insert place.
+ */
+ while (low <= high && count) {
+ tmp = (low + high) / 2;
+ tmp_xe = &xh->xh_entries[tmp];
+
+ if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash))
+ low = tmp + 1;
+ else if (name_hash < le32_to_cpu(tmp_xe->xe_name_hash))
+ high = tmp - 1;
+ else {
+ low = tmp;
+ break;
+ }
}
- return;
+ if (low != count)
+ memmove(&xh->xh_entries[low + 1],
+ &xh->xh_entries[low],
+ ((count - low) * sizeof(struct ocfs2_xattr_entry)));
+
+ le16_add_cpu(&xh->xh_count, 1);
+ loc->xl_entry = &xh->xh_entries[low];
+ memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
+}
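A hedged trace of the insert: with existing hashes {10, 40, 60} and a new name_hash of 50, the search probes index 1 (40, go right) then index 2 (60, go left) and exits with low = 2; one entry is shifted right and the new entry lands at index 2, preserving {10, 40, 50, 60}.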
+
+static void ocfs2_xa_bucket_add_namevalue(struct ocfs2_xa_loc *loc, int size)
+{
+ int free_start = ocfs2_xa_get_free_start(loc);
+ struct ocfs2_xattr_header *xh = loc->xl_header;
+ struct super_block *sb = loc->xl_inode->i_sb;
+ int nameval_offset;
+
+ free_start = ocfs2_bucket_align_free_start(sb, free_start, size);
+ nameval_offset = free_start - size;
+ loc->xl_entry->xe_name_offset = cpu_to_le16(nameval_offset);
+ xh->xh_free_start = cpu_to_le16(nameval_offset);
+ le16_add_cpu(&xh->xh_name_value_len, size);
+
+
+static void ocfs2_xa_bucket_fill_value_buf(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_value_buf *vb)
+{
+ struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
+ struct super_block *sb = loc->xl_inode->i_sb;
+ int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
+ int size = namevalue_size_xe(loc->xl_entry);
+ int block_offset = nameval_offset >> sb->s_blocksize_bits;
+
+ /* Values are not allowed to straddle block boundaries */
+ BUG_ON(block_offset !=
+ ((nameval_offset + size - 1) >> sb->s_blocksize_bits));
+ /* We expect the bucket to be filled in */
+ BUG_ON(!bucket->bu_bhs[block_offset]);
+
+ vb->vb_access = ocfs2_journal_access;
+ vb->vb_bh = bucket->bu_bhs[block_offset];
+}
+
+/* Operations for xattrs stored in buckets. */
+static const struct ocfs2_xa_loc_operations ocfs2_xa_bucket_loc_ops = {
+ .xlo_journal_access = ocfs2_xa_bucket_journal_access,
+ .xlo_journal_dirty = ocfs2_xa_bucket_journal_dirty,
+ .xlo_offset_pointer = ocfs2_xa_bucket_offset_pointer,
+ .xlo_check_space = ocfs2_xa_bucket_check_space,
+ .xlo_can_reuse = ocfs2_xa_bucket_can_reuse,
+ .xlo_get_free_start = ocfs2_xa_bucket_get_free_start,
+ .xlo_wipe_namevalue = ocfs2_xa_bucket_wipe_namevalue,
+ .xlo_add_entry = ocfs2_xa_bucket_add_entry,
+ .xlo_add_namevalue = ocfs2_xa_bucket_add_namevalue,
+ .xlo_fill_value_buf = ocfs2_xa_bucket_fill_value_buf,
+};
+
+static unsigned int ocfs2_xa_value_clusters(struct ocfs2_xa_loc *loc)
+{
+ struct ocfs2_xattr_value_buf vb;
+
+ if (ocfs2_xattr_is_local(loc->xl_entry))
+ return 0;
+
+ ocfs2_xa_fill_value_buf(loc, &vb);
+ return le32_to_cpu(vb.vb_xv->xr_clusters);
+}
+
+static int ocfs2_xa_value_truncate(struct ocfs2_xa_loc *loc, u64 bytes,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int trunc_rc, access_rc;
+ struct ocfs2_xattr_value_buf vb;
+
+ ocfs2_xa_fill_value_buf(loc, &vb);
+ trunc_rc = ocfs2_xattr_value_truncate(loc->xl_inode, &vb, bytes,
+ ctxt);
+
+ /*
+ * The caller of ocfs2_xa_value_truncate() has already called
+ * ocfs2_xa_journal_access on the loc. However, the truncate code
+ * calls ocfs2_extend_trans(). This may commit the previous
+ * transaction and open a new one. If this is a bucket, truncate
+ * could leave only vb->vb_bh set up for journaling. Meanwhile,
+ * the caller is expecting to dirty the entire bucket. So we must
+ * reset the journal work. We do this even if truncate has failed,
+ * as it could have failed after committing the extend.
+ */
+ access_rc = ocfs2_xa_journal_access(ctxt->handle, loc,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+
+ /* Errors in truncate take precedence */
+ return trunc_rc ? trunc_rc : access_rc;
+}
+
+static void ocfs2_xa_remove_entry(struct ocfs2_xa_loc *loc)
+{
+ int index, count;
+ struct ocfs2_xattr_header *xh = loc->xl_header;
+ struct ocfs2_xattr_entry *entry = loc->xl_entry;
+
+ ocfs2_xa_wipe_namevalue(loc);
+ loc->xl_entry = NULL;
+
+ le16_add_cpu(&xh->xh_count, -1);
+ count = le16_to_cpu(xh->xh_count);
+
+ /*
+ * Only zero out the entry if there are more remaining. This is
+ * important for an empty bucket, as it keeps track of the
+ * bucket's hash value. It doesn't hurt empty block storage.
+ */
+ if (count) {
+ index = ((char *)entry - (char *)&xh->xh_entries) /
+ sizeof(struct ocfs2_xattr_entry);
+ memmove(&xh->xh_entries[index], &xh->xh_entries[index + 1],
+ (count - index) * sizeof(struct ocfs2_xattr_entry));
+ memset(&xh->xh_entries[count], 0,
+ sizeof(struct ocfs2_xattr_entry));
+ }
}
/*
- * ocfs2_xattr_set_entry()
+ * If we have a problem adjusting the size of an external value during
+ * ocfs2_xa_prepare_entry() or ocfs2_xa_remove(), we may have an xattr
+ * in an intermediate state. For example, the value may be partially
+ * truncated.
+ *
+ * If the value tree hasn't changed, the extend/truncate went nowhere.
+ * We have nothing to do. The caller can treat it as a straight error.
*
- * Set extended attribute entry into inode or block.
+ * If the value tree got partially truncated, we now have a corrupted
+ * extended attribute. We're going to wipe its entry and leak the
+ * clusters. Better to leak some storage than leave a corrupt entry.
*
- * If extended attribute value size > OCFS2_XATTR_INLINE_SIZE,
- * We first insert tree root(ocfs2_xattr_value_root) with set_entry_local(),
- * then set value in B tree with set_value_outside().
+ * If the value tree grew, it obviously didn't grow enough for the
+ * new entry. We're not going to try and reclaim those clusters either.
+ * If there was already an external value there (orig_clusters != 0),
+ * the new clusters are attached safely and we can just leave the old
+ * value in place. If there was no external value there, we remove
+ * the entry.
+ *
+ * This way, the xattr block we store in the journal will be consistent.
+ * If the size change broke because of the journal, no changes will hit
+ * disk anyway.
*/
-static int ocfs2_xattr_set_entry(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_set_ctxt *ctxt,
- int flag)
-{
- struct ocfs2_xattr_entry *last;
- struct ocfs2_inode_info *oi = OCFS2_I(inode);
- struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
- size_t min_offs = xs->end - xs->base, name_len = strlen(xi->name);
- size_t size_l = 0;
- handle_t *handle = ctxt->handle;
- int free, i, ret;
- struct ocfs2_xattr_info xi_l = {
- .name_index = xi->name_index,
- .name = xi->name,
- .value = xi->value,
- .value_len = xi->value_len,
- };
- struct ocfs2_xattr_value_buf vb = {
- .vb_bh = xs->xattr_bh,
- .vb_access = ocfs2_journal_access_di,
- };
+static void ocfs2_xa_cleanup_value_truncate(struct ocfs2_xa_loc *loc,
+ const char *what,
+ unsigned int orig_clusters)
+{
+ unsigned int new_clusters = ocfs2_xa_value_clusters(loc);
+ char *nameval_buf = ocfs2_xa_offset_pointer(loc,
+ le16_to_cpu(loc->xl_entry->xe_name_offset));
+
+ if (new_clusters < orig_clusters) {
+ mlog(ML_ERROR,
+ "Partial truncate while %s xattr %.*s. Leaking "
+ "%u clusters and removing the entry\n",
+ what, loc->xl_entry->xe_name_len, nameval_buf,
+ orig_clusters - new_clusters);
+ ocfs2_xa_remove_entry(loc);
+ } else if (!orig_clusters) {
+ mlog(ML_ERROR,
+ "Unable to allocate an external value for xattr "
+ "%.*s safely. Leaking %u clusters and removing the "
+ "entry\n",
+ loc->xl_entry->xe_name_len, nameval_buf,
+ new_clusters - orig_clusters);
+ ocfs2_xa_remove_entry(loc);
+ } else if (new_clusters > orig_clusters)
+ mlog(ML_ERROR,
+ "Unable to grow xattr %.*s safely. %u new clusters "
+ "have been added, but the value will not be "
+ "modified\n",
+ loc->xl_entry->xe_name_len, nameval_buf,
+ new_clusters - orig_clusters);
+}
+
+static int ocfs2_xa_remove(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int rc = 0;
+ unsigned int orig_clusters;
+
+ if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+ orig_clusters = ocfs2_xa_value_clusters(loc);
+ rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+ if (rc) {
+ mlog_errno(rc);
+ /*
+ * Since this is remove, we can return 0 if
+ * ocfs2_xa_cleanup_value_truncate() is going to
+ * wipe the entry anyway. So we check the
+ * cluster count as well.
+ */
+ if (orig_clusters != ocfs2_xa_value_clusters(loc))
+ rc = 0;
+ ocfs2_xa_cleanup_value_truncate(loc, "removing",
+ orig_clusters);
+ if (rc)
+ goto out;
+ }
+ }
- if (!(flag & OCFS2_INLINE_XATTR_FL)) {
- BUG_ON(xs->xattr_bh == xs->inode_bh);
- vb.vb_access = ocfs2_journal_access_xb;
- } else
- BUG_ON(xs->xattr_bh != xs->inode_bh);
+ ocfs2_xa_remove_entry(loc);
- /* Compute min_offs, last and free space. */
- last = xs->header->xh_entries;
+out:
+ return rc;
+}
- for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) {
- size_t offs = le16_to_cpu(last->xe_name_offset);
- if (offs < min_offs)
- min_offs = offs;
- last += 1;
- }
+static void ocfs2_xa_install_value_root(struct ocfs2_xa_loc *loc)
+{
+ int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);
+ char *nameval_buf;
- free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP;
- if (free < 0)
- return -EIO;
+ nameval_buf = ocfs2_xa_offset_pointer(loc,
+ le16_to_cpu(loc->xl_entry->xe_name_offset));
+ memcpy(nameval_buf + name_size, &def_xv, OCFS2_XATTR_ROOT_SIZE);
+}
- if (!xs->not_found) {
- size_t size = 0;
- if (ocfs2_xattr_is_local(xs->here))
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));
- else
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_ROOT_SIZE;
- free += (size + sizeof(struct ocfs2_xattr_entry));
- }
- /* Check free space in inode or block */
- if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
- if (free < sizeof(struct ocfs2_xattr_entry) +
- OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_ROOT_SIZE) {
- ret = -ENOSPC;
- goto out;
+/*
+ * Take an existing entry and make it ready for the new value. This
+ * won't allocate space, but it may free space. Afterwards the entry
+ * is ready for ocfs2_xa_prepare_entry() to finish the work.
+ */
+static int ocfs2_xa_reuse_entry(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int rc = 0;
+ int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
+ unsigned int orig_clusters;
+ char *nameval_buf;
+ int xe_local = ocfs2_xattr_is_local(loc->xl_entry);
+ int xi_local = xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE;
+
+ BUG_ON(OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len) !=
+ name_size);
+
+ nameval_buf = ocfs2_xa_offset_pointer(loc,
+ le16_to_cpu(loc->xl_entry->xe_name_offset));
+ if (xe_local) {
+ memset(nameval_buf + name_size, 0,
+ namevalue_size_xe(loc->xl_entry) - name_size);
+ if (!xi_local)
+ ocfs2_xa_install_value_root(loc);
+ } else {
+ orig_clusters = ocfs2_xa_value_clusters(loc);
+ if (xi_local) {
+ rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+ if (rc < 0)
+ mlog_errno(rc);
+ else
+ memset(nameval_buf + name_size, 0,
+ namevalue_size_xe(loc->xl_entry) -
+ name_size);
+ } else if (le64_to_cpu(loc->xl_entry->xe_value_size) >
+ xi->xi_value_len) {
+ rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len,
+ ctxt);
+ if (rc < 0)
+ mlog_errno(rc);
}
- size_l = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
- xi_l.value = (void *)&def_xv;
- xi_l.value_len = OCFS2_XATTR_ROOT_SIZE;
- } else if (xi->value) {
- if (free < sizeof(struct ocfs2_xattr_entry) +
- OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(xi->value_len)) {
- ret = -ENOSPC;
+
+ if (rc) {
+ ocfs2_xa_cleanup_value_truncate(loc, "reusing",
+ orig_clusters);
goto out;
}
}
- if (!xs->not_found) {
- /* For existing extended attribute */
- size_t size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));
- size_t offs = le16_to_cpu(xs->here->xe_name_offset);
- void *val = xs->base + offs;
+ loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
+ ocfs2_xattr_set_local(loc->xl_entry, xi_local);
- if (ocfs2_xattr_is_local(xs->here) && size == size_l) {
- /* Replace existing local xattr with tree root */
- ret = ocfs2_xattr_set_value_outside(inode, xi, xs,
- ctxt, &vb, offs);
- if (ret < 0)
- mlog_errno(ret);
- goto out;
- } else if (!ocfs2_xattr_is_local(xs->here)) {
- /* For existing xattr which has value outside */
- vb.vb_xv = (struct ocfs2_xattr_value_root *)
- (val + OCFS2_XATTR_SIZE(name_len));
+out:
+ return rc;
+}
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
- /*
- * If new value need set outside also,
- * first truncate old value to new value,
- * then set new value with set_value_outside().
- */
- ret = ocfs2_xattr_value_truncate(inode,
- &vb,
- xi->value_len,
- ctxt);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
+/*
+ * Prepares loc->xl_entry to receive the new xattr. This includes
+ * properly setting up the name+value pair region. If loc->xl_entry
+ * already exists, it will take care of modifying it appropriately.
+ *
+ * Note that this modifies the data. You did journal_access already,
+ * right?
+ */
+static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi,
+ u32 name_hash,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int rc = 0;
+ unsigned int orig_clusters;
+ __le64 orig_value_size = 0;
- ret = ocfs2_xattr_update_entry(inode,
- handle,
- xi,
- xs,
- &vb,
- offs);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
+ rc = ocfs2_xa_check_space(loc, xi);
+ if (rc)
+ goto out;
- ret = __ocfs2_xattr_set_value_outside(inode,
- handle,
- &vb,
- xi->value,
- xi->value_len);
- if (ret < 0)
- mlog_errno(ret);
+ if (loc->xl_entry) {
+ if (ocfs2_xa_can_reuse_entry(loc, xi)) {
+ orig_value_size = loc->xl_entry->xe_value_size;
+ rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
+ if (rc)
+ goto out;
+ goto alloc_value;
+ }
+
+ if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+ orig_clusters = ocfs2_xa_value_clusters(loc);
+ rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+ if (rc) {
+ mlog_errno(rc);
+ ocfs2_xa_cleanup_value_truncate(loc,
+ "overwriting",
+ orig_clusters);
goto out;
- } else {
- /*
- * If new value need set in local,
- * just trucate old value to zero.
- */
- ret = ocfs2_xattr_value_truncate(inode,
- &vb,
- 0,
- ctxt);
- if (ret < 0)
- mlog_errno(ret);
}
}
+ ocfs2_xa_wipe_namevalue(loc);
+ } else
+ ocfs2_xa_add_entry(loc, name_hash);
+
+ /*
+ * If we get here, we have a blank entry. Fill it. We grow our
+ * name+value pair back from the end.
+ */
+ ocfs2_xa_add_namevalue(loc, xi);
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
+ ocfs2_xa_install_value_root(loc);
+
+alloc_value:
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
+ orig_clusters = ocfs2_xa_value_clusters(loc);
+ rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt);
+ if (rc < 0) {
+ /*
+ * If we tried to grow an existing external value,
+ * ocfs2_xa_cleanup_value_truncate() is going to
+ * let it stand. We have to restore its original
+ * value size.
+ */
+ loc->xl_entry->xe_value_size = orig_value_size;
+ ocfs2_xa_cleanup_value_truncate(loc, "growing",
+ orig_clusters);
+ mlog_errno(rc);
+ }
}
- ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), xs->inode_bh,
+out:
+ return rc;
+}
+
+/*
+ * Store the value portion of the name+value pair. This will skip
+ * values that are stored externally. Their tree roots were set up
+ * by ocfs2_xa_prepare_entry().
+ */
+static int ocfs2_xa_store_value(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int rc = 0;
+ int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
+ int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
+ char *nameval_buf;
+ struct ocfs2_xattr_value_buf vb;
+
+ nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
+ ocfs2_xa_fill_value_buf(loc, &vb);
+ rc = __ocfs2_xattr_set_value_outside(loc->xl_inode,
+ ctxt->handle, &vb,
+ xi->xi_value,
+ xi->xi_value_len);
+ } else
+ memcpy(nameval_buf + name_size, xi->xi_value, xi->xi_value_len);
+
+ return rc;
+}
+
+static int ocfs2_xa_set(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int ret;
+ u32 name_hash = ocfs2_xattr_name_hash(loc->xl_inode, xi->xi_name,
+ xi->xi_name_len);
+
+ ret = ocfs2_xa_journal_access(ctxt->handle, loc,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
- if (!(flag & OCFS2_INLINE_XATTR_FL)) {
- ret = vb.vb_access(handle, INODE_CACHE(inode), vb.vb_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
- }
-
/*
- * Set value in local, include set tree root in local.
- * This is the first step for value size >INLINE_SIZE.
+ * From here on out, everything is going to modify the buffer a
+ * little. Errors are going to leave the xattr header in a
+ * sane state. Thus, even with errors we dirty the sucker.
*/
- ocfs2_xattr_set_entry_local(inode, &xi_l, xs, last, min_offs);
- if (!(flag & OCFS2_INLINE_XATTR_FL)) {
- ret = ocfs2_journal_dirty(handle, xs->xattr_bh);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
+ /* Don't worry, we are never called with !xi_value and !xl_entry */
+ if (!xi->xi_value) {
+ ret = ocfs2_xa_remove(loc, ctxt);
+ goto out_dirty;
}
- if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) &&
- (flag & OCFS2_INLINE_XATTR_FL)) {
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- unsigned int xattrsize = osb->s_xattr_inline_size;
-
- /*
- * Adjust extent record count or inline data size
- * to reserve space for extended attribute.
- */
- if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
- struct ocfs2_inline_data *idata = &di->id2.i_data;
- le16_add_cpu(&idata->id_count, -xattrsize);
- } else if (!(ocfs2_inode_is_fast_symlink(inode))) {
- struct ocfs2_extent_list *el = &di->id2.i_list;
- le16_add_cpu(&el->l_count, -(xattrsize /
- sizeof(struct ocfs2_extent_rec)));
- }
- di->i_xattr_inline_size = cpu_to_le16(xattrsize);
+ ret = ocfs2_xa_prepare_entry(loc, xi, name_hash, ctxt);
+ if (ret) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+ goto out_dirty;
}
- /* Update xattr flag */
- spin_lock(&oi->ip_lock);
- oi->ip_dyn_features |= flag;
- di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
- spin_unlock(&oi->ip_lock);
- ret = ocfs2_journal_dirty(handle, xs->inode_bh);
- if (ret < 0)
+ ret = ocfs2_xa_store_value(loc, xi, ctxt);
+ if (ret)
mlog_errno(ret);
- if (!ret && xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
- /*
- * Set value outside in B tree.
- * This is the second step for value size > INLINE_SIZE.
- */
- size_t offs = le16_to_cpu(xs->here->xe_name_offset);
- ret = ocfs2_xattr_set_value_outside(inode, xi, xs, ctxt,
- &vb, offs);
- if (ret < 0) {
- int ret2;
+out_dirty:
+ ocfs2_xa_journal_dirty(ctxt->handle, loc);
- mlog_errno(ret);
- /*
- * If set value outside failed, we have to clean
- * the junk tree root we have already set in local.
- */
- ret2 = ocfs2_xattr_cleanup(inode, ctxt->handle,
- xi, xs, &vb, offs);
- if (ret2 < 0)
- mlog_errno(ret2);
- }
- }
out:
return ret;
}
+static void ocfs2_init_dinode_xa_loc(struct ocfs2_xa_loc *loc,
+ struct inode *inode,
+ struct buffer_head *bh,
+ struct ocfs2_xattr_entry *entry)
+{
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
+
+ BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_XATTR_FL));
+
+ loc->xl_inode = inode;
+ loc->xl_ops = &ocfs2_xa_block_loc_ops;
+ loc->xl_storage = bh;
+ loc->xl_entry = entry;
+ loc->xl_size = le16_to_cpu(di->i_xattr_inline_size);
+ loc->xl_header =
+ (struct ocfs2_xattr_header *)(bh->b_data + bh->b_size -
+ loc->xl_size);
+}
+
+static void ocfs2_init_xattr_block_xa_loc(struct ocfs2_xa_loc *loc,
+ struct inode *inode,
+ struct buffer_head *bh,
+ struct ocfs2_xattr_entry *entry)
+{
+ struct ocfs2_xattr_block *xb =
+ (struct ocfs2_xattr_block *)bh->b_data;
+
+ BUG_ON(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED);
+
+ loc->xl_inode = inode;
+ loc->xl_ops = &ocfs2_xa_block_loc_ops;
+ loc->xl_storage = bh;
+ loc->xl_header = &(xb->xb_attrs.xb_header);
+ loc->xl_entry = entry;
+ loc->xl_size = bh->b_size - offsetof(struct ocfs2_xattr_block,
+ xb_attrs.xb_header);
+}
+
+static void ocfs2_init_xattr_bucket_xa_loc(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_bucket *bucket,
+ struct ocfs2_xattr_entry *entry)
+{
+ loc->xl_inode = bucket->bu_inode;
+ loc->xl_ops = &ocfs2_xa_bucket_loc_ops;
+ loc->xl_storage = bucket;
+ loc->xl_header = bucket_xh(bucket);
+ loc->xl_entry = entry;
+ loc->xl_size = OCFS2_XATTR_BUCKET_SIZE;
+}
+
/*
* In xattr remove, if it is stored outside and refcounted, we may have
* the chance to split the refcount tree. So need the allocators.
@@ -2149,6 +2683,55 @@ static int ocfs2_xattr_ibody_find(struct inode *inode,
return 0;
}
+static int ocfs2_xattr_ibody_init(struct inode *inode,
+ struct buffer_head *di_bh,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int ret;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ unsigned int xattrsize = osb->s_xattr_inline_size;
+
+ if (!ocfs2_xattr_has_space_inline(inode, di)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * Adjust extent record count or inline data size
+ * to reserve space for extended attribute.
+ */
+ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ struct ocfs2_inline_data *idata = &di->id2.i_data;
+ le16_add_cpu(&idata->id_count, -xattrsize);
+ } else if (!(ocfs2_inode_is_fast_symlink(inode))) {
+ struct ocfs2_extent_list *el = &di->id2.i_list;
+ le16_add_cpu(&el->l_count, -(xattrsize /
+ sizeof(struct ocfs2_extent_rec)));
+ }
+ di->i_xattr_inline_size = cpu_to_le16(xattrsize);
+
+ spin_lock(&oi->ip_lock);
+ oi->ip_dyn_features |= OCFS2_INLINE_XATTR_FL|OCFS2_HAS_XATTR_FL;
+ di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
+ spin_unlock(&oi->ip_lock);
+
+ ret = ocfs2_journal_dirty(ctxt->handle, di_bh);
+ if (ret < 0)
+ mlog_errno(ret);
+
+out:
+ return ret;
+}
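As a hedged worked example of the reservation (sizes are assumptions): with s_xattr_inline_size = 256 on an extent-list inode and 16-byte extent records, l_count shrinks by 256 / 16 = 16 records, ceding the tail of the inode block to the new inline xattr region.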
+
/*
* ocfs2_xattr_ibody_set()
*
@@ -2160,9 +2743,10 @@ static int ocfs2_xattr_ibody_set(struct inode *inode,
struct ocfs2_xattr_search *xs,
struct ocfs2_xattr_set_ctxt *ctxt)
{
+ int ret;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
- int ret;
+ struct ocfs2_xa_loc loc;
if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
return -ENOSPC;
@@ -2175,8 +2759,25 @@ static int ocfs2_xattr_ibody_set(struct inode *inode,
}
}
- ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt,
- (OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL));
+ if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
+ ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt);
+ if (ret) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ ocfs2_init_dinode_xa_loc(&loc, inode, xs->inode_bh,
+ xs->not_found ? NULL : xs->here);
+ ret = ocfs2_xa_set(&loc, xi, ctxt);
+ if (ret) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+ goto out;
+ }
+ xs->here = loc.xl_entry;
+
out:
up_write(&oi->ip_alloc_sem);
@@ -2236,12 +2837,11 @@ cleanup:
return ret;
}
-static int ocfs2_create_xattr_block(handle_t *handle,
- struct inode *inode,
+static int ocfs2_create_xattr_block(struct inode *inode,
struct buffer_head *inode_bh,
- struct ocfs2_alloc_context *meta_ac,
- struct buffer_head **ret_bh,
- int indexed)
+ struct ocfs2_xattr_set_ctxt *ctxt,
+ int indexed,
+ struct buffer_head **ret_bh)
{
int ret;
u16 suballoc_bit_start;
@@ -2252,14 +2852,14 @@ static int ocfs2_create_xattr_block(handle_t *handle,
struct buffer_head *new_bh = NULL;
struct ocfs2_xattr_block *xblk;
- ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), inode_bh,
- OCFS2_JOURNAL_ACCESS_CREATE);
+ ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
+ inode_bh, OCFS2_JOURNAL_ACCESS_CREATE);
if (ret < 0) {
mlog_errno(ret);
goto end;
}
- ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1,
+ ret = ocfs2_claim_metadata(osb, ctxt->handle, ctxt->meta_ac, 1,
&suballoc_bit_start, &num_got,
&first_blkno);
if (ret < 0) {
@@ -2270,7 +2870,7 @@ static int ocfs2_create_xattr_block(handle_t *handle,
new_bh = sb_getblk(inode->i_sb, first_blkno);
ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
- ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode),
+ ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode),
new_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret < 0) {
@@ -2282,11 +2882,10 @@ static int ocfs2_create_xattr_block(handle_t *handle,
xblk = (struct ocfs2_xattr_block *)new_bh->b_data;
memset(xblk, 0, inode->i_sb->s_blocksize);
strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE);
- xblk->xb_suballoc_slot = cpu_to_le16(osb->slot_num);
+ xblk->xb_suballoc_slot = cpu_to_le16(ctxt->meta_ac->ac_alloc_slot);
xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start);
xblk->xb_fs_generation = cpu_to_le32(osb->fs_generation);
xblk->xb_blkno = cpu_to_le64(first_blkno);
-
if (indexed) {
struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root;
xr->xt_clusters = cpu_to_le32(1);
@@ -2297,14 +2896,17 @@ static int ocfs2_create_xattr_block(handle_t *handle,
xr->xt_list.l_next_free_rec = cpu_to_le16(1);
xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED);
}
+ ocfs2_journal_dirty(ctxt->handle, new_bh);
- ret = ocfs2_journal_dirty(handle, new_bh);
- if (ret < 0) {
- mlog_errno(ret);
- goto end;
- }
+ /* Add it to the inode */
di->i_xattr_loc = cpu_to_le64(first_blkno);
- ocfs2_journal_dirty(handle, inode_bh);
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ OCFS2_I(inode)->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
+ di->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features);
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ ocfs2_journal_dirty(ctxt->handle, inode_bh);
*ret_bh = new_bh;
new_bh = NULL;
@@ -2326,13 +2928,13 @@ static int ocfs2_xattr_block_set(struct inode *inode,
struct ocfs2_xattr_set_ctxt *ctxt)
{
struct buffer_head *new_bh = NULL;
- handle_t *handle = ctxt->handle;
struct ocfs2_xattr_block *xblk = NULL;
int ret;
+ struct ocfs2_xa_loc loc;
if (!xs->xattr_bh) {
- ret = ocfs2_create_xattr_block(handle, inode, xs->inode_bh,
- ctxt->meta_ac, &new_bh, 0);
+ ret = ocfs2_create_xattr_block(inode, xs->inode_bh, ctxt,
+ 0, &new_bh);
if (ret) {
mlog_errno(ret);
goto end;
@@ -2348,21 +2950,25 @@ static int ocfs2_xattr_block_set(struct inode *inode,
xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) {
- /* Set extended attribute into external block */
- ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt,
- OCFS2_HAS_XATTR_FL);
- if (!ret || ret != -ENOSPC)
- goto end;
+ ocfs2_init_xattr_block_xa_loc(&loc, inode, xs->xattr_bh,
+ xs->not_found ? NULL : xs->here);
- ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
- if (ret)
+ ret = ocfs2_xa_set(&loc, xi, ctxt);
+ if (!ret)
+ xs->here = loc.xl_entry;
+ else if (ret != -ENOSPC)
goto end;
+ else {
+ ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
+ if (ret)
+ goto end;
+ }
}
- ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt);
+ if (le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)
+ ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt);
end:
-
return ret;
}
@@ -2371,7 +2977,6 @@ static int ocfs2_xattr_can_be_in_inode(struct inode *inode,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_search *xs)
{
- u64 value_size;
struct ocfs2_xattr_entry *last;
int free, i;
size_t min_offs = xs->end - xs->base;
@@ -2394,13 +2999,7 @@ static int ocfs2_xattr_can_be_in_inode(struct inode *inode,
BUG_ON(!xs->not_found);
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
- value_size = OCFS2_XATTR_ROOT_SIZE;
- else
- value_size = OCFS2_XATTR_SIZE(xi->value_len);
-
- if (free >= sizeof(struct ocfs2_xattr_entry) +
- OCFS2_XATTR_SIZE(strlen(xi->name)) + value_size)
+ if (free >= (sizeof(struct ocfs2_xattr_entry) + namevalue_size_xi(xi)))
return 1;
return 0;
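[Editor's note: the namevalue_size_xi()/namevalue_size_xe() helpers replace this open-coded size computation here and again in the defrag and divide paths below. A sketch of what they presumably compute, reconstructed from the removed logic; the patch also renames the ocfs2_xattr_info fields to xi_name_index, xi_name, xi_name_len, xi_value and xi_value_len, which the sketch assumes.]

	static int namevalue_size(int name_len, u64 value_len)
	{
		/* Large values live outside the block behind a value root */
		if (value_len > OCFS2_XATTR_INLINE_SIZE)
			return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
		else
			return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
	}

	static int namevalue_size_xi(struct ocfs2_xattr_info *xi)
	{
		return namevalue_size(xi->xi_name_len, xi->xi_value_len);
	}

	static int namevalue_size_xe(struct ocfs2_xattr_entry *xe)
	{
		if (ocfs2_xattr_is_local(xe))
			return namevalue_size(xe->xe_name_len,
					      le64_to_cpu(xe->xe_value_size));
		else
			return OCFS2_XATTR_SIZE(xe->xe_name_len) +
			       OCFS2_XATTR_ROOT_SIZE;
	}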
@@ -2424,7 +3023,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
char *base = NULL;
int name_offset, name_len = 0;
u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
- xi->value_len);
+ xi->xi_value_len);
u64 value_size;
/*
@@ -2432,14 +3031,14 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
* No matter whether we replace an old one or add a new one,
* we need this for writing.
*/
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
credits += new_clusters *
ocfs2_clusters_to_blocks(inode->i_sb, 1);
if (xis->not_found && xbs->not_found) {
credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
clusters_add += new_clusters;
credits += ocfs2_calc_extend_credits(inode->i_sb,
&def_xv.xv.xr_list,
@@ -2484,7 +3083,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
* The credits for removing the value tree will be extended
* by ocfs2_remove_extent itself.
*/
- if (!xi->value) {
+ if (!xi->xi_value) {
if (!ocfs2_xattr_is_local(xe))
credits += ocfs2_remove_extent_credits(inode->i_sb);
@@ -2514,7 +3113,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
}
}
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
/* the new values will be stored outside. */
u32 old_clusters = 0;
@@ -2547,9 +3146,10 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
* value, we don't need any allocation, otherwise we have
* to guess metadata allocation.
*/
- if ((ocfs2_xattr_is_local(xe) && value_size >= xi->value_len) ||
+ if ((ocfs2_xattr_is_local(xe) &&
+ (value_size >= xi->xi_value_len)) ||
(!ocfs2_xattr_is_local(xe) &&
- OCFS2_XATTR_ROOT_SIZE >= xi->value_len))
+ OCFS2_XATTR_ROOT_SIZE >= xi->xi_value_len))
goto out;
}
@@ -2639,7 +3239,7 @@ static int ocfs2_init_xattr_set_ctxt(struct inode *inode,
meta_add += extra_meta;
mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, "
- "credits = %d\n", xi->name, meta_add, clusters_add, *credits);
+ "credits = %d\n", xi->xi_name, meta_add, clusters_add, *credits);
if (meta_add) {
ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add,
@@ -2679,7 +3279,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
{
int ret = 0, credits, old_found;
- if (!xi->value) {
+ if (!xi->xi_value) {
/* Remove existing extended attribute */
if (!xis->not_found)
ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
@@ -2693,8 +3293,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
 * If that succeeds and the extended attribute exists in an
 * external block, we will then remove it.
*/
- xi->value = NULL;
- xi->value_len = 0;
+ xi->xi_value = NULL;
+ xi->xi_value_len = 0;
old_found = xis->not_found;
xis->not_found = -ENODATA;
@@ -2722,8 +3322,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
} else if (ret == -ENOSPC) {
if (di->i_xattr_loc && !xbs->xattr_bh) {
ret = ocfs2_xattr_block_find(inode,
- xi->name_index,
- xi->name, xbs);
+ xi->xi_name_index,
+ xi->xi_name, xbs);
if (ret)
goto out;
@@ -2762,8 +3362,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
 * If that succeeds and the extended attribute
 * exists in the inode, we will remove it.
*/
- xi->value = NULL;
- xi->value_len = 0;
+ xi->xi_value = NULL;
+ xi->xi_value_len = 0;
xbs->not_found = -ENODATA;
ret = ocfs2_calc_xattr_set_need(inode,
di,
@@ -2829,10 +3429,11 @@ int ocfs2_xattr_set_handle(handle_t *handle,
int ret;
struct ocfs2_xattr_info xi = {
- .name_index = name_index,
- .name = name,
- .value = value,
- .value_len = value_len,
+ .xi_name_index = name_index,
+ .xi_name = name,
+ .xi_name_len = strlen(name),
+ .xi_value = value,
+ .xi_value_len = value_len,
};
struct ocfs2_xattr_search xis = {
@@ -2912,10 +3513,11 @@ int ocfs2_xattr_set(struct inode *inode,
struct ocfs2_refcount_tree *ref_tree = NULL;
struct ocfs2_xattr_info xi = {
- .name_index = name_index,
- .name = name,
- .value = value,
- .value_len = value_len,
+ .xi_name_index = name_index,
+ .xi_name = name,
+ .xi_name_len = strlen(name),
+ .xi_value = value,
+ .xi_value_len = value_len,
};
struct ocfs2_xattr_search xis = {
@@ -3759,7 +4361,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode,
struct ocfs2_xattr_bucket *bucket)
{
int ret, i;
- size_t end, offset, len, value_len;
+ size_t end, offset, len;
struct ocfs2_xattr_header *xh;
char *entries, *buf, *bucket_buf = NULL;
u64 blkno = bucket_blkno(bucket);
@@ -3813,12 +4415,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode,
end = OCFS2_XATTR_BUCKET_SIZE;
for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) {
offset = le16_to_cpu(xe->xe_name_offset);
- if (ocfs2_xattr_is_local(xe))
- value_len = OCFS2_XATTR_SIZE(
- le64_to_cpu(xe->xe_value_size));
- else
- value_len = OCFS2_XATTR_ROOT_SIZE;
- len = OCFS2_XATTR_SIZE(xe->xe_name_len) + value_len;
+ len = namevalue_size_xe(xe);
/*
* We must make sure that the name/value pair
@@ -4007,7 +4604,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
int new_bucket_head)
{
int ret, i;
- int count, start, len, name_value_len = 0, xe_len, name_offset = 0;
+ int count, start, len, name_value_len = 0, name_offset = 0;
struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;
struct ocfs2_xattr_header *xh;
struct ocfs2_xattr_entry *xe;
@@ -4098,13 +4695,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
name_value_len = 0;
for (i = 0; i < start; i++) {
xe = &xh->xh_entries[i];
- xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
- if (ocfs2_xattr_is_local(xe))
- xe_len +=
- OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
- else
- xe_len += OCFS2_XATTR_ROOT_SIZE;
- name_value_len += xe_len;
+ name_value_len += namevalue_size_xe(xe);
if (le16_to_cpu(xe->xe_name_offset) < name_offset)
name_offset = le16_to_cpu(xe->xe_name_offset);
}
@@ -4134,12 +4725,6 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
xe = &xh->xh_entries[i];
- xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
- if (ocfs2_xattr_is_local(xe))
- xe_len +=
- OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
- else
- xe_len += OCFS2_XATTR_ROOT_SIZE;
if (le16_to_cpu(xe->xe_name_offset) <
le16_to_cpu(xh->xh_free_start))
xh->xh_free_start = xe->xe_name_offset;
@@ -4751,195 +5336,6 @@ static inline char *ocfs2_xattr_bucket_get_val(struct inode *inode,
}
/*
- * Handle the normal xattr set, including replace, delete and new.
- *
- * Note: "local" indicates the real data's locality. So we can't
- * judge its bucket locality just by its length.
- */
-static void ocfs2_xattr_set_entry_normal(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- u32 name_hash,
- int local)
-{
- struct ocfs2_xattr_entry *last, *xe;
- int name_len = strlen(xi->name);
- struct ocfs2_xattr_header *xh = xs->header;
- u16 count = le16_to_cpu(xh->xh_count), start;
- size_t blocksize = inode->i_sb->s_blocksize;
- char *val;
- size_t offs, size, new_size;
-
- last = &xh->xh_entries[count];
- if (!xs->not_found) {
- xe = xs->here;
- offs = le16_to_cpu(xe->xe_name_offset);
- if (ocfs2_xattr_is_local(xe))
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
- else
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE);
-
- /*
- * If the new value will be stored outside, xi->value has been
- * initialized as an empty ocfs2_xattr_value_root, and the same
- * goes with xi->value_len, so we can set new_size safely here.
- * See ocfs2_xattr_set_in_bucket.
- */
- new_size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(xi->value_len);
-
- le16_add_cpu(&xh->xh_name_value_len, -size);
- if (xi->value) {
- if (new_size > size)
- goto set_new_name_value;
-
- /* Now replace the old value with new one. */
- if (local)
- xe->xe_value_size = cpu_to_le64(xi->value_len);
- else
- xe->xe_value_size = 0;
-
- val = ocfs2_xattr_bucket_get_val(inode,
- xs->bucket, offs);
- memset(val + OCFS2_XATTR_SIZE(name_len), 0,
- size - OCFS2_XATTR_SIZE(name_len));
- if (OCFS2_XATTR_SIZE(xi->value_len) > 0)
- memcpy(val + OCFS2_XATTR_SIZE(name_len),
- xi->value, xi->value_len);
-
- le16_add_cpu(&xh->xh_name_value_len, new_size);
- ocfs2_xattr_set_local(xe, local);
- return;
- } else {
- /*
- * Remove the old entry if there is more than one.
- * We don't remove the last entry so that we can
- * use it to indicate the hash value of the empty
- * bucket.
- */
- last -= 1;
- le16_add_cpu(&xh->xh_count, -1);
- if (xh->xh_count) {
- memmove(xe, xe + 1,
- (void *)last - (void *)xe);
- memset(last, 0,
- sizeof(struct ocfs2_xattr_entry));
- } else
- xh->xh_free_start =
- cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
-
- return;
- }
- } else {
- /* find a new entry for insert. */
- int low = 0, high = count - 1, tmp;
- struct ocfs2_xattr_entry *tmp_xe;
-
- while (low <= high && count) {
- tmp = (low + high) / 2;
- tmp_xe = &xh->xh_entries[tmp];
-
- if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash))
- low = tmp + 1;
- else if (name_hash <
- le32_to_cpu(tmp_xe->xe_name_hash))
- high = tmp - 1;
- else {
- low = tmp;
- break;
- }
- }
-
- xe = &xh->xh_entries[low];
- if (low != count)
- memmove(xe + 1, xe, (void *)last - (void *)xe);
-
- le16_add_cpu(&xh->xh_count, 1);
- memset(xe, 0, sizeof(struct ocfs2_xattr_entry));
- xe->xe_name_hash = cpu_to_le32(name_hash);
- xe->xe_name_len = name_len;
- ocfs2_xattr_set_type(xe, xi->name_index);
- }
-
-set_new_name_value:
- /* Insert the new name+value. */
- size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(xi->value_len);
-
- /*
- * We must make sure that the name/value pair
- * exists in the same block.
- */
- offs = le16_to_cpu(xh->xh_free_start);
- start = offs - size;
-
- if (start >> inode->i_sb->s_blocksize_bits !=
- (offs - 1) >> inode->i_sb->s_blocksize_bits) {
- offs = offs - offs % blocksize;
- xh->xh_free_start = cpu_to_le16(offs);
- }
-
- val = ocfs2_xattr_bucket_get_val(inode, xs->bucket, offs - size);
- xe->xe_name_offset = cpu_to_le16(offs - size);
-
- memset(val, 0, size);
- memcpy(val, xi->name, name_len);
- memcpy(val + OCFS2_XATTR_SIZE(name_len), xi->value, xi->value_len);
-
- xe->xe_value_size = cpu_to_le64(xi->value_len);
- ocfs2_xattr_set_local(xe, local);
- xs->here = xe;
- le16_add_cpu(&xh->xh_free_start, -size);
- le16_add_cpu(&xh->xh_name_value_len, size);
-
- return;
-}
-
-/*
- * Set the xattr entry in the specified bucket.
- * The bucket is indicated by xs->bucket and it should have enough
- * space for the xattr insertion.
- */
-static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode,
- handle_t *handle,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- u32 name_hash,
- int local)
-{
- int ret;
- u64 blkno;
-
- mlog(0, "Set xattr entry len = %lu index = %d in bucket %llu\n",
- (unsigned long)xi->value_len, xi->name_index,
- (unsigned long long)bucket_blkno(xs->bucket));
-
- if (!xs->bucket->bu_bhs[1]) {
- blkno = bucket_blkno(xs->bucket);
- ocfs2_xattr_bucket_relse(xs->bucket);
- ret = ocfs2_read_xattr_bucket(xs->bucket, blkno);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
- }
-
- ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
-
- ocfs2_xattr_set_entry_normal(inode, xi, xs, name_hash, local);
- ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
-
-out:
- return ret;
-}
-
-/*
* Truncate the specified xe_off entry in xattr bucket.
* bucket is indicated by header_bh and len is the new length.
* Both the ocfs2_xattr_value_root and the entry will be updated here.
@@ -5009,66 +5405,6 @@ out:
return ret;
}
-static int ocfs2_xattr_bucket_value_truncate_xs(struct inode *inode,
- struct ocfs2_xattr_search *xs,
- int len,
- struct ocfs2_xattr_set_ctxt *ctxt)
-{
- int ret, offset;
- struct ocfs2_xattr_entry *xe = xs->here;
- struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)xs->base;
-
- BUG_ON(!xs->bucket->bu_bhs[0] || !xe || ocfs2_xattr_is_local(xe));
-
- offset = xe - xh->xh_entries;
- ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket,
- offset, len, ctxt);
- if (ret)
- mlog_errno(ret);
-
- return ret;
-}
-
-static int ocfs2_xattr_bucket_set_value_outside(struct inode *inode,
- handle_t *handle,
- struct ocfs2_xattr_search *xs,
- char *val,
- int value_len)
-{
- int ret, offset, block_off;
- struct ocfs2_xattr_value_root *xv;
- struct ocfs2_xattr_entry *xe = xs->here;
- struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket);
- void *base;
- struct ocfs2_xattr_value_buf vb = {
- .vb_access = ocfs2_journal_access,
- };
-
- BUG_ON(!xs->base || !xe || ocfs2_xattr_is_local(xe));
-
- ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, xh,
- xe - xh->xh_entries,
- &block_off,
- &offset);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- base = bucket_block(xs->bucket, block_off);
- xv = (struct ocfs2_xattr_value_root *)(base + offset +
- OCFS2_XATTR_SIZE(xe->xe_name_len));
-
- vb.vb_xv = xv;
- vb.vb_bh = xs->bucket->bu_bhs[block_off];
- ret = __ocfs2_xattr_set_value_outside(inode, handle,
- &vb, val, value_len);
- if (ret)
- mlog_errno(ret);
-out:
- return ret;
-}
-
static int ocfs2_rm_xattr_cluster(struct inode *inode,
struct buffer_head *root_bh,
u64 blkno,
@@ -5167,128 +5503,6 @@ out:
return ret;
}
-static void ocfs2_xattr_bucket_remove_xs(struct inode *inode,
- handle_t *handle,
- struct ocfs2_xattr_search *xs)
-{
- struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket);
- struct ocfs2_xattr_entry *last = &xh->xh_entries[
- le16_to_cpu(xh->xh_count) - 1];
- int ret = 0;
-
- ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- return;
- }
-
- /* Remove the old entry. */
- memmove(xs->here, xs->here + 1,
- (void *)last - (void *)xs->here);
- memset(last, 0, sizeof(struct ocfs2_xattr_entry));
- le16_add_cpu(&xh->xh_count, -1);
-
- ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
-}
-
-/*
- * Set the xattr name/value in the bucket specified in xs.
- *
- * As the new value in xi may be stored in the bucket or in an outside cluster,
- * we divide the whole process into 4 steps:
- * 1. insert the name/value in the bucket (ocfs2_xattr_set_entry_in_bucket)
- * 2. truncate the outside cluster (ocfs2_xattr_bucket_value_truncate_xs)
- * 3. set the value in the outside cluster (ocfs2_xattr_bucket_set_value_outside)
- * 4. if the clusters for the new outside value can't be allocated, free
- *    the xattr we allocated in step 1.
- */
-static int ocfs2_xattr_set_in_bucket(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_set_ctxt *ctxt)
-{
- int ret, local = 1;
- size_t value_len;
- char *val = (char *)xi->value;
- struct ocfs2_xattr_entry *xe = xs->here;
- u32 name_hash = ocfs2_xattr_name_hash(inode, xi->name,
- strlen(xi->name));
-
- if (!xs->not_found && !ocfs2_xattr_is_local(xe)) {
- /*
- * We need to truncate the xattr storage first.
- *
- * If both the old and new value are stored to
- * outside block, we only need to truncate
- * the storage and then set the value outside.
- *
- * If the new value should be stored within block,
- * we should free all the outside block first and
- * the modification to the xattr block will be done
- * by following steps.
- */
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
- value_len = xi->value_len;
- else
- value_len = 0;
-
- ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs,
- value_len,
- ctxt);
- if (ret)
- goto out;
-
- if (value_len)
- goto set_value_outside;
- }
-
- value_len = xi->value_len;
- /* So we have to handle the inside block change now. */
- if (value_len > OCFS2_XATTR_INLINE_SIZE) {
- /*
- * If the new value will be stored outside of block,
- * initialize a new empty value root and insert it first.
- */
- local = 0;
- xi->value = &def_xv;
- xi->value_len = OCFS2_XATTR_ROOT_SIZE;
- }
-
- ret = ocfs2_xattr_set_entry_in_bucket(inode, ctxt->handle, xi, xs,
- name_hash, local);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- if (value_len <= OCFS2_XATTR_INLINE_SIZE)
- goto out;
-
- /* allocate the space now for the outside block storage. */
- ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs,
- value_len, ctxt);
- if (ret) {
- mlog_errno(ret);
-
- if (xs->not_found) {
- /*
- * We can't allocate enough clusters for outside
- * storage and we have allocated xattr already,
- * so need to remove it.
- */
- ocfs2_xattr_bucket_remove_xs(inode, ctxt->handle, xs);
- }
- goto out;
- }
-
-set_value_outside:
- ret = ocfs2_xattr_bucket_set_value_outside(inode, ctxt->handle,
- xs, val, value_len);
-out:
- return ret;
-}
-
/*
* check whether the xattr bucket is filled up with the same hash value.
* If we want to insert the xattr with the same hash, return -ENOSPC.
@@ -5317,156 +5531,116 @@ static int ocfs2_check_xattr_bucket_collision(struct inode *inode,
return 0;
}
-static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_set_ctxt *ctxt)
+/*
+ * Try to set the entry in the current bucket. If we fail, the caller
+ * will handle getting us another bucket.
+ */
+static int ocfs2_xattr_set_entry_bucket(struct inode *inode,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_search *xs,
+ struct ocfs2_xattr_set_ctxt *ctxt)
{
- struct ocfs2_xattr_header *xh;
- struct ocfs2_xattr_entry *xe;
- u16 count, header_size, xh_free_start;
- int free, max_free, need, old;
- size_t value_size = 0, name_len = strlen(xi->name);
- size_t blocksize = inode->i_sb->s_blocksize;
- int ret, allocation = 0;
-
- mlog_entry("Set xattr %s in xattr index block\n", xi->name);
-
-try_again:
- xh = xs->header;
- count = le16_to_cpu(xh->xh_count);
- xh_free_start = le16_to_cpu(xh->xh_free_start);
- header_size = sizeof(struct ocfs2_xattr_header) +
- count * sizeof(struct ocfs2_xattr_entry);
- max_free = OCFS2_XATTR_BUCKET_SIZE - header_size -
- le16_to_cpu(xh->xh_name_value_len) - OCFS2_XATTR_HEADER_GAP;
-
- mlog_bug_on_msg(header_size > blocksize, "bucket %llu has header size "
- "of %u which exceed block size\n",
- (unsigned long long)bucket_blkno(xs->bucket),
- header_size);
+ int ret;
+ struct ocfs2_xa_loc loc;
- if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE)
- value_size = OCFS2_XATTR_ROOT_SIZE;
- else if (xi->value)
- value_size = OCFS2_XATTR_SIZE(xi->value_len);
+ mlog_entry("Set xattr %s in xattr bucket\n", xi->xi_name);
- if (xs->not_found)
- need = sizeof(struct ocfs2_xattr_entry) +
- OCFS2_XATTR_SIZE(name_len) + value_size;
- else {
- need = value_size + OCFS2_XATTR_SIZE(name_len);
+ ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket,
+ xs->not_found ? NULL : xs->here);
+ ret = ocfs2_xa_set(&loc, xi, ctxt);
+ if (!ret) {
+ xs->here = loc.xl_entry;
+ goto out;
+ }
+ if (ret != -ENOSPC) {
+ mlog_errno(ret);
+ goto out;
+ }
- /*
- * We only replace the old value if the new length is smaller
- * than the old one. Otherwise we will allocate new space in the
- * bucket to store it.
- */
- xe = xs->here;
- if (ocfs2_xattr_is_local(xe))
- old = OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
- else
- old = OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE);
+ /* Ok, we need space. Let's try defragmenting the bucket. */
+ ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle,
+ xs->bucket);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
- if (old >= value_size)
- need = 0;
+ ret = ocfs2_xa_set(&loc, xi, ctxt);
+ if (!ret) {
+ xs->here = loc.xl_entry;
+ goto out;
}
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
- free = xh_free_start - header_size - OCFS2_XATTR_HEADER_GAP;
- /*
- * We need to make sure the new name/value pair
- * can exist in the same block.
- */
- if (xh_free_start % blocksize < need)
- free -= xh_free_start % blocksize;
-
- mlog(0, "xs->not_found = %d, in xattr bucket %llu: free = %d, "
- "need = %d, max_free = %d, xh_free_start = %u, xh_name_value_len ="
- " %u\n", xs->not_found,
- (unsigned long long)bucket_blkno(xs->bucket),
- free, need, max_free, le16_to_cpu(xh->xh_free_start),
- le16_to_cpu(xh->xh_name_value_len));
-
- if (free < need ||
- (xs->not_found &&
- count == ocfs2_xattr_max_xe_in_bucket(inode->i_sb))) {
- if (need <= max_free &&
- count < ocfs2_xattr_max_xe_in_bucket(inode->i_sb)) {
- /*
- * We can create the space by defragment. Since only the
- * name/value will be moved, the xe shouldn't be changed
- * in xs.
- */
- ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle,
- xs->bucket);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
- xh_free_start = le16_to_cpu(xh->xh_free_start);
- free = xh_free_start - header_size
- - OCFS2_XATTR_HEADER_GAP;
- if (xh_free_start % blocksize < need)
- free -= xh_free_start % blocksize;
+out:
+ mlog_exit(ret);
+ return ret;
+}
- if (free >= need)
- goto xattr_set;
+static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_search *xs,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int ret;
- mlog(0, "Can't get enough space for xattr insert by "
- "defragment. Need %u bytes, but we have %d, so "
- "allocate new bucket for it.\n", need, free);
- }
+ mlog_entry("Set xattr %s in xattr index block\n", xi->xi_name);
- /*
- * We have to add new buckets or clusters and one
- * allocation should leave us enough space for insert.
- */
- BUG_ON(allocation);
+ ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
+ if (!ret)
+ goto out;
+ if (ret != -ENOSPC) {
+ mlog_errno(ret);
+ goto out;
+ }
- /*
- * We do not allow overlapping ranges between buckets, and the
- * maximum number of collisions we will allow is one bucket's
- * worth, so check here whether we need to add a new bucket for
- * the insert.
- */
- ret = ocfs2_check_xattr_bucket_collision(inode,
- xs->bucket,
- xi->name);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
+ /* Ack, need more space. Let's try to get another bucket! */
- ret = ocfs2_add_new_xattr_bucket(inode,
- xs->xattr_bh,
+ /*
+ * We do not allow overlapping ranges between buckets, and the
+ * maximum number of collisions we will allow is one bucket's
+ * worth, so check here whether we need to add a new bucket for
+ * the insert.
+ */
+ ret = ocfs2_check_xattr_bucket_collision(inode,
xs->bucket,
- ctxt);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
+ xi->xi_name);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
- /*
- * ocfs2_add_new_xattr_bucket() will have updated
- * xs->bucket if it moved, but it will not have updated
- * any of the other search fields. Thus, we drop it and
- * re-search. Everything should be cached, so it'll be
- * quick.
- */
- ocfs2_xattr_bucket_relse(xs->bucket);
- ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh,
- xi->name_index,
- xi->name, xs);
- if (ret && ret != -ENODATA)
- goto out;
- xs->not_found = ret;
- allocation = 1;
- goto try_again;
+ ret = ocfs2_add_new_xattr_bucket(inode,
+ xs->xattr_bh,
+ xs->bucket,
+ ctxt);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
}
-xattr_set:
- ret = ocfs2_xattr_set_in_bucket(inode, xi, xs, ctxt);
+ /*
+ * ocfs2_add_new_xattr_bucket() will have updated
+ * xs->bucket if it moved, but it will not have updated
+ * any of the other search fields. Thus, we drop it and
+ * re-search. Everything should be cached, so it'll be
+ * quick.
+ */
+ ocfs2_xattr_bucket_relse(xs->bucket);
+ ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh,
+ xi->xi_name_index,
+ xi->xi_name, xs);
+ if (ret && ret != -ENODATA)
+ goto out;
+ xs->not_found = ret;
+
+ /* Ok, we have a new bucket, let's try again */
+ ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
+ if (ret && (ret != -ENOSPC))
+ mlog_errno(ret);
+
out:
mlog_exit(ret);
return ret;
@@ -5678,7 +5852,7 @@ static int ocfs2_prepare_refcount_xattr(struct inode *inode,
* refcount tree, and make the original extent become 3. So we will need
* 2 * cluster more extent recs at most.
*/
- if (!xi->value || xi->value_len <= OCFS2_XATTR_INLINE_SIZE) {
+ if (!xi->xi_value || xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE) {
ret = ocfs2_refcounted_xattr_delete_need(inode,
&(*ref_tree)->rf_ci,
@@ -6354,9 +6528,11 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode,
int indexed)
{
int ret;
- handle_t *handle;
struct ocfs2_alloc_context *meta_ac;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_xattr_set_ctxt ctxt = {
+ .meta_ac = meta_ac,
+ };
ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
if (ret < 0) {
@@ -6364,21 +6540,21 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode,
return ret;
}
- handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
+ ctxt.handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
+ if (IS_ERR(ctxt.handle)) {
+ ret = PTR_ERR(ctxt.handle);
mlog_errno(ret);
goto out;
}
mlog(0, "create new xattr block for inode %llu, index = %d\n",
(unsigned long long)fe_bh->b_blocknr, indexed);
- ret = ocfs2_create_xattr_block(handle, inode, fe_bh,
- meta_ac, ret_bh, indexed);
+ ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed,
+ ret_bh);
if (ret)
mlog_errno(ret);
- ocfs2_commit_trans(osb, handle);
+ ocfs2_commit_trans(osb, ctxt.handle);
out:
ocfs2_free_alloc_context(meta_ac);
return ret;
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index f3b7c15..75d9b5b 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -11,6 +11,7 @@
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/vmalloc.h>
+#include <linux/writeback.h>
#include <linux/crc-itu-t.h>
#include "omfs.h"
@@ -89,7 +90,7 @@ static void omfs_update_checksums(struct omfs_inode *oi)
oi->i_head.h_check_xor = xor;
}
-static int omfs_write_inode(struct inode *inode, int wait)
+static int __omfs_write_inode(struct inode *inode, int wait)
{
struct omfs_inode *oi;
struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
@@ -162,9 +163,14 @@ out:
return ret;
}
+static int omfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ return __omfs_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+}
+
int omfs_sync_inode(struct inode *inode)
{
- return omfs_write_inode(inode, 1);
+ return __omfs_write_inode(inode, 1);
}
/*
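[Editor's note: both the omfs change above and the reiserfs change further down are instances of the same interface conversion: ->write_inode() loses its int wait flag and receives the writeback control instead, deriving synchronous behaviour from it. A minimal sketch of the pattern; foofs and __foofs_write_inode are hypothetical names.]

	static int foofs_write_inode(struct inode *inode,
				     struct writeback_control *wbc)
	{
		/* The old int-wait semantics fall out of the sync mode */
		int wait = wbc->sync_mode == WB_SYNC_ALL;

		return __foofs_write_inode(inode, wait);
	}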
diff --git a/fs/open.c b/fs/open.c
index b740c42..e17f544 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -270,7 +270,7 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
* Make sure that there are no leases. get_write_access() protects
* against the truncate racing with a lease-granting setlease().
*/
- error = break_lease(inode, FMODE_WRITE);
+ error = break_lease(inode, O_WRONLY);
if (error)
goto put_write_and_out;
diff --git a/fs/pnode.c b/fs/pnode.c
index 8d5f392..5cc564a 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -86,7 +86,7 @@ static int do_make_slave(struct vfsmount *mnt)
/*
* slave 'mnt' to a peer mount that has the
- * same root dentry. If none is available than
+ * same root dentry. If none is available then
* slave it to anything that is available.
*/
while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
@@ -147,6 +147,11 @@ void change_mnt_propagation(struct vfsmount *mnt, int type)
* get the next mount in the propagation tree.
* @m: the mount seen last
* @origin: the original mount from where the tree walk initiated
+ *
+ * Note that peer groups form contiguous segments of slave lists.
+ * We rely on that in get_source() to be able to find out if
+ * vfsmount found while iterating with propagation_next() is
+ * a peer of one we'd found earlier.
*/
static struct vfsmount *propagation_next(struct vfsmount *m,
struct vfsmount *origin)
@@ -186,10 +191,6 @@ static struct vfsmount *get_source(struct vfsmount *dest,
{
struct vfsmount *p_last_src = NULL;
struct vfsmount *p_last_dest = NULL;
- *type = CL_PROPAGATION;
-
- if (IS_MNT_SHARED(dest))
- *type |= CL_MAKE_SHARED;
while (last_dest != dest->mnt_master) {
p_last_dest = last_dest;
@@ -202,13 +203,18 @@ static struct vfsmount *get_source(struct vfsmount *dest,
do {
p_last_dest = next_peer(p_last_dest);
} while (IS_MNT_NEW(p_last_dest));
+ /* is that a peer of the one found earlier? */
+ if (dest == p_last_dest) {
+ *type = CL_MAKE_SHARED;
+ return p_last_src;
+ }
}
-
- if (dest != p_last_dest) {
- *type |= CL_SLAVE;
- return last_src;
- } else
- return p_last_src;
+ /* a slave of the one found earlier, then */
+ *type = CL_SLAVE;
+ /* beginning of peer group among the slaves? */
+ if (IS_MNT_SHARED(dest))
+ *type |= CL_MAKE_SHARED;
+ return last_src;
}
/*
diff --git a/fs/pnode.h b/fs/pnode.h
index 958665d..1ea4ae1 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -21,12 +21,11 @@
#define CL_SLAVE 0x02
#define CL_COPY_ALL 0x04
#define CL_MAKE_SHARED 0x08
-#define CL_PROPAGATION 0x10
-#define CL_PRIVATE 0x20
+#define CL_PRIVATE 0x10
static inline void set_mnt_shared(struct vfsmount *mnt)
{
- mnt->mnt_flags &= ~MNT_PNODE_MASK;
+ mnt->mnt_flags &= ~MNT_SHARED_MASK;
mnt->mnt_flags |= MNT_SHARED;
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 623e2ff..a731084 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -647,17 +647,11 @@ static int mounts_release(struct inode *inode, struct file *file)
static unsigned mounts_poll(struct file *file, poll_table *wait)
{
struct proc_mounts *p = file->private_data;
- struct mnt_namespace *ns = p->ns;
unsigned res = POLLIN | POLLRDNORM;
- poll_wait(file, &ns->poll, wait);
-
- spin_lock(&vfsmount_lock);
- if (p->event != ns->event) {
- p->event = ns->event;
+ poll_wait(file, &p->ns->poll, wait);
+ if (mnt_had_events(p))
res |= POLLERR | POLLPRI;
- }
- spin_unlock(&vfsmount_lock);
return res;
}
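[Editor's note: the event bookkeeping dropped from mounts_poll() moves behind the new mnt_had_events() helper, which is defined outside this hunk. A sketch of what it presumably does, reconstructed from the removed lines above.]

	static inline int mnt_had_events(struct proc_mounts *p)
	{
		struct mnt_namespace *ns = p->ns;
		int res = 0;

		spin_lock(&vfsmount_lock);
		if (p->event != ns->event) {
			p->event = ns->event;
			res = 1;
		}
		spin_unlock(&vfsmount_lock);

		return res;
	}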
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 480cb10..9580abe 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -662,6 +662,7 @@ struct proc_dir_entry *proc_symlink(const char *name,
}
return ent;
}
+EXPORT_SYMBOL(proc_symlink);
struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
struct proc_dir_entry *parent)
@@ -700,6 +701,7 @@ struct proc_dir_entry *proc_mkdir(const char *name,
{
return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
}
+EXPORT_SYMBOL(proc_mkdir);
struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
struct proc_dir_entry *parent)
@@ -728,6 +730,7 @@ struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
}
return ent;
}
+EXPORT_SYMBOL(create_proc_entry);
struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
struct proc_dir_entry *parent,
@@ -762,6 +765,7 @@ out_free:
out:
return NULL;
}
+EXPORT_SYMBOL(proc_create_data);
static void free_proc_entry(struct proc_dir_entry *de)
{
@@ -853,3 +857,4 @@ continue_removing:
de->parent->name, de->name, de->subdir->name);
pde_put(de);
}
+EXPORT_SYMBOL(remove_proc_entry);
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index 7ca7834..cfe90a4 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -12,37 +12,37 @@
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/fs.h>
+#include <linux/syslog.h>
#include <asm/uaccess.h>
#include <asm/io.h>
extern wait_queue_head_t log_wait;
-extern int do_syslog(int type, char __user *bug, int count);
-
static int kmsg_open(struct inode * inode, struct file * file)
{
- return do_syslog(1,NULL,0);
+ return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
}
static int kmsg_release(struct inode * inode, struct file * file)
{
- (void) do_syslog(0,NULL,0);
+ (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
return 0;
}
static ssize_t kmsg_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
+ if ((file->f_flags & O_NONBLOCK) &&
+ !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
return -EAGAIN;
- return do_syslog(2, buf, count);
+ return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
}
static unsigned int kmsg_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &log_wait, wait);
- if (do_syslog(9, NULL, 0))
+ if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
return POLLIN | POLLRDNORM;
return 0;
}
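[Editor's note: the do_syslog() calls trade their magic numbers for named actions and gain a fourth argument marking the caller as /proc/kmsg (SYSLOG_FROM_FILE). Pairing the removed and added lines gives the mapping below; the numeric values are inferred from this diff alone.]

	/* Action values inferred from the replaced magic numbers */
	enum {
		SYSLOG_ACTION_CLOSE		= 0,	/* was do_syslog(0, ...) */
		SYSLOG_ACTION_OPEN		= 1,	/* was do_syslog(1, ...) */
		SYSLOG_ACTION_READ		= 2,	/* was do_syslog(2, ...) */
		SYSLOG_ACTION_SIZE_UNREAD	= 9,	/* was do_syslog(9, ...) */
	};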
diff --git a/fs/proc/root.c b/fs/proc/root.c
index b080b79..757c069 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -220,9 +220,3 @@ void pid_ns_release_proc(struct pid_namespace *ns)
{
mntput(ns->proc_mnt);
}
-
-EXPORT_SYMBOL(proc_symlink);
-EXPORT_SYMBOL(proc_mkdir);
-EXPORT_SYMBOL(create_proc_entry);
-EXPORT_SYMBOL(proc_create_data);
-EXPORT_SYMBOL(remove_proc_entry);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index b8671a5..d1da94b 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1618,7 +1618,7 @@ int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
** to properly mark inodes for datasync and such, but only actually
** does something when called for a synchronous update.
*/
-int reiserfs_write_inode(struct inode *inode, int do_sync)
+int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct reiserfs_transaction_handle th;
int jbegin_count = 1;
@@ -1630,7 +1630,7 @@ int reiserfs_write_inode(struct inode *inode, int do_sync)
** inode needs to reach disk for safety, and they can safely be
** ignored because the altered inode has already been logged.
*/
- if (do_sync && !(current->flags & PF_MEMALLOC)) {
+ if (wbc->sync_mode == WB_SYNC_ALL && !(current->flags & PF_MEMALLOC)) {
reiserfs_write_lock(inode->i_sb);
if (!journal_begin(&th, inode->i_sb, jbegin_count)) {
reiserfs_update_sd(&th, inode);
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index 70e3244..df8a19e 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_SQUASHFS) += squashfs.o
squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
-squashfs-y += namei.o super.o symlink.o
+squashfs-y += namei.o super.o symlink.o zlib_wrapper.o decompressor.o
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 2a79603..1cb0d81 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -29,15 +29,14 @@
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
-#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
-#include <linux/zlib.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
+#include "decompressor.h"
/*
* Read the metadata block length, this is stored in the first two
@@ -153,72 +152,10 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
}
if (compressed) {
- int zlib_err = 0, zlib_init = 0;
-
- /*
- * Uncompress block.
- */
-
- mutex_lock(&msblk->read_data_mutex);
-
- msblk->stream.avail_out = 0;
- msblk->stream.avail_in = 0;
-
- bytes = length;
- do {
- if (msblk->stream.avail_in == 0 && k < b) {
- avail = min(bytes, msblk->devblksize - offset);
- bytes -= avail;
- wait_on_buffer(bh[k]);
- if (!buffer_uptodate(bh[k]))
- goto release_mutex;
-
- if (avail == 0) {
- offset = 0;
- put_bh(bh[k++]);
- continue;
- }
-
- msblk->stream.next_in = bh[k]->b_data + offset;
- msblk->stream.avail_in = avail;
- offset = 0;
- }
-
- if (msblk->stream.avail_out == 0 && page < pages) {
- msblk->stream.next_out = buffer[page++];
- msblk->stream.avail_out = PAGE_CACHE_SIZE;
- }
-
- if (!zlib_init) {
- zlib_err = zlib_inflateInit(&msblk->stream);
- if (zlib_err != Z_OK) {
- ERROR("zlib_inflateInit returned"
- " unexpected result 0x%x,"
- " srclength %d\n", zlib_err,
- srclength);
- goto release_mutex;
- }
- zlib_init = 1;
- }
-
- zlib_err = zlib_inflate(&msblk->stream, Z_SYNC_FLUSH);
-
- if (msblk->stream.avail_in == 0 && k < b)
- put_bh(bh[k++]);
- } while (zlib_err == Z_OK);
-
- if (zlib_err != Z_STREAM_END) {
- ERROR("zlib_inflate error, data probably corrupt\n");
- goto release_mutex;
- }
-
- zlib_err = zlib_inflateEnd(&msblk->stream);
- if (zlib_err != Z_OK) {
- ERROR("zlib_inflate error, data probably corrupt\n");
- goto release_mutex;
- }
- length = msblk->stream.total_out;
- mutex_unlock(&msblk->read_data_mutex);
+ length = squashfs_decompress(msblk, buffer, bh, b, offset,
+ length, srclength, pages);
+ if (length < 0)
+ goto read_failure;
} else {
/*
* Block is uncompressed.
@@ -255,9 +192,6 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
kfree(bh);
return length;
-release_mutex:
- mutex_unlock(&msblk->read_data_mutex);
-
block_release:
for (; k < b; k++)
put_bh(bh[k]);
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 40c98fa..57314be 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -51,7 +51,6 @@
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
-#include <linux/zlib.h>
#include <linux/pagemap.h>
#include "squashfs_fs.h"
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
new file mode 100644
index 0000000..157478d
--- /dev/null
+++ b/fs/squashfs/decompressor.c
@@ -0,0 +1,68 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * decompressor.c
+ */
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/buffer_head.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "decompressor.h"
+#include "squashfs.h"
+
+/*
+ * This file (and decompressor.h) implements a decompressor framework for
+ * Squashfs, allowing multiple decompressors to be easily supported.
+ */
+
+static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = {
+ NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0
+};
+
+static const struct squashfs_decompressor squashfs_lzo_unsupported_comp_ops = {
+ NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
+};
+
+static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
+ NULL, NULL, NULL, 0, "unknown", 0
+};
+
+static const struct squashfs_decompressor *decompressor[] = {
+ &squashfs_zlib_comp_ops,
+ &squashfs_lzma_unsupported_comp_ops,
+ &squashfs_lzo_unsupported_comp_ops,
+ &squashfs_unknown_comp_ops
+};
+
+
+const struct squashfs_decompressor *squashfs_lookup_decompressor(int id)
+{
+ int i;
+
+ for (i = 0; decompressor[i]->id; i++)
+ if (id == decompressor[i]->id)
+ break;
+
+ return decompressor[i];
+}
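[Editor's note: with the table above, wiring in a new decompressor is a matter of defining one ops structure and listing it ahead of the "unknown" sentinel, whose id of 0 terminates the lookup scan. A hypothetical example; foo_init, foo_free, foo_uncompress and FOO_COMPRESSION are invented for illustration, and only zlib is actually supported by this patch.]

	static const struct squashfs_decompressor squashfs_foo_comp_ops = {
		.init		= foo_init,
		.free		= foo_free,
		.decompress	= foo_uncompress,
		.id		= FOO_COMPRESSION,	/* hypothetical on-disk id */
		.name		= "foo",
		.supported	= 1
	};

	/*
	 * ...then add &squashfs_foo_comp_ops to decompressor[] before
	 * squashfs_unknown_comp_ops, so unknown ids still fall through
	 * to the sentinel.
	 */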
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
new file mode 100644
index 0000000..7425f80
--- /dev/null
+++ b/fs/squashfs/decompressor.h
@@ -0,0 +1,55 @@
+#ifndef DECOMPRESSOR_H
+#define DECOMPRESSOR_H
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * decompressor.h
+ */
+
+struct squashfs_decompressor {
+ void *(*init)(struct squashfs_sb_info *);
+ void (*free)(void *);
+ int (*decompress)(struct squashfs_sb_info *, void **,
+ struct buffer_head **, int, int, int, int, int);
+ int id;
+ char *name;
+ int supported;
+};
+
+static inline void *squashfs_decompressor_init(struct squashfs_sb_info *msblk)
+{
+ return msblk->decompressor->init(msblk);
+}
+
+static inline void squashfs_decompressor_free(struct squashfs_sb_info *msblk,
+ void *s)
+{
+ if (msblk->decompressor)
+ msblk->decompressor->free(s);
+}
+
+static inline int squashfs_decompress(struct squashfs_sb_info *msblk,
+ void **buffer, struct buffer_head **bh, int b, int offset, int length,
+ int srclength, int pages)
+{
+ return msblk->decompressor->decompress(msblk, buffer, bh, b, offset,
+ length, srclength, pages);
+}
+#endif
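[Editor's note: these inline wrappers are the only interface the rest of squashfs uses to reach a decompressor; the call sites added later in this patch reduce to the lifecycle below. This is a sketch of those existing calls, not new API.]

	/* Lifecycle as wired up in super.c and block.c by this patch */
	msblk->stream = squashfs_decompressor_init(msblk);		/* mount   */
	length = squashfs_decompress(msblk, buffer, bh, b, offset,
				     length, srclength, pages);		/* read    */
	squashfs_decompressor_free(msblk, msblk->stream);		/* unmount */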
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c
index 566b0ea..12b933a 100644
--- a/fs/squashfs/dir.c
+++ b/fs/squashfs/dir.c
@@ -30,7 +30,6 @@
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
-#include <linux/zlib.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
index 2b1b8fe..7f93d5a 100644
--- a/fs/squashfs/export.c
+++ b/fs/squashfs/export.c
@@ -39,7 +39,6 @@
#include <linux/vfs.h>
#include <linux/dcache.h>
#include <linux/exportfs.h>
-#include <linux/zlib.h>
#include <linux/slab.h>
#include "squashfs_fs.h"
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 717767d..a25c506 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -47,7 +47,6 @@
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
-#include <linux/zlib.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
index b5a2c15..7c90bbd 100644
--- a/fs/squashfs/fragment.c
+++ b/fs/squashfs/fragment.c
@@ -36,7 +36,6 @@
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
-#include <linux/zlib.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
index 3795b83..b7f64bc 100644
--- a/fs/squashfs/id.c
+++ b/fs/squashfs/id.c
@@ -34,7 +34,6 @@
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
-#include <linux/zlib.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index 9101dbd..49daaf6 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -40,7 +40,6 @@
#include <linux/fs.h>
#include <linux/vfs.h>
-#include <linux/zlib.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
index 9e39865..5266bd8 100644
--- a/fs/squashfs/namei.c
+++ b/fs/squashfs/namei.c
@@ -57,7 +57,6 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dcache.h>
-#include <linux/zlib.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index 0e9feb6..fe2587a 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -51,6 +51,9 @@ extern struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *,
u64, int);
extern int squashfs_read_table(struct super_block *, void *, u64, int);
+/* decompressor.c */
+extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int);
+
/* export.c */
extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64,
unsigned int);
@@ -71,7 +74,7 @@ extern struct inode *squashfs_iget(struct super_block *, long long,
extern int squashfs_read_inode(struct inode *, long long);
/*
- * Inodes and files operations
+ * Inodes, files and decompressor operations
*/
/* dir.c */
@@ -88,3 +91,6 @@ extern const struct inode_operations squashfs_dir_inode_ops;
/* symlink.c */
extern const struct address_space_operations squashfs_symlink_aops;
+
+/* zlib_wrapper.c */
+extern const struct squashfs_decompressor squashfs_zlib_comp_ops;
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 283daaf..7902424 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -183,8 +183,6 @@
#define SQUASHFS_MAX_FILE_SIZE (1LL << \
(SQUASHFS_MAX_FILE_SIZE_LOG - 2))
-#define SQUASHFS_MARKER_BYTE 0xff
-
/* meta index cache */
#define SQUASHFS_META_INDEXES (SQUASHFS_METADATA_SIZE / sizeof(unsigned int))
#define SQUASHFS_META_ENTRIES 127
@@ -211,7 +209,9 @@ struct meta_index {
/*
* definitions for structures on disk
*/
-#define ZLIB_COMPRESSION 1
+#define ZLIB_COMPRESSION 1
+#define LZMA_COMPRESSION 2
+#define LZO_COMPRESSION 3
struct squashfs_super_block {
__le32 s_magic;
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index c8c6561..2e77dc5 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -52,25 +52,25 @@ struct squashfs_cache_entry {
};
struct squashfs_sb_info {
- int devblksize;
- int devblksize_log2;
- struct squashfs_cache *block_cache;
- struct squashfs_cache *fragment_cache;
- struct squashfs_cache *read_page;
- int next_meta_index;
- __le64 *id_table;
- __le64 *fragment_index;
- unsigned int *fragment_index_2;
- struct mutex read_data_mutex;
- struct mutex meta_index_mutex;
- struct meta_index *meta_index;
- z_stream stream;
- __le64 *inode_lookup_table;
- u64 inode_table;
- u64 directory_table;
- unsigned int block_size;
- unsigned short block_log;
- long long bytes_used;
- unsigned int inodes;
+ const struct squashfs_decompressor *decompressor;
+ int devblksize;
+ int devblksize_log2;
+ struct squashfs_cache *block_cache;
+ struct squashfs_cache *fragment_cache;
+ struct squashfs_cache *read_page;
+ int next_meta_index;
+ __le64 *id_table;
+ __le64 *fragment_index;
+ struct mutex read_data_mutex;
+ struct mutex meta_index_mutex;
+ struct meta_index *meta_index;
+ void *stream;
+ __le64 *inode_lookup_table;
+ u64 inode_table;
+ u64 directory_table;
+ unsigned int block_size;
+ unsigned short block_log;
+ long long bytes_used;
+ unsigned int inodes;
};
#endif
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 6c197ef..3550aec 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -35,34 +35,41 @@
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/zlib.h>
#include <linux/magic.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
+#include "decompressor.h"
static struct file_system_type squashfs_fs_type;
static const struct super_operations squashfs_super_ops;
-static int supported_squashfs_filesystem(short major, short minor, short comp)
+static const struct squashfs_decompressor *supported_squashfs_filesystem(short
+ major, short minor, short id)
{
+ const struct squashfs_decompressor *decompressor;
+
if (major < SQUASHFS_MAJOR) {
ERROR("Major/Minor mismatch, older Squashfs %d.%d "
"filesystems are unsupported\n", major, minor);
- return -EINVAL;
+ return NULL;
} else if (major > SQUASHFS_MAJOR || minor > SQUASHFS_MINOR) {
ERROR("Major/Minor mismatch, trying to mount newer "
"%d.%d filesystem\n", major, minor);
ERROR("Please update your kernel\n");
- return -EINVAL;
+ return NULL;
}
- if (comp != ZLIB_COMPRESSION)
- return -EINVAL;
+ decompressor = squashfs_lookup_decompressor(id);
+ if (!decompressor->supported) {
+ ERROR("Filesystem uses \"%s\" compression. This is not "
+ "supported\n", decompressor->name);
+ return NULL;
+ }
- return 0;
+ return decompressor;
}
@@ -87,13 +94,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
}
msblk = sb->s_fs_info;
- msblk->stream.workspace = kmalloc(zlib_inflate_workspacesize(),
- GFP_KERNEL);
- if (msblk->stream.workspace == NULL) {
- ERROR("Failed to allocate zlib workspace\n");
- goto failure;
- }
-
sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
if (sblk == NULL) {
ERROR("Failed to allocate squashfs_super_block\n");
@@ -120,25 +120,25 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
+ err = -EINVAL;
+
/* Check it is a SQUASHFS superblock */
sb->s_magic = le32_to_cpu(sblk->s_magic);
if (sb->s_magic != SQUASHFS_MAGIC) {
if (!silent)
ERROR("Can't find a SQUASHFS superblock on %s\n",
bdevname(sb->s_bdev, b));
- err = -EINVAL;
goto failed_mount;
}
- /* Check the MAJOR & MINOR versions and compression type */
- err = supported_squashfs_filesystem(le16_to_cpu(sblk->s_major),
+ /* Check the MAJOR & MINOR versions and lookup compression type */
+ msblk->decompressor = supported_squashfs_filesystem(
+ le16_to_cpu(sblk->s_major),
le16_to_cpu(sblk->s_minor),
le16_to_cpu(sblk->compression));
- if (err < 0)
+ if (msblk->decompressor == NULL)
goto failed_mount;
- err = -EINVAL;
-
/*
* Check if there's xattrs in the filesystem. These are not
* supported in this version, so warn that they will be ignored.
@@ -205,6 +205,10 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
err = -ENOMEM;
+ msblk->stream = squashfs_decompressor_init(msblk);
+ if (msblk->stream == NULL)
+ goto failed_mount;
+
msblk->block_cache = squashfs_cache_init("metadata",
SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
if (msblk->block_cache == NULL)
@@ -292,17 +296,16 @@ failed_mount:
squashfs_cache_delete(msblk->block_cache);
squashfs_cache_delete(msblk->fragment_cache);
squashfs_cache_delete(msblk->read_page);
+ squashfs_decompressor_free(msblk, msblk->stream);
kfree(msblk->inode_lookup_table);
kfree(msblk->fragment_index);
kfree(msblk->id_table);
- kfree(msblk->stream.workspace);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
kfree(sblk);
return err;
failure:
- kfree(msblk->stream.workspace);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
return -ENOMEM;
@@ -346,10 +349,10 @@ static void squashfs_put_super(struct super_block *sb)
squashfs_cache_delete(sbi->block_cache);
squashfs_cache_delete(sbi->fragment_cache);
squashfs_cache_delete(sbi->read_page);
+ squashfs_decompressor_free(sbi, sbi->stream);
kfree(sbi->id_table);
kfree(sbi->fragment_index);
kfree(sbi->meta_index);
- kfree(sbi->stream.workspace);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
}
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index 83d8788..e80be20 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -36,7 +36,6 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
-#include <linux/zlib.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
new file mode 100644
index 0000000..4dd70e0
--- /dev/null
+++ b/fs/squashfs/zlib_wrapper.c
@@ -0,0 +1,150 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * zlib_wrapper.c
+ */
+
+
+#include <linux/mutex.h>
+#include <linux/buffer_head.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+#include "decompressor.h"
+
+static void *zlib_init(struct squashfs_sb_info *dummy)
+{
+ z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL);
+ if (stream == NULL)
+ goto failed;
+ stream->workspace = kmalloc(zlib_inflate_workspacesize(),
+ GFP_KERNEL);
+ if (stream->workspace == NULL)
+ goto failed;
+
+ return stream;
+
+failed:
+ ERROR("Failed to allocate zlib workspace\n");
+ kfree(stream);
+ return NULL;
+}
+
+
+static void zlib_free(void *strm)
+{
+ z_stream *stream = strm;
+
+ if (stream)
+ kfree(stream->workspace);
+ kfree(stream);
+}
+
+
+static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer,
+ struct buffer_head **bh, int b, int offset, int length, int srclength,
+ int pages)
+{
+ int zlib_err = 0, zlib_init = 0;
+ int avail, bytes, k = 0, page = 0;
+ z_stream *stream = msblk->stream;
+
+ mutex_lock(&msblk->read_data_mutex);
+
+ stream->avail_out = 0;
+ stream->avail_in = 0;
+
+ bytes = length;
+ do {
+ if (stream->avail_in == 0 && k < b) {
+ avail = min(bytes, msblk->devblksize - offset);
+ bytes -= avail;
+ wait_on_buffer(bh[k]);
+ if (!buffer_uptodate(bh[k]))
+ goto release_mutex;
+
+ if (avail == 0) {
+ offset = 0;
+ put_bh(bh[k++]);
+ continue;
+ }
+
+ stream->next_in = bh[k]->b_data + offset;
+ stream->avail_in = avail;
+ offset = 0;
+ }
+
+ if (stream->avail_out == 0 && page < pages) {
+ stream->next_out = buffer[page++];
+ stream->avail_out = PAGE_CACHE_SIZE;
+ }
+
+ if (!zlib_init) {
+ zlib_err = zlib_inflateInit(stream);
+ if (zlib_err != Z_OK) {
+ ERROR("zlib_inflateInit returned unexpected "
+ "result 0x%x, srclength %d\n",
+ zlib_err, srclength);
+ goto release_mutex;
+ }
+ zlib_init = 1;
+ }
+
+ zlib_err = zlib_inflate(stream, Z_SYNC_FLUSH);
+
+ if (stream->avail_in == 0 && k < b)
+ put_bh(bh[k++]);
+ } while (zlib_err == Z_OK);
+
+ if (zlib_err != Z_STREAM_END) {
+ ERROR("zlib_inflate error, data probably corrupt\n");
+ goto release_mutex;
+ }
+
+ zlib_err = zlib_inflateEnd(stream);
+ if (zlib_err != Z_OK) {
+ ERROR("zlib_inflate error, data probably corrupt\n");
+ goto release_mutex;
+ }
+
+ mutex_unlock(&msblk->read_data_mutex);
+ return stream->total_out;
+
+release_mutex:
+ mutex_unlock(&msblk->read_data_mutex);
+
+ for (; k < b; k++)
+ put_bh(bh[k]);
+
+ return -EIO;
+}
+
+const struct squashfs_decompressor squashfs_zlib_comp_ops = {
+ .init = zlib_init,
+ .free = zlib_free,
+ .decompress = zlib_uncompress,
+ .id = ZLIB_COMPRESSION,
+ .name = "zlib",
+ .supported = 1
+};
+
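The decompressor interface used above is pluggable: a backend fills in a struct squashfs_decompressor with init/free/decompress hooks plus its on-disk id. As a rough sketch only (not part of this patch; the id value and dummy names are hypothetical), another backend would mirror the zlib one:

    static void *dummy_init(struct squashfs_sb_info *msblk)
    {
        return kmalloc(16, GFP_KERNEL);  /* backend-private state */
    }

    static void dummy_free(void *strm)
    {
        kfree(strm);
    }

    static int dummy_uncompress(struct squashfs_sb_info *msblk, void **buffer,
        struct buffer_head **bh, int b, int offset, int length, int srclength,
        int pages)
    {
        return -EIO;  /* a real backend decompresses bh[] into buffer[] here */
    }

    const struct squashfs_decompressor squashfs_dummy_comp_ops = {
        .init = dummy_init,
        .free = dummy_free,
        .decompress = dummy_uncompress,
        .id = 2,  /* hypothetical on-disk id */
        .name = "dummy",
        .supported = 1
    };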
diff --git a/fs/super.c b/fs/super.c
index aff046b..f35ac60 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -568,7 +568,7 @@ out:
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
int retval;
- int remount_rw;
+ int remount_rw, remount_ro;
if (sb->s_frozen != SB_UNFROZEN)
return -EBUSY;
@@ -583,9 +583,12 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
shrink_dcache_sb(sb);
sync_filesystem(sb);
+ remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
+ remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY);
+
/* If we are remounting RDONLY and current sb is read/write,
make sure there are no rw files opened */
- if ((flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY)) {
+ if (remount_ro) {
if (force)
mark_files_ro(sb);
else if (!fs_may_remount_ro(sb))
@@ -594,7 +597,6 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
if (retval < 0 && retval != -ENOSYS)
return -EBUSY;
}
- remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY);
if (sb->s_op->remount_fs) {
retval = sb->s_op->remount_fs(sb, &flags, data);
@@ -604,6 +606,16 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
if (remount_rw)
vfs_dq_quota_on_remount(sb);
+ /*
+ * Some filesystems modify their metadata via some other path than the
+ * bdev buffer cache (e.g. they use a private mapping, or directories in
+ * pagecache, etc). File data modifications also go via their own
+ * mappings. So if we try to remount read-only and then copy the
+ * filesystem from the bdev, we could get stale data, so invalidate it
+ * to give a best effort at coherency.
+ */
+ if (remount_ro && sb->s_bdev)
+ invalidate_bdev(sb->s_bdev);
return 0;
}
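For reference, the remount_ro path above is what a read-only remount from userspace reaches; a minimal sketch of the triggering call (device and mount point hypothetical):

    #include <sys/mount.h>

    /* MS_REMOUNT | MS_RDONLY on a currently read-write mount makes
     * do_remount_sb() see remount_ro as true and invalidate the bdev. */
    mount("/dev/sda1", "/mnt", NULL, MS_REMOUNT | MS_RDONLY, NULL);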
@@ -925,6 +937,9 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
if (!mnt)
goto out;
+ if (flags & MS_KERNMOUNT)
+ mnt->mnt_flags = MNT_INTERNAL;
+
if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
secdata = alloc_secdata();
if (!secdata)
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 9824743..4573734 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
+#include <linux/writeback.h>
#include <linux/namei.h>
#include <asm/byteorder.h>
#include "sysv.h"
@@ -246,7 +247,7 @@ bad_inode:
return ERR_PTR(-EIO);
}
-int sysv_write_inode(struct inode *inode, int wait)
+static int __sysv_write_inode(struct inode *inode, int wait)
{
struct super_block * sb = inode->i_sb;
struct sysv_sb_info * sbi = SYSV_SB(sb);
@@ -296,9 +297,14 @@ int sysv_write_inode(struct inode *inode, int wait)
return 0;
}
+int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ return __sysv_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+}
+
int sysv_sync_inode(struct inode *inode)
{
- return sysv_write_inode(inode, 1);
+ return __sysv_write_inode(inode, 1);
}
static void sysv_delete_inode(struct inode *inode)
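The sysv conversion above is the pattern the rest of this series applies: ->write_inode() now takes a writeback_control and derives the old wait flag from its sync mode. A minimal sketch (foo_ names hypothetical):

    static int foo_write_inode(struct inode *inode, struct writeback_control *wbc)
    {
        /* WB_SYNC_ALL carries what the old "wait" argument used to say */
        return __foo_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
    }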
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index 53786eb..94cb9b4 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -142,7 +142,7 @@ extern int __sysv_write_begin(struct file *file, struct address_space *mapping,
/* inode.c */
extern struct inode *sysv_iget(struct super_block *, unsigned int);
-extern int sysv_write_inode(struct inode *, int);
+extern int sysv_write_inode(struct inode *, struct writeback_control *wbc);
extern int sysv_sync_inode(struct inode *);
extern void sysv_set_inode(struct inode *, dev_t);
extern int sysv_getattr(struct vfsmount *, struct dentry *, struct kstat *);
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 552fb01..401e503 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -1120,7 +1120,7 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (release)
ubifs_release_budget(c, &ino_req);
if (IS_SYNC(old_inode))
- err = old_inode->i_sb->s_op->write_inode(old_inode, 1);
+ err = old_inode->i_sb->s_op->write_inode(old_inode, NULL);
return err;
out_cancel:
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 16a6444..e26c02a 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1011,7 +1011,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
/* Is the page fully inside @i_size? */
if (page->index < end_index) {
if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
- err = inode->i_sb->s_op->write_inode(inode, 1);
+ err = inode->i_sb->s_op->write_inode(inode, NULL);
if (err)
goto out_unlock;
/*
@@ -1039,7 +1039,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
kunmap_atomic(kaddr, KM_USER0);
if (i_size > synced_i_size) {
- err = inode->i_sb->s_op->write_inode(inode, 1);
+ err = inode->i_sb->s_op->write_inode(inode, NULL);
if (err)
goto out_unlock;
}
@@ -1242,7 +1242,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
if (release)
ubifs_release_budget(c, &req);
if (IS_SYNC(inode))
- err = inode->i_sb->s_op->write_inode(inode, 1);
+ err = inode->i_sb->s_op->write_inode(inode, NULL);
return err;
out:
@@ -1316,7 +1316,7 @@ int ubifs_fsync(struct file *file, struct dentry *dentry, int datasync)
* the inode unless this is a 'datasync()' call.
*/
if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
- err = inode->i_sb->s_op->write_inode(inode, 1);
+ err = inode->i_sb->s_op->write_inode(inode, NULL);
if (err)
return err;
}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 43f9d19..4d2f215 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -283,7 +283,7 @@ static void ubifs_destroy_inode(struct inode *inode)
/*
* Note, Linux write-back code calls this without 'i_mutex'.
*/
-static int ubifs_write_inode(struct inode *inode, int wait)
+static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int err = 0;
struct ubifs_info *c = inode->i_sb->s_fs_info;
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index e2ff180..ccc3ad7 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -551,7 +551,7 @@ static void udf_table_free_blocks(struct super_block *sb,
}
if (epos.offset + (2 * adsize) > sb->s_blocksize) {
- char *sptr, *dptr;
+ unsigned char *sptr, *dptr;
int loffset;
brelse(oepos.bh);
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 61d9a76..f0f2a43 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -45,8 +45,8 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
int block, iblock;
loff_t nf_pos = (filp->f_pos - 1) << 2;
int flen;
- char *fname = NULL;
- char *nameptr;
+ unsigned char *fname = NULL;
+ unsigned char *nameptr;
uint16_t liu;
uint8_t lfi;
loff_t size = udf_ext0_offset(dir) + dir->i_size;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index c7da1a3..b57ab04 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1379,12 +1379,12 @@ static mode_t udf_convert_permissions(struct fileEntry *fe)
return mode;
}
-int udf_write_inode(struct inode *inode, int sync)
+int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int ret;
lock_kernel();
- ret = udf_update_inode(inode, sync);
+ ret = udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
unlock_kernel();
return ret;
@@ -1678,7 +1678,7 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
return -1;
if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
- char *sptr, *dptr;
+ unsigned char *sptr, *dptr;
struct buffer_head *nbh;
int err, loffset;
struct kernel_lb_addr obloc = epos->block;
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 96757e3..db423ab 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -34,8 +34,8 @@
#include <linux/crc-itu-t.h>
#include <linux/exportfs.h>
-static inline int udf_match(int len1, const char *name1, int len2,
- const char *name2)
+static inline int udf_match(int len1, const unsigned char *name1, int len2,
+ const unsigned char *name2)
{
if (len1 != len2)
return 0;
@@ -142,15 +142,15 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
}
static struct fileIdentDesc *udf_find_entry(struct inode *dir,
- struct qstr *child,
+ const struct qstr *child,
struct udf_fileident_bh *fibh,
struct fileIdentDesc *cfi)
{
struct fileIdentDesc *fi = NULL;
loff_t f_pos;
int block, flen;
- char *fname = NULL;
- char *nameptr;
+ unsigned char *fname = NULL;
+ unsigned char *nameptr;
uint8_t lfi;
uint16_t liu;
loff_t size;
@@ -308,7 +308,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
{
struct super_block *sb = dir->i_sb;
struct fileIdentDesc *fi = NULL;
- char *name = NULL;
+ unsigned char *name = NULL;
int namelen;
loff_t f_pos;
loff_t size = udf_ext0_offset(dir) + dir->i_size;
@@ -895,16 +895,16 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
{
struct inode *inode;
struct pathComponent *pc;
- char *compstart;
+ const char *compstart;
struct udf_fileident_bh fibh;
struct extent_position epos = {};
int eoffset, elen = 0;
struct fileIdentDesc *fi;
struct fileIdentDesc cfi;
- char *ea;
+ uint8_t *ea;
int err;
int block;
- char *name = NULL;
+ unsigned char *name = NULL;
int namelen;
struct buffer_head *bh;
struct udf_inode_info *iinfo;
@@ -982,7 +982,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
pc = (struct pathComponent *)(ea + elen);
- compstart = (char *)symname;
+ compstart = symname;
do {
symname++;
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index c3265e1..852e918 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -32,12 +32,12 @@
#include <linux/buffer_head.h>
#include "udf_i.h"
-static void udf_pc_to_char(struct super_block *sb, char *from, int fromlen,
- char *to)
+static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
+ int fromlen, unsigned char *to)
{
struct pathComponent *pc;
int elen = 0;
- char *p = to;
+ unsigned char *p = to;
while (elen < fromlen) {
pc = (struct pathComponent *)(from + elen);
@@ -75,9 +75,9 @@ static int udf_symlink_filler(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
struct buffer_head *bh = NULL;
- char *symlink;
+ unsigned char *symlink;
int err = -EIO;
- char *p = kmap(page);
+ unsigned char *p = kmap(page);
struct udf_inode_info *iinfo;
lock_kernel();
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 8d46f42..4223ac8 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -142,7 +142,7 @@ extern void udf_truncate(struct inode *);
extern void udf_read_inode(struct inode *);
extern void udf_delete_inode(struct inode *);
extern void udf_clear_inode(struct inode *);
-extern int udf_write_inode(struct inode *, int);
+extern int udf_write_inode(struct inode *, struct writeback_control *wbc);
extern long udf_block_map(struct inode *, sector_t);
extern int udf_extend_file(struct inode *, struct extent_position *,
struct kernel_long_ad *, sector_t);
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 22af68f..317a0d4 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -31,7 +31,7 @@
* len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
*/
static inline int ufs_match(struct super_block *sb, int len,
- const char * const name, struct ufs_dir_entry * de)
+ const unsigned char *name, struct ufs_dir_entry *de)
{
if (len != ufs_get_de_namlen(sb, de))
return 0;
@@ -70,7 +70,7 @@ static inline unsigned long ufs_dir_pages(struct inode *inode)
return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}
-ino_t ufs_inode_by_name(struct inode *dir, struct qstr *qstr)
+ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
{
ino_t res = 0;
struct ufs_dir_entry *de;
@@ -249,11 +249,11 @@ struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
* (as a parameter - res_dir). Page is returned mapped and unlocked.
* Entry is guaranteed to be valid.
*/
-struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct qstr *qstr,
+struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
struct page **res_page)
{
struct super_block *sb = dir->i_sb;
- const char *name = qstr->name;
+ const unsigned char *name = qstr->name;
int namelen = qstr->len;
unsigned reclen = UFS_DIR_REC_LEN(namelen);
unsigned long start, n;
@@ -313,7 +313,7 @@ found:
int ufs_add_link(struct dentry *dentry, struct inode *inode)
{
struct inode *dir = dentry->d_parent->d_inode;
- const char *name = dentry->d_name.name;
+ const unsigned char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct super_block *sb = dir->i_sb;
unsigned reclen = UFS_DIR_REC_LEN(namelen);
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 09aef49..80b68c3 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -36,6 +36,7 @@
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
+#include <linux/writeback.h>
#include <linux/quotaops.h>
#include "ufs_fs.h"
@@ -891,11 +892,11 @@ static int ufs_update_inode(struct inode * inode, int do_sync)
return 0;
}
-int ufs_write_inode (struct inode * inode, int wait)
+int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int ret;
lock_kernel();
- ret = ufs_update_inode (inode, wait);
+ ret = ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
unlock_kernel();
return ret;
}
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 0b4c39b..43f9f5d 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -86,9 +86,9 @@ extern void ufs_put_cylinder (struct super_block *, unsigned);
/* dir.c */
extern const struct inode_operations ufs_dir_inode_operations;
extern int ufs_add_link (struct dentry *, struct inode *);
-extern ino_t ufs_inode_by_name(struct inode *, struct qstr *);
+extern ino_t ufs_inode_by_name(struct inode *, const struct qstr *);
extern int ufs_make_empty(struct inode *, struct inode *);
-extern struct ufs_dir_entry *ufs_find_entry(struct inode *, struct qstr *, struct page **);
+extern struct ufs_dir_entry *ufs_find_entry(struct inode *, const struct qstr *, struct page **);
extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *);
extern int ufs_empty_dir (struct inode *);
extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **);
@@ -106,7 +106,7 @@ extern struct inode * ufs_new_inode (struct inode *, int);
/* inode.c */
extern struct inode *ufs_iget(struct super_block *, unsigned long);
-extern int ufs_write_inode (struct inode *, int);
+extern int ufs_write_inode (struct inode *, struct writeback_control *);
extern int ufs_sync_inode (struct inode *);
extern void ufs_delete_inode (struct inode *);
extern struct buffer_head * ufs_bread (struct inode *, unsigned, int, int *);
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 25ea240..71345a3 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -1063,7 +1063,7 @@ xfs_log_inode(
STATIC int
xfs_fs_write_inode(
struct inode *inode,
- int sync)
+ struct writeback_control *wbc)
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
@@ -1074,11 +1074,7 @@ xfs_fs_write_inode(
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
- if (sync) {
- error = xfs_wait_on_pages(ip, 0, -1);
- if (error)
- goto out;
-
+ if (wbc->sync_mode == WB_SYNC_ALL) {
/*
* Make sure the inode has hit stable storage. By using the
* log and the fsync transactions we reduce the IOs we have
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 70504fc..14dafd6 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -245,7 +245,7 @@ typedef struct xfs_mount {
struct xfs_qmops *m_qm_ops; /* vector of XQM ops */
atomic_t m_active_trans; /* number trans frozen */
#ifdef HAVE_PERCPU_SB
- xfs_icsb_cnts_t *m_sb_cnts; /* per-cpu superblock counters */
+ xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */
unsigned long m_icsb_counters; /* disabled per-cpu counters */
struct notifier_block m_icsb_notifier; /* hotplug cpu notifier */
struct mutex m_icsb_mutex; /* balancer sync lock */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 2983176..1172c27 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -238,7 +238,7 @@ struct acpi_processor_errata {
extern int acpi_processor_preregister_performance(struct
acpi_processor_performance
- *performance);
+ __percpu *performance);
extern int acpi_processor_register_performance(struct acpi_processor_performance
*performance, unsigned int cpu);
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index fc21844..c8a5d68 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -52,23 +52,4 @@ typedef struct
#define __local_add(i,l) local_set((l), local_read(l) + (i))
#define __local_sub(i,l) local_set((l), local_read(l) - (i))
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable (eg. mystruct.foo), not an address.
- */
-#define cpu_local_read(l) local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
-#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
-
-/* Non-atomic increments, ie. preemption disabled and won't be touched
- * in interrupt, etc. Some archs can optimize this case well.
- */
-#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
-
#endif /* _ASM_GENERIC_LOCAL_H */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 8087b90..04f91c2 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -41,7 +41,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
* Only S390 provides its own means of moving the pointer.
*/
#ifndef SHIFT_PERCPU_PTR
-#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset))
+/* Weird cast keeps both GCC and sparse happy. */
+#define SHIFT_PERCPU_PTR(__p, __offset) ({ \
+ __verify_pcpu_ptr((__p)); \
+ RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
+})
#endif
/*
@@ -50,11 +54,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
* offset.
*/
#define per_cpu(var, cpu) \
- (*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
+ (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
#define __get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
+ (*SHIFT_PERCPU_PTR(&(var), my_cpu_offset))
#define __raw_get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+ (*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset))
#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
@@ -66,9 +70,9 @@ extern void setup_per_cpu_areas(void);
#else /* ! SMP */
-#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
-#define __get_cpu_var(var) per_cpu_var(var)
-#define __raw_get_cpu_var(var) per_cpu_var(var)
+#define per_cpu(var, cpu) (*((void)(cpu), &(var)))
+#define __get_cpu_var(var) (var)
+#define __raw_get_cpu_var(var) (var)
#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
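With per_cpu_var() gone from these macros, callers name the per-cpu variable directly; a short usage sketch (variable name hypothetical):

    DEFINE_PER_CPU(int, my_counter);

    static void touch_counters(int cpu)
    {
        per_cpu(my_counter, cpu) = 0;  /* another CPU's instance */
        __get_cpu_var(my_counter)++;   /* the local CPU's instance */
    }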
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index ffac157..4a3c4e4 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -801,6 +801,7 @@ struct drm_driver {
*/
int (*gem_init_object) (struct drm_gem_object *obj);
void (*gem_free_object) (struct drm_gem_object *obj);
+ void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
/* vga arb irq handler */
void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1427,6 +1428,7 @@ extern void drm_sysfs_connector_remove(struct drm_connector *connector);
int drm_gem_init(struct drm_device *dev);
void drm_gem_destroy(struct drm_device *dev);
void drm_gem_object_free(struct kref *kref);
+void drm_gem_object_free_unlocked(struct kref *kref);
struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
size_t size);
void drm_gem_object_handle_free(struct kref *kref);
@@ -1443,10 +1445,15 @@ drm_gem_object_reference(struct drm_gem_object *obj)
static inline void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
- if (obj == NULL)
- return;
+ if (obj != NULL)
+ kref_put(&obj->refcount, drm_gem_object_free);
+}
- kref_put(&obj->refcount, drm_gem_object_free);
+static inline void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+{
+ if (obj != NULL)
+ kref_put(&obj->refcount, drm_gem_object_free_unlocked);
}
int drm_gem_handle_create(struct drm_file *file_priv,
@@ -1475,6 +1482,21 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj)
drm_gem_object_unreference(obj);
}
+static inline void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+ if (obj == NULL)
+ return;
+
+ /*
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before we
+ * checked for a name
+ */
+ kref_put(&obj->handlecount, drm_gem_object_handle_free);
+ drm_gem_object_unreference_unlocked(obj);
+}
+
struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
struct drm_file *filp,
u32 handle);
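A short usage sketch of the new unlocked variant, for code paths that cannot hold struct_mutex (handle value hypothetical):

    struct drm_gem_object *obj;

    obj = drm_gem_object_lookup(dev, file_priv, handle);
    if (obj == NULL)
        return -ENOENT;
    /* ... use obj ... */
    drm_gem_object_unreference_unlocked(obj);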
diff --git a/include/drm/drm_buffer.h b/include/drm/drm_buffer.h
new file mode 100644
index 0000000..322dbff
--- /dev/null
+++ b/include/drm/drm_buffer.h
@@ -0,0 +1,148 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Pauli Nieminen.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Multipart buffer for copying data which is larger than the page size.
+ *
+ * Authors:
+ * Pauli Nieminen <suokkos-at-gmail-dot-com>
+ */
+
+#ifndef _DRM_BUFFER_H_
+#define _DRM_BUFFER_H_
+
+#include "drmP.h"
+
+struct drm_buffer {
+ int iterator;
+ int size;
+ char *data[];
+};
+
+
+/**
+ * Return the index of the page that the buffer is currently pointing at.
+ */
+static inline int drm_buffer_page(struct drm_buffer *buf)
+{
+ return buf->iterator / PAGE_SIZE;
+}
+/**
+ * Return the index of the current byte in the page
+ */
+static inline int drm_buffer_index(struct drm_buffer *buf)
+{
+ return buf->iterator & (PAGE_SIZE - 1);
+}
+/**
+ * Return the number of bytes that are left to process.
+ */
+static inline int drm_buffer_unprocessed(struct drm_buffer *buf)
+{
+ return buf->size - buf->iterator;
+}
+
+/**
+ * Advance the buffer iterator by the given number of bytes.
+ */
+static inline void drm_buffer_advance(struct drm_buffer *buf, int bytes)
+{
+ buf->iterator += bytes;
+}
+
+/**
+ * Allocate the drm buffer object.
+ *
+ * buf: A pointer to a pointer where the object is stored.
+ * size: The number of bytes to allocate.
+ */
+extern int drm_buffer_alloc(struct drm_buffer **buf, int size);
+
+/**
+ * Copy the user data to the beginning of the buffer and reset the
+ * processing iterator.
+ *
+ * user_data: A pointer to the data that is copied to the buffer.
+ * size: The number of bytes to copy.
+ */
+extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
+ void __user *user_data, int size);
+
+/**
+ * Free the drm buffer object
+ */
+extern void drm_buffer_free(struct drm_buffer *buf);
+
+/**
+ * Read an object from a buffer that may be split across multiple parts. If
+ * the object is not split, the function just returns a pointer to the object
+ * in the buffer. If the object is split, its data is copied into the stack
+ * object supplied by the caller.
+ *
+ * The processing location of the buffer is also advanced to the next byte
+ * after the object.
+ *
+ * objsize: The size of the object in bytes.
+ * stack_obj: A pointer to a memory location where the object can be copied.
+ */
+extern void *drm_buffer_read_object(struct drm_buffer *buf,
+ int objsize, void *stack_obj);
+
+/**
+ * Returns a pointer to the dword which is offset number of elements from the
+ * current processing location.
+ *
+ * The caller must make sure that the dword is not split in the buffer. This
+ * requirement is easily met if all object sizes in the buffer are multiples
+ * of a dword and PAGE_SIZE is a multiple of a dword.
+ *
+ * Calling this function doesn't change the processing location.
+ *
+ * offset: The index of the dword relative to the internal iterator.
+ */
+static inline void *drm_buffer_pointer_to_dword(struct drm_buffer *buffer,
+ int offset)
+{
+ int iter = buffer->iterator + offset * 4;
+ return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
+}
+/**
+ * Returns a pointer to the byte which is offset number of bytes from the
+ * current processing location.
+ *
+ * Calling this function doesn't change the processing location.
+ *
+ * offset: The index of the byte relative to the internal iterator.
+ */
+static inline void *drm_buffer_pointer_to_byte(struct drm_buffer *buffer,
+ int offset)
+{
+ int iter = buffer->iterator + offset;
+ return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
+}
+
+#endif
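A rough usage sketch for the helpers declared above, roughly as an ioctl handler might consume a user command stream (cmd_size and user_data hypothetical):

    struct drm_buffer *buf;
    int ret;

    ret = drm_buffer_alloc(&buf, cmd_size);
    if (ret)
        return ret;
    ret = drm_buffer_copy_from_user(buf, user_data, cmd_size);
    if (ret) {
        drm_buffer_free(buf);
        return ret;
    }
    while (drm_buffer_unprocessed(buf) >= 4) {
        u32 *cmd = drm_buffer_pointer_to_dword(buf, 0);
        /* ... decode *cmd ... */
        drm_buffer_advance(buf, 4);
    }
    drm_buffer_free(buf);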
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index fdf43ab..1347524 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -801,4 +801,6 @@ extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
bool interlaced, int margins);
extern int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
+
+extern bool drm_edid_is_valid(struct edid *edid);
#endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index d33c3e0..b420989 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -201,4 +201,7 @@ struct edid {
#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
+/* define the number of Extension EDID block */
+#define DRM_MAX_EDID_EXT_NUM 4
+
#endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index e6f3b12..676104b 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -141,6 +141,41 @@
{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x688A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68a0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68a1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68a9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68c8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68c9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68d9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68da, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68de, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68e0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68e1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68e4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68e5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68e8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68e9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -558,4 +593,5 @@
{0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x0042, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x0046, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+ {0x8086, 0x0102, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0, 0, 0}
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index f745948..a6a9f4a 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -25,13 +25,14 @@
#ifndef __NOUVEAU_DRM_H__
#define __NOUVEAU_DRM_H__
-#define NOUVEAU_DRM_HEADER_PATCHLEVEL 15
+#define NOUVEAU_DRM_HEADER_PATCHLEVEL 16
struct drm_nouveau_channel_alloc {
uint32_t fb_ctxdma_handle;
uint32_t tt_ctxdma_handle;
int channel;
+ uint32_t pushbuf_domains;
/* Notifier memory */
uint32_t notifier_handle;
@@ -109,68 +110,58 @@ struct drm_nouveau_gem_new {
uint32_t align;
};
+#define NOUVEAU_GEM_MAX_BUFFERS 1024
+struct drm_nouveau_gem_pushbuf_bo_presumed {
+ uint32_t valid;
+ uint32_t domain;
+ uint64_t offset;
+};
+
struct drm_nouveau_gem_pushbuf_bo {
uint64_t user_priv;
uint32_t handle;
uint32_t read_domains;
uint32_t write_domains;
uint32_t valid_domains;
- uint32_t presumed_ok;
- uint32_t presumed_domain;
- uint64_t presumed_offset;
+ struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
};
#define NOUVEAU_GEM_RELOC_LOW (1 << 0)
#define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
#define NOUVEAU_GEM_RELOC_OR (1 << 2)
+#define NOUVEAU_GEM_MAX_RELOCS 1024
struct drm_nouveau_gem_pushbuf_reloc {
+ uint32_t reloc_bo_index;
+ uint32_t reloc_bo_offset;
uint32_t bo_index;
- uint32_t reloc_index;
uint32_t flags;
uint32_t data;
uint32_t vor;
uint32_t tor;
};
-#define NOUVEAU_GEM_MAX_BUFFERS 1024
-#define NOUVEAU_GEM_MAX_RELOCS 1024
+#define NOUVEAU_GEM_MAX_PUSH 512
+struct drm_nouveau_gem_pushbuf_push {
+ uint32_t bo_index;
+ uint32_t pad;
+ uint64_t offset;
+ uint64_t length;
+};
struct drm_nouveau_gem_pushbuf {
uint32_t channel;
- uint32_t nr_dwords;
uint32_t nr_buffers;
- uint32_t nr_relocs;
- uint64_t dwords;
uint64_t buffers;
- uint64_t relocs;
-};
-
-struct drm_nouveau_gem_pushbuf_call {
- uint32_t channel;
- uint32_t handle;
- uint32_t offset;
- uint32_t nr_buffers;
uint32_t nr_relocs;
- uint32_t nr_dwords;
- uint64_t buffers;
+ uint32_t nr_push;
uint64_t relocs;
+ uint64_t push;
uint32_t suffix0;
uint32_t suffix1;
- /* below only accessed for CALL2 */
uint64_t vram_available;
uint64_t gart_available;
};
-struct drm_nouveau_gem_pin {
- uint32_t handle;
- uint32_t domain;
- uint64_t offset;
-};
-
-struct drm_nouveau_gem_unpin {
- uint32_t handle;
-};
-
#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002
#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
@@ -183,14 +174,6 @@ struct drm_nouveau_gem_cpu_fini {
uint32_t handle;
};
-struct drm_nouveau_gem_tile {
- uint32_t handle;
- uint32_t offset;
- uint32_t size;
- uint32_t tile_mode;
- uint32_t tile_flags;
-};
-
enum nouveau_bus_type {
NV_AGP = 0,
NV_PCI = 1,
@@ -200,22 +183,17 @@ enum nouveau_bus_type {
struct drm_nouveau_sarea {
};
-#define DRM_NOUVEAU_CARD_INIT 0x00
-#define DRM_NOUVEAU_GETPARAM 0x01
-#define DRM_NOUVEAU_SETPARAM 0x02
-#define DRM_NOUVEAU_CHANNEL_ALLOC 0x03
-#define DRM_NOUVEAU_CHANNEL_FREE 0x04
-#define DRM_NOUVEAU_GROBJ_ALLOC 0x05
-#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x06
-#define DRM_NOUVEAU_GPUOBJ_FREE 0x07
+#define DRM_NOUVEAU_GETPARAM 0x00
+#define DRM_NOUVEAU_SETPARAM 0x01
+#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
+#define DRM_NOUVEAU_CHANNEL_FREE 0x03
+#define DRM_NOUVEAU_GROBJ_ALLOC 0x04
+#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05
+#define DRM_NOUVEAU_GPUOBJ_FREE 0x06
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
-#define DRM_NOUVEAU_GEM_PUSHBUF_CALL 0x42
-#define DRM_NOUVEAU_GEM_PIN 0x43 /* !KMS only */
-#define DRM_NOUVEAU_GEM_UNPIN 0x44 /* !KMS only */
-#define DRM_NOUVEAU_GEM_CPU_PREP 0x45
-#define DRM_NOUVEAU_GEM_CPU_FINI 0x46
-#define DRM_NOUVEAU_GEM_INFO 0x47
-#define DRM_NOUVEAU_GEM_PUSHBUF_CALL2 0x48
+#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
+#define DRM_NOUVEAU_GEM_CPU_FINI 0x43
+#define DRM_NOUVEAU_GEM_INFO 0x44
#endif /* __NOUVEAU_DRM_H__ */
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 39537f3..81e614b 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -808,6 +808,7 @@ struct drm_radeon_gem_create {
#define RADEON_TILING_SWAP_32BIT 0x8
#define RADEON_TILING_SURFACE 0x10 /* this object requires a surface
* when mapped - i.e. front buffer */
+#define RADEON_TILING_MICRO_SQUARE 0x20
struct drm_radeon_gem_set_tiling {
uint32_t handle;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 4c4e0f8..e3f1b4a 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -908,7 +908,7 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
* Utility function that returns the pgprot_t that should be used for
* setting up a PTE with the caching model indicated by @c_state.
*/
-extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);
+extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 3c7a358..f391d45 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -424,7 +424,7 @@ extern void audit_syscall_exit(int failed, long return_code);
extern void __audit_getname(const char *name);
extern void audit_putname(const char *name);
extern void __audit_inode(const char *name, const struct dentry *dentry);
-extern void __audit_inode_child(const char *dname, const struct dentry *dentry,
+extern void __audit_inode_child(const struct dentry *dentry,
const struct inode *parent);
extern void __audit_ptrace(struct task_struct *t);
@@ -442,11 +442,10 @@ static inline void audit_inode(const char *name, const struct dentry *dentry) {
if (unlikely(!audit_dummy_context()))
__audit_inode(name, dentry);
}
-static inline void audit_inode_child(const char *dname,
- const struct dentry *dentry,
+static inline void audit_inode_child(const struct dentry *dentry,
const struct inode *parent) {
if (unlikely(!audit_dummy_context()))
- __audit_inode_child(dname, dentry, parent);
+ __audit_inode_child(dentry, parent);
}
void audit_core_dumps(long signr);
@@ -544,9 +543,9 @@ extern int audit_signals;
#define audit_getname(n) do { ; } while (0)
#define audit_putname(n) do { ; } while (0)
#define __audit_inode(n,d) do { ; } while (0)
-#define __audit_inode_child(d,i,p) do { ; } while (0)
+#define __audit_inode_child(i,p) do { ; } while (0)
#define audit_inode(n,d) do { ; } while (0)
-#define audit_inode_child(d,i,p) do { ; } while (0)
+#define audit_inode_child(i,p) do { ; } while (0)
#define audit_core_dumps(i) do { ; } while (0)
#define auditsc_get_stamp(c,t,s) (0)
#define audit_get_loginuid(t) (-1)
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3b73b99..416bf62 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -150,8 +150,8 @@ struct blk_user_trace_setup {
struct blk_trace {
int trace_state;
struct rchan *rchan;
- unsigned long *sequence;
- unsigned char *msg_data;
+ unsigned long __percpu *sequence;
+ unsigned char __percpu *msg_data;
u16 act_mask;
u64 start_lba;
u64 end_lba;
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index b10ec49..266ab92 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -23,6 +23,7 @@ extern unsigned long max_pfn;
extern unsigned long saved_max_pfn;
#endif
+#ifndef CONFIG_NO_BOOTMEM
/*
* node_bootmem_map is a map pointer - the bits represent all physical
* memory pages (including holes) on the node.
@@ -37,6 +38,7 @@ typedef struct bootmem_data {
} bootmem_data_t;
extern bootmem_data_t bootmem_node_data[];
+#endif
extern unsigned long bootmem_bootmap_pages(unsigned long);
@@ -46,6 +48,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
unsigned long endpfn);
extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
+unsigned long free_all_memory_core_early(int nodeid);
extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
extern unsigned long free_all_bootmem(void);
@@ -84,6 +87,10 @@ extern void *__alloc_bootmem_node(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal);
+void *__alloc_bootmem_node_high(pg_data_t *pgdat,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal);
extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 188fcae..a5a472b 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -5,7 +5,7 @@
#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
-# define __kernel /* default address space */
+# define __kernel __attribute__((address_space(0)))
# define __safe __attribute__((safe))
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 7878498..20ea12c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -31,6 +31,8 @@
* if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
*/
typedef s32 dma_cookie_t;
+#define DMA_MIN_COOKIE 1
+#define DMA_MAX_COOKIE INT_MAX
#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
@@ -162,7 +164,7 @@ struct dma_chan {
struct dma_chan_dev *dev;
struct list_head device_node;
- struct dma_chan_percpu *local;
+ struct dma_chan_percpu __percpu *local;
int client_count;
int table_count;
void *private;
diff --git a/include/linux/early_res.h b/include/linux/early_res.h
new file mode 100644
index 0000000..29c09f5
--- /dev/null
+++ b/include/linux/early_res.h
@@ -0,0 +1,23 @@
+#ifndef _LINUX_EARLY_RES_H
+#define _LINUX_EARLY_RES_H
+#ifdef __KERNEL__
+
+extern void reserve_early(u64 start, u64 end, char *name);
+extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
+extern void free_early(u64 start, u64 end);
+void free_early_partial(u64 start, u64 end);
+extern void early_res_to_bootmem(u64 start, u64 end);
+
+void reserve_early_without_check(u64 start, u64 end, char *name);
+u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
+ u64 size, u64 align);
+u64 find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
+ u64 *sizep, u64 align);
+u64 find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align);
+u64 get_max_mapped(void);
+#include <linux/range.h>
+int get_free_all_memory_range(struct range **rangep, int nodeid);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_EARLY_RES_H */
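A minimal sketch of the reservation API declared above (the address range and name are hypothetical):

    /* keep a firmware range away from early allocations ... */
    reserve_early(0x1000000, 0x1400000, "MYFW");

    /* ... and hand it back once it is no longer needed */
    free_early(0x1000000, 0x1400000);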
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index e6590f8..cac84b0 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -894,7 +894,7 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
int create);
extern struct inode *ext3_iget(struct super_block *, unsigned long);
-extern int ext3_write_inode (struct inode *, int);
+extern int ext3_write_inode (struct inode *, struct writeback_control *);
extern int ext3_setattr (struct dentry *, struct iattr *);
extern void ext3_delete_inode (struct inode *);
extern int ext3_sync_inode (handle_t *, struct inode *);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 369767b..c10163b 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -543,6 +543,8 @@ struct fb_cursor_user {
#define FB_EVENT_GET_REQ 0x0D
/* Unbind from the console if possible */
#define FB_EVENT_FB_UNBIND 0x0E
+/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */
+#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
struct fb_event {
struct fb_info *info;
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 520ecf8..40b1101 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -248,13 +248,20 @@ union fw_cdev_event {
#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request)
#define FW_CDEV_IOC_SEND_STREAM_PACKET _IOW('#', 0x13, struct fw_cdev_send_stream_packet)
+/* available since kernel version 2.6.34 */
+#define FW_CDEV_IOC_GET_CYCLE_TIMER2 _IOWR('#', 0x14, struct fw_cdev_get_cycle_timer2)
+
/*
* FW_CDEV_VERSION History
* 1 (2.6.22) - initial version
* 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if
* &fw_cdev_create_iso_context.header_size is 8 or more
+ * (2.6.32) - added time stamp to xmit &fw_cdev_event_iso_interrupt
+ * (2.6.33) - IR has always packet-per-buffer semantics now, not one of
+ * dual-buffer or packet-per-buffer depending on hardware
+ * 3 (2.6.34) - made &fw_cdev_get_cycle_timer reliable
*/
-#define FW_CDEV_VERSION 2
+#define FW_CDEV_VERSION 3
/**
* struct fw_cdev_get_info - General purpose information ioctl
@@ -544,14 +551,18 @@ struct fw_cdev_stop_iso {
/**
* struct fw_cdev_get_cycle_timer - read cycle timer register
* @local_time: system time, in microseconds since the Epoch
- * @cycle_timer: isochronous cycle timer, as per OHCI 1.1 clause 5.13
+ * @cycle_timer: Cycle Time register contents
*
* The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
- * and also the system clock. This allows to express the receive time of an
- * isochronous packet as a system time with microsecond accuracy.
+ * and also the system clock (%CLOCK_REALTIME). This makes it possible to
+ * express the receive time of an isochronous packet as a system time.
*
* @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and
- * 12 bits cycleOffset, in host byte order.
+ * 12 bits cycleOffset, in host byte order. Cf. the Cycle Time register
+ * per IEEE 1394 or Isochronous Cycle Timer register per OHCI-1394.
+ *
+ * In versions 1 and 2 of the ABI, this ioctl returned unreliable (non-
+ * monotonic) @cycle_timer values on certain controllers.
*/
struct fw_cdev_get_cycle_timer {
__u64 local_time;
@@ -559,6 +570,25 @@ struct fw_cdev_get_cycle_timer {
};
/**
+ * struct fw_cdev_get_cycle_timer2 - read cycle timer register
+ * @tv_sec: system time, seconds
+ * @tv_nsec: system time, sub-seconds part in nanoseconds
+ * @clk_id: input parameter, clock from which to get the system time
+ * @cycle_timer: Cycle Time register contents
+ *
+ * The %FW_CDEV_IOC_GET_CYCLE_TIMER2 ioctl works like
+ * %FW_CDEV_IOC_GET_CYCLE_TIMER but lets you choose a clock, as with the
+ * POSIX clock_gettime function. Supported @clk_id values are POSIX'
+ * %CLOCK_REALTIME and %CLOCK_MONOTONIC and Linux' %CLOCK_MONOTONIC_RAW.
+ */
+struct fw_cdev_get_cycle_timer2 {
+ __s64 tv_sec;
+ __s32 tv_nsec;
+ __s32 clk_id;
+ __u32 cycle_timer;
+};
+
+/**
* struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth
 * @closure: Passed back to userspace in corresponding iso resource events
* @channels: Isochronous channels of which one is to be (de)allocated
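A userspace sketch of the new ioctl (the /dev/fw* file descriptor fd is assumed to be open):

    struct fw_cdev_get_cycle_timer2 ct = { .clk_id = CLOCK_MONOTONIC };

    if (ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER2, &ct) == 0)
        printf("%lld.%09d s, cycle_timer 0x%08x\n",
               (long long)ct.tv_sec, (int)ct.tv_nsec, ct.cycle_timer);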
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index a0e6715..4bd94bf 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -65,12 +65,13 @@
#define CSR_DIRECTORY_ID 0x20
struct fw_csr_iterator {
- u32 *p;
- u32 *end;
+ const u32 *p;
+ const u32 *end;
};
-void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
+int fw_csr_string(const u32 *directory, int key, char *buf, size_t size);
extern struct bus_type fw_bus_type;
@@ -162,7 +163,7 @@ struct fw_device {
struct mutex client_list_mutex;
struct list_head client_list;
- u32 *config_rom;
+ const u32 *config_rom;
size_t config_rom_length;
int config_rom_retries;
unsigned is_local:1;
@@ -204,7 +205,7 @@ int fw_device_enable_phys_dma(struct fw_device *device);
*/
struct fw_unit {
struct device device;
- u32 *directory;
+ const u32 *directory;
struct fw_attribute_group attribute_group;
};
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ebb1cd5..4568962 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1305,6 +1305,8 @@ extern int send_sigurg(struct fown_struct *fown);
#define MNT_FORCE 0x00000001 /* Attempt to forcibily umount */
#define MNT_DETACH 0x00000002 /* Just detach from the tree */
#define MNT_EXPIRE 0x00000004 /* Mark for expiry */
+#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
+#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
extern struct list_head super_blocks;
extern spinlock_t sb_lock;
@@ -1314,9 +1316,9 @@ extern spinlock_t sb_lock;
struct super_block {
struct list_head s_list; /* Keep this first */
dev_t s_dev; /* search index; _not_ kdev_t */
- unsigned long s_blocksize;
- unsigned char s_blocksize_bits;
unsigned char s_dirt;
+ unsigned char s_blocksize_bits;
+ unsigned long s_blocksize;
loff_t s_maxbytes; /* Max file size */
struct file_system_type *s_type;
const struct super_operations *s_op;
@@ -1357,16 +1359,16 @@ struct super_block {
void *s_fs_info; /* Filesystem private info */
fmode_t s_mode;
+ /* Granularity of c/m/atime in ns.
+ Cannot be worse than a second */
+ u32 s_time_gran;
+
/*
* The next field is for VFS *only*. No filesystems have any business
* even looking at it. You had been warned.
*/
struct mutex s_vfs_rename_mutex; /* Kludge */
- /* Granularity of c/m/atime in ns.
- Cannot be worse than a second */
- u32 s_time_gran;
-
/*
* Filesystem subtype. If non-empty the filesystem type field
* in /proc/mounts will be "type.subtype"
@@ -1555,7 +1557,7 @@ struct super_operations {
void (*destroy_inode)(struct inode *);
void (*dirty_inode) (struct inode *);
- int (*write_inode) (struct inode *, int);
+ int (*write_inode) (struct inode *, struct writeback_control *wbc);
void (*drop_inode) (struct inode *);
void (*delete_inode) (struct inode *);
void (*put_super) (struct super_block *);
@@ -1794,7 +1796,8 @@ extern int may_umount(struct vfsmount *);
extern long do_mount(char *, char *, char *, unsigned long, void *);
extern struct vfsmount *collect_mounts(struct path *);
extern void drop_collected_mounts(struct vfsmount *);
-
+extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
+ struct vfsmount *);
extern int vfs_statfs(struct dentry *, struct kstatfs *);
extern int current_umask(void);
@@ -2058,12 +2061,6 @@ extern int invalidate_inodes(struct super_block *);
unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end);
-static inline unsigned long __deprecated
-invalidate_inode_pages(struct address_space *mapping)
-{
- return invalidate_mapping_pages(mapping, 0, ~0UL);
-}
-
static inline void invalidate_remote_inode(struct inode *inode)
{
if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
@@ -2132,6 +2129,7 @@ extern struct file * open_exec(const char *);
/* fs/dcache.c -- generic fs support functions */
extern int is_subdir(struct dentry *, struct dentry *);
+extern int path_is_under(struct path *, struct path *);
extern ino_t find_inode_number(struct dentry *, struct qstr *);
#include <linux/err.h>
@@ -2340,8 +2338,6 @@ extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct
extern int simple_sync_file(struct file *, struct dentry *, int);
extern int simple_empty(struct dentry *);
extern int simple_readpage(struct file *file, struct page *page);
-extern int simple_prepare_write(struct file *file, struct page *page,
- unsigned offset, unsigned to);
extern int simple_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata);
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 936f9aa..df8fd9a 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -65,7 +65,7 @@ static inline void fsnotify_link_count(struct inode *inode)
* fsnotify_move - file old_name at old_dir was moved to new_name at new_dir
*/
static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
- const char *old_name, const char *new_name,
+ const char *old_name,
int isdir, struct inode *target, struct dentry *moved)
{
struct inode *source = moved->d_inode;
@@ -73,6 +73,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
u32 fs_cookie = fsnotify_get_cookie();
__u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM);
__u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO);
+ const char *new_name = moved->d_name.name;
if (old_dir == new_dir)
old_dir_mask |= FS_DN_RENAME;
@@ -103,7 +104,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL);
fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
- audit_inode_child(new_name, moved, new_dir);
+ audit_inode_child(moved, new_dir);
}
/*
@@ -146,7 +147,7 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
{
inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name,
dentry->d_inode);
- audit_inode_child(dentry->d_name.name, dentry, inode);
+ audit_inode_child(dentry, inode);
fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
}
@@ -161,7 +162,7 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct
inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name,
inode);
fsnotify_link_count(inode);
- audit_inode_child(new_dentry->d_name.name, new_dentry, dir);
+ audit_inode_child(new_dentry, dir);
fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0);
}
@@ -175,7 +176,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
struct inode *d_inode = dentry->d_inode;
inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode);
- audit_inode_child(dentry->d_name.name, dentry, inode);
+ audit_inode_child(dentry, inode);
fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
}
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 9717081..56b5051 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -101,7 +101,7 @@ struct hd_struct {
unsigned long stamp;
int in_flight[2];
#ifdef CONFIG_SMP
- struct disk_stats *dkstats;
+ struct disk_stats __percpu *dkstats;
#else
struct disk_stats dkstats;
#endif
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index 81f90a5..4f44629 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -180,33 +180,6 @@ struct gfs2_rgrp {
};
/*
- * quota linked list: user quotas and group quotas form two separate
- * singly linked lists. ll_next stores uids or gids of next quotas in the
- * linked list.
-
-Given the uid/gid, how to calculate the quota file offsets for the corresponding
-gfs2_quota structures on disk:
-
-for user quotas, given uid,
-offset = uid * sizeof(struct gfs2_quota);
-
-for group quotas, given gid,
-offset = (gid * sizeof(struct gfs2_quota)) + sizeof(struct gfs2_quota);
-
-
- uid:0 gid:0 uid:12 gid:12 uid:17 gid:17 uid:5142 gid:5142
-+-------+-------+ +-------+-------+ +-------+- - - -+ +- - - -+-------+
-| valid | valid | :: | valid | valid | :: | valid | inval | :: | inval | valid |
-+-------+-------+ +-------+-------+ +-------+- - - -+ +- - - -+-------+
-next:12 next:12 next:17 next:5142 next:NULL next:NULL
- | | | | |<-- user quota list |
- \______|___________/ \______|___________/ group quota list -->|
- | | |
- \__________________/ \_______________________________________/
-
-*/
-
-/*
* quota structure
*/
@@ -214,8 +187,7 @@ struct gfs2_quota {
__be64 qu_limit;
__be64 qu_warn;
__be64 qu_value;
- __be32 qu_ll_next; /* location of next quota in list */
- __u8 qu_reserved[60];
+ __u8 qu_reserved[64];
};
/*
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
new file mode 100644
index 0000000..63f57a8
--- /dev/null
+++ b/include/linux/i2c-smbus.h
@@ -0,0 +1,50 @@
+/*
+ * i2c-smbus.h - SMBus extensions to the I2C protocol
+ *
+ * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _LINUX_I2C_SMBUS_H
+#define _LINUX_I2C_SMBUS_H
+
+#include <linux/i2c.h>
+
+
+/**
+ * i2c_smbus_alert_setup - platform data for the smbus_alert i2c client
+ * @alert_edge_triggered: whether the alert interrupt is edge (1) or level (0)
+ * triggered
+ * @irq: IRQ number, if the smbus_alert driver should take care of interrupt
+ * handling
+ *
+ * If irq is not specified, the smbus_alert driver doesn't take care of
+ * interrupt handling. In that case it is up to the I2C bus driver to either
+ * handle the interrupts or to poll for alerts.
+ *
+ * If irq is specified then it is crucial that alert_edge_triggered is
+ * properly set.
+ */
+struct i2c_smbus_alert_setup {
+ unsigned int alert_edge_triggered:1;
+ int irq;
+};
+
+struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
+ struct i2c_smbus_alert_setup *setup);
+int i2c_handle_smbus_alert(struct i2c_client *ara);
+
+#endif /* _LINUX_I2C_SMBUS_H */
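
A minimal sketch of how an I2C bus driver might wire this up, assuming
it already knows the alert IRQ for its adapter (the example_* names are
hypothetical, and NULL is assumed to indicate setup failure):

    #include <linux/i2c.h>
    #include <linux/i2c-smbus.h>

    static struct i2c_client *alert_ara;

    static int example_enable_alert(struct i2c_adapter *adap, int irq)
    {
        struct i2c_smbus_alert_setup setup = {
            .alert_edge_triggered = 1,  /* board-specific */
            .irq                  = irq,
        };

        alert_ara = i2c_setup_smbus_alert(adap, &setup);
        if (!alert_ara)
            return -ENODEV;
        return 0;
    }

A bus driver that handles the interrupt itself would instead call
i2c_handle_smbus_alert() on the returned client from its own handler.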
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 02fc617..0a5da63 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -53,6 +53,7 @@ struct i2c_board_info;
* on a bus (or read from them). Apart from two basic transfer functions to
* transmit one message at a time, a more complex version can be used to
* transmit an arbitrary number of messages without interruption.
+ * @count must be less than 64k since msg.len is u16.
*/
extern int i2c_master_send(struct i2c_client *client, const char *buf,
int count);
@@ -152,6 +153,13 @@ struct i2c_driver {
int (*suspend)(struct i2c_client *, pm_message_t mesg);
int (*resume)(struct i2c_client *);
+ /* Alert callback, for example for the SMBus alert protocol.
+ * The format and meaning of the data value depends on the protocol.
+ * For the SMBus alert protocol, there is a single bit of data passed
+ * as the alert response's low bit ("event flag").
+ */
+ void (*alert)(struct i2c_client *, unsigned int data);
+
/* a ioctl like command that can be used to perform specific functions
* with the device.
*/
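
A client driver that wants these notifications only has to fill in the
new callback; a hedged sketch (example_* names are hypothetical):

    static void example_alert(struct i2c_client *client, unsigned int data)
    {
        /* For the SMBus alert protocol, bit 0 of data is the event flag. */
        dev_info(&client->dev, "SMBus alert, flag=%u\n", data & 1);
    }

    static struct i2c_driver example_driver = {
        .driver = { .name = "example" },
        .alert  = example_alert,
        /* .probe/.remove/.id_table as usual */
    };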
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 0ec6129..97e6ab4 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -515,6 +515,8 @@ struct ide_drive_s {
u8 init_speed; /* transfer rate set at boot */
u8 current_speed; /* current transfer rate set */
u8 desired_speed; /* desired transfer rate set */
+ u8 pio_mode; /* for ->set_pio_mode _only_ */
+ u8 dma_mode; /* for ->set_dma_mode _only_ */
u8 dn; /* now wide spread use */
u8 acoustic; /* acoustic management */
u8 media; /* disk, cdrom, tape, floppy, ... */
@@ -622,8 +624,8 @@ extern const struct ide_tp_ops default_tp_ops;
*/
struct ide_port_ops {
void (*init_dev)(ide_drive_t *);
- void (*set_pio_mode)(ide_drive_t *, const u8);
- void (*set_dma_mode)(ide_drive_t *, const u8);
+ void (*set_pio_mode)(struct hwif_s *, ide_drive_t *);
+ void (*set_dma_mode)(struct hwif_s *, ide_drive_t *);
int (*reset_poll)(ide_drive_t *);
void (*pre_reset)(ide_drive_t *);
void (*resetproc)(ide_drive_t *);
@@ -1494,7 +1496,6 @@ int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
#ifdef CONFIG_IDE_XFER_MODE
int ide_scan_pio_blacklist(char *);
const char *ide_xfer_verbose(u8);
-u8 ide_get_best_pio_mode(ide_drive_t *, u8, u8);
int ide_pio_need_iordy(ide_drive_t *, const u8);
int ide_set_pio_mode(ide_drive_t *, u8);
int ide_set_dma_mode(ide_drive_t *, u8);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d13492d..707ab12 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -400,7 +400,9 @@ static inline int irq_has_action(unsigned int irq)
/* Dynamic irq helper functions */
extern void dynamic_irq_init(unsigned int irq);
+void dynamic_irq_init_keep_chip_data(unsigned int irq);
extern void dynamic_irq_cleanup(unsigned int irq);
+void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);
/* Set/get chip/data for an IRQ: */
extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 4cf6191..1ec8763 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -69,15 +69,8 @@ extern u8 jbd2_journal_enable_debug;
#define jbd_debug(f, a...) /**/
#endif
-static inline void *jbd2_alloc(size_t size, gfp_t flags)
-{
- return (void *)__get_free_pages(flags, get_order(size));
-}
-
-static inline void jbd2_free(void *ptr, size_t size)
-{
- free_pages((unsigned long)ptr, get_order(size));
-};
+extern void *jbd2_alloc(size_t size, gfp_t flags);
+extern void jbd2_free(void *ptr, size_t size);
#define JBD2_MIN_JOURNAL_BLOCKS 1024
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 1221d23..7f07074 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -44,6 +44,16 @@ extern const char linux_proc_banner[];
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
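
A worked example of the difference (illustrative only): the new macros
require y to be a power of two, while the existing roundup() works for
any y at the cost of a division.

    unsigned long a = round_up(13, 8);   /* ((13-1) | 7) + 1 == 16 */
    unsigned long b = round_down(13, 8); /* 13 & ~7UL        ==  8 */
    unsigned long c = roundup(13, 6);    /* ((13+5)/6)*6     == 18 */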
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index c356b69..03e8e8d 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -199,7 +199,7 @@ extern struct kimage *kexec_crash_image;
*/
extern struct resource crashk_res;
typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4];
-extern note_buf_t *crash_notes;
+extern note_buf_t __percpu *crash_notes;
extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
extern size_t vmcoreinfo_size;
extern size_t vmcoreinfo_max_size;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 1b672f7..e7d1b2e 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -122,6 +122,11 @@ struct kprobe {
/* Kprobe status flags */
#define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */
#define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */
+#define KPROBE_FLAG_OPTIMIZED 4 /*
+ * probe is really optimized.
+ * NOTE:
+ * this flag is only for optimized_kprobe.
+ */
/* Has this kprobe gone ? */
static inline int kprobe_gone(struct kprobe *p)
@@ -134,6 +139,12 @@ static inline int kprobe_disabled(struct kprobe *p)
{
return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
}
+
+/* Is this kprobe really running optimized path ? */
+static inline int kprobe_optimized(struct kprobe *p)
+{
+ return p->flags & KPROBE_FLAG_OPTIMIZED;
+}
/*
* Special probe type that uses setjmp-longjmp type tricks to resume
* execution at a specified entry with a matching prototype corresponding
@@ -249,6 +260,39 @@ extern kprobe_opcode_t *get_insn_slot(void);
extern void free_insn_slot(kprobe_opcode_t *slot, int dirty);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
+#ifdef CONFIG_OPTPROBES
+/*
+ * Internal structure for direct jump optimized probe
+ */
+struct optimized_kprobe {
+ struct kprobe kp;
+ struct list_head list; /* list for optimizing queue */
+ struct arch_optimized_insn optinsn;
+};
+
+/* Architecture dependent functions for direct jump optimization */
+extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn);
+extern int arch_check_optimized_kprobe(struct optimized_kprobe *op);
+extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op);
+extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op);
+extern int arch_optimize_kprobe(struct optimized_kprobe *op);
+extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
+extern kprobe_opcode_t *get_optinsn_slot(void);
+extern void free_optinsn_slot(kprobe_opcode_t *slot, int dirty);
+extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
+ unsigned long addr);
+
+extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
+
+#ifdef CONFIG_SYSCTL
+extern int sysctl_kprobes_optimization;
+extern int proc_kprobes_optimization_handler(struct ctl_table *table,
+ int write, void __user *buffer,
+ size_t *length, loff_t *ppos);
+#endif
+
+#endif /* CONFIG_OPTPROBES */
+
/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);
void kretprobe_hash_lock(struct task_struct *tsk,
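
A hedged sketch of how a module might observe the new flag after
registering a probe (the target symbol is illustrative; note that
optimization happens asynchronously, so the flag may flip later):

    #include <linux/kprobes.h>

    static struct kprobe example_kp = {
        .symbol_name = "do_fork",   /* hypothetical target */
    };

    static int __init example_init(void)
    {
        int ret = register_kprobe(&example_kp);

        if (ret)
            return ret;
        pr_info("kprobe is %soptimized\n",
                kprobe_optimized(&example_kp) ? "" : "not ");
        return 0;
    }

Whether optimization is attempted at all is controlled at runtime by
the sysctl exposed through proc_kprobes_optimization_handler().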
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index a24de0b..60df9c8 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -103,7 +103,7 @@ struct kvm_userspace_memory_region {
/* for kvm_memory_region::flags */
#define KVM_MEM_LOG_DIRTY_PAGES 1UL
-
+#define KVM_MEMSLOT_INVALID (1UL << 1)
/* for KVM_IRQ_LINE */
struct kvm_irq_level {
@@ -497,6 +497,11 @@ struct kvm_ioeventfd {
#endif
#define KVM_CAP_S390_PSW 42
#define KVM_CAP_PPC_SEGSTATE 43
+#define KVM_CAP_HYPERV 44
+#define KVM_CAP_HYPERV_VAPIC 45
+#define KVM_CAP_HYPERV_SPIN 46
+#define KVM_CAP_PCI_SEGMENT 47
+#define KVM_CAP_X86_ROBUST_SINGLESTEP 51
#ifdef KVM_CAP_IRQ_ROUTING
@@ -691,8 +696,9 @@ struct kvm_assigned_pci_dev {
__u32 busnr;
__u32 devfn;
__u32 flags;
+ __u32 segnr;
union {
- __u32 reserved[12];
+ __u32 reserved[11];
};
};
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bd5a616..a3fd0f9 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -38,6 +38,7 @@
#define KVM_REQ_MMU_SYNC 7
#define KVM_REQ_KVMCLOCK_UPDATE 8
#define KVM_REQ_KICK 9
+#define KVM_REQ_DEACTIVATE_FPU 10
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
@@ -57,20 +58,20 @@ struct kvm_io_bus {
struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
-void kvm_io_bus_init(struct kvm_io_bus *bus);
-void kvm_io_bus_destroy(struct kvm_io_bus *bus);
-int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len,
- const void *val);
-int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len,
+enum kvm_bus {
+ KVM_MMIO_BUS,
+ KVM_PIO_BUS,
+ KVM_NR_BUSES
+};
+
+int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ int len, const void *val);
+int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
void *val);
-int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
- struct kvm_io_device *dev);
-int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
+int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev);
-void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
- struct kvm_io_device *dev);
-void kvm_io_bus_unregister_dev(struct kvm *kvm, struct kvm_io_bus *bus,
- struct kvm_io_device *dev);
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev);
struct kvm_vcpu {
struct kvm *kvm;
@@ -83,6 +84,8 @@ struct kvm_vcpu {
struct kvm_run *run;
unsigned long requests;
unsigned long guest_debug;
+ int srcu_idx;
+
int fpu_active;
int guest_fpu_loaded;
wait_queue_head_t wq;
@@ -150,14 +153,19 @@ struct kvm_irq_routing_table {};
#endif
-struct kvm {
- spinlock_t mmu_lock;
- spinlock_t requests_lock;
- struct rw_semaphore slots_lock;
- struct mm_struct *mm; /* userspace tied to this vm */
+struct kvm_memslots {
int nmemslots;
struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
KVM_PRIVATE_MEM_SLOTS];
+};
+
+struct kvm {
+ spinlock_t mmu_lock;
+ raw_spinlock_t requests_lock;
+ struct mutex slots_lock;
+ struct mm_struct *mm; /* userspace tied to this vm */
+ struct kvm_memslots *memslots;
+ struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
u32 bsp_vcpu_id;
struct kvm_vcpu *bsp_vcpu;
@@ -166,8 +174,7 @@ struct kvm {
atomic_t online_vcpus;
struct list_head vm_list;
struct mutex lock;
- struct kvm_io_bus mmio_bus;
- struct kvm_io_bus pio_bus;
+ struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
struct {
spinlock_t lock;
@@ -249,13 +256,20 @@ int kvm_set_memory_region(struct kvm *kvm,
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc);
-int kvm_arch_set_memory_region(struct kvm *kvm,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
+ int user_alloc);
+void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
int user_alloc);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
+gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn);
+
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
@@ -264,6 +278,9 @@ void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn);
+int memslot_id(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
@@ -283,6 +300,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
+unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
@@ -383,6 +401,7 @@ struct kvm_assigned_dev_kernel {
struct work_struct interrupt_work;
struct list_head list;
int assigned_dev_id;
+ int host_segnr;
int host_busnr;
int host_devfn;
unsigned int entries_nr;
@@ -429,8 +448,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
#define KVM_IOMMU_CACHE_COHERENCY 0x1
#ifdef CONFIG_IOMMU_API
-int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
- unsigned long npages);
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
@@ -480,11 +498,6 @@ static inline void kvm_guest_exit(void)
current->flags &= ~PF_VCPU;
}
-static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
-{
- return slot - kvm->memslots;
-}
-
static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
return (gpa_t)gfn << PAGE_SHIFT;
@@ -532,6 +545,10 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
}
#endif
+#ifndef KVM_ARCH_HAS_UNALIAS_INSTANTIATION
+#define unalias_gfn_instantiation unalias_gfn
+#endif
+
#ifdef CONFIG_HAVE_KVM_IRQCHIP
#define KVM_MAX_IRQ_ROUTES 1024
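
With the buses now addressed by index, in-kernel device emulation
registers against an enum rather than a pointer into struct kvm; a
minimal sketch under the new slots_lock mutex (dev is assumed to embed
a struct kvm_io_device named "dev", error handling elided):

    mutex_lock(&kvm->slots_lock);
    ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
    mutex_unlock(&kvm->slots_lock);

    /* ... and later, symmetrically ... */
    mutex_lock(&kvm->slots_lock);
    kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
    mutex_unlock(&kvm->slots_lock);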
diff --git a/include/linux/magic.h b/include/linux/magic.h
index 76285e0..eb9800f 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -52,7 +52,6 @@
#define CGROUP_SUPER_MAGIC 0x27e0eb
#define FUTEXFS_SUPER_MAGIC 0xBAD1DEA
-#define INOTIFYFS_SUPER_MAGIC 0x2BAD1DEA
#define STACK_END_MAGIC 0x57AC6E9D
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h
index 3568040..94cb51a 100644
--- a/include/linux/mfd/mc13783.h
+++ b/include/linux/mfd/mc13783.h
@@ -108,6 +108,8 @@ int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
#define MC13783_REGU_V2 28
#define MC13783_REGU_V3 29
#define MC13783_REGU_V4 30
+#define MC13783_REGU_PWGT1SPI 31
+#define MC13783_REGU_PWGT2SPI 32
#define MC13783_IRQ_ADCDONE 0
#define MC13783_IRQ_ADCBISDONE 1
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8b2fa85..90957f1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -12,6 +12,7 @@
#include <linux/prio_tree.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
+#include <linux/range.h>
struct mempolicy;
struct anon_vma;
@@ -1049,6 +1050,10 @@ extern void get_pfn_range_for_nid(unsigned int nid,
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn);
+int add_from_early_node_map(struct range *range, int az,
+ int nr_range, int nid);
+void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
+ u64 goal, u64 limit);
typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
extern void sparse_memory_present_with_active_regions(int nid);
@@ -1081,11 +1086,7 @@ extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern int after_bootmem;
-#ifdef CONFIG_NUMA
extern void setup_per_cpu_pageset(void);
-#else
-static inline void setup_per_cpu_pageset(void) {}
-#endif
extern void zone_pcp_update(struct zone *zone);
@@ -1321,12 +1322,19 @@ extern int randomize_va_space;
const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);
+void sparse_mem_maps_populate_node(struct page **map_map,
+ unsigned long pnum_begin,
+ unsigned long pnum_end,
+ unsigned long map_count,
+ int nodeid);
+
struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
+void *vmemmap_alloc_block_buf(unsigned long size, int node);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(struct page *start_page,
unsigned long pages, int node);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 30fe668..a01a103 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -184,13 +184,7 @@ struct per_cpu_pageset {
s8 stat_threshold;
s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
-} ____cacheline_aligned_in_smp;
-
-#ifdef CONFIG_NUMA
-#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
-#else
-#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
-#endif
+};
#endif /* !__GENERATING_BOUNDS.H */
@@ -306,10 +300,8 @@ struct zone {
*/
unsigned long min_unmapped_pages;
unsigned long min_slab_pages;
- struct per_cpu_pageset *pageset[NR_CPUS];
-#else
- struct per_cpu_pageset pageset[NR_CPUS];
#endif
+ struct per_cpu_pageset __percpu *pageset;
/*
* free areas of different sizes
*/
@@ -620,7 +612,9 @@ typedef struct pglist_data {
struct page_cgroup *node_page_cgroup;
#endif
#endif
+#ifndef CONFIG_NO_BOOTMEM
struct bootmem_data *bdata;
+#endif
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Must be held any time you expect node_start_pfn, node_present_pages
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index d74785c..0b89efc 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -35,6 +35,7 @@ static inline void get_mnt_ns(struct mnt_namespace *ns)
extern const struct seq_operations mounts_op;
extern const struct seq_operations mountinfo_op;
extern const struct seq_operations mountstats_op;
+extern int mnt_had_events(struct proc_mounts *);
#endif
#endif
diff --git a/include/linux/module.h b/include/linux/module.h
index 6cb1a3c..dd618eb 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -17,7 +17,7 @@
#include <linux/moduleparam.h>
#include <linux/tracepoint.h>
-#include <asm/local.h>
+#include <linux/percpu.h>
#include <asm/module.h>
#include <trace/events/module.h>
@@ -363,11 +363,9 @@ struct module
/* Destruction function. */
void (*exit)(void);
-#ifdef CONFIG_SMP
- char *refptr;
-#else
- local_t ref;
-#endif
+ struct module_ref {
+ int count;
+ } __percpu *refptr;
#endif
#ifdef CONFIG_CONSTRUCTORS
@@ -454,25 +452,16 @@ void __symbol_put(const char *symbol);
#define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
void symbol_put_addr(void *addr);
-static inline local_t *__module_ref_addr(struct module *mod, int cpu)
-{
-#ifdef CONFIG_SMP
- return (local_t *) (mod->refptr + per_cpu_offset(cpu));
-#else
- return &mod->ref;
-#endif
-}
-
/* Sometimes we know we already have a refcount, and it's easier not
to handle the error case (which only happens with rmmod --wait). */
static inline void __module_get(struct module *module)
{
if (module) {
- unsigned int cpu = get_cpu();
- local_inc(__module_ref_addr(module, cpu));
+ preempt_disable();
+ __this_cpu_inc(module->refptr->count);
trace_module_get(module, _THIS_IP_,
- local_read(__module_ref_addr(module, cpu)));
- put_cpu();
+ __this_cpu_read(module->refptr->count));
+ preempt_enable();
}
}
@@ -481,15 +470,17 @@ static inline int try_module_get(struct module *module)
int ret = 1;
if (module) {
- unsigned int cpu = get_cpu();
+ preempt_disable();
+
if (likely(module_is_live(module))) {
- local_inc(__module_ref_addr(module, cpu));
+ __this_cpu_inc(module->refptr->count);
trace_module_get(module, _THIS_IP_,
- local_read(__module_ref_addr(module, cpu)));
+ __this_cpu_read(module->refptr->count));
}
else
ret = 0;
- put_cpu();
+
+ preempt_enable();
}
return ret;
}
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 5d52753..4bd0547 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -34,7 +34,18 @@ struct mnt_namespace;
#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */
#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */
-#define MNT_PNODE_MASK 0x3000 /* propagation flag mask */
+/*
+ * MNT_SHARED_MASK is the set of flags that should be cleared when a
+ * mount becomes shared. Currently, this is only the flag that says a
+ * mount cannot be bind mounted, since this is how we create a mount
+ * that shares events with another mount. If you add a new MNT_*
+ * flag, consider how it interacts with shared mounts.
+ */
+#define MNT_SHARED_MASK (MNT_UNBINDABLE)
+#define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE)
+
+
+#define MNT_INTERNAL 0x4000
struct vfsmount {
struct list_head mnt_hash;
@@ -66,7 +77,7 @@ struct vfsmount {
int mnt_pinned;
int mnt_ghosts;
#ifdef CONFIG_SMP
- int *mnt_writers;
+ int __percpu *mnt_writers;
#else
int mnt_writers;
#endif
@@ -123,7 +134,6 @@ extern int do_add_mount(struct vfsmount *newmnt, struct path *path,
extern void mark_mounts_for_expiry(struct list_head *mounts);
-extern spinlock_t vfsmount_lock;
extern dev_t name_to_dev_t(char *name);
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 34fc6be..6a2e44f 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -105,7 +105,7 @@ struct nfs_server {
struct rpc_clnt * client; /* RPC client handle */
struct rpc_clnt * client_acl; /* ACL RPC client handle */
struct nlm_host *nlm_host; /* NLM client handle */
- struct nfs_iostats * io_stats; /* I/O statistics */
+ struct nfs_iostats __percpu *io_stats; /* I/O statistics */
struct backing_dev_info backing_dev_info;
atomic_long_t writeback; /* number of writeback pages */
int flags; /* various flags */
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 3fe02cf..640702e 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -153,6 +153,7 @@ struct nilfs_super_root {
semantics also for data */
#define NILFS_MOUNT_NORECOVERY 0x4000 /* Disable write access during
mount-time recovery */
+#define NILFS_MOUNT_DISCARD 0x8000 /* Issue DISCARD requests */
/**
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 0be8243..9f688d2 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -770,7 +770,6 @@
#define PCI_VENDOR_ID_TI 0x104c
#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
#define PCI_DEVICE_ID_TI_4450 0x8011
-#define PCI_DEVICE_ID_TI_TSB43AB22 0x8023
#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031
#define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033
#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034
@@ -2333,6 +2332,8 @@
#define PCI_VENDOR_ID_KORENIX 0x1982
#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600
#define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff
+#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700
+#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
#define PCI_VENDOR_ID_QMI 0x1a32
@@ -2697,6 +2698,7 @@
#define PCI_DEVICE_ID_NETMOS_9835 0x9835
#define PCI_DEVICE_ID_NETMOS_9845 0x9845
#define PCI_DEVICE_ID_NETMOS_9855 0x9855
+#define PCI_DEVICE_ID_NETMOS_9865 0x9865
#define PCI_DEVICE_ID_NETMOS_9901 0x9901
#define PCI_VENDOR_ID_3COM_2 0xa727
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 5a5d6ce..68567c0 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -2,12 +2,6 @@
#define _LINUX_PERCPU_DEFS_H
/*
- * Determine the real variable name from the name visible in the
- * kernel sources.
- */
-#define per_cpu_var(var) per_cpu__##var
-
-/*
* Base implementations of per-CPU variable declarations and definitions, where
* the section in which the variable is to be placed is provided by the
* 'sec' argument. This may be used to affect the parameters governing the
@@ -18,13 +12,23 @@
* that section.
*/
#define __PCPU_ATTRS(sec) \
- __attribute__((section(PER_CPU_BASE_SECTION sec))) \
+ __percpu __attribute__((section(PER_CPU_BASE_SECTION sec))) \
PER_CPU_ATTRIBUTES
#define __PCPU_DUMMY_ATTRS \
__attribute__((section(".discard"), unused))
/*
+ * Macro which verifies @ptr is a percpu pointer without evaluating
+ * @ptr. This is to be used in percpu accessors to verify that the
+ * input parameter is a percpu pointer.
+ */
+#define __verify_pcpu_ptr(ptr) do { \
+ const void __percpu *__vpp_verify = (typeof(ptr))NULL; \
+ (void)__vpp_verify; \
+} while (0)
+
+/*
* s390 and alpha modules require percpu variables to be defined as
* weak to force the compiler to generate GOT based external
* references for them. This is necessary because percpu sections
@@ -56,24 +60,24 @@
*/
#define DECLARE_PER_CPU_SECTION(type, name, sec) \
extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
- extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+ extern __PCPU_ATTRS(sec) __typeof__(type) name
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
__PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
- __typeof__(type) per_cpu__##name
+ __typeof__(type) name
#else
/*
* Normal declaration and definition macros.
*/
#define DECLARE_PER_CPU_SECTION(type, name, sec) \
- extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+ extern __PCPU_ATTRS(sec) __typeof__(type) name
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \
- __typeof__(type) per_cpu__##name
+ __typeof__(type) name
#endif
/*
@@ -135,10 +139,16 @@
__aligned(PAGE_SIZE)
/*
- * Intermodule exports for per-CPU variables.
+ * Intermodule exports for per-CPU variables. sparse forgets about
+ * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
+ * noop if __CHECKER__.
*/
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
+#ifndef __CHECKER__
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
+#else
+#define EXPORT_PER_CPU_SYMBOL(var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var)
+#endif
#endif /* _LINUX_PERCPU_DEFS_H */
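
With the per_cpu__ prefix gone, a per-CPU variable is declared and used
under its plain C name, and __verify_pcpu_ptr() lets sparse catch a
plain pointer passed where a __percpu one is expected; a small sketch:

    DEFINE_PER_CPU(int, example_count);

    static void example_bump(void)
    {
        __this_cpu_inc(example_count);  /* no per_cpu__ mangling */
    }
    EXPORT_PER_CPU_SYMBOL(example_count);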
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index cf5efbc..a93e5bf 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -27,10 +27,17 @@
* we force a syntax error here if it isn't.
*/
#define get_cpu_var(var) (*({ \
- extern int simple_identifier_##var(void); \
preempt_disable(); \
&__get_cpu_var(var); }))
-#define put_cpu_var(var) preempt_enable()
+
+/*
+ * The weird & is necessary because sparse considers (void)(var) to be
+ * a direct dereference of percpu variable (var).
+ */
+#define put_cpu_var(var) do { \
+ (void)&(var); \
+ preempt_enable(); \
+} while (0)
#ifdef CONFIG_SMP
@@ -127,9 +134,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
*/
#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
-extern void *__alloc_reserved_percpu(size_t size, size_t align);
-extern void *__alloc_percpu(size_t size, size_t align);
-extern void free_percpu(void *__pdata);
+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
@@ -140,7 +147,7 @@ extern void __init setup_per_cpu_areas(void);
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
-static inline void *__alloc_percpu(size_t size, size_t align)
+static inline void __percpu *__alloc_percpu(size_t size, size_t align)
{
/*
* Can't easily make larger alignment work with kmalloc. WARN
@@ -151,7 +158,7 @@ static inline void *__alloc_percpu(size_t size, size_t align)
return kzalloc(size, GFP_KERNEL);
}
-static inline void free_percpu(void *p)
+static inline void free_percpu(void __percpu *p)
{
kfree(p);
}
@@ -171,7 +178,7 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
#endif /* CONFIG_SMP */
#define alloc_percpu(type) \
- (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
+ (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
/*
* Optional methods for optimized non-lvalue per-cpu variable access.
@@ -188,17 +195,19 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
#ifndef percpu_read
# define percpu_read(var) \
({ \
- typeof(per_cpu_var(var)) __tmp_var__; \
- __tmp_var__ = get_cpu_var(var); \
- put_cpu_var(var); \
- __tmp_var__; \
+ typeof(var) *pr_ptr__ = &(var); \
+ typeof(var) pr_ret__; \
+ pr_ret__ = get_cpu_var(*pr_ptr__); \
+ put_cpu_var(*pr_ptr__); \
+ pr_ret__; \
})
#endif
#define __percpu_generic_to_op(var, val, op) \
do { \
- get_cpu_var(var) op val; \
- put_cpu_var(var); \
+ typeof(var) *pgto_ptr__ = &(var); \
+ get_cpu_var(*pgto_ptr__) op val; \
+ put_cpu_var(*pgto_ptr__); \
} while (0)
#ifndef percpu_write
@@ -234,6 +243,7 @@ extern void __bad_size_call_parameter(void);
#define __pcpu_size_call_return(stem, variable) \
({ typeof(variable) pscr_ret__; \
+ __verify_pcpu_ptr(&(variable)); \
switch(sizeof(variable)) { \
case 1: pscr_ret__ = stem##1(variable);break; \
case 2: pscr_ret__ = stem##2(variable);break; \
@@ -247,6 +257,7 @@ extern void __bad_size_call_parameter(void);
#define __pcpu_size_call(stem, variable, ...) \
do { \
+ __verify_pcpu_ptr(&(variable)); \
switch(sizeof(variable)) { \
case 1: stem##1(variable, __VA_ARGS__);break; \
case 2: stem##2(variable, __VA_ARGS__);break; \
@@ -259,8 +270,7 @@ do { \
/*
* Optimized manipulation for memory allocated through the per cpu
- * allocator or for addresses of per cpu variables (can be determined
- * using per_cpu_var(xx).
+ * allocator or for addresses of per cpu variables.
*
* These operation guarantee exclusivity of access for other operations
* on the *same* processor. The assumption is that per cpu data is only
@@ -311,7 +321,7 @@ do { \
#define _this_cpu_generic_to_op(pcp, val, op) \
do { \
preempt_disable(); \
- *__this_cpu_ptr(&pcp) op val; \
+ *__this_cpu_ptr(&(pcp)) op val; \
preempt_enable(); \
} while (0)
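
The same annotation now covers the dynamic allocator end to end; a
sketch mirroring the module refcount pattern shown above:

    struct example_stats { unsigned long hits; };
    struct example_stats __percpu *st;

    st = alloc_percpu(struct example_stats);
    if (st) {
        preempt_disable();
        __this_cpu_inc(st->hits);
        preempt_enable();
        free_percpu(st);    /* takes a __percpu pointer too */
    }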
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 794662b..c88d67b 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -21,7 +21,7 @@ struct percpu_counter {
#ifdef CONFIG_HOTPLUG_CPU
struct list_head list; /* All percpu_counters are on a list */
#endif
- s32 *counters;
+ s32 __percpu *counters;
};
extern int percpu_counter_batch;
diff --git a/include/linux/range.h b/include/linux/range.h
new file mode 100644
index 0000000..bd184a5
--- /dev/null
+++ b/include/linux/range.h
@@ -0,0 +1,30 @@
+#ifndef _LINUX_RANGE_H
+#define _LINUX_RANGE_H
+
+struct range {
+ u64 start;
+ u64 end;
+};
+
+int add_range(struct range *range, int az, int nr_range,
+ u64 start, u64 end);
+
+
+int add_range_with_merge(struct range *range, int az, int nr_range,
+ u64 start, u64 end);
+
+void subtract_range(struct range *range, int az, u64 start, u64 end);
+
+int clean_sort_range(struct range *range, int az);
+
+void sort_range(struct range *range, int nr_range);
+
+#define MAX_RESOURCE ((resource_size_t)~0)
+static inline resource_size_t cap_resource(u64 val)
+{
+ if (val > MAX_RESOURCE)
+ return MAX_RESOURCE;
+
+ return val;
+}
+#endif
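
A hedged sketch of the intended calling convention, assuming az is the
capacity of the array and the add/clean helpers return the updated
range count:

    #define EXAMPLE_NR 8
    static struct range ranges[EXAMPLE_NR];
    int nr = 0;

    nr = add_range_with_merge(ranges, EXAMPLE_NR, nr, 0x1000, 0x8000);
    subtract_range(ranges, EXAMPLE_NR, 0x2000, 0x3000); /* punch a hole */
    nr = clean_sort_range(ranges, EXAMPLE_NR);  /* sort, drop empties */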
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 030d922..28c9fd0 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -89,8 +89,9 @@
* REGULATION_OUT Regulator output is out of regulation.
* FAIL Regulator output has failed.
* OVER_TEMP Regulator over temp.
- * FORCE_DISABLE Regulator shut down by software.
+ * FORCE_DISABLE Regulator forcibly shut down by software.
* VOLTAGE_CHANGE Regulator voltage changed.
+ * DISABLE Regulator was disabled.
*
* NOTE: These events can be OR'ed together when passed into handler.
*/
@@ -102,6 +103,7 @@
#define REGULATOR_EVENT_OVER_TEMP 0x10
#define REGULATOR_EVENT_FORCE_DISABLE 0x20
#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
+#define REGULATOR_EVENT_DISABLE 0x80
struct regulator;
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 31f2055..592cd7c 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -58,6 +58,9 @@ enum regulator_status {
* @get_optimum_mode: Get the most efficient operating mode for the regulator
* when running with the specified parameters.
*
+ * @enable_time: Time taken for the regulator output voltage to
+ * stabilise after being enabled, in microseconds.
+ *
* @set_suspend_voltage: Set the voltage for the regulator when the system
* is suspended.
* @set_suspend_enable: Mark the regulator as enabled when the system is
@@ -93,6 +96,9 @@ struct regulator_ops {
int (*set_mode) (struct regulator_dev *, unsigned int mode);
unsigned int (*get_mode) (struct regulator_dev *);
+ /* Time taken to enable the regulator */
+ int (*enable_time) (struct regulator_dev *);
+
/* report regulator status ... most other accessors report
* control inputs, this reports results of combining inputs
* from Linux (and other sources) with the actual load.
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index e94a4a1..ffd7d50 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -25,6 +25,7 @@ struct regulator_init_data;
* @microvolts: Output voltage of regulator
* @gpio: GPIO to use for enable control
* set to -EINVAL if not used
+ * @startup_delay: Start-up time in microseconds
* @enable_high: Polarity of enable GPIO
* 1 = Active high, 0 = Active low
* @enabled_at_boot: Whether regulator has been enabled at
@@ -41,6 +42,7 @@ struct fixed_voltage_config {
const char *supply_name;
int microvolts;
int gpio;
+ unsigned startup_delay;
unsigned enable_high:1;
unsigned enabled_at_boot:1;
struct regulator_init_data *init_data;
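
Board code would describe such a supply roughly as follows (values and
names are illustrative, including example_init_data):

    static struct fixed_voltage_config example_fixed = {
        .supply_name     = "vcc_example",
        .microvolts      = 3300000,
        .gpio            = 42,       /* -EINVAL if unused */
        .startup_delay   = 70000,    /* 70 ms until output is stable */
        .enable_high     = 1,
        .enabled_at_boot = 0,
        .init_data       = &example_init_data,
    };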
diff --git a/include/linux/regulator/max8649.h b/include/linux/regulator/max8649.h
new file mode 100644
index 0000000..417d14e
--- /dev/null
+++ b/include/linux/regulator/max8649.h
@@ -0,0 +1,44 @@
+/*
+ * Interface of Maxim max8649
+ *
+ * Copyright (C) 2009-2010 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_REGULATOR_MAX8649_H
+#define __LINUX_REGULATOR_MAX8649_H
+
+#include <linux/regulator/machine.h>
+
+enum {
+ MAX8649_EXTCLK_26MHZ = 0,
+ MAX8649_EXTCLK_13MHZ,
+ MAX8649_EXTCLK_19MHZ, /* 19.2MHz */
+};
+
+enum {
+ MAX8649_RAMP_32MV = 0,
+ MAX8649_RAMP_16MV,
+ MAX8649_RAMP_8MV,
+ MAX8649_RAMP_4MV,
+ MAX8649_RAMP_2MV,
+ MAX8649_RAMP_1MV,
+ MAX8649_RAMP_0_5MV,
+ MAX8649_RAMP_0_25MV,
+};
+
+struct max8649_platform_data {
+ struct regulator_init_data *regulator;
+
+ unsigned mode:2; /* bit[1:0] = VID1,VID0 */
+ unsigned extclk_freq:2;
+ unsigned extclk:1;
+ unsigned ramp_timing:3;
+ unsigned ramp_down:1;
+};
+
+#endif /* __LINUX_REGULATOR_MAX8649_H */
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 1ba3cf6..3b603f4 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -2034,7 +2034,7 @@ void reiserfs_read_locked_inode(struct inode *inode,
int reiserfs_find_actor(struct inode *inode, void *p);
int reiserfs_init_locked_inode(struct inode *inode, void *p);
void reiserfs_delete_inode(struct inode *inode);
-int reiserfs_write_inode(struct inode *inode, int);
+int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc);
int reiserfs_get_block(struct inode *inode, sector_t block,
struct buffer_head *bh_result, int create);
struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
diff --git a/include/linux/security.h b/include/linux/security.h
index 2c627d3..233d20b 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -76,7 +76,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
extern int cap_task_setioprio(struct task_struct *p, int ioprio);
extern int cap_task_setnice(struct task_struct *p, int nice);
-extern int cap_syslog(int type);
+extern int cap_syslog(int type, bool from_file);
extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
struct msghdr;
@@ -95,6 +95,8 @@ struct seq_file;
extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
extern int cap_netlink_recv(struct sk_buff *skb, int cap);
+void reset_security_ops(void);
+
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
@@ -985,6 +987,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Check permissions on incoming network packets. This hook is distinct
* from Netfilter's IP input hooks since it is the first time that the
* incoming sk_buff @skb has been associated with a particular socket, @sk.
+ * Must not sleep inside this hook because some callers hold spinlocks.
* @sk contains the sock (not socket) associated with the incoming sk_buff.
* @skb contains the incoming network data.
* @socket_getpeersec_stream:
@@ -1348,6 +1351,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* logging to the console.
* See the syslog(2) manual page for an explanation of the @type values.
* @type contains the type of action.
+ * @from_file indicates the context of action (if it came from /proc).
* Return 0 if permission is granted.
* @settime:
* Check permission to change the system time.
@@ -1462,7 +1466,7 @@ struct security_operations {
int (*sysctl) (struct ctl_table *table, int op);
int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
int (*quota_on) (struct dentry *dentry);
- int (*syslog) (int type);
+ int (*syslog) (int type, bool from_file);
int (*settime) (struct timespec *ts, struct timezone *tz);
int (*vm_enough_memory) (struct mm_struct *mm, long pages);
@@ -1761,7 +1765,7 @@ int security_acct(struct file *file);
int security_sysctl(struct ctl_table *table, int op);
int security_quotactl(int cmds, int type, int id, struct super_block *sb);
int security_quota_on(struct dentry *dentry);
-int security_syslog(int type);
+int security_syslog(int type, bool from_file);
int security_settime(struct timespec *ts, struct timezone *tz);
int security_vm_enough_memory(long pages);
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
@@ -2007,9 +2011,9 @@ static inline int security_quota_on(struct dentry *dentry)
return 0;
}
-static inline int security_syslog(int type)
+static inline int security_syslog(int type, bool from_file)
{
- return cap_syslog(type);
+ return cap_syslog(type, from_file);
}
static inline int security_settime(struct timespec *ts, struct timezone *tz)
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 3084f80..4d5ecb2 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -33,7 +33,7 @@ struct srcu_struct_array {
struct srcu_struct {
int completed;
- struct srcu_struct_array *per_cpu_ref;
+ struct srcu_struct_array __percpu *per_cpu_ref;
struct mutex mutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
new file mode 100644
index 0000000..3891139
--- /dev/null
+++ b/include/linux/syslog.h
@@ -0,0 +1,52 @@
+/* Syslog internals
+ *
+ * Copyright 2010 Canonical, Ltd.
+ * Author: Kees Cook <kees.cook@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _LINUX_SYSLOG_H
+#define _LINUX_SYSLOG_H
+
+/* Close the log. Currently a NOP. */
+#define SYSLOG_ACTION_CLOSE 0
+/* Open the log. Currently a NOP. */
+#define SYSLOG_ACTION_OPEN 1
+/* Read from the log. */
+#define SYSLOG_ACTION_READ 2
+/* Read all messages remaining in the ring buffer. */
+#define SYSLOG_ACTION_READ_ALL 3
+/* Read and clear all messages remaining in the ring buffer */
+#define SYSLOG_ACTION_READ_CLEAR 4
+/* Clear ring buffer. */
+#define SYSLOG_ACTION_CLEAR 5
+/* Disable printk's to console */
+#define SYSLOG_ACTION_CONSOLE_OFF 6
+/* Enable printk's to console */
+#define SYSLOG_ACTION_CONSOLE_ON 7
+/* Set level of messages printed to console */
+#define SYSLOG_ACTION_CONSOLE_LEVEL 8
+/* Return number of unread characters in the log buffer */
+#define SYSLOG_ACTION_SIZE_UNREAD 9
+/* Return size of the log buffer */
+#define SYSLOG_ACTION_SIZE_BUFFER 10
+
+#define SYSLOG_FROM_CALL 0
+#define SYSLOG_FROM_FILE 1
+
+int do_syslog(int type, char __user *buf, int count, bool from_file);
+
+#endif /* _LINUX_SYSLOG_H */
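
Together with the security hook change above, this lets an LSM tell
/proc/kmsg traffic apart from syslog(2) calls; a hedged sketch of such
a hook, assuming the /proc/kmsg path was already checked at open time:

    static int example_syslog(int type, bool from_file)
    {
        if (from_file == SYSLOG_FROM_FILE &&
            type != SYSLOG_ACTION_OPEN)
            return 0;
        return cap_syslog(type, from_file);
    }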
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 6abfcf5..d96e588 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -68,6 +68,16 @@ struct tty_buffer {
unsigned long data[0];
};
+/*
+ * We default to dicing tty buffer allocations to this many characters
+ * in order to avoid multiple page allocations. We assume tty_buffer itself
+ * is under 256 bytes. See tty_buffer_find for the allocation logic this
+ * must match.
+ */
+
+#define TTY_BUFFER_PAGE ((PAGE_SIZE - 256) / 2)
+
+
struct tty_bufhead {
struct delayed_work work;
spinlock_t lock;
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index eb677cf..9239d03 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -2,8 +2,8 @@
#define _LINUX_TTY_FLIP_H
extern int tty_buffer_request_room(struct tty_struct *tty, size_t size);
-extern int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, size_t size);
extern int tty_insert_flip_string_flags(struct tty_struct *tty, const unsigned char *chars, const char *flags, size_t size);
+extern int tty_insert_flip_string_fixed_flag(struct tty_struct *tty, const unsigned char *chars, char flag, size_t size);
extern int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size);
extern int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size);
void tty_schedule_flip(struct tty_struct *tty);
@@ -20,4 +20,9 @@ static inline int tty_insert_flip_char(struct tty_struct *tty,
return tty_insert_flip_string_flags(tty, &ch, &flag, 1);
}
+static inline int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, size_t size)
+{
+ return tty_insert_flip_string_fixed_flag(tty, chars, TTY_NORMAL, size);
+}
+
#endif /* _LINUX_TTY_FLIP_H */
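
Callers are unaffected; a receive path still looks like this sketch,
with every byte implicitly flagged TTY_NORMAL by the new wrapper:

    static void example_rx(struct tty_struct *tty,
                           const unsigned char *buf, size_t n)
    {
        tty_insert_flip_string(tty, buf, n);
        tty_flip_buffer_push(tty);
    }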
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 332eaea..3492abf 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -122,7 +122,6 @@ enum usb_interface_condition {
* number from the USB core by calling usb_register_dev().
* @condition: binding state of the interface: not bound, binding
* (in probe()), bound to a driver, or unbinding (in disconnect())
- * @is_active: flag set when the interface is bound and not suspended.
* @sysfs_files_created: sysfs attributes exist
* @ep_devs_created: endpoint child pseudo-devices exist
* @unregistering: flag set when the interface is being unregistered
@@ -135,8 +134,7 @@ enum usb_interface_condition {
* @dev: driver model's view of this device
* @usb_dev: if an interface is bound to the USB major, this will point
* to the sysfs representation for that device.
- * @pm_usage_cnt: PM usage counter for this interface; autosuspend is not
- * allowed unless the counter is 0.
+ * @pm_usage_cnt: PM usage counter for this interface
* @reset_ws: Used for scheduling resets from atomic context.
* @reset_running: set to 1 if the interface is currently running a
* queued reset so that usb_cancel_queued_reset() doesn't try to
@@ -184,7 +182,6 @@ struct usb_interface {
int minor; /* minor number this interface is
* bound to */
enum usb_interface_condition condition; /* state of binding */
- unsigned is_active:1; /* the interface is not suspended */
unsigned sysfs_files_created:1; /* the sysfs attributes exist */
unsigned ep_devs_created:1; /* endpoint "devices" exist */
unsigned unregistering:1; /* unregistration is in progress */
@@ -401,7 +398,6 @@ struct usb_tt;
* @portnum: parent port number (origin 1)
* @level: number of USB hub ancestors
* @can_submit: URBs may be submitted
- * @discon_suspended: disconnected while suspended
* @persist_enabled: USB_PERSIST enabled for this device
* @have_langid: whether string_langid is valid
* @authorized: policy has said we can use it;
@@ -421,20 +417,15 @@ struct usb_tt;
* @usbfs_dentry: usbfs dentry entry for the device
* @maxchild: number of ports if hub
* @children: child devices - USB devices that are attached to this hub
- * @pm_usage_cnt: usage counter for autosuspend
* @quirks: quirks of the whole device
* @urbnum: number of URBs submitted for the whole device
* @active_duration: total time device is not suspended
- * @autosuspend: for delayed autosuspends
- * @autoresume: for autoresumes requested while in_interrupt
- * @pm_mutex: protects PM operations
* @last_busy: time of last use
* @autosuspend_delay: in jiffies
* @connect_time: time device was first connected
* @do_remote_wakeup: remote wakeup should be enabled
* @reset_resume: needs reset instead of resume
* @autosuspend_disabled: autosuspend disabled by the user
- * @skip_sys_resume: skip the next system resume
* @wusb_dev: if this is a Wireless USB device, link to the WUSB
* specific data for the device.
* @slot_id: Slot ID assigned by xHCI
@@ -475,7 +466,6 @@ struct usb_device {
u8 level;
unsigned can_submit:1;
- unsigned discon_suspended:1;
unsigned persist_enabled:1;
unsigned have_langid:1;
unsigned authorized:1;
@@ -499,17 +489,12 @@ struct usb_device {
int maxchild;
struct usb_device *children[USB_MAXCHILDREN];
- int pm_usage_cnt;
u32 quirks;
atomic_t urbnum;
unsigned long active_duration;
#ifdef CONFIG_PM
- struct delayed_work autosuspend;
- struct work_struct autoresume;
- struct mutex pm_mutex;
-
unsigned long last_busy;
int autosuspend_delay;
unsigned long connect_time;
@@ -517,7 +502,6 @@ struct usb_device {
unsigned do_remote_wakeup:1;
unsigned reset_resume:1;
unsigned autosuspend_disabled:1;
- unsigned skip_sys_resume:1;
#endif
struct wusb_dev *wusb_dev;
int slot_id;
@@ -542,21 +526,15 @@ extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id);
/* USB autosuspend and autoresume */
#ifdef CONFIG_USB_SUSPEND
+extern int usb_enable_autosuspend(struct usb_device *udev);
+extern int usb_disable_autosuspend(struct usb_device *udev);
+
extern int usb_autopm_get_interface(struct usb_interface *intf);
extern void usb_autopm_put_interface(struct usb_interface *intf);
extern int usb_autopm_get_interface_async(struct usb_interface *intf);
extern void usb_autopm_put_interface_async(struct usb_interface *intf);
-
-static inline void usb_autopm_get_interface_no_resume(
- struct usb_interface *intf)
-{
- atomic_inc(&intf->pm_usage_cnt);
-}
-static inline void usb_autopm_put_interface_no_suspend(
- struct usb_interface *intf)
-{
- atomic_dec(&intf->pm_usage_cnt);
-}
+extern void usb_autopm_get_interface_no_resume(struct usb_interface *intf);
+extern void usb_autopm_put_interface_no_suspend(struct usb_interface *intf);
static inline void usb_mark_last_busy(struct usb_device *udev)
{
@@ -565,6 +543,11 @@ static inline void usb_mark_last_busy(struct usb_device *udev)
#else
+static inline int usb_enable_autosuspend(struct usb_device *udev)
+{ return 0; }
+static inline int usb_disable_autosuspend(struct usb_device *udev)
+{ return 0; }
+
static inline int usb_autopm_get_interface(struct usb_interface *intf)
{ return 0; }
static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
@@ -1583,14 +1566,18 @@ extern void usb_register_notify(struct notifier_block *nb);
extern void usb_unregister_notify(struct notifier_block *nb);
#ifdef DEBUG
-#define dbg(format, arg...) printk(KERN_DEBUG "%s: " format "\n" , \
- __FILE__ , ## arg)
+#define dbg(format, arg...) \
+ printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg)
#else
-#define dbg(format, arg...) do {} while (0)
+#define dbg(format, arg...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
+} while (0)
#endif
-#define err(format, arg...) printk(KERN_ERR KBUILD_MODNAME ": " \
- format "\n" , ## arg)
+#define err(format, arg...) \
+ printk(KERN_ERR KBUILD_MODNAME ": " format "\n", ##arg)
/* debugfs stuff */
extern struct dentry *usb_debug_root;
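
Interface drivers keep the same usage-count discipline, now without the
open-coded atomics; a minimal sketch of wrapping I/O in a PM reference:

    static int example_do_io(struct usb_interface *intf)
    {
        int ret = usb_autopm_get_interface(intf); /* resumes if needed */

        if (ret)
            return ret;
        /* ... submit and wait for URBs ... */
        usb_autopm_put_interface(intf);
        return 0;
    }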
diff --git a/include/linux/usb/Kbuild b/include/linux/usb/Kbuild
index 54c4463..29fd73b 100644
--- a/include/linux/usb/Kbuild
+++ b/include/linux/usb/Kbuild
@@ -5,4 +5,3 @@ header-y += gadgetfs.h
header-y += midi.h
header-y += g_printer.h
header-y += tmc.h
-header-y += vstusb.h
diff --git a/include/linux/usb/atmel_usba_udc.h b/include/linux/usb/atmel_usba_udc.h
index 6311fa2..baf41c8 100644
--- a/include/linux/usb/atmel_usba_udc.h
+++ b/include/linux/usb/atmel_usba_udc.h
@@ -15,6 +15,7 @@ struct usba_ep_data {
struct usba_platform_data {
int vbus_pin;
+ int vbus_pin_inverted;
int num_ep;
struct usba_ep_data ep[0];
};
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 94012e6..e58369f 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -775,7 +775,7 @@ enum usb_device_speed {
USB_SPEED_UNKNOWN = 0, /* enumerating */
USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */
USB_SPEED_HIGH, /* usb 2.0 */
- USB_SPEED_VARIABLE, /* wireless (usb 2.5) */
+ USB_SPEED_WIRELESS, /* wireless (usb 2.5) */
USB_SPEED_SUPER, /* usb 3.0 */
};
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 5dc2f22..7acef02 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -30,26 +30,26 @@ struct musb_hdrc_eps_bits {
struct musb_hdrc_config {
/* MUSB configuration-specific details */
unsigned multipoint:1; /* multipoint device */
- unsigned dyn_fifo:1; /* supports dynamic fifo sizing */
- unsigned soft_con:1; /* soft connect required */
- unsigned utm_16:1; /* utm data width is 16 bits */
+ unsigned dyn_fifo:1 __deprecated; /* supports dynamic fifo sizing */
+ unsigned soft_con:1 __deprecated; /* soft connect required */
+ unsigned utm_16:1 __deprecated; /* utm data width is 16 bits */
unsigned big_endian:1; /* true if CPU uses big-endian */
unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */
unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */
unsigned high_iso_tx:1; /* Tx ep required for HB iso */
unsigned high_iso_rx:1; /* Rx ep required for HD iso */
- unsigned dma:1; /* supports DMA */
- unsigned vendor_req:1; /* vendor registers required */
+ unsigned dma:1 __deprecated; /* supports DMA */
+ unsigned vendor_req:1 __deprecated; /* vendor registers required */
u8 num_eps; /* number of endpoints _with_ ep0 */
- u8 dma_channels; /* number of dma channels */
+ u8 dma_channels __deprecated; /* number of dma channels */
u8 dyn_fifo_size; /* dynamic size in bytes */
- u8 vendor_ctrl; /* vendor control reg width */
- u8 vendor_stat; /* vendor status reg width */
- u8 dma_req_chan; /* bitmask for required dma channels */
+ u8 vendor_ctrl __deprecated; /* vendor control reg width */
+ u8 vendor_stat __deprecated; /* vendor status reg width */
+ u8 dma_req_chan __deprecated; /* bitmask for required dma channels */
u8 ram_bits; /* ram address size */
- struct musb_hdrc_eps_bits *eps_bits;
+ struct musb_hdrc_eps_bits *eps_bits __deprecated;
#ifdef CONFIG_BLACKFIN
/* A GPIO controlling VRSEL in Blackfin */
unsigned int gpio_vrsel;
@@ -76,6 +76,9 @@ struct musb_hdrc_platform_data {
/* (HOST or OTG) msec/2 after VBUS on till power good */
u8 potpgt;
+ /* (HOST or OTG) program PHY for external Vbus */
+ unsigned extvbus:1;
+
/* Power the device on or off */
int (*set_power)(int state);
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index fef0972..f8302d0 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -9,6 +9,8 @@
#ifndef __LINUX_USB_OTG_H
#define __LINUX_USB_OTG_H
+#include <linux/notifier.h>
+
/* OTG defines lots of enumeration states before device reset */
enum usb_otg_state {
OTG_STATE_UNDEFINED = 0,
@@ -33,6 +35,14 @@ enum usb_otg_state {
OTG_STATE_A_VBUS_ERR,
};
+enum usb_xceiv_events {
+ USB_EVENT_NONE, /* no events or cable disconnected */
+ USB_EVENT_VBUS, /* vbus valid event */
+ USB_EVENT_ID, /* id was grounded */
+ USB_EVENT_CHARGER, /* usb dedicated charger */
+ USB_EVENT_ENUMERATED, /* gadget driver enumerated */
+};
+
#define USB_OTG_PULLUP_ID (1 << 0)
#define USB_OTG_PULLDOWN_DP (1 << 1)
#define USB_OTG_PULLDOWN_DM (1 << 2)
@@ -70,6 +80,9 @@ struct otg_transceiver {
struct otg_io_access_ops *io_ops;
void __iomem *io_priv;
+ /* for notification of usb_xceiv_events */
+ struct blocking_notifier_head notifier;
+
/* to pass extra port status to the root hub */
u16 port_status;
u16 port_change;
@@ -213,6 +226,18 @@ otg_start_srp(struct otg_transceiver *otg)
return otg->start_srp(otg);
}
+/* notifiers */
+static inline int
+otg_register_notifier(struct otg_transceiver *otg, struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&otg->notifier, nb);
+}
+
+static inline void
+otg_unregister_notifier(struct otg_transceiver *otg, struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&otg->notifier, nb);
+}
/* for OTG controller drivers (and maybe other stuff) */
extern int usb_bus_start_enum(struct usb_bus *bus, unsigned port_num);
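
A charger or gadget driver interested in these events would hang a
standard notifier block on the transceiver; a hedged sketch:

    static int example_otg_event(struct notifier_block *nb,
                                 unsigned long event, void *unused)
    {
        if (event == USB_EVENT_CHARGER)
            pr_info("dedicated charger attached\n");
        return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
        .notifier_call = example_otg_event,
    };

    /* otg_register_notifier(xceiv, &example_nb); */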
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 2526f3b..0a555dd 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -19,4 +19,7 @@
/* device can't handle its Configuration or Interface strings */
#define USB_QUIRK_CONFIG_INTF_STRINGS 0x00000008
+/* device will morph if reset, don't use reset for handling errors */
+#define USB_QUIRK_RESET_MORPHS 0x00000010
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 1819396..0a458b8 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -351,14 +351,11 @@ static inline void usb_serial_debug_data(int debug,
/* Use our own dbg macro */
#undef dbg
-#define dbg(format, arg...) \
- do { \
- if (debug) \
- printk(KERN_DEBUG "%s: " format "\n" , __FILE__ , \
- ## arg); \
- } while (0)
-
-
+#define dbg(format, arg...) \
+do { \
+ if (debug) \
+ printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
+} while (0)
#endif /* __LINUX_USB_SERIAL_H */
diff --git a/include/linux/usb/vstusb.h b/include/linux/usb/vstusb.h
deleted file mode 100644
index 1cfac67..0000000
--- a/include/linux/usb/vstusb.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*****************************************************************************
- * File: drivers/usb/misc/vstusb.h
- *
- * Purpose: Support for the bulk USB Vernier Spectrophotometers
- *
- * Author: EQware Engineering, Inc.
- * Oregon City, OR, USA 97045
- *
- * Copyright: 2007, 2008
- * Vernier Software & Technology
- * Beaverton, OR, USA 97005
- *
- * Web: www.vernier.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *****************************************************************************/
-/*****************************************************************************
- *
- * The vstusb module is a standard usb 'client' driver running on top of the
- * standard usb host controller stack.
- *
- * In general, vstusb supports standard bulk usb pipes. It supports multiple
- * devices and multiple pipes per device.
- *
- * The vstusb driver supports two interfaces:
- * 1 - ioctl SEND_PIPE/RECV_PIPE - a general bulk write/read msg
- * interface to any pipe with timeout support;
- * 2 - standard read/write with ioctl config - offers standard read/write
- * interface with ioctl configured pipes and timeouts.
- *
- * Both interfaces can be signal from other process and will abort its i/o
- * operation.
- *
- * A timeout of 0 means NO timeout. The user can still terminate the read via
- * signal.
- *
- * If using multiple threads with this driver, the user should ensure that
- * any reads, writes, or ioctls are complete before closing the device.
- * Changing read/write timeouts or pipes takes effect on next read/write.
- *
- *****************************************************************************/
-
-struct vstusb_args {
- union {
- /* this struct is used for IOCTL_VSTUSB_SEND_PIPE, *
- * IOCTL_VSTUSB_RECV_PIPE, and read()/write() fops */
- struct {
- void __user *buffer;
- size_t count;
- unsigned int timeout_ms;
- int pipe;
- };
-
- /* this one is used for IOCTL_VSTUSB_CONFIG_RW */
- struct {
- int rd_pipe;
- int rd_timeout_ms;
- int wr_pipe;
- int wr_timeout_ms;
- };
- };
-};
-
-#define VST_IOC_MAGIC 'L'
-#define VST_IOC_FIRST 0x20
-#define IOCTL_VSTUSB_SEND_PIPE _IO(VST_IOC_MAGIC, VST_IOC_FIRST)
-#define IOCTL_VSTUSB_RECV_PIPE _IO(VST_IOC_MAGIC, VST_IOC_FIRST + 1)
-#define IOCTL_VSTUSB_CONFIG_RW _IO(VST_IOC_MAGIC, VST_IOC_FIRST + 2)
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
new file mode 100644
index 0000000..ae9ab13
--- /dev/null
+++ b/include/linux/vga_switcheroo.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2010 Red Hat Inc.
+ * Author : Dave Airlie <airlied@redhat.com>
+ *
+ * Licensed under GPLv2
+ *
+ * vga_switcheroo.h - Support for laptops with dual GPUs using one set of outputs
+ */
+
+#include <linux/fb.h>
+
+enum vga_switcheroo_state {
+ VGA_SWITCHEROO_OFF,
+ VGA_SWITCHEROO_ON,
+};
+
+enum vga_switcheroo_client_id {
+ VGA_SWITCHEROO_IGD,
+ VGA_SWITCHEROO_DIS,
+ VGA_SWITCHEROO_MAX_CLIENTS,
+};
+
+struct vga_switcheroo_handler {
+ int (*switchto)(enum vga_switcheroo_client_id id);
+ int (*power_state)(enum vga_switcheroo_client_id id,
+ enum vga_switcheroo_state state);
+ int (*init)(void);
+ int (*get_client_id)(struct pci_dev *pdev);
+};
+
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+void vga_switcheroo_unregister_client(struct pci_dev *dev);
+int vga_switcheroo_register_client(struct pci_dev *dev,
+ void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
+ bool (*can_switch)(struct pci_dev *dev));
+
+void vga_switcheroo_client_fb_set(struct pci_dev *dev,
+ struct fb_info *info);
+
+int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler);
+void vga_switcheroo_unregister_handler(void);
+
+int vga_switcheroo_process_delayed_switch(void);
+
+#else
+
+static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
+static inline int vga_switcheroo_register_client(struct pci_dev *dev,
+ void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
+ bool (*can_switch)(struct pci_dev *dev)) { return 0; }
+static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
+static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
+static inline void vga_switcheroo_unregister_handler(void) {}
+static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
+
+#endif
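For a GPU driver, opting in means supplying two callbacks: one that powers the device up or down on a switch, and one that says whether a switch away is currently safe. A minimal sketch under assumed names (mydrm_* is hypothetical; the pci_set_power_state() calls stand in for whatever the driver's real suspend/resume paths would be):

static void mydrm_set_gpu_state(struct pci_dev *pdev,
				enum vga_switcheroo_state state)
{
	if (state == VGA_SWITCHEROO_ON)
		pci_set_power_state(pdev, PCI_D0);	/* power the GPU up */
	else
		pci_set_power_state(pdev, PCI_D3hot);	/* power it down */
}

static bool mydrm_can_switch(struct pci_dev *pdev)
{
	/* refuse to switch while userspace still holds the device open */
	return mydrm_open_count(pdev) == 0;	/* hypothetical helper */
}

/* at driver load */
err = vga_switcheroo_register_client(pdev, mydrm_set_gpu_state,
				     mydrm_can_switch);

Because the !CONFIG_VGA_SWITCHEROO stubs above return success and do nothing, callers need no #ifdef guards.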
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index ee03bba..117f0dd 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -78,22 +78,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
static inline void __count_vm_event(enum vm_event_item item)
{
- __this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
+ __this_cpu_inc(vm_event_states.event[item]);
}
static inline void count_vm_event(enum vm_event_item item)
{
- this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
+ this_cpu_inc(vm_event_states.event[item]);
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
- __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
+ __this_cpu_add(vm_event_states.event[item], delta);
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
- this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
+ this_cpu_add(vm_event_states.event[item], delta);
}
extern void all_vm_events(unsigned long *);
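Callers are unaffected by the accessor change; the usual pattern stays the same, e.g. in the page allocator:

	count_vm_event(PGFREE);		/* irq-safe per-cpu increment */
	__count_vm_event(PGFREE);	/* caller already non-preemptible */

where PGFREE is a member of enum vm_event_item; only the expansion underneath now uses the plain per-cpu symbol.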
diff --git a/include/linux/vt.h b/include/linux/vt.h
index d5dd0bc..778b7b2 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -27,7 +27,7 @@ struct vt_mode {
#define VT_SETMODE 0x5602 /* set mode of active vt */
#define VT_AUTO 0x00 /* auto vt switching */
#define VT_PROCESS 0x01 /* process controls switching */
-#define VT_ACKACQ 0x02 /* acknowledge switch */
+#define VT_PROCESS_AUTO 0x02 /* process is notified of switching */
struct vt_stat {
unsigned short v_active; /* active vt */
@@ -38,6 +38,7 @@ struct vt_stat {
#define VT_SENDSIG 0x5604 /* signal to send to bitmask of vts */
#define VT_RELDISP 0x5605 /* release display */
+#define VT_ACKACQ 0x02 /* acknowledge switch */
#define VT_ACTIVATE 0x5606 /* make vt active */
#define VT_WAITACTIVE 0x5607 /* wait for vt active */
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index d7fc45c..cbb50f4 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -232,6 +232,7 @@ void ib_unpack(const struct ib_field *desc,
void ib_ud_header_init(int payload_bytes,
int grh_present,
+ int immediate_present,
struct ib_ud_header *header);
int ib_ud_header_pack(struct ib_ud_header *header,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 09509ed..a585e0f 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -984,9 +984,9 @@ struct ib_device {
struct list_head event_handler_list;
spinlock_t event_handler_lock;
+ spinlock_t client_data_lock;
struct list_head core_list;
struct list_head client_data_list;
- spinlock_t client_data_lock;
struct ib_cache cache;
int *pkey_tbl_len;
@@ -1144,8 +1144,8 @@ struct ib_device {
IB_DEV_UNREGISTERED
} reg_state;
- u64 uverbs_cmd_mask;
int uverbs_abi_ver;
+ u64 uverbs_cmd_mask;
char node_desc[64];
__be64 node_guid;
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index c6b2962..4fae903 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -67,7 +67,6 @@ enum rdma_port_space {
RDMA_PS_IPOIB = 0x0002,
RDMA_PS_TCP = 0x0106,
RDMA_PS_UDP = 0x0111,
- RDMA_PS_SCTP = 0x0183
};
struct rdma_addr {
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index d0b6cd3..2aa6aa3 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -874,6 +874,107 @@ TRACE_EVENT(ext4_forget,
__entry->mode, __entry->is_metadata, __entry->block)
);
+TRACE_EVENT(ext4_da_update_reserve_space,
+ TP_PROTO(struct inode *inode, int used_blocks),
+
+ TP_ARGS(inode, used_blocks),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( umode_t, mode )
+ __field( __u64, i_blocks )
+ __field( int, used_blocks )
+ __field( int, reserved_data_blocks )
+ __field( int, reserved_meta_blocks )
+ __field( int, allocated_meta_blocks )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
+ __entry->i_blocks = inode->i_blocks;
+ __entry->used_blocks = used_blocks;
+ __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+ __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
+ __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
+ ),
+
+ TP_printk("dev %s ino %lu mode 0%o i_blocks %llu used_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
+ jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ __entry->mode, (unsigned long long) __entry->i_blocks,
+ __entry->used_blocks, __entry->reserved_data_blocks,
+ __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
+);
+
+TRACE_EVENT(ext4_da_reserve_space,
+ TP_PROTO(struct inode *inode, int md_needed),
+
+ TP_ARGS(inode, md_needed),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( umode_t, mode )
+ __field( __u64, i_blocks )
+ __field( int, md_needed )
+ __field( int, reserved_data_blocks )
+ __field( int, reserved_meta_blocks )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
+ __entry->i_blocks = inode->i_blocks;
+ __entry->md_needed = md_needed;
+ __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+ __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
+ ),
+
+ TP_printk("dev %s ino %lu mode 0%o i_blocks %llu md_needed %d reserved_data_blocks %d reserved_meta_blocks %d",
+ jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ __entry->mode, (unsigned long long) __entry->i_blocks,
+ __entry->md_needed, __entry->reserved_data_blocks,
+ __entry->reserved_meta_blocks)
+);
+
+TRACE_EVENT(ext4_da_release_space,
+ TP_PROTO(struct inode *inode, int freed_blocks),
+
+ TP_ARGS(inode, freed_blocks),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( umode_t, mode )
+ __field( __u64, i_blocks )
+ __field( int, freed_blocks )
+ __field( int, reserved_data_blocks )
+ __field( int, reserved_meta_blocks )
+ __field( int, allocated_meta_blocks )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
+ __entry->i_blocks = inode->i_blocks;
+ __entry->freed_blocks = freed_blocks;
+ __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+ __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
+ __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
+ ),
+
+ TP_printk("dev %s ino %lu mode 0%o i_blocks %llu freed_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
+ jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ __entry->mode, (unsigned long long) __entry->i_blocks,
+ __entry->freed_blocks, __entry->reserved_data_blocks,
+ __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
+);
+
+
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
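Each TRACE_EVENT above materializes a trace_<name>() hook for fs/ext4 to call at the matching point in the delayed-allocation path; presumably the reservation side fires it right after the metadata estimate is made, along the lines of:

	/* md_needed: worst-case metadata blocks for this data block */
	trace_ext4_da_reserve_space(inode, md_needed);

A disabled tracepoint costs a single conditional branch, so the hooks are cheap enough to leave in hot paths.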
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index 96b370a..bf16545 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -199,6 +199,34 @@ TRACE_EVENT(jbd2_checkpoint_stats,
__entry->forced_to_close, __entry->written, __entry->dropped)
);
+TRACE_EVENT(jbd2_cleanup_journal_tail,
+
+ TP_PROTO(journal_t *journal, tid_t first_tid,
+ unsigned long block_nr, unsigned long freed),
+
+ TP_ARGS(journal, first_tid, block_nr, freed),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( tid_t, tail_sequence )
+ __field( tid_t, first_tid )
+ __field(unsigned long, block_nr )
+ __field(unsigned long, freed )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->tail_sequence = journal->j_tail_sequence;
+ __entry->first_tid = first_tid;
+ __entry->block_nr = block_nr;
+ __entry->freed = freed;
+ ),
+
+ TP_printk("dev %s from %u to %u offset %lu freed %lu",
+ jbd2_dev_to_name(__entry->dev), __entry->tail_sequence,
+ __entry->first_tid, __entry->block_nr, __entry->freed)
+);
+
#endif /* _TRACE_JBD2_H */
/* This part must be outside protection */
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index dbe1084..b17d49d 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -145,6 +145,47 @@ TRACE_EVENT(kvm_mmio,
__entry->len, __entry->gpa, __entry->val)
);
+#define kvm_fpu_load_symbol \
+ {0, "unload"}, \
+ {1, "load"}
+
+TRACE_EVENT(kvm_fpu,
+ TP_PROTO(int load),
+ TP_ARGS(load),
+
+ TP_STRUCT__entry(
+ __field( u32, load )
+ ),
+
+ TP_fast_assign(
+ __entry->load = load;
+ ),
+
+ TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
+);
+
+TRACE_EVENT(kvm_age_page,
+ TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
+ TP_ARGS(hva, slot, ref),
+
+ TP_STRUCT__entry(
+ __field( u64, hva )
+ __field( u64, gfn )
+ __field( u8, referenced )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ __entry->gfn =
+ slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
+ __entry->referenced = ref;
+ ),
+
+ TP_printk("hva %llx gfn %llx %s",
+ __entry->hva, __entry->gfn,
+ __entry->referenced ? "YOUNG" : "OLD")
+);
+
#endif /* _TRACE_KVM_MAIN_H */
/* This part must be outside protection */
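These generate trace_kvm_fpu() and trace_kvm_age_page() for the KVM core; a sketch of the intended call sites, assuming they live in the FPU switch and MMU-notifier aging paths respectively:

	trace_kvm_fpu(1);		/* about to load guest FPU state */
	...
	trace_kvm_fpu(0);		/* guest FPU state unloaded */

	/* ref = whether the page was found accessed (young) */
	trace_kvm_age_page(hva, memslot, ref);

__print_symbolic() then renders the load argument as "load"/"unload" in the trace buffer.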
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 614241b..2b10853 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -30,11 +30,7 @@ static int __init do_linuxrc(void * shell)
extern char * envp_init[];
sys_close(old_fd);sys_close(root_fd);
- sys_close(0);sys_close(1);sys_close(2);
sys_setsid();
- (void) sys_open("/dev/console",O_RDWR,0);
- (void) sys_dup(0);
- (void) sys_dup(0);
return kernel_execve(shell, argv, envp_init);
}
diff --git a/init/main.c b/init/main.c
index c75dcd6..40aaa02 100644
--- a/init/main.c
+++ b/init/main.c
@@ -149,6 +149,20 @@ static int __init nosmp(char *str)
early_param("nosmp", nosmp);
+/* this is a hard limit */
+static int __init nrcpus(char *str)
+{
+ int nr_cpus;
+
+ get_option(&str, &nr_cpus);
+ if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
+ nr_cpu_ids = nr_cpus;
+
+ return 0;
+}
+
+early_param("nr_cpus", nrcpus);
+
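Unlike the existing maxcpus= (which only limits how many CPUs are brought up at boot, and still sizes per-cpu data for all possible CPUs), nr_cpus= lowers nr_cpu_ids itself, so per-cpu allocations never cover the excess CPUs. For example, booting a 64-way box with

	nr_cpus=4

caps nr_cpu_ids at 4 for the whole lifetime of the kernel.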
static int __init maxcpus(char *str)
{
get_option(&str, &setup_max_cpus);
@@ -586,6 +600,7 @@ asmlinkage void __init start_kernel(void)
local_irq_disable();
}
rcu_init();
+ radix_tree_init();
/* init some links before init_ISA_irqs() */
early_irq_init();
init_IRQ();
@@ -659,7 +674,6 @@ asmlinkage void __init start_kernel(void)
proc_caches_init();
buffer_init();
key_init();
- radix_tree_init();
security_init();
vfs_caches_init(totalram_pages);
signals_init();
@@ -808,11 +822,6 @@ static noinline int init_post(void)
system_state = SYSTEM_RUNNING;
numa_default_policy();
- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
- printk(KERN_WARNING "Warning: unable to open an initial console.\n");
-
- (void) sys_dup(0);
- (void) sys_dup(0);
current->signal->flags |= SIGNAL_UNKILLABLE;
@@ -875,6 +884,12 @@ static int __init kernel_init(void * unused)
do_basic_setup();
+ /* Open /dev/console on the rootfs; this should never fail */
+ if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+ printk(KERN_WARNING "Warning: unable to open an initial console.\n");
+
+ (void) sys_dup(0);
+ (void) sys_dup(0);
/*
* check if there is an early userspace init. If yes, let it do all
* the work
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index c79bd57..b6cb064 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -134,7 +134,6 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
init_waitqueue_head(&info->wait_q);
INIT_LIST_HEAD(&info->e_wait_q[0].list);
INIT_LIST_HEAD(&info->e_wait_q[1].list);
- info->messages = NULL;
info->notify_owner = NULL;
info->qsize = 0;
info->user = NULL; /* set when all is ok */
@@ -146,6 +145,10 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
info->attr.mq_msgsize = attr->mq_msgsize;
}
mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
+ info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
+ if (!info->messages)
+ goto out_inode;
+
mq_bytes = (mq_msg_tblsz +
(info->attr.mq_maxmsg * info->attr.mq_msgsize));
@@ -154,18 +157,12 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
u->mq_bytes + mq_bytes >
p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
spin_unlock(&mq_lock);
+ kfree(info->messages);
goto out_inode;
}
u->mq_bytes += mq_bytes;
spin_unlock(&mq_lock);
- info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
- if (!info->messages) {
- spin_lock(&mq_lock);
- u->mq_bytes -= mq_bytes;
- spin_unlock(&mq_lock);
- goto out_inode;
- }
/* all is ok */
info->user = get_uid(u);
} else if (S_ISDIR(mode)) {
@@ -187,7 +184,7 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *inode;
struct ipc_namespace *ns = data;
- int error = 0;
+ int error;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -205,7 +202,9 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
if (!sb->s_root) {
iput(inode);
error = -ENOMEM;
+ goto out;
}
+ error = 0;
out:
return error;
@@ -264,8 +263,9 @@ static void mqueue_delete_inode(struct inode *inode)
clear_inode(inode);
- mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) +
- (info->attr.mq_maxmsg * info->attr.mq_msgsize));
+ /* Total amount of bytes accounted for the mqueue */
+ mq_bytes = info->attr.mq_maxmsg * (sizeof(struct msg_msg *)
+ + info->attr.mq_msgsize);
user = info->user;
if (user) {
spin_lock(&mq_lock);
@@ -604,8 +604,8 @@ static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
/* check for overflow */
if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
return 0;
- if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) +
- (attr->mq_maxmsg * sizeof (struct msg_msg *)) <
+ if ((unsigned long)(attr->mq_maxmsg * (attr->mq_msgsize
+ + sizeof (struct msg_msg *))) <
(unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
return 0;
return 1;
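The factored form guards the same quantity that mqueue_get_inode() later accounts: mq_maxmsg * (mq_msgsize + sizeof(struct msg_msg *)). A worked example of the case only the second check catches, assuming a 32-bit unsigned long and 4-byte pointers: with mq_maxmsg = 65536 and mq_msgsize = 65532, msgsize stays under ULONG_MAX/maxmsg = 65535, so the first check passes; but 65536 * (65532 + 4) = 2^32 wraps to 0, which is smaller than maxmsg * msgsize, and the comparison rejects the attributes.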
@@ -623,9 +623,10 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
int ret;
if (attr) {
- ret = -EINVAL;
- if (!mq_attr_ok(ipc_ns, attr))
+ if (!mq_attr_ok(ipc_ns, attr)) {
+ ret = -EINVAL;
goto out;
+ }
/* store for use during create */
dentry->d_fsdata = attr;
}
@@ -659,24 +660,28 @@ out:
static struct file *do_open(struct ipc_namespace *ipc_ns,
struct dentry *dentry, int oflag)
{
+ int ret;
const struct cred *cred = current_cred();
static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
MAY_READ | MAY_WRITE };
if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
- dput(dentry);
- mntput(ipc_ns->mq_mnt);
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto err;
}
if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
- dput(dentry);
- mntput(ipc_ns->mq_mnt);
- return ERR_PTR(-EACCES);
+ ret = -EACCES;
+ goto err;
}
return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
+
+err:
+ dput(dentry);
+ mntput(ipc_ns->mq_mnt);
+ return ERR_PTR(ret);
}
SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
@@ -705,16 +710,17 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
if (IS_ERR(dentry)) {
error = PTR_ERR(dentry);
- goto out_err;
+ goto out_putfd;
}
mntget(ipc_ns->mq_mnt);
if (oflag & O_CREAT) {
if (dentry->d_inode) { /* entry already exists */
audit_inode(name, dentry);
- error = -EEXIST;
- if (oflag & O_EXCL)
+ if (oflag & O_EXCL) {
+ error = -EEXIST;
goto out;
+ }
filp = do_open(ipc_ns, dentry, oflag);
} else {
filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root,
@@ -722,9 +728,10 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
u_attr ? &attr : NULL);
}
} else {
- error = -ENOENT;
- if (!dentry->d_inode)
+ if (!dentry->d_inode) {
+ error = -ENOENT;
goto out;
+ }
audit_inode(name, dentry);
filp = do_open(ipc_ns, dentry, oflag);
}
@@ -742,7 +749,6 @@ out:
mntput(ipc_ns->mq_mnt);
out_putfd:
put_unused_fd(fd);
-out_err:
fd = error;
out_upsem:
mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
@@ -872,19 +878,24 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
audit_mq_sendrecv(mqdes, msg_len, msg_prio, p);
timeout = prepare_timeout(p);
- ret = -EBADF;
filp = fget(mqdes);
- if (unlikely(!filp))
+ if (unlikely(!filp)) {
+ ret = -EBADF;
goto out;
+ }
inode = filp->f_path.dentry->d_inode;
- if (unlikely(filp->f_op != &mqueue_file_operations))
+ if (unlikely(filp->f_op != &mqueue_file_operations)) {
+ ret = -EBADF;
goto out_fput;
+ }
info = MQUEUE_I(inode);
audit_inode(NULL, filp->f_path.dentry);
- if (unlikely(!(filp->f_mode & FMODE_WRITE)))
+ if (unlikely(!(filp->f_mode & FMODE_WRITE))) {
+ ret = -EBADF;
goto out_fput;
+ }
if (unlikely(msg_len > info->attr.mq_msgsize)) {
ret = -EMSGSIZE;
@@ -961,19 +972,24 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
audit_mq_sendrecv(mqdes, msg_len, 0, p);
timeout = prepare_timeout(p);
- ret = -EBADF;
filp = fget(mqdes);
- if (unlikely(!filp))
+ if (unlikely(!filp)) {
+ ret = -EBADF;
goto out;
+ }
inode = filp->f_path.dentry->d_inode;
- if (unlikely(filp->f_op != &mqueue_file_operations))
+ if (unlikely(filp->f_op != &mqueue_file_operations)) {
+ ret = -EBADF;
goto out_fput;
+ }
info = MQUEUE_I(inode);
audit_inode(NULL, filp->f_path.dentry);
- if (unlikely(!(filp->f_mode & FMODE_READ)))
+ if (unlikely(!(filp->f_mode & FMODE_READ))) {
+ ret = -EBADF;
goto out_fput;
+ }
/* checks if buffer is big enough */
if (unlikely(msg_len < info->attr.mq_msgsize)) {
@@ -1063,13 +1079,14 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
/* create the notify skb */
nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
- ret = -ENOMEM;
- if (!nc)
+ if (!nc) {
+ ret = -ENOMEM;
goto out;
- ret = -EFAULT;
+ }
if (copy_from_user(nc->data,
notification.sigev_value.sival_ptr,
NOTIFY_COOKIE_LEN)) {
+ ret = -EFAULT;
goto out;
}
@@ -1078,9 +1095,10 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
/* and attach it to the socket */
retry:
filp = fget(notification.sigev_signo);
- ret = -EBADF;
- if (!filp)
+ if (!filp) {
+ ret = -EBADF;
goto out;
+ }
sock = netlink_getsockbyfilp(filp);
fput(filp);
if (IS_ERR(sock)) {
@@ -1092,7 +1110,7 @@ retry:
timeo = MAX_SCHEDULE_TIMEOUT;
ret = netlink_attachskb(sock, nc, &timeo, NULL);
if (ret == 1)
- goto retry;
+ goto retry;
if (ret) {
sock = NULL;
nc = NULL;
@@ -1101,14 +1119,17 @@ retry:
}
}
- ret = -EBADF;
filp = fget(mqdes);
- if (!filp)
+ if (!filp) {
+ ret = -EBADF;
goto out;
+ }
inode = filp->f_path.dentry->d_inode;
- if (unlikely(filp->f_op != &mqueue_file_operations))
+ if (unlikely(filp->f_op != &mqueue_file_operations)) {
+ ret = -EBADF;
goto out_fput;
+ }
info = MQUEUE_I(inode);
ret = 0;
@@ -1171,14 +1192,17 @@ SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
return -EINVAL;
}
- ret = -EBADF;
filp = fget(mqdes);
- if (!filp)
+ if (!filp) {
+ ret = -EBADF;
goto out;
+ }
inode = filp->f_path.dentry->d_inode;
- if (unlikely(filp->f_op != &mqueue_file_operations))
+ if (unlikely(filp->f_op != &mqueue_file_operations)) {
+ ret = -EBADF;
goto out_fput;
+ }
info = MQUEUE_I(inode);
spin_lock(&info->lock);
@@ -1272,7 +1296,7 @@ static int __init init_mqueue_fs(void)
if (mqueue_inode_cachep == NULL)
return -ENOMEM;
- /* ignore failues - they are not fatal */
+ /* ignore failures - they are not fatal */
mq_sysctl_table = mq_register_sysctl_table();
error = register_filesystem(&mqueue_fs_type);
diff --git a/kernel/Makefile b/kernel/Makefile
index 6aebdeb..7b97469 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,7 +10,8 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
- async.o
+ async.o range.o
+obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o
obj-y += groups.o
ifdef CONFIG_FUNCTION_TRACER
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 4b05bd9..028e856 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -548,6 +548,11 @@ int audit_remove_tree_rule(struct audit_krule *rule)
return 0;
}
+static int compare_root(struct vfsmount *mnt, void *arg)
+{
+ return mnt->mnt_root->d_inode == arg;
+}
+
void audit_trim_trees(void)
{
struct list_head cursor;
@@ -559,7 +564,6 @@ void audit_trim_trees(void)
struct path path;
struct vfsmount *root_mnt;
struct node *node;
- struct list_head list;
int err;
tree = container_of(cursor.next, struct audit_tree, list);
@@ -577,24 +581,16 @@ void audit_trim_trees(void)
if (!root_mnt)
goto skip_it;
- list_add_tail(&list, &root_mnt->mnt_list);
spin_lock(&hash_lock);
list_for_each_entry(node, &tree->chunks, list) {
- struct audit_chunk *chunk = find_chunk(node);
- struct inode *inode = chunk->watch.inode;
- struct vfsmount *mnt;
+ struct inode *inode = find_chunk(node)->watch.inode;
node->index |= 1U<<31;
- list_for_each_entry(mnt, &list, mnt_list) {
- if (mnt->mnt_root->d_inode == inode) {
- node->index &= ~(1U<<31);
- break;
- }
- }
+ if (iterate_mounts(compare_root, inode, root_mnt))
+ node->index &= ~(1U<<31);
}
spin_unlock(&hash_lock);
trim_marked(tree);
put_tree(tree);
- list_del_init(&list);
drop_collected_mounts(root_mnt);
skip_it:
mutex_lock(&audit_filter_mutex);
@@ -603,22 +599,6 @@ skip_it:
mutex_unlock(&audit_filter_mutex);
}
-static int is_under(struct vfsmount *mnt, struct dentry *dentry,
- struct path *path)
-{
- if (mnt != path->mnt) {
- for (;;) {
- if (mnt->mnt_parent == mnt)
- return 0;
- if (mnt->mnt_parent == path->mnt)
- break;
- mnt = mnt->mnt_parent;
- }
- dentry = mnt->mnt_mountpoint;
- }
- return is_subdir(dentry, path->dentry);
-}
-
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
@@ -638,13 +618,17 @@ void audit_put_tree(struct audit_tree *tree)
put_tree(tree);
}
+static int tag_mount(struct vfsmount *mnt, void *arg)
+{
+ return tag_chunk(mnt->mnt_root->d_inode, arg);
+}
+
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
struct audit_tree *seed = rule->tree, *tree;
struct path path;
- struct vfsmount *mnt, *p;
- struct list_head list;
+ struct vfsmount *mnt;
int err;
list_for_each_entry(tree, &tree_list, list) {
@@ -670,16 +654,9 @@ int audit_add_tree_rule(struct audit_krule *rule)
err = -ENOMEM;
goto Err;
}
- list_add_tail(&list, &mnt->mnt_list);
get_tree(tree);
- list_for_each_entry(p, &list, mnt_list) {
- err = tag_chunk(p->mnt_root->d_inode, tree);
- if (err)
- break;
- }
-
- list_del(&list);
+ err = iterate_mounts(tag_mount, tree, mnt);
drop_collected_mounts(mnt);
if (!err) {
@@ -714,31 +691,23 @@ int audit_tag_tree(char *old, char *new)
{
struct list_head cursor, barrier;
int failed = 0;
- struct path path;
+ struct path path1, path2;
struct vfsmount *tagged;
- struct list_head list;
- struct vfsmount *mnt;
- struct dentry *dentry;
int err;
- err = kern_path(new, 0, &path);
+ err = kern_path(new, 0, &path2);
if (err)
return err;
- tagged = collect_mounts(&path);
- path_put(&path);
+ tagged = collect_mounts(&path2);
+ path_put(&path2);
if (!tagged)
return -ENOMEM;
- err = kern_path(old, 0, &path);
+ err = kern_path(old, 0, &path1);
if (err) {
drop_collected_mounts(tagged);
return err;
}
- mnt = mntget(path.mnt);
- dentry = dget(path.dentry);
- path_put(&path);
-
- list_add_tail(&list, &tagged->mnt_list);
mutex_lock(&audit_filter_mutex);
list_add(&barrier, &tree_list);
@@ -746,7 +715,7 @@ int audit_tag_tree(char *old, char *new)
while (cursor.next != &tree_list) {
struct audit_tree *tree;
- struct vfsmount *p;
+ int good_one = 0;
tree = container_of(cursor.next, struct audit_tree, list);
get_tree(tree);
@@ -754,30 +723,19 @@ int audit_tag_tree(char *old, char *new)
list_add(&cursor, &tree->list);
mutex_unlock(&audit_filter_mutex);
- err = kern_path(tree->pathname, 0, &path);
- if (err) {
- put_tree(tree);
- mutex_lock(&audit_filter_mutex);
- continue;
+ err = kern_path(tree->pathname, 0, &path2);
+ if (!err) {
+ good_one = path_is_under(&path1, &path2);
+ path_put(&path2);
}
- spin_lock(&vfsmount_lock);
- if (!is_under(mnt, dentry, &path)) {
- spin_unlock(&vfsmount_lock);
- path_put(&path);
+ if (!good_one) {
put_tree(tree);
mutex_lock(&audit_filter_mutex);
continue;
}
- spin_unlock(&vfsmount_lock);
- path_put(&path);
-
- list_for_each_entry(p, &list, mnt_list) {
- failed = tag_chunk(p->mnt_root->d_inode, tree);
- if (failed)
- break;
- }
+ failed = iterate_mounts(tag_mount, tree, tagged);
if (failed) {
put_tree(tree);
mutex_lock(&audit_filter_mutex);
@@ -818,10 +776,8 @@ int audit_tag_tree(char *old, char *new)
}
list_del(&barrier);
list_del(&cursor);
- list_del(&list);
mutex_unlock(&audit_filter_mutex);
- dput(dentry);
- mntput(mnt);
+ path_put(&path1);
drop_collected_mounts(tagged);
return failed;
}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index fc0f928..f3a461c 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1988,7 +1988,6 @@ void __audit_inode(const char *name, const struct dentry *dentry)
/**
* audit_inode_child - collect inode info for created/removed objects
- * @dname: inode's dentry name
* @dentry: dentry being audited
* @parent: inode of dentry parent
*
@@ -2000,13 +1999,14 @@ void __audit_inode(const char *name, const struct dentry *dentry)
* must be hooked prior, in order to capture the target inode during
* unsuccessful attempts.
*/
-void __audit_inode_child(const char *dname, const struct dentry *dentry,
+void __audit_inode_child(const struct dentry *dentry,
const struct inode *parent)
{
int idx;
struct audit_context *context = current->audit_context;
const char *found_parent = NULL, *found_child = NULL;
const struct inode *inode = dentry->d_inode;
+ const char *dname = dentry->d_name.name;
int dirlen = 0;
if (!context->in_syscall)
@@ -2014,9 +2014,6 @@ void __audit_inode_child(const char *dname, const struct dentry *dentry,
if (inode)
handle_one(inode);
- /* determine matching parent */
- if (!dname)
- goto add_names;
/* parent is more likely, look for it first */
for (idx = 0; idx < context->name_count; idx++) {
diff --git a/kernel/capability.c b/kernel/capability.c
index 7f876e6..9e4697e 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -135,7 +135,7 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
if (pid && (pid != task_pid_vnr(current))) {
struct task_struct *target;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
target = find_task_by_vpid(pid);
if (!target)
@@ -143,7 +143,7 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
else
ret = security_capget(target, pEp, pIp, pPp);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
} else
ret = security_capget(current, pEp, pIp, pPp);
diff --git a/kernel/early_res.c b/kernel/early_res.c
new file mode 100644
index 0000000..3cb2c66
--- /dev/null
+++ b/kernel/early_res.c
@@ -0,0 +1,578 @@
+/*
+ * early_res, could be used to replace bootmem
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/early_res.h>
+
+/*
+ * Early reserved memory areas.
+ */
+/*
+ * need to make sure this one is big enough before
+ * find_fw_memmap_area can be used
+ */
+#define MAX_EARLY_RES_X 32
+
+struct early_res {
+ u64 start, end;
+ char name[15];
+ char overlap_ok;
+};
+static struct early_res early_res_x[MAX_EARLY_RES_X] __initdata;
+
+static int max_early_res __initdata = MAX_EARLY_RES_X;
+static struct early_res *early_res __initdata = &early_res_x[0];
+static int early_res_count __initdata;
+
+static int __init find_overlapped_early(u64 start, u64 end)
+{
+ int i;
+ struct early_res *r;
+
+ for (i = 0; i < max_early_res && early_res[i].end; i++) {
+ r = &early_res[i];
+ if (end > r->start && start < r->end)
+ break;
+ }
+
+ return i;
+}
+
+/*
+ * Drop the i-th range from the early reservation map,
+ * by copying any higher ranges down one over it, and
+ * clearing what had been the last slot.
+ */
+static void __init drop_range(int i)
+{
+ int j;
+
+ for (j = i + 1; j < max_early_res && early_res[j].end; j++)
+ ;
+
+ memmove(&early_res[i], &early_res[i + 1],
+ (j - 1 - i) * sizeof(struct early_res));
+
+ early_res[j - 1].end = 0;
+ early_res_count--;
+}
+
+static void __init drop_range_partial(int i, u64 start, u64 end)
+{
+ u64 common_start, common_end;
+ u64 old_start, old_end;
+
+ old_start = early_res[i].start;
+ old_end = early_res[i].end;
+ common_start = max(old_start, start);
+ common_end = min(old_end, end);
+
+ /* no overlap ? */
+ if (common_start >= common_end)
+ return;
+
+ if (old_start < common_start) {
+ /* make head segment */
+ early_res[i].end = common_start;
+ if (old_end > common_end) {
+ char name[15];
+
+ /*
+ * Save a local copy of the name, since the
+ * early_res array could get resized inside
+ * reserve_early_without_check() ->
+ * __check_and_double_early_res(), which would
+ * make the current name pointer invalid.
+ */
+ strncpy(name, early_res[i].name,
+ sizeof(early_res[i].name) - 1);
+ /* add another for left over on tail */
+ reserve_early_without_check(common_end, old_end, name);
+ }
+ return;
+ } else {
+ if (old_end > common_end) {
+ /* reuse the entry for tail left */
+ early_res[i].start = common_end;
+ return;
+ }
+ /* all covered */
+ drop_range(i);
+ }
+}
+
+/*
+ * Split any existing ranges that:
+ * 1) are marked 'overlap_ok', and
+ * 2) overlap with the stated range [start, end)
+ * into whatever portion (if any) of the existing range is entirely
+ * below or entirely above the stated range. Drop the portion
+ * of the existing range that overlaps with the stated range,
+ * which will allow the caller of this routine to then add that
+ * stated range without conflicting with any existing range.
+ */
+static void __init drop_overlaps_that_are_ok(u64 start, u64 end)
+{
+ int i;
+ struct early_res *r;
+ u64 lower_start, lower_end;
+ u64 upper_start, upper_end;
+ char name[15];
+
+ for (i = 0; i < max_early_res && early_res[i].end; i++) {
+ r = &early_res[i];
+
+ /* Continue past non-overlapping ranges */
+ if (end <= r->start || start >= r->end)
+ continue;
+
+ /*
+ * Leave non-ok overlaps as is; let caller
+ * panic "Overlapping early reservations"
+ * when it hits this overlap.
+ */
+ if (!r->overlap_ok)
+ return;
+
+ /*
+ * We have an ok overlap. We will drop it from the early
+ * reservation map, and add back in any non-overlapping
+ * portions (lower or upper) as separate, overlap_ok,
+ * non-overlapping ranges.
+ */
+
+ /* 1. Note any non-overlapping (lower or upper) ranges. */
+ strncpy(name, r->name, sizeof(name) - 1);
+
+ lower_start = lower_end = 0;
+ upper_start = upper_end = 0;
+ if (r->start < start) {
+ lower_start = r->start;
+ lower_end = start;
+ }
+ if (r->end > end) {
+ upper_start = end;
+ upper_end = r->end;
+ }
+
+ /* 2. Drop the original ok overlapping range */
+ drop_range(i);
+
+ i--; /* resume for-loop on copied down entry */
+
+ /* 3. Add back in any non-overlapping ranges. */
+ if (lower_end)
+ reserve_early_overlap_ok(lower_start, lower_end, name);
+ if (upper_end)
+ reserve_early_overlap_ok(upper_start, upper_end, name);
+ }
+}
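A concrete illustration with hypothetical addresses: if an overlap_ok entry covers [0x1000, 0x9000) and the stated range is [0x3000, 0x5000), the loop drops the old entry and re-adds [0x1000, 0x3000) and [0x5000, 0x9000) as separate overlap_ok pieces, so a subsequent __reserve_early(0x3000, 0x5000, ...) no longer collides with anything.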
+
+static void __init __reserve_early(u64 start, u64 end, char *name,
+ int overlap_ok)
+{
+ int i;
+ struct early_res *r;
+
+ i = find_overlapped_early(start, end);
+ if (i >= max_early_res)
+ panic("Too many early reservations");
+ r = &early_res[i];
+ if (r->end)
+ panic("Overlapping early reservations "
+ "%llx-%llx %s to %llx-%llx %s\n",
+ start, end - 1, name ? name : "", r->start,
+ r->end - 1, r->name);
+ r->start = start;
+ r->end = end;
+ r->overlap_ok = overlap_ok;
+ if (name)
+ strncpy(r->name, name, sizeof(r->name) - 1);
+ early_res_count++;
+}
+
+/*
+ * A few early reservations come here.
+ *
+ * The 'overlap_ok' in the name of this routine does -not- mean it
+ * is ok for these reservations to overlap an earlier reservation.
+ * Rather it means that it is ok for subsequent reservations to
+ * overlap this one.
+ *
+ * Use this entry point to reserve early ranges when you are doing
+ * so out of "Paranoia", reserving perhaps more memory than you need,
+ * just in case, and don't mind a subsequent overlapping reservation
+ * that is known to be needed.
+ *
+ * The drop_overlaps_that_are_ok() call here isn't really needed.
+ * It would be needed if we had two colliding 'overlap_ok'
+ * reservations, so that the second such would not panic on the
+ * overlap with the first. We don't have any such as of this
+ * writing, but might as well tolerate such if it happens in
+ * the future.
+ */
+void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
+{
+ drop_overlaps_that_are_ok(start, end);
+ __reserve_early(start, end, name, 1);
+}
+
+static void __init __check_and_double_early_res(u64 ex_start, u64 ex_end)
+{
+ u64 start, end, size, mem;
+ struct early_res *new;
+
+ /* do we have enough slots left ? */
+ if ((max_early_res - early_res_count) > max(max_early_res/8, 2))
+ return;
+
+ /* double it */
+ mem = -1ULL;
+ size = sizeof(struct early_res) * max_early_res * 2;
+ if (early_res == early_res_x)
+ start = 0;
+ else
+ start = early_res[0].end;
+ end = ex_start;
+ if (start + size < end)
+ mem = find_fw_memmap_area(start, end, size,
+ sizeof(struct early_res));
+ if (mem == -1ULL) {
+ start = ex_end;
+ end = get_max_mapped();
+ if (start + size < end)
+ mem = find_fw_memmap_area(start, end, size,
+ sizeof(struct early_res));
+ }
+ if (mem == -1ULL)
+ panic("can not find more space for early_res array");
+
+ new = __va(mem);
+ /* save the first one for own */
+ new[0].start = mem;
+ new[0].end = mem + size;
+ new[0].overlap_ok = 0;
+ /* copy old to new */
+ if (early_res == early_res_x) {
+ memcpy(&new[1], &early_res[0],
+ sizeof(struct early_res) * max_early_res);
+ memset(&new[max_early_res+1], 0,
+ sizeof(struct early_res) * (max_early_res - 1));
+ early_res_count++;
+ } else {
+ memcpy(&new[1], &early_res[1],
+ sizeof(struct early_res) * (max_early_res - 1));
+ memset(&new[max_early_res], 0,
+ sizeof(struct early_res) * max_early_res);
+ }
+ memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
+ early_res = new;
+ max_early_res *= 2;
+ printk(KERN_DEBUG "early_res array is doubled to %d at [%llx - %llx]\n",
+ max_early_res, mem, mem + size - 1);
+}
+
+/*
+ * Most early reservations come here.
+ *
+ * We first have drop_overlaps_that_are_ok() drop any pre-existing
+ * 'overlap_ok' ranges, so that we can then reserve this memory
+ * range without risk of panic'ing on an overlapping overlap_ok
+ * early reservation.
+ */
+void __init reserve_early(u64 start, u64 end, char *name)
+{
+ if (start >= end)
+ return;
+
+ __check_and_double_early_res(start, end);
+
+ drop_overlaps_that_are_ok(start, end);
+ __reserve_early(start, end, name, 0);
+}
+
+void __init reserve_early_without_check(u64 start, u64 end, char *name)
+{
+ struct early_res *r;
+
+ if (start >= end)
+ return;
+
+ __check_and_double_early_res(start, end);
+
+ r = &early_res[early_res_count];
+
+ r->start = start;
+ r->end = end;
+ r->overlap_ok = 0;
+ if (name)
+ strncpy(r->name, name, sizeof(r->name) - 1);
+ early_res_count++;
+}
+
+void __init free_early(u64 start, u64 end)
+{
+ struct early_res *r;
+ int i;
+
+ i = find_overlapped_early(start, end);
+ r = &early_res[i];
+ if (i >= max_early_res || r->end != end || r->start != start)
+ panic("free_early on not reserved area: %llx-%llx!",
+ start, end - 1);
+
+ drop_range(i);
+}
+
+void __init free_early_partial(u64 start, u64 end)
+{
+ struct early_res *r;
+ int i;
+
+try_next:
+ i = find_overlapped_early(start, end);
+ if (i >= max_early_res)
+ return;
+
+ r = &early_res[i];
+ /* hole ? */
+ /* does this reservation fully cover [start, end)? */
+ drop_range_partial(i, start, end);
+ return;
+ }
+
+ drop_range_partial(i, start, end);
+ goto try_next;
+}
+
+#ifdef CONFIG_NO_BOOTMEM
+static void __init subtract_early_res(struct range *range, int az)
+{
+ int i, count;
+ u64 final_start, final_end;
+ int idx = 0;
+
+ count = 0;
+ for (i = 0; i < max_early_res && early_res[i].end; i++)
+ count++;
+
+ /* need to skip the first one? */
+ if (early_res != early_res_x)
+ idx = 1;
+
+#define DEBUG_PRINT_EARLY_RES 1
+
+#if DEBUG_PRINT_EARLY_RES
+ printk(KERN_INFO "Subtract (%d early reservations)\n", count);
+#endif
+ for (i = idx; i < count; i++) {
+ struct early_res *r = &early_res[i];
+#if DEBUG_PRINT_EARLY_RES
+ printk(KERN_INFO " #%d [%010llx - %010llx] %15s\n", i,
+ r->start, r->end, r->name);
+#endif
+ final_start = PFN_DOWN(r->start);
+ final_end = PFN_UP(r->end);
+ if (final_start >= final_end)
+ continue;
+ subtract_range(range, az, final_start, final_end);
+ }
+
+}
+
+int __init get_free_all_memory_range(struct range **rangep, int nodeid)
+{
+ int i, count;
+ u64 start = 0, end;
+ u64 size;
+ u64 mem;
+ struct range *range;
+ int nr_range;
+
+ count = 0;
+ for (i = 0; i < max_early_res && early_res[i].end; i++)
+ count++;
+
+ count *= 2;
+
+ size = sizeof(struct range) * count;
+ end = get_max_mapped();
+#ifdef MAX_DMA32_PFN
+ if (end > (MAX_DMA32_PFN << PAGE_SHIFT))
+ start = MAX_DMA32_PFN << PAGE_SHIFT;
+#endif
+ mem = find_fw_memmap_area(start, end, size, sizeof(struct range));
+ if (mem == -1ULL)
+ panic("can not find more space for range free");
+
+ range = __va(mem);
+ /* use early_node_map[] and early_res to get range array at first */
+ memset(range, 0, size);
+ nr_range = 0;
+
+ /* need to go over early_node_map to find out good range for node */
+ nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
+#ifdef CONFIG_X86_32
+ subtract_range(range, count, max_low_pfn, -1ULL);
+#endif
+ subtract_early_res(range, count);
+ nr_range = clean_sort_range(range, count);
+
+ /* need to clear it ? */
+ if (nodeid == MAX_NUMNODES) {
+ memset(&early_res[0], 0,
+ sizeof(struct early_res) * max_early_res);
+ early_res = NULL;
+ max_early_res = 0;
+ }
+
+ *rangep = range;
+ return nr_range;
+}
+#else
+void __init early_res_to_bootmem(u64 start, u64 end)
+{
+ int i, count;
+ u64 final_start, final_end;
+ int idx = 0;
+
+ count = 0;
+ for (i = 0; i < max_early_res && early_res[i].end; i++)
+ count++;
+
+ /* need to skip the first one? */
+ if (early_res != early_res_x)
+ idx = 1;
+
+ printk(KERN_INFO "(%d/%d early reservations) ==> bootmem [%010llx - %010llx]\n",
+ count - idx, max_early_res, start, end);
+ for (i = idx; i < count; i++) {
+ struct early_res *r = &early_res[i];
+ printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i,
+ r->start, r->end, r->name);
+ final_start = max(start, r->start);
+ final_end = min(end, r->end);
+ if (final_start >= final_end) {
+ printk(KERN_CONT "\n");
+ continue;
+ }
+ printk(KERN_CONT " ==> [%010llx - %010llx]\n",
+ final_start, final_end);
+ reserve_bootmem_generic(final_start, final_end - final_start,
+ BOOTMEM_DEFAULT);
+ }
+ /* clear them */
+ memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
+ early_res = NULL;
+ max_early_res = 0;
+ early_res_count = 0;
+}
+#endif
+
+/* Check for already reserved areas */
+static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
+{
+ int i;
+ u64 addr = *addrp;
+ int changed = 0;
+ struct early_res *r;
+again:
+ i = find_overlapped_early(addr, addr + size);
+ r = &early_res[i];
+ if (i < max_early_res && r->end) {
+ *addrp = addr = round_up(r->end, align);
+ changed = 1;
+ goto again;
+ }
+ return changed;
+}
+
+/* Check for already reserved areas */
+static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
+{
+ int i;
+ u64 addr = *addrp, last;
+ u64 size = *sizep;
+ int changed = 0;
+again:
+ last = addr + size;
+ for (i = 0; i < max_early_res && early_res[i].end; i++) {
+ struct early_res *r = &early_res[i];
+ if (last > r->start && addr < r->start) {
+ size = r->start - addr;
+ changed = 1;
+ goto again;
+ }
+ if (last > r->end && addr < r->end) {
+ addr = round_up(r->end, align);
+ size = last - addr;
+ changed = 1;
+ goto again;
+ }
+ if (last <= r->end && addr >= r->start) {
+ (*sizep)++;
+ return 0;
+ }
+ }
+ if (changed) {
+ *addrp = addr;
+ *sizep = size;
+ }
+ return changed;
+}
+
+/*
+ * Find a free area with specified alignment in a specific range.
+ * Only the area between start and end that is an active range in
+ * early_node_map is considered, so the result is known-good RAM.
+ */
+u64 __init find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
+ u64 size, u64 align)
+{
+ u64 addr, last;
+
+ addr = round_up(ei_start, align);
+ if (addr < start)
+ addr = round_up(start, align);
+ if (addr >= ei_last)
+ goto out;
+ while (bad_addr(&addr, size, align) && addr+size <= ei_last)
+ ;
+ last = addr + size;
+ if (last > ei_last)
+ goto out;
+ if (last > end)
+ goto out;
+
+ return addr;
+
+out:
+ return -1ULL;
+}
+
+u64 __init find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
+ u64 *sizep, u64 align)
+{
+ u64 addr, last;
+
+ addr = round_up(ei_start, align);
+ if (addr < start)
+ addr = round_up(start, align);
+ if (addr >= ei_last)
+ goto out;
+ *sizep = ei_last - addr;
+ while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last)
+ ;
+ last = addr + *sizep;
+ if (last > ei_last)
+ goto out;
+
+ return addr;
+
+out:
+ return -1ULL;
+}
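Putting the pieces together, a boot sequence on an architecture converted to this code might look like the following sketch (the function names mirror this file; the addresses and the "TRAMPOLINE" label are purely illustrative):

	/* protect the kernel image before any other allocation happens */
	reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS");
	reserve_early(0x8000, 0x9000, "TRAMPOLINE");

	/* once the trampoline has served its purpose */
	free_early(0x8000, 0x9000);

	/* hand everything still reserved over to bootmem (non-NO_BOOTMEM case) */
	early_res_to_bootmem(0, max_pfn << PAGE_SHIFT);

Note the asymmetry: free_early() demands an exact [start, end) match and panics otherwise, while free_early_partial() trims or splits whatever overlaps.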
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index ecc3fa2..d70394f 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -18,11 +18,7 @@
#include "internals.h"
-/**
- * dynamic_irq_init - initialize a dynamically allocated irq
- * @irq: irq number to initialize
- */
-void dynamic_irq_init(unsigned int irq)
+static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
{
struct irq_desc *desc;
unsigned long flags;
@@ -41,7 +37,8 @@ void dynamic_irq_init(unsigned int irq)
desc->depth = 1;
desc->msi_desc = NULL;
desc->handler_data = NULL;
- desc->chip_data = NULL;
+ if (!keep_chip_data)
+ desc->chip_data = NULL;
desc->action = NULL;
desc->irq_count = 0;
desc->irqs_unhandled = 0;
@@ -55,10 +52,26 @@ void dynamic_irq_init(unsigned int irq)
}
/**
- * dynamic_irq_cleanup - cleanup a dynamically allocated irq
+ * dynamic_irq_init - initialize a dynamically allocated irq
* @irq: irq number to initialize
*/
-void dynamic_irq_cleanup(unsigned int irq)
+void dynamic_irq_init(unsigned int irq)
+{
+ dynamic_irq_init_x(irq, false);
+}
+
+/**
+ * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
+ * @irq: irq number to initialize
+ *
+ * does not set irq_to_desc(irq)->chip_data to NULL
+ */
+void dynamic_irq_init_keep_chip_data(unsigned int irq)
+{
+ dynamic_irq_init_x(irq, true);
+}
+
+static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
@@ -77,7 +90,8 @@ void dynamic_irq_cleanup(unsigned int irq)
}
desc->msi_desc = NULL;
desc->handler_data = NULL;
- desc->chip_data = NULL;
+ if (!keep_chip_data)
+ desc->chip_data = NULL;
desc->handle_irq = handle_bad_irq;
desc->chip = &no_irq_chip;
desc->name = NULL;
@@ -85,6 +99,26 @@ void dynamic_irq_cleanup(unsigned int irq)
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
+/**
+ * dynamic_irq_cleanup - cleanup a dynamically allocated irq
+ * @irq: irq number to clean up
+ */
+void dynamic_irq_cleanup(unsigned int irq)
+{
+ dynamic_irq_cleanup_x(irq, false);
+}
+
+/**
+ * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
+ * @irq: irq number to clean up
+ *
+ * does not set irq_to_desc(irq)->chip_data to NULL
+ */
+void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
+{
+ dynamic_irq_cleanup_x(irq, true);
+}
+
/**
* set_irq_chip - set the irq chip for an irq
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 814940e..76d5a67 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -19,7 +19,7 @@
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
-#include <linux/bootmem.h>
+#include <linux/radix-tree.h>
#include <trace/events/irq.h>
#include "internals.h"
@@ -87,12 +87,8 @@ void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
void *ptr;
- if (slab_is_available())
- ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
- GFP_ATOMIC, node);
- else
- ptr = alloc_bootmem_node(NODE_DATA(node),
- nr * sizeof(*desc->kstat_irqs));
+ ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
+ GFP_ATOMIC, node);
/*
 * don't overwrite if we can't get a new one
@@ -132,7 +128,26 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
*/
DEFINE_RAW_SPINLOCK(sparse_irq_lock);
-struct irq_desc **irq_desc_ptrs __read_mostly;
+static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
+
+static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+ radix_tree_insert(&irq_desc_tree, irq, desc);
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+ return radix_tree_lookup(&irq_desc_tree, irq);
+}
+
+void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+ void **ptr;
+
+ ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
+ if (ptr)
+ radix_tree_replace_slot(ptr, desc);
+}
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
[0 ... NR_IRQS_LEGACY-1] = {
@@ -164,9 +179,6 @@ int __init early_irq_init(void)
legacy_count = ARRAY_SIZE(irq_desc_legacy);
node = first_online_node;
- /* allocate irq_desc_ptrs array based on nr_irqs */
- irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
-
/* allocate based on nr_cpu_ids */
kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
sizeof(int), GFP_NOWAIT, node);
@@ -180,23 +192,12 @@ int __init early_irq_init(void)
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
alloc_desc_masks(&desc[i], node, true);
init_desc_masks(&desc[i]);
- irq_desc_ptrs[i] = desc + i;
+ set_irq_desc(i, &desc[i]);
}
- for (i = legacy_count; i < nr_irqs; i++)
- irq_desc_ptrs[i] = NULL;
-
return arch_early_irq_init();
}
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
- if (irq_desc_ptrs && irq < nr_irqs)
- return irq_desc_ptrs[irq];
-
- return NULL;
-}
-
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
struct irq_desc *desc;
@@ -208,21 +209,18 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
return NULL;
}
- desc = irq_desc_ptrs[irq];
+ desc = irq_to_desc(irq);
if (desc)
return desc;
raw_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
- desc = irq_desc_ptrs[irq];
+ desc = irq_to_desc(irq);
if (desc)
goto out_unlock;
- if (slab_is_available())
- desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
- else
- desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));
+ desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
if (!desc) {
@@ -231,7 +229,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
}
init_one_irq_desc(irq, desc, node);
- irq_desc_ptrs[irq] = desc;
+ set_irq_desc(irq, desc);
out_unlock:
raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b2821f0..c63f3bc 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -21,11 +21,7 @@ extern void clear_kstat_irqs(struct irq_desc *desc);
extern raw_spinlock_t sparse_irq_lock;
#ifdef CONFIG_SPARSE_IRQ
-/* irq_desc_ptrs allocated at boot time */
-extern struct irq_desc **irq_desc_ptrs;
-#else
-/* irq_desc_ptrs is a fixed size array */
-extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
#endif
#ifdef CONFIG_PROC_FS
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 26bac9d..963559d 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -70,7 +70,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
raw_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
- desc = irq_desc_ptrs[irq];
+ desc = irq_to_desc(irq);
if (desc && old_desc != desc)
goto out_unlock;
@@ -90,7 +90,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
goto out_unlock;
}
- irq_desc_ptrs[irq] = desc;
+ replace_irq_desc(irq, desc);
raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
/* free the old one */
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ef077fb..87ebe8a 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -41,7 +41,7 @@
#include <asm/sections.h>
/* Per cpu memory for storing cpu states in case of system crash. */
-note_buf_t* crash_notes;
+note_buf_t __percpu *crash_notes;
/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ccec774..fa034d2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -42,9 +42,11 @@
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
+#include <linux/cpu.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
@@ -105,57 +107,74 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
* stepping on the instruction on a vmalloced/kmalloced/data page
* is a recipe for disaster
*/
-#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
-
struct kprobe_insn_page {
struct list_head list;
kprobe_opcode_t *insns; /* Page of instruction slots */
- char slot_used[INSNS_PER_PAGE];
int nused;
int ngarbage;
+ char slot_used[];
+};
+
+#define KPROBE_INSN_PAGE_SIZE(slots) \
+ (offsetof(struct kprobe_insn_page, slot_used) + \
+ (sizeof(char) * (slots)))
+
+struct kprobe_insn_cache {
+ struct list_head pages; /* list of kprobe_insn_page */
+ size_t insn_size; /* size of instruction slot */
+ int nr_garbage;
};
+static int slots_per_page(struct kprobe_insn_cache *c)
+{
+ return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
+}
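For a sense of scale, with 4 KiB pages and x86-like values (MAX_INSN_SIZE of 16, a one-byte kprobe_opcode_t), slots_per_page() comes out to 4096 / (16 * 1) = 256, and KPROBE_INSN_PAGE_SIZE(256) sizes the header plus a 256-byte slot_used[] array in a single allocation.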
+
enum kprobe_slot_state {
SLOT_CLEAN = 0,
SLOT_DIRTY = 1,
SLOT_USED = 2,
};
-static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */
-static LIST_HEAD(kprobe_insn_pages);
-static int kprobe_garbage_slots;
-static int collect_garbage_slots(void);
+static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_slots */
+static struct kprobe_insn_cache kprobe_insn_slots = {
+ .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
+ .insn_size = MAX_INSN_SIZE,
+ .nr_garbage = 0,
+};
+static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
/**
* __get_insn_slot() - Find a slot on an executable page for an instruction.
* We allocate an executable page if there's no room on existing ones.
*/
-static kprobe_opcode_t __kprobes *__get_insn_slot(void)
+static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
{
struct kprobe_insn_page *kip;
retry:
- list_for_each_entry(kip, &kprobe_insn_pages, list) {
- if (kip->nused < INSNS_PER_PAGE) {
+ list_for_each_entry(kip, &c->pages, list) {
+ if (kip->nused < slots_per_page(c)) {
int i;
- for (i = 0; i < INSNS_PER_PAGE; i++) {
+ for (i = 0; i < slots_per_page(c); i++) {
if (kip->slot_used[i] == SLOT_CLEAN) {
kip->slot_used[i] = SLOT_USED;
kip->nused++;
- return kip->insns + (i * MAX_INSN_SIZE);
+ return kip->insns + (i * c->insn_size);
}
}
- /* Surprise! No unused slots. Fix kip->nused. */
- kip->nused = INSNS_PER_PAGE;
+ /* kip->nused is broken. Fix it. */
+ kip->nused = slots_per_page(c);
+ WARN_ON(1);
}
}
/* If there are any garbage slots, collect it and try again. */
- if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
+ if (c->nr_garbage && collect_garbage_slots(c) == 0)
goto retry;
- }
- /* All out of space. Need to allocate a new page. Use slot 0. */
- kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
+
+ /* All out of space. Need to allocate a new page. */
+ kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
if (!kip)
return NULL;
@@ -170,20 +189,23 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
return NULL;
}
INIT_LIST_HEAD(&kip->list);
- list_add(&kip->list, &kprobe_insn_pages);
- memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
+ memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
kip->slot_used[0] = SLOT_USED;
kip->nused = 1;
kip->ngarbage = 0;
+ list_add(&kip->list, &c->pages);
return kip->insns;
}
+
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
- kprobe_opcode_t *ret;
+ kprobe_opcode_t *ret = NULL;
+
mutex_lock(&kprobe_insn_mutex);
- ret = __get_insn_slot();
+ ret = __get_insn_slot(&kprobe_insn_slots);
mutex_unlock(&kprobe_insn_mutex);
+
return ret;
}
@@ -199,7 +221,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
* so as not to have to set it up again the
* next time somebody inserts a probe.
*/
- if (!list_is_singular(&kprobe_insn_pages)) {
+ if (!list_is_singular(&kip->list)) {
list_del(&kip->list);
module_free(NULL, kip->insns);
kfree(kip);
@@ -209,51 +231,84 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
return 0;
}
-static int __kprobes collect_garbage_slots(void)
+static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
{
struct kprobe_insn_page *kip, *next;
/* Ensure no one is still running on the garbage slots */
synchronize_sched();
- list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
+ list_for_each_entry_safe(kip, next, &c->pages, list) {
int i;
if (kip->ngarbage == 0)
continue;
kip->ngarbage = 0; /* we will collect all garbage slots */
- for (i = 0; i < INSNS_PER_PAGE; i++) {
+ for (i = 0; i < slots_per_page(c); i++) {
if (kip->slot_used[i] == SLOT_DIRTY &&
collect_one_slot(kip, i))
break;
}
}
- kprobe_garbage_slots = 0;
+ c->nr_garbage = 0;
return 0;
}
-void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
+static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
+ kprobe_opcode_t *slot, int dirty)
{
struct kprobe_insn_page *kip;
- mutex_lock(&kprobe_insn_mutex);
- list_for_each_entry(kip, &kprobe_insn_pages, list) {
- if (kip->insns <= slot &&
- slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
- int i = (slot - kip->insns) / MAX_INSN_SIZE;
+ list_for_each_entry(kip, &c->pages, list) {
+ long idx = ((long)slot - (long)kip->insns) / c->insn_size;
+ if (idx >= 0 && idx < slots_per_page(c)) {
+ WARN_ON(kip->slot_used[idx] != SLOT_USED);
if (dirty) {
- kip->slot_used[i] = SLOT_DIRTY;
+ kip->slot_used[idx] = SLOT_DIRTY;
kip->ngarbage++;
+ if (++c->nr_garbage > slots_per_page(c))
+ collect_garbage_slots(c);
} else
- collect_one_slot(kip, i);
- break;
+ collect_one_slot(kip, idx);
+ return;
}
}
+ /* Could not free this slot. */
+ WARN_ON(1);
+}
- if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
- collect_garbage_slots();
-
+void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
+{
+ mutex_lock(&kprobe_insn_mutex);
+ __free_insn_slot(&kprobe_insn_slots, slot, dirty);
mutex_unlock(&kprobe_insn_mutex);
}
+#ifdef CONFIG_OPTPROBES
+/* For optimized_kprobe buffer */
+static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
+static struct kprobe_insn_cache kprobe_optinsn_slots = {
+ .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
+ /* .insn_size is initialized later */
+ .nr_garbage = 0,
+};
+/* Get a slot for optimized_kprobe buffer */
+kprobe_opcode_t __kprobes *get_optinsn_slot(void)
+{
+ kprobe_opcode_t *ret = NULL;
+
+ mutex_lock(&kprobe_optinsn_mutex);
+ ret = __get_insn_slot(&kprobe_optinsn_slots);
+ mutex_unlock(&kprobe_optinsn_mutex);
+
+ return ret;
+}
+
+void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
+{
+ mutex_lock(&kprobe_optinsn_mutex);
+ __free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
+ mutex_unlock(&kprobe_optinsn_mutex);
+}
+#endif
#endif
/* We have preemption disabled.. so it is safe to use __ versions */
@@ -284,23 +339,401 @@ struct kprobe __kprobes *get_kprobe(void *addr)
if (p->addr == addr)
return p;
}
+
+ return NULL;
+}
+
+static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
+
+/* Return true if the kprobe is an aggregator */
+static inline int kprobe_aggrprobe(struct kprobe *p)
+{
+ return p->pre_handler == aggr_pre_handler;
+}
+
+/*
+ * Keep all fields in the kprobe consistent
+ */
+static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
+{
+ memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
+ memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
+}
+
+#ifdef CONFIG_OPTPROBES
+/* NOTE: change this value only with kprobe_mutex held */
+static bool kprobes_allow_optimization;
+
+/*
+ * Call all of the pre_handlers on the list, but ignore their return values.
+ * This must be called from the arch-dependent optimized caller.
+ */
+void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe *kp;
+
+ list_for_each_entry_rcu(kp, &p->list, list) {
+ if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
+ set_kprobe_instance(kp);
+ kp->pre_handler(kp, regs);
+ }
+ reset_kprobe_instance();
+ }
+}
+
+/* Return true(!0) if the kprobe is ready for optimization. */
+static inline int kprobe_optready(struct kprobe *p)
+{
+ struct optimized_kprobe *op;
+
+ if (kprobe_aggrprobe(p)) {
+ op = container_of(p, struct optimized_kprobe, kp);
+ return arch_prepared_optinsn(&op->optinsn);
+ }
+
+ return 0;
+}
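
kprobe_optready() works because struct optimized_kprobe embeds its struct kprobe as the kp member, letting container_of() walk from the inner pointer back to the enclosing object. The idiom in isolation (self-contained, with container_of() spelled out):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct kprobe { void *addr; };

	struct optimized_kprobe {
		struct kprobe kp;	/* embedded member, as in the kernel */
		int optinsn_ready;
	};

	int main(void)
	{
		struct optimized_kprobe op = { { (void *)0x1000 }, 1 };
		struct kprobe *p = &op.kp;		/* inner pointer */
		struct optimized_kprobe *back =
			container_of(p, struct optimized_kprobe, kp);

		printf("%d\n", back->optinsn_ready);	/* prints 1 */
		return 0;
	}
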
+
+/*
+ * Return an optimized kprobe whose optimizing code replaces
+ * instructions including addr (excluding the breakpoint itself).
+ */
+struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
+{
+ int i;
+ struct kprobe *p = NULL;
+ struct optimized_kprobe *op;
+
+ /* Don't check i == 0, since that is a breakpoint case. */
+ for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
+ p = get_kprobe((void *)(addr - i));
+
+ if (p && kprobe_optready(p)) {
+ op = container_of(p, struct optimized_kprobe, kp);
+ if (arch_within_optimized_kprobe(op, addr))
+ return p;
+ }
+
return NULL;
}
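
Because an optimized probe overwrites several bytes, a probe covering addr may start before it; the loop therefore scans backwards up to MAX_OPTIMIZED_LENGTH - 1 bytes (i == 0 being the ordinary breakpoint case) and then lets the arch code confirm the hit. The same backward-scan idea as a self-contained sketch (the table, lengths, and addresses are stand-ins, not kernel data):

	#include <stdio.h>

	#define MAX_PATCH_LEN 5	/* assumed maximum patched span, in bytes */

	struct probe { unsigned long addr; int len; };

	/* Toy lookup standing in for get_kprobe()'s hash table. */
	static struct probe probes[] = { { 0x1000, 5 }, { 0x2000, 3 } };

	static struct probe *find_probe(unsigned long addr)
	{
		for (unsigned int i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
			if (probes[i].addr == addr)
				return &probes[i];
		return NULL;
	}

	/* Return the probe whose patched span covers addr, if any. */
	static struct probe *find_covering_probe(unsigned long addr)
	{
		for (int i = 1; i < MAX_PATCH_LEN; i++) {	/* skip i == 0 */
			struct probe *p = find_probe(addr - i);

			if (p && addr < p->addr + p->len)
				return p;
		}
		return NULL;
	}

	int main(void)
	{
		/* 0x1003 lies inside the 5-byte patch starting at 0x1000 */
		printf("%p\n", (void *)find_covering_probe(0x1003));
		return 0;
	}
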
+/* Optimization staging list, protected by kprobe_mutex */
+static LIST_HEAD(optimizing_list);
+
+static void kprobe_optimizer(struct work_struct *work);
+static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
+#define OPTIMIZE_DELAY 5
+
+/* Kprobe jump optimizer */
+static __kprobes void kprobe_optimizer(struct work_struct *work)
+{
+ struct optimized_kprobe *op, *tmp;
+
+ /* Lock modules while optimizing kprobes */
+ mutex_lock(&module_mutex);
+ mutex_lock(&kprobe_mutex);
+ if (kprobes_all_disarmed || !kprobes_allow_optimization)
+ goto end;
+
+ /*
+ * Wait for the quiescence period to ensure all running interrupts
+ * are done. Because an optprobe may modify multiple instructions,
+ * there is a chance that the Nth instruction is interrupted. In that
+ * case, a running interrupt can return into the 2nd-Nth byte of the
+ * jump instruction. This wait avoids that.
+ */
+ synchronize_sched();
+
+ /*
+ * The optimization/unoptimization refers to online_cpus via
+ * stop_machine(), while cpu-hotplug modifies online_cpus. At the
+ * same time, text_mutex is held both by cpu-hotplug and here.
+ * This combination can cause a deadlock (cpu-hotplug tries to lock
+ * text_mutex, but stop_machine() cannot proceed because online_cpus
+ * has changed). To avoid this deadlock, we call get_online_cpus()
+ * before taking text_mutex, keeping cpu-hotplug away for the
+ * duration of the text_mutex locking.
+ */
+ get_online_cpus();
+ mutex_lock(&text_mutex);
+ list_for_each_entry_safe(op, tmp, &optimizing_list, list) {
+ WARN_ON(kprobe_disabled(&op->kp));
+ if (arch_optimize_kprobe(op) < 0)
+ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+ list_del_init(&op->list);
+ }
+ mutex_unlock(&text_mutex);
+ put_online_cpus();
+end:
+ mutex_unlock(&kprobe_mutex);
+ mutex_unlock(&module_mutex);
+}
+
+/* Optimize kprobe if p is ready to be optimized */
+static __kprobes void optimize_kprobe(struct kprobe *p)
+{
+ struct optimized_kprobe *op;
+
+ /* Check if the kprobe is disabled or not ready for optimization. */
+ if (!kprobe_optready(p) || !kprobes_allow_optimization ||
+ (kprobe_disabled(p) || kprobes_all_disarmed))
+ return;
+
+ /* Neither break_handler nor post_handler is supported. */
+ if (p->break_handler || p->post_handler)
+ return;
+
+ op = container_of(p, struct optimized_kprobe, kp);
+
+ /* Check that there are no other kprobes within the optimized instructions */
+ if (arch_check_optimized_kprobe(op) < 0)
+ return;
+
+ /* Check if it is already optimized. */
+ if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+ return;
+
+ op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
+ list_add(&op->list, &optimizing_list);
+ if (!delayed_work_pending(&optimizing_work))
+ schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
+}
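
Note the scheduling discipline here: optimize_kprobe() only queues the probe and arms a single delayed worker; while the work is pending, further registrations simply join optimizing_list, so a burst of probes is handled in one batched pass after OPTIMIZE_DELAY. A minimal sketch of that queue-plus-single-worker pattern (plain C; timers and locking elided):

	#include <stdbool.h>
	#include <stddef.h>

	struct node { struct node *next; };

	static struct node *pending_list;	/* staging list, like optimizing_list */
	static bool work_pending;

	/* Stand-in for schedule_delayed_work(): arm the worker once. */
	static void schedule_worker(void)
	{
		work_pending = true;
	}

	static void queue_for_optimization(struct node *n)
	{
		n->next = pending_list;
		pending_list = n;
		if (!work_pending)	/* batch: at most one worker in flight */
			schedule_worker();
	}

	static void worker(void)	/* runs once the delay expires */
	{
		while (pending_list) {
			struct node *n = pending_list;

			pending_list = n->next;
			/* arch_optimize_kprobe() would patch n here */
		}
		work_pending = false;
	}
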
+
+/* Unoptimize a kprobe if p is optimized */
+static __kprobes void unoptimize_kprobe(struct kprobe *p)
+{
+ struct optimized_kprobe *op;
+
+ if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) {
+ op = container_of(p, struct optimized_kprobe, kp);
+ if (!list_empty(&op->list))
+ /* Dequeue from the optimization queue */
+ list_del_init(&op->list);
+ else
+ /* Replace jump with break */
+ arch_unoptimize_kprobe(op);
+ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+ }
+}
+
+/* Remove optimized instructions */
+static void __kprobes kill_optimized_kprobe(struct kprobe *p)
+{
+ struct optimized_kprobe *op;
+
+ op = container_of(p, struct optimized_kprobe, kp);
+ if (!list_empty(&op->list)) {
+ /* Dequeue from the optimization queue */
+ list_del_init(&op->list);
+ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+ }
+ /* Don't unoptimize, because the target code will be freed. */
+ arch_remove_optimized_kprobe(op);
+}
+
+/* Try to prepare optimized instructions */
+static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
+{
+ struct optimized_kprobe *op;
+
+ op = container_of(p, struct optimized_kprobe, kp);
+ arch_prepare_optimized_kprobe(op);
+}
+
+/* Free optimized instructions and optimized_kprobe */
+static __kprobes void free_aggr_kprobe(struct kprobe *p)
+{
+ struct optimized_kprobe *op;
+
+ op = container_of(p, struct optimized_kprobe, kp);
+ arch_remove_optimized_kprobe(op);
+ kfree(op);
+}
+
+/* Allocate new optimized_kprobe and try to prepare optimized instructions */
+static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
+{
+ struct optimized_kprobe *op;
+
+ op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
+ if (!op)
+ return NULL;
+
+ INIT_LIST_HEAD(&op->list);
+ op->kp.addr = p->addr;
+ arch_prepare_optimized_kprobe(op);
+
+ return &op->kp;
+}
+
+static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
+
+/*
+ * Prepare an optimized_kprobe and optimize it
+ * NOTE: p must be a normal registered kprobe
+ */
+static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
+{
+ struct kprobe *ap;
+ struct optimized_kprobe *op;
+
+ ap = alloc_aggr_kprobe(p);
+ if (!ap)
+ return;
+
+ op = container_of(ap, struct optimized_kprobe, kp);
+ if (!arch_prepared_optinsn(&op->optinsn)) {
+ /* If setting up the optimization failed, fall back to a normal kprobe */
+ free_aggr_kprobe(ap);
+ return;
+ }
+
+ init_aggr_kprobe(ap, p);
+ optimize_kprobe(ap);
+}
+
+#ifdef CONFIG_SYSCTL
+static void __kprobes optimize_all_kprobes(void)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct kprobe *p;
+ unsigned int i;
+
+ /* If optimization is already allowed, just return */
+ if (kprobes_allow_optimization)
+ return;
+
+ kprobes_allow_optimization = true;
+ mutex_lock(&text_mutex);
+ for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ head = &kprobe_table[i];
+ hlist_for_each_entry_rcu(p, node, head, hlist)
+ if (!kprobe_disabled(p))
+ optimize_kprobe(p);
+ }
+ mutex_unlock(&text_mutex);
+ printk(KERN_INFO "Kprobes globally optimized\n");
+}
+
+static void __kprobes unoptimize_all_kprobes(void)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct kprobe *p;
+ unsigned int i;
+
+ /* If optimization is already prohibited, just return */
+ if (!kprobes_allow_optimization)
+ return;
+
+ kprobes_allow_optimization = false;
+ printk(KERN_INFO "Kprobes globally unoptimized\n");
+ get_online_cpus(); /* For avoiding text_mutex deadlock */
+ mutex_lock(&text_mutex);
+ for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ head = &kprobe_table[i];
+ hlist_for_each_entry_rcu(p, node, head, hlist) {
+ if (!kprobe_disabled(p))
+ unoptimize_kprobe(p);
+ }
+ }
+
+ mutex_unlock(&text_mutex);
+ put_online_cpus();
+ /* Allow all currently running kprobes to complete */
+ synchronize_sched();
+}
+
+int sysctl_kprobes_optimization;
+int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length,
+ loff_t *ppos)
+{
+ int ret;
+
+ mutex_lock(&kprobe_mutex);
+ sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
+ ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+ if (sysctl_kprobes_optimization)
+ optimize_all_kprobes();
+ else
+ unoptimize_all_kprobes();
+ mutex_unlock(&kprobe_mutex);
+
+ return ret;
+}
+#endif /* CONFIG_SYSCTL */
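
The handler above follows the standard pattern for a boolean sysctl knob: snapshot the internal flag into the int the generic parser operates on, let proc_dointvec_minmax() do the user-buffer I/O and 0..1 clamping (via extra1/extra2, wired up in the kernel/sysctl.c hunk further down), then apply the result. A userspace caricature of that snapshot/parse/apply flow (parse_int() is a stand-in for proc_dointvec_minmax(), not a real API):

	#include <stdbool.h>
	#include <stdlib.h>

	static bool allow_optimization = true;	/* internal state */
	static int sysctl_shadow;		/* what the parser reads/writes */

	/* Stand-in parser: clamp a written value to the range 0..1. */
	static int parse_int(int *val, bool write, const char *buf)
	{
		if (write) {
			int v = atoi(buf);

			if (v < 0 || v > 1)
				return -1;	/* -EINVAL in the kernel */
			*val = v;
		}
		return 0;
	}

	static int knob_handler(bool write, const char *buf)
	{
		int ret;

		sysctl_shadow = allow_optimization ? 1 : 0;	/* snapshot */
		ret = parse_int(&sysctl_shadow, write, buf);	/* parse/clamp */
		if (ret == 0)
			allow_optimization = sysctl_shadow;	/* apply */
		return ret;
	}
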
+
+static void __kprobes __arm_kprobe(struct kprobe *p)
+{
+ struct kprobe *old_p;
+
+ /* Check collision with other optimized kprobes */
+ old_p = get_optimized_kprobe((unsigned long)p->addr);
+ if (unlikely(old_p))
+ unoptimize_kprobe(old_p); /* Fallback to unoptimized kprobe */
+
+ arch_arm_kprobe(p);
+ optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */
+}
+
+static void __kprobes __disarm_kprobe(struct kprobe *p)
+{
+ struct kprobe *old_p;
+
+ unoptimize_kprobe(p); /* Try to unoptimize */
+ arch_disarm_kprobe(p);
+
+ /* If another kprobe was blocked, optimize it. */
+ old_p = get_optimized_kprobe((unsigned long)p->addr);
+ if (unlikely(old_p))
+ optimize_kprobe(old_p);
+}
+
+#else /* !CONFIG_OPTPROBES */
+
+#define optimize_kprobe(p) do {} while (0)
+#define unoptimize_kprobe(p) do {} while (0)
+#define kill_optimized_kprobe(p) do {} while (0)
+#define prepare_optimized_kprobe(p) do {} while (0)
+#define try_to_optimize_kprobe(p) do {} while (0)
+#define __arm_kprobe(p) arch_arm_kprobe(p)
+#define __disarm_kprobe(p) arch_disarm_kprobe(p)
+
+static __kprobes void free_aggr_kprobe(struct kprobe *p)
+{
+ kfree(p);
+}
+
+static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
+{
+ return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+}
+#endif /* CONFIG_OPTPROBES */
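
The !CONFIG_OPTPROBES stubs use the usual do { } while (0) trick so that each compiled-out call remains a single statement and stays safe in unbraced if/else bodies. The technique in miniature:

	#include <stdio.h>

	#define FEATURE_ENABLED 0

	#if FEATURE_ENABLED
	static void optimize(int *x) { *x *= 2; }
	#else
	/* do-while(0) keeps the no-op a single statement, safe after `if`. */
	#define optimize(x) do { } while (0)
	#endif

	int main(void)
	{
		int v = 21;

		if (v)
			optimize(&v);	/* compiles either way, no dangling-else trap */
		printf("%d\n", v);	/* prints 21 when the feature is compiled out */
		return 0;
	}
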
+
/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
+ /*
+ * Here, since __arm_kprobe() doesn't use stop_machine(),
+ * this doesn't cause deadlock on text_mutex. So, we don't
+ * need get_online_cpus().
+ */
mutex_lock(&text_mutex);
- arch_arm_kprobe(kp);
+ __arm_kprobe(kp);
mutex_unlock(&text_mutex);
}
/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
+ get_online_cpus(); /* To avoid a text_mutex deadlock */
mutex_lock(&text_mutex);
- arch_disarm_kprobe(kp);
+ __disarm_kprobe(kp);
mutex_unlock(&text_mutex);
+ put_online_cpus();
}
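
Every path that may unoptimize now takes get_online_cpus() before text_mutex, so the two locks are always acquired in one global order and the hotplug-versus-stop_machine deadlock described in kprobe_optimizer() cannot form. The consistent-ordering discipline, reduced to two pthread mutexes:

	#include <pthread.h>

	static pthread_mutex_t cpu_hotplug = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t text_lock   = PTHREAD_MUTEX_INITIALIZER;

	/* Every call site takes the locks in this order, never the reverse. */
	static void patch_text(void (*patch)(void))
	{
		pthread_mutex_lock(&cpu_hotplug);	/* get_online_cpus() analogue */
		pthread_mutex_lock(&text_lock);		/* text_mutex analogue */
		patch();
		pthread_mutex_unlock(&text_lock);
		pthread_mutex_unlock(&cpu_hotplug);	/* put_online_cpus() analogue */
	}
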
/*
@@ -369,7 +802,7 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
struct kprobe *kp;
- if (p->pre_handler != aggr_pre_handler) {
+ if (!kprobe_aggrprobe(p)) {
p->nmissed++;
} else {
list_for_each_entry_rcu(kp, &p->list, list)
@@ -493,21 +926,16 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
}
/*
- * Keep all fields in the kprobe consistent
- */
-static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
-{
- memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
- memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
-}
-
-/*
* Add the new probe to ap->list. Fail if this is the
* second jprobe at the address - two jprobes can't coexist
*/
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
+
+ if (p->break_handler || p->post_handler)
+ unoptimize_kprobe(ap); /* Fall back to normal kprobe */
+
if (p->break_handler) {
if (ap->break_handler)
return -EEXIST;
@@ -522,7 +950,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
ap->flags &= ~KPROBE_FLAG_DISABLED;
if (!kprobes_all_disarmed)
/* Arm the breakpoint again. */
- arm_kprobe(ap);
+ __arm_kprobe(ap);
}
return 0;
}
@@ -531,12 +959,13 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
* Fill in the required fields of the "manager kprobe". Replace the
* earlier kprobe in the hlist with the manager kprobe
*/
-static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
+static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
+ /* Copy p's insn slot to ap */
copy_kprobe(p, ap);
flush_insn_slot(ap);
ap->addr = p->addr;
- ap->flags = p->flags;
+ ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
ap->pre_handler = aggr_pre_handler;
ap->fault_handler = aggr_fault_handler;
/* We don't care the kprobe which has gone. */
@@ -546,8 +975,9 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
ap->break_handler = aggr_break_handler;
INIT_LIST_HEAD(&ap->list);
- list_add_rcu(&p->list, &ap->list);
+ INIT_HLIST_NODE(&ap->hlist);
+ list_add_rcu(&p->list, &ap->list);
hlist_replace_rcu(&p->hlist, &ap->hlist);
}
@@ -561,12 +991,12 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
int ret = 0;
struct kprobe *ap = old_p;
- if (old_p->pre_handler != aggr_pre_handler) {
- /* If old_p is not an aggr_probe, create new aggr_kprobe. */
- ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+ if (!kprobe_aggrprobe(old_p)) {
+ /* If old_p is not an aggr_kprobe, create new aggr_kprobe. */
+ ap = alloc_aggr_kprobe(old_p);
if (!ap)
return -ENOMEM;
- add_aggr_kprobe(ap, old_p);
+ init_aggr_kprobe(ap, old_p);
}
if (kprobe_gone(ap)) {
@@ -585,6 +1015,9 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
*/
return ret;
+ /* Prepare optimized instructions if possible. */
+ prepare_optimized_kprobe(ap);
+
/*
* Clear gone flag to prevent allocating new slot again, and
* set disabled flag because it is not armed yet.
@@ -593,6 +1026,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
| KPROBE_FLAG_DISABLED;
}
+ /* Copy ap's insn slot to p */
copy_kprobe(ap, p);
return add_new_kprobe(ap, p);
}
@@ -743,27 +1177,34 @@ int __kprobes register_kprobe(struct kprobe *p)
p->nmissed = 0;
INIT_LIST_HEAD(&p->list);
mutex_lock(&kprobe_mutex);
+
+ get_online_cpus(); /* To avoid a text_mutex deadlock. */
+ mutex_lock(&text_mutex);
+
old_p = get_kprobe(p->addr);
if (old_p) {
+ /* This may unoptimize old_p, so text_mutex must be held. */
ret = register_aggr_kprobe(old_p, p);
goto out;
}
- mutex_lock(&text_mutex);
ret = arch_prepare_kprobe(p);
if (ret)
- goto out_unlock_text;
+ goto out;
INIT_HLIST_NODE(&p->hlist);
hlist_add_head_rcu(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
if (!kprobes_all_disarmed && !kprobe_disabled(p))
- arch_arm_kprobe(p);
+ __arm_kprobe(p);
+
+ /* Try to optimize kprobe */
+ try_to_optimize_kprobe(p);
-out_unlock_text:
- mutex_unlock(&text_mutex);
out:
+ mutex_unlock(&text_mutex);
+ put_online_cpus();
mutex_unlock(&kprobe_mutex);
if (probed_mod)
@@ -785,7 +1226,7 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
return -EINVAL;
if (old_p == p ||
- (old_p->pre_handler == aggr_pre_handler &&
+ (kprobe_aggrprobe(old_p) &&
list_is_singular(&old_p->list))) {
/*
* Only probe on the hash list. Disarm only if kprobes are
@@ -793,7 +1234,7 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
* already have been removed. We save on flushing icache.
*/
if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
- disarm_kprobe(p);
+ disarm_kprobe(old_p);
hlist_del_rcu(&old_p->hlist);
} else {
if (p->break_handler && !kprobe_gone(p))
@@ -809,8 +1250,13 @@ noclean:
list_del_rcu(&p->list);
if (!kprobe_disabled(old_p)) {
try_to_disable_aggr_kprobe(old_p);
- if (!kprobes_all_disarmed && kprobe_disabled(old_p))
- disarm_kprobe(old_p);
+ if (!kprobes_all_disarmed) {
+ if (kprobe_disabled(old_p))
+ disarm_kprobe(old_p);
+ else
+ /* Try to optimize this probe again */
+ optimize_kprobe(old_p);
+ }
}
}
return 0;
@@ -827,7 +1273,7 @@ static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
old_p = list_entry(p->list.next, struct kprobe, list);
list_del(&p->list);
arch_remove_kprobe(old_p);
- kfree(old_p);
+ free_aggr_kprobe(old_p);
}
}
@@ -1123,7 +1569,7 @@ static void __kprobes kill_kprobe(struct kprobe *p)
struct kprobe *kp;
p->flags |= KPROBE_FLAG_GONE;
- if (p->pre_handler == aggr_pre_handler) {
+ if (kprobe_aggrprobe(p)) {
/*
* If this is an aggr_kprobe, we have to list all the
* chained probes and mark them GONE.
@@ -1132,6 +1578,7 @@ static void __kprobes kill_kprobe(struct kprobe *p)
kp->flags |= KPROBE_FLAG_GONE;
p->post_handler = NULL;
p->break_handler = NULL;
+ kill_optimized_kprobe(p);
}
/*
* Here, we can remove insn_slot safely, because no thread calls
@@ -1241,6 +1688,15 @@ static int __init init_kprobes(void)
}
}
+#if defined(CONFIG_OPTPROBES)
+#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+ /* Init kprobe_optinsn_slots */
+ kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
+#endif
+ /* By default, kprobes can be optimized */
+ kprobes_allow_optimization = true;
+#endif
+
/* By default, kprobes are armed */
kprobes_all_disarmed = false;
@@ -1259,7 +1715,7 @@ static int __init init_kprobes(void)
#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
- const char *sym, int offset,char *modname)
+ const char *sym, int offset, char *modname, struct kprobe *pp)
{
char *kprobe_type;
@@ -1269,19 +1725,21 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
kprobe_type = "j";
else
kprobe_type = "k";
+
if (sym)
- seq_printf(pi, "%p %s %s+0x%x %s %s%s\n",
+ seq_printf(pi, "%p %s %s+0x%x %s ",
p->addr, kprobe_type, sym, offset,
- (modname ? modname : " "),
- (kprobe_gone(p) ? "[GONE]" : ""),
- ((kprobe_disabled(p) && !kprobe_gone(p)) ?
- "[DISABLED]" : ""));
+ (modname ? modname : " "));
else
- seq_printf(pi, "%p %s %p %s%s\n",
- p->addr, kprobe_type, p->addr,
- (kprobe_gone(p) ? "[GONE]" : ""),
- ((kprobe_disabled(p) && !kprobe_gone(p)) ?
- "[DISABLED]" : ""));
+ seq_printf(pi, "%p %s %p ",
+ p->addr, kprobe_type, p->addr);
+
+ if (!pp)
+ pp = p;
+ seq_printf(pi, "%s%s%s\n",
+ (kprobe_gone(p) ? "[GONE]" : ""),
+ ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
+ (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
}
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -1317,11 +1775,11 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
hlist_for_each_entry_rcu(p, node, head, hlist) {
sym = kallsyms_lookup((unsigned long)p->addr, NULL,
&offset, &modname, namebuf);
- if (p->pre_handler == aggr_pre_handler) {
+ if (kprobe_aggrprobe(p)) {
list_for_each_entry_rcu(kp, &p->list, list)
- report_probe(pi, kp, sym, offset, modname);
+ report_probe(pi, kp, sym, offset, modname, p);
} else
- report_probe(pi, p, sym, offset, modname);
+ report_probe(pi, p, sym, offset, modname, NULL);
}
preempt_enable();
return 0;
@@ -1399,12 +1857,13 @@ int __kprobes enable_kprobe(struct kprobe *kp)
goto out;
}
- if (!kprobes_all_disarmed && kprobe_disabled(p))
- arm_kprobe(p);
-
- p->flags &= ~KPROBE_FLAG_DISABLED;
if (p != kp)
kp->flags &= ~KPROBE_FLAG_DISABLED;
+
+ if (!kprobes_all_disarmed && kprobe_disabled(p)) {
+ p->flags &= ~KPROBE_FLAG_DISABLED;
+ arm_kprobe(p);
+ }
out:
mutex_unlock(&kprobe_mutex);
return ret;
@@ -1424,12 +1883,13 @@ static void __kprobes arm_all_kprobes(void)
if (!kprobes_all_disarmed)
goto already_enabled;
+ /* Arming a kprobe queues its optimization rather than doing it here */
mutex_lock(&text_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist)
if (!kprobe_disabled(p))
- arch_arm_kprobe(p);
+ __arm_kprobe(p);
}
mutex_unlock(&text_mutex);
@@ -1456,16 +1916,23 @@ static void __kprobes disarm_all_kprobes(void)
kprobes_all_disarmed = true;
printk(KERN_INFO "Kprobes globally disabled\n");
+
+ /*
+ * Here we call get_online_cpus() to avoid a text_mutex deadlock,
+ * because disarming may also unoptimize kprobes.
+ */
+ get_online_cpus();
mutex_lock(&text_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist) {
if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
- arch_disarm_kprobe(p);
+ __disarm_kprobe(p);
}
}
mutex_unlock(&text_mutex);
+ put_online_cpus();
mutex_unlock(&kprobe_mutex);
/* Allow all currently running kprobes to complete */
synchronize_sched();
diff --git a/kernel/module.c b/kernel/module.c
index f82386b..e5538d5 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -474,9 +474,10 @@ static void module_unload_init(struct module *mod)
INIT_LIST_HEAD(&mod->modules_which_use_me);
for_each_possible_cpu(cpu)
- local_set(__module_ref_addr(mod, cpu), 0);
+ per_cpu_ptr(mod->refptr, cpu)->count = 0;
+
/* Hold reference count during initialization. */
- local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
+ __this_cpu_write(mod->refptr->count, 1);
/* Backwards compatibility macros put refcount during init. */
mod->waiter = current;
}
@@ -619,7 +620,7 @@ unsigned int module_refcount(struct module *mod)
int cpu;
for_each_possible_cpu(cpu)
- total += local_read(__module_ref_addr(mod, cpu));
+ total += per_cpu_ptr(mod->refptr, cpu)->count;
return total;
}
EXPORT_SYMBOL(module_refcount);
@@ -796,14 +797,15 @@ static struct module_attribute refcnt = {
void module_put(struct module *module)
{
if (module) {
- unsigned int cpu = get_cpu();
- local_dec(__module_ref_addr(module, cpu));
+ preempt_disable();
+ __this_cpu_dec(module->refptr->count);
+
trace_module_put(module, _RET_IP_,
- local_read(__module_ref_addr(module, cpu)));
+ __this_cpu_read(module->refptr->count));
/* Maybe they're waiting for us to drop reference? */
if (unlikely(!module_is_live(module)))
wake_up_process(module->waiter);
- put_cpu();
+ preempt_enable();
}
}
EXPORT_SYMBOL(module_put);
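
The module refcount now lives in one counter per possible CPU: module_put() touches only the local CPU's counter with preemption disabled, while the rare module_refcount() read sums them all. A rough userspace analogue of that scalable-counter layout (a fixed CPU count and relaxed atomics are simplifying assumptions of the sketch):

	#include <stdatomic.h>

	#define NR_CPUS 4

	static atomic_long refcnt[NR_CPUS];	/* one counter per "cpu" */

	static void ref_get(int cpu)
	{
		atomic_fetch_add_explicit(&refcnt[cpu], 1, memory_order_relaxed);
	}

	static void ref_put(int cpu)
	{
		atomic_fetch_sub_explicit(&refcnt[cpu], 1, memory_order_relaxed);
	}

	/* Slow path: the total reference count is the sum over all CPUs. */
	static long ref_count(void)
	{
		long total = 0;

		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			total += atomic_load_explicit(&refcnt[cpu],
						      memory_order_relaxed);
		return total;
	}
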
@@ -1397,9 +1399,9 @@ static void free_module(struct module *mod)
kfree(mod->args);
if (mod->percpu)
percpu_modfree(mod->percpu);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+#if defined(CONFIG_MODULE_UNLOAD)
if (mod->refptr)
- percpu_modfree(mod->refptr);
+ free_percpu(mod->refptr);
#endif
/* Free lock-classes: */
lockdep_free_key_range(mod->module_core, mod->core_size);
@@ -2162,9 +2164,8 @@ static noinline struct module *load_module(void __user *umod,
mod = (void *)sechdrs[modindex].sh_addr;
kmemleak_load_module(mod, hdr, sechdrs, secstrings);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
- mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
- mod->name);
+#if defined(CONFIG_MODULE_UNLOAD)
+ mod->refptr = alloc_percpu(struct module_ref);
if (!mod->refptr) {
err = -ENOMEM;
goto free_init;
@@ -2396,8 +2397,8 @@ static noinline struct module *load_module(void __user *umod,
kobject_put(&mod->mkobj.kobj);
free_unload:
module_unload_free(mod);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
- percpu_modfree(mod->refptr);
+#if defined(CONFIG_MODULE_UNLOAD)
+ free_percpu(mod->refptr);
free_init:
#endif
module_free(mod, mod->module_init);
diff --git a/kernel/padata.c b/kernel/padata.c
index 6f9bcb8..93caf65 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -642,6 +642,9 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask,
if (!pd)
goto err_free_inst;
+ if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL))
+ goto err_free_pd;
+
rcu_assign_pointer(pinst->pd, pd);
pinst->wq = wq;
@@ -654,12 +657,14 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask,
pinst->cpu_notifier.priority = 0;
err = register_hotcpu_notifier(&pinst->cpu_notifier);
if (err)
- goto err_free_pd;
+ goto err_free_cpumask;
mutex_init(&pinst->lock);
return pinst;
+err_free_cpumask:
+ free_cpumask_var(pinst->cpumask);
err_free_pd:
padata_free_pd(pd);
err_free_inst:
@@ -685,6 +690,7 @@ void padata_free(struct padata_instance *pinst)
unregister_hotcpu_notifier(&pinst->cpu_notifier);
padata_free_pd(pinst->pd);
+ free_cpumask_var(pinst->cpumask);
kfree(pinst);
}
EXPORT_SYMBOL(padata_free);
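
The padata change is a textbook goto-unwind repair: the newly added cpumask allocation gets its own error label, positioned so that every later failure releases it, with resources freed in reverse order of acquisition. The general shape of that error-handling style:

	#include <stdlib.h>

	struct ctx { void *a, *b, *c; };

	static struct ctx *ctx_alloc(void)
	{
		struct ctx *x = malloc(sizeof(*x));

		if (!x)
			return NULL;
		x->a = malloc(16);
		if (!x->a)
			goto err_free_x;
		x->b = malloc(16);
		if (!x->b)
			goto err_free_a;	/* each resource gets a label */
		x->c = malloc(16);
		if (!x->c)
			goto err_free_b;	/* later failures unwind past it */
		return x;

	err_free_b:
		free(x->b);
	err_free_a:
		free(x->a);
	err_free_x:
		free(x);
		return NULL;
	}
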
diff --git a/kernel/printk.c b/kernel/printk.c
index 1751c45..4067412 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -35,6 +35,7 @@
#include <linux/kexec.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
+#include <linux/syslog.h>
#include <asm/uaccess.h>
@@ -258,38 +259,23 @@ static inline void boot_delay_msec(void)
}
#endif
-/*
- * Commands to do_syslog:
- *
- * 0 -- Close the log. Currently a NOP.
- * 1 -- Open the log. Currently a NOP.
- * 2 -- Read from the log.
- * 3 -- Read all messages remaining in the ring buffer.
- * 4 -- Read and clear all messages remaining in the ring buffer
- * 5 -- Clear ring buffer.
- * 6 -- Disable printk's to console
- * 7 -- Enable printk's to console
- * 8 -- Set level of messages printed to console
- * 9 -- Return number of unread characters in the log buffer
- * 10 -- Return size of the log buffer
- */
-int do_syslog(int type, char __user *buf, int len)
+int do_syslog(int type, char __user *buf, int len, bool from_file)
{
unsigned i, j, limit, count;
int do_clear = 0;
char c;
int error = 0;
- error = security_syslog(type);
+ error = security_syslog(type, from_file);
if (error)
return error;
switch (type) {
- case 0: /* Close log */
+ case SYSLOG_ACTION_CLOSE: /* Close log */
break;
- case 1: /* Open log */
+ case SYSLOG_ACTION_OPEN: /* Open log */
break;
- case 2: /* Read from log */
+ case SYSLOG_ACTION_READ: /* Read from log */
error = -EINVAL;
if (!buf || len < 0)
goto out;
@@ -320,10 +306,12 @@ int do_syslog(int type, char __user *buf, int len)
if (!error)
error = i;
break;
- case 4: /* Read/clear last kernel messages */
+ /* Read/clear last kernel messages */
+ case SYSLOG_ACTION_READ_CLEAR:
do_clear = 1;
/* FALL THRU */
- case 3: /* Read last kernel messages */
+ /* Read last kernel messages */
+ case SYSLOG_ACTION_READ_ALL:
error = -EINVAL;
if (!buf || len < 0)
goto out;
@@ -376,21 +364,25 @@ int do_syslog(int type, char __user *buf, int len)
}
}
break;
- case 5: /* Clear ring buffer */
+ /* Clear ring buffer */
+ case SYSLOG_ACTION_CLEAR:
logged_chars = 0;
break;
- case 6: /* Disable logging to console */
+ /* Disable logging to console */
+ case SYSLOG_ACTION_CONSOLE_OFF:
if (saved_console_loglevel == -1)
saved_console_loglevel = console_loglevel;
console_loglevel = minimum_console_loglevel;
break;
- case 7: /* Enable logging to console */
+ /* Enable logging to console */
+ case SYSLOG_ACTION_CONSOLE_ON:
if (saved_console_loglevel != -1) {
console_loglevel = saved_console_loglevel;
saved_console_loglevel = -1;
}
break;
- case 8: /* Set level of messages printed to console */
+ /* Set level of messages printed to console */
+ case SYSLOG_ACTION_CONSOLE_LEVEL:
error = -EINVAL;
if (len < 1 || len > 8)
goto out;
@@ -401,10 +393,12 @@ int do_syslog(int type, char __user *buf, int len)
saved_console_loglevel = -1;
error = 0;
break;
- case 9: /* Number of chars in the log buffer */
+ /* Number of chars in the log buffer */
+ case SYSLOG_ACTION_SIZE_UNREAD:
error = log_end - log_start;
break;
- case 10: /* Size of the log buffer */
+ /* Size of the log buffer */
+ case SYSLOG_ACTION_SIZE_BUFFER:
error = log_buf_len;
break;
default:
@@ -417,7 +411,7 @@ out:
SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
{
- return do_syslog(type, buf, len);
+ return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
}
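
The magic case numbers move into named constants from the new <linux/syslog.h>, and callers now also say whether the request came from the syscall or from /proc/kmsg (SYSLOG_FROM_CALL above). Judging from the numeric cases replaced in this hunk, the action constants line up as follows (a reconstruction for reference; the header's exact layout may differ):

	enum syslog_action {
		SYSLOG_ACTION_CLOSE		= 0,
		SYSLOG_ACTION_OPEN		= 1,
		SYSLOG_ACTION_READ		= 2,
		SYSLOG_ACTION_READ_ALL		= 3,
		SYSLOG_ACTION_READ_CLEAR	= 4,
		SYSLOG_ACTION_CLEAR		= 5,
		SYSLOG_ACTION_CONSOLE_OFF	= 6,
		SYSLOG_ACTION_CONSOLE_ON	= 7,
		SYSLOG_ACTION_CONSOLE_LEVEL	= 8,
		SYSLOG_ACTION_SIZE_UNREAD	= 9,
		SYSLOG_ACTION_SIZE_BUFFER	= 10,
	};
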
/*
diff --git a/kernel/range.c b/kernel/range.c
new file mode 100644
index 0000000..74e2e61
--- /dev/null
+++ b/kernel/range.c
@@ -0,0 +1,163 @@
+/*
+ * Range add and subtract
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sort.h>
+
+#include <linux/range.h>
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
+{
+ if (start >= end)
+ return nr_range;
+
+ /* Out of slots: */
+ if (nr_range >= az)
+ return nr_range;
+
+ range[nr_range].start = start;
+ range[nr_range].end = end;
+
+ nr_range++;
+
+ return nr_range;
+}
+
+int add_range_with_merge(struct range *range, int az, int nr_range,
+ u64 start, u64 end)
+{
+ int i;
+
+ if (start >= end)
+ return nr_range;
+
+ /* Try to merge it with an existing range: */
+ for (i = 0; i < nr_range; i++) {
+ u64 final_start, final_end;
+ u64 common_start, common_end;
+
+ if (!range[i].end)
+ continue;
+
+ common_start = max(range[i].start, start);
+ common_end = min(range[i].end, end);
+ if (common_start > common_end)
+ continue;
+
+ final_start = min(range[i].start, start);
+ final_end = max(range[i].end, end);
+
+ range[i].start = final_start;
+ range[i].end = final_end;
+ return nr_range;
+ }
+
+ /* Need to add it: */
+ return add_range(range, az, nr_range, start, end);
+}
+
+void subtract_range(struct range *range, int az, u64 start, u64 end)
+{
+ int i, j;
+
+ if (start >= end)
+ return;
+
+ for (j = 0; j < az; j++) {
+ if (!range[j].end)
+ continue;
+
+ if (start <= range[j].start && end >= range[j].end) {
+ range[j].start = 0;
+ range[j].end = 0;
+ continue;
+ }
+
+ if (start <= range[j].start && end < range[j].end &&
+ range[j].start < end) {
+ range[j].start = end;
+ continue;
+ }
+
+
+ if (start > range[j].start && end >= range[j].end &&
+ range[j].end > start) {
+ range[j].end = start;
+ continue;
+ }
+
+ if (start > range[j].start && end < range[j].end) {
+ /* Find a spare slot for the split-off remainder: */
+ for (i = 0; i < az; i++) {
+ if (range[i].end == 0)
+ break;
+ }
+ if (i < az) {
+ range[i].end = range[j].end;
+ range[i].start = end;
+ } else {
+ printk(KERN_ERR "out of slots in ranges\n");
+ }
+ range[j].end = start;
+ continue;
+ }
+ }
+}
+
+static int cmp_range(const void *x1, const void *x2)
+{
+ const struct range *r1 = x1;
+ const struct range *r2 = x2;
+ s64 start1, start2;
+
+ start1 = r1->start;
+ start2 = r2->start;
+
+ return start1 - start2;
+}
+
+int clean_sort_range(struct range *range, int az)
+{
+ int i, j, k = az - 1, nr_range = 0;
+
+ for (i = 0; i < k; i++) {
+ if (range[i].end)
+ continue;
+ for (j = k; j > i; j--) {
+ if (range[j].end) {
+ k = j;
+ break;
+ }
+ }
+ if (j == i)
+ break;
+ range[i].start = range[k].start;
+ range[i].end = range[k].end;
+ range[k].start = 0;
+ range[k].end = 0;
+ k--;
+ }
+ /* count the used ranges */
+ for (i = 0; i < az; i++) {
+ if (!range[i].end) {
+ nr_range = i;
+ break;
+ }
+ }
+
+ /* sort them */
+ sort(range, nr_range, sizeof(struct range), cmp_range, NULL);
+
+ return nr_range;
+}
+
+void sort_range(struct range *range, int nr_range)
+{
+ /* sort them */
+ sort(range, nr_range, sizeof(struct range), cmp_range, NULL);
+}
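
Taken together, kernel/range.c gives callers a small interval calculus: add with merge, subtract (which may split a range into two), then compact and sort. One caveat worth noting: cmp_range() returns the s64 difference truncated to int, which is fine for the resource ranges this is used on but not for arbitrary 64-bit starts. A hypothetical caller (array size and addresses invented for illustration):

	#include <linux/range.h>

	#define AZ 8

	static int range_demo(void)
	{
		struct range r[AZ] = { };
		int nr = 0;

		nr = add_range_with_merge(r, AZ, nr, 0x1000, 0x3000);
		nr = add_range_with_merge(r, AZ, nr, 0x2000, 0x4000); /* merges */
		subtract_range(r, AZ, 0x1800, 0x2800);	/* splits off a hole */
		nr = clean_sort_range(r, AZ);		/* compact and sort */

		/* nr == 2: 0x1000-0x1800 and 0x2800-0x4000 */
		return nr;
	}
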
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 258cdf0..58df55b 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -818,13 +818,13 @@ static void rcu_torture_timer(unsigned long unused)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+ __this_cpu_inc(rcu_torture_count[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
- __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+ __this_cpu_inc(rcu_torture_batch[completed]);
preempt_enable();
cur_ops->readunlock(idx);
}
@@ -877,13 +877,13 @@ rcu_torture_reader(void *arg)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+ __this_cpu_inc(rcu_torture_count[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
- __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+ __this_cpu_inc(rcu_torture_batch[completed]);
preempt_enable();
cur_ops->readunlock(idx);
schedule();
diff --git a/kernel/resource.c b/kernel/resource.c
index 4e9d87f..2d5be5d 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -304,7 +304,7 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
void *arg, int (*func)(unsigned long, unsigned long, void *))
{
struct resource res;
- unsigned long pfn, len;
+ unsigned long pfn, end_pfn;
u64 orig_end;
int ret = -1;
@@ -314,9 +314,10 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
orig_end = res.end;
while ((res.start < res.end) &&
(find_next_system_ram(&res, "System RAM") >= 0)) {
- pfn = (unsigned long)(res.start >> PAGE_SHIFT);
- len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT);
- ret = (*func)(pfn, len, arg);
+ pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ end_pfn = (res.end + 1) >> PAGE_SHIFT;
+ if (end_pfn > pfn)
+ ret = (*func)(pfn, end_pfn - pfn, arg);
if (ret)
break;
res.start = res.end + 1;
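
The walk_system_ram_range() fix rounds the resource start up and the end down to page boundaries, and only invokes the callback when at least one whole page remains; the old code could hand out a pfn/length pair for a partial page. The rounding on its own (a PAGE_SHIFT of 12 is assumed for the sketch):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	int main(void)
	{
		unsigned long long start = 0x1080;	/* mid-page */
		unsigned long long end   = 0x3fff;	/* inclusive, as in struct resource */

		unsigned long pfn     = (start + PAGE_SIZE - 1) >> PAGE_SHIFT; /* round up */
		unsigned long end_pfn = (end + 1) >> PAGE_SHIFT;	       /* round down */

		if (end_pfn > pfn)	/* prints "pfn 2, pages 2": 0x2000-0x3fff */
			printf("pfn %lu, pages %lu\n", pfn, end_pfn - pfn);
		return 0;
	}
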
diff --git a/kernel/sched.c b/kernel/sched.c
index 6a212c9..abb36b1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1521,7 +1521,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
#ifdef CONFIG_FAIR_GROUP_SCHED
-static __read_mostly unsigned long *update_shares_data;
+static __read_mostly unsigned long __percpu *update_shares_data;
static void __set_se_shares(struct sched_entity *se, unsigned long shares);
@@ -8813,7 +8813,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
struct cpuacct {
struct cgroup_subsys_state css;
/* cpuusage holds pointer to a u64-type object on every cpu */
- u64 *cpuusage;
+ u64 __percpu *cpuusage;
struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
struct cpuacct *parent;
};
diff --git a/kernel/signal.c b/kernel/signal.c
index 934ae5e..5bb9baf 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -159,6 +159,10 @@ void recalc_sigpending(void)
/* Given the mask, find the first available signal that should be serviced. */
+#define SYNCHRONOUS_MASK \
+ (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
+ sigmask(SIGTRAP) | sigmask(SIGFPE))
+
int next_signal(struct sigpending *pending, sigset_t *mask)
{
unsigned long i, *s, *m, x;
@@ -166,26 +170,39 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
s = pending->signal.sig;
m = mask->sig;
+
+ /*
+ * Handle the first word specially: it contains the
+ * synchronous signals that need to be dequeued first.
+ */
+ x = *s &~ *m;
+ if (x) {
+ if (x & SYNCHRONOUS_MASK)
+ x &= SYNCHRONOUS_MASK;
+ sig = ffz(~x) + 1;
+ return sig;
+ }
+
switch (_NSIG_WORDS) {
default:
- for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
- if ((x = *s &~ *m) != 0) {
- sig = ffz(~x) + i*_NSIG_BPW + 1;
- break;
- }
+ for (i = 1; i < _NSIG_WORDS; ++i) {
+ x = *++s &~ *++m;
+ if (!x)
+ continue;
+ sig = ffz(~x) + i*_NSIG_BPW + 1;
+ break;
+ }
break;
- case 2: if ((x = s[0] &~ m[0]) != 0)
- sig = 1;
- else if ((x = s[1] &~ m[1]) != 0)
- sig = _NSIG_BPW + 1;
- else
+ case 2:
+ x = s[1] &~ m[1];
+ if (!x)
break;
- sig += ffz(~x);
+ sig = ffz(~x) + _NSIG_BPW + 1;
break;
- case 1: if ((x = *s &~ *m) != 0)
- sig = ffz(~x) + 1;
+ case 1:
+ /* Nothing to do */
break;
}
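
The rewritten next_signal() gives word 0 special treatment: if any pending, unblocked bit overlaps SYNCHRONOUS_MASK, the search is narrowed to those bits first, so fault-style signals (SIGSEGV, SIGBUS, SIGILL, SIGTRAP, SIGFPE) are dequeued before ordinary ones. The mask-then-find-lowest-bit trick in isolation (__builtin_ctzl plays the role of ffz(~x)):

	#include <stdio.h>

	#define BIT(n) (1UL << (n))

	int main(void)
	{
		/* bit i represents signal number i + 1, as in the sigset words */
		unsigned long sync_mask = BIT(11 - 1) | BIT(7 - 1);  /* SIGSEGV, SIGBUS */
		unsigned long pending   = BIT(2 - 1)  | BIT(11 - 1); /* SIGINT, SIGSEGV */
		unsigned long blocked   = 0;

		unsigned long x = pending & ~blocked;

		if (x & sync_mask)
			x &= sync_mask;		/* prefer synchronous signals */

		printf("deliver signal %d\n", __builtin_ctzl(x) + 1); /* 11 = SIGSEGV */
		return 0;
	}
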
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 912823e..9bb9fb1 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -45,7 +45,7 @@ static int refcount;
static struct workqueue_struct *stop_machine_wq;
static struct stop_machine_data active, idle;
static const struct cpumask *active_cpus;
-static void *stop_machine_work;
+static void __percpu *stop_machine_work;
static void set_state(enum stopmachine_state newstate)
{
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 33e7a38..0ef19c6 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -50,6 +50,7 @@
#include <linux/ftrace.h>
#include <linux/slow-work.h>
#include <linux/perf_event.h>
+#include <linux/kprobes.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
@@ -1450,6 +1451,17 @@ static struct ctl_table debug_table[] = {
.proc_handler = proc_dointvec
},
#endif
+#if defined(CONFIG_OPTPROBES)
+ {
+ .procname = "kprobes-optimization",
+ .data = &sysctl_kprobes_optimization,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_kprobes_optimization_handler,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
{ }
};
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 8f5d16e..8cd50d8 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1331,7 +1331,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
ssize_t result;
char *pathname;
int flags;
- int acc_mode, fmode;
+ int acc_mode;
pathname = sysctl_getname(name, nlen, &table);
result = PTR_ERR(pathname);
@@ -1342,15 +1342,12 @@ static ssize_t binary_sysctl(const int *name, int nlen,
if (oldval && oldlen && newval && newlen) {
flags = O_RDWR;
acc_mode = MAY_READ | MAY_WRITE;
- fmode = FMODE_READ | FMODE_WRITE;
} else if (newval && newlen) {
flags = O_WRONLY;
acc_mode = MAY_WRITE;
- fmode = FMODE_WRITE;
} else if (oldval && oldlen) {
flags = O_RDONLY;
acc_mode = MAY_READ;
- fmode = FMODE_READ;
} else {
result = 0;
goto out_putname;
@@ -1361,7 +1358,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
if (result)
goto out_putname;
- result = may_open(&nd.path, acc_mode, fmode);
+ result = may_open(&nd.path, acc_mode, flags);
if (result)
goto out_putpath;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 8c1b2d2..0287f9f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -20,6 +20,7 @@
#include <linux/cpu.h>
#include <linux/fs.h>
+#include <asm/local.h>
#include "trace.h"
/*
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index b2477ca..df74c79 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -8,6 +8,7 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/time.h>
+#include <asm/local.h>
struct rb_page {
u64 ts;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 032c57c..ed01fdb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -92,12 +92,12 @@ DEFINE_PER_CPU(int, ftrace_cpu_disabled);
static inline void ftrace_disable_cpu(void)
{
preempt_disable();
- __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
+ __this_cpu_inc(ftrace_cpu_disabled);
}
static inline void ftrace_enable_cpu(void)
{
- __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
+ __this_cpu_dec(ftrace_cpu_disabled);
preempt_enable();
}
@@ -1166,7 +1166,7 @@ trace_function(struct trace_array *tr,
struct ftrace_entry *entry;
/* If we are reading the ring buffer, don't trace */
- if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+ if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
return;
event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index e998a82..3fc2a57 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -188,7 +188,7 @@ static int __trace_graph_entry(struct trace_array *tr,
struct ring_buffer *buffer = tr->buffer;
struct ftrace_graph_ent_entry *entry;
- if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+ if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
return 0;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -247,7 +247,7 @@ static void __trace_graph_return(struct trace_array *tr,
struct ring_buffer *buffer = tr->buffer;
struct ftrace_graph_ret_entry *entry;
- if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+ if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
return;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
diff --git a/mm/Kconfig b/mm/Kconfig
index d34c2b9..9c61158 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -115,6 +115,10 @@ config SPARSEMEM_EXTREME
config SPARSEMEM_VMEMMAP_ENABLE
bool
+config SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+ def_bool y
+ depends on SPARSEMEM && X86_64
+
config SPARSEMEM_VMEMMAP
bool "Sparse Memory virtual memmap"
depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 7d14868..d7c791e 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -13,6 +13,7 @@
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/kmemleak.h>
+#include <linux/range.h>
#include <asm/bug.h>
#include <asm/io.h>
@@ -32,6 +33,7 @@ unsigned long max_pfn;
unsigned long saved_max_pfn;
#endif
+#ifndef CONFIG_NO_BOOTMEM
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
@@ -142,7 +144,7 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
min_low_pfn = start;
return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
-
+#endif
/*
* free_bootmem_late - free bootmem pages directly to page allocator
* @addr: starting address of the range
@@ -167,6 +169,60 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
}
}
+#ifdef CONFIG_NO_BOOTMEM
+static void __init __free_pages_memory(unsigned long start, unsigned long end)
+{
+ int i;
+ unsigned long start_aligned, end_aligned;
+ int order = ilog2(BITS_PER_LONG);
+
+ start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
+ end_aligned = end & ~(BITS_PER_LONG - 1);
+
+ if (end_aligned <= start_aligned) {
+#if 1
+ printk(KERN_DEBUG " %lx - %lx\n", start, end);
+#endif
+ for (i = start; i < end; i++)
+ __free_pages_bootmem(pfn_to_page(i), 0);
+
+ return;
+ }
+
+#if 1
+ printk(KERN_DEBUG " %lx %lx - %lx %lx\n",
+ start, start_aligned, end_aligned, end);
+#endif
+ for (i = start; i < start_aligned; i++)
+ __free_pages_bootmem(pfn_to_page(i), 0);
+
+ for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
+ __free_pages_bootmem(pfn_to_page(i), order);
+
+ for (i = end_aligned; i < end; i++)
+ __free_pages_bootmem(pfn_to_page(i), 0);
+}
+
+unsigned long __init free_all_memory_core_early(int nodeid)
+{
+ int i;
+ u64 start, end;
+ unsigned long count = 0;
+ struct range *range = NULL;
+ int nr_range;
+
+ nr_range = get_free_all_memory_range(&range, nodeid);
+
+ for (i = 0; i < nr_range; i++) {
+ start = range[i].start;
+ end = range[i].end;
+ count += end - start;
+ __free_pages_memory(start, end);
+ }
+
+ return count;
+}
+#else
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
int aligned;
@@ -227,6 +283,7 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
return count;
}
+#endif
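
__free_pages_memory() decomposes [start, end) into an unaligned head freed page by page, a BITS_PER_LONG-aligned middle freed in order-ilog2(BITS_PER_LONG) chunks, and an unaligned tail. The same head/middle/tail split, assuming 64-bit longs (so order 6, i.e. 64 pages per chunk):

	#include <stdio.h>

	#define BITS_PER_LONG 64UL

	static void free_one(unsigned long pfn, int order)
	{
		printf("free pfn %lu order %d\n", pfn, order);
	}

	static void free_range(unsigned long start, unsigned long end)
	{
		unsigned long lo = (start + BITS_PER_LONG - 1) & ~(BITS_PER_LONG - 1);
		unsigned long hi = end & ~(BITS_PER_LONG - 1);
		unsigned long i;

		if (hi <= lo) {			/* too small to align */
			for (i = start; i < end; i++)
				free_one(i, 0);
			return;
		}
		for (i = start; i < lo; i++)	/* unaligned head */
			free_one(i, 0);
		for (i = lo; i < hi; i += BITS_PER_LONG)
			free_one(i, 6);		/* aligned bulk, 64 pages each */
		for (i = hi; i < end; i++)	/* unaligned tail */
			free_one(i, 0);
	}

	int main(void)
	{
		free_range(60, 200);	/* head 60..63, bulk 64..191, tail 192..199 */
		return 0;
	}
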
/**
* free_all_bootmem_node - release a node's free pages to the buddy allocator
@@ -237,7 +294,12 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
register_page_bootmem_info_node(pgdat);
+#ifdef CONFIG_NO_BOOTMEM
+ /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
+ return 0;
+#else
return free_all_bootmem_core(pgdat->bdata);
+#endif
}
/**
@@ -247,9 +309,14 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
*/
unsigned long __init free_all_bootmem(void)
{
+#ifdef CONFIG_NO_BOOTMEM
+ return free_all_memory_core_early(NODE_DATA(0)->node_id);
+#else
return free_all_bootmem_core(NODE_DATA(0)->bdata);
+#endif
}
+#ifndef CONFIG_NO_BOOTMEM
static void __init __free(bootmem_data_t *bdata,
unsigned long sidx, unsigned long eidx)
{
@@ -344,6 +411,7 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
}
BUG();
}
+#endif
/**
* free_bootmem_node - mark a page range as usable
@@ -358,6 +426,12 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size)
{
+#ifdef CONFIG_NO_BOOTMEM
+ free_early(physaddr, physaddr + size);
+#if 0
+ printk(KERN_DEBUG "free %lx %lx\n", physaddr, size);
+#endif
+#else
unsigned long start, end;
kmemleak_free_part(__va(physaddr), size);
@@ -366,6 +440,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
end = PFN_DOWN(physaddr + size);
mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
+#endif
}
/**
@@ -379,6 +454,12 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
*/
void __init free_bootmem(unsigned long addr, unsigned long size)
{
+#ifdef CONFIG_NO_BOOTMEM
+ free_early(addr, addr + size);
+#if 0
+ printk(KERN_DEBUG "free %lx %lx\n", addr, size);
+#endif
+#else
unsigned long start, end;
kmemleak_free_part(__va(addr), size);
@@ -387,6 +468,7 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
end = PFN_DOWN(addr + size);
mark_bootmem(start, end, 0, 0);
+#endif
}
/**
@@ -403,12 +485,17 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size, int flags)
{
+#ifdef CONFIG_NO_BOOTMEM
+ panic("no bootmem");
+ return 0;
+#else
unsigned long start, end;
start = PFN_DOWN(physaddr);
end = PFN_UP(physaddr + size);
return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
+#endif
}
/**
@@ -424,14 +511,20 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
int __init reserve_bootmem(unsigned long addr, unsigned long size,
int flags)
{
+#ifdef CONFIG_NO_BOOTMEM
+ panic("no bootmem");
+ return 0;
+#else
unsigned long start, end;
start = PFN_DOWN(addr);
end = PFN_UP(addr + size);
return mark_bootmem(start, end, 1, flags);
+#endif
}
+#ifndef CONFIG_NO_BOOTMEM
static unsigned long __init align_idx(struct bootmem_data *bdata,
unsigned long idx, unsigned long step)
{
@@ -582,12 +675,33 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
#endif
return NULL;
}
+#endif
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
unsigned long goal,
unsigned long limit)
{
+#ifdef CONFIG_NO_BOOTMEM
+ void *ptr;
+
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc(size, GFP_NOWAIT);
+
+restart:
+
+ ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
+
+ if (ptr)
+ return ptr;
+
+ if (goal != 0) {
+ goal = 0;
+ goto restart;
+ }
+
+ return NULL;
+#else
bootmem_data_t *bdata;
void *region;
@@ -613,6 +727,7 @@ restart:
}
return NULL;
+#endif
}
/**
@@ -631,7 +746,13 @@ restart:
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
unsigned long goal)
{
- return ___alloc_bootmem_nopanic(size, align, goal, 0);
+ unsigned long limit = 0;
+
+#ifdef CONFIG_NO_BOOTMEM
+ limit = -1UL;
+#endif
+
+ return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
@@ -665,9 +786,16 @@ static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
unsigned long goal)
{
- return ___alloc_bootmem(size, align, goal, 0);
+ unsigned long limit = 0;
+
+#ifdef CONFIG_NO_BOOTMEM
+ limit = -1UL;
+#endif
+
+ return ___alloc_bootmem(size, align, goal, limit);
}
+#ifndef CONFIG_NO_BOOTMEM
static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
unsigned long size, unsigned long align,
unsigned long goal, unsigned long limit)
@@ -684,6 +812,7 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
return ___alloc_bootmem(size, align, goal, limit);
}
+#endif
/**
* __alloc_bootmem_node - allocate boot memory from a specific node
@@ -706,7 +835,46 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+#ifdef CONFIG_NO_BOOTMEM
+ return __alloc_memory_core_early(pgdat->node_id, size, align,
+ goal, -1ULL);
+#else
return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
+#endif
+}
+
+void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+#ifdef MAX_DMA32_PFN
+ unsigned long end_pfn;
+
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+ /* update goal according to MAX_DMA32_PFN */
+ end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+
+ if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
+ (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
+ void *ptr;
+ unsigned long new_goal;
+
+ new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
+#ifdef CONFIG_NO_BOOTMEM
+ ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+ new_goal, -1ULL);
+#else
+ ptr = alloc_bootmem_core(pgdat->bdata, size, align,
+ new_goal, 0);
+#endif
+ if (ptr)
+ return ptr;
+ }
+#endif
+
+ return __alloc_bootmem_node(pgdat, size, align, goal);
+
}
#ifdef CONFIG_SPARSEMEM
@@ -720,6 +888,16 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
void * __init alloc_bootmem_section(unsigned long size,
unsigned long section_nr)
{
+#ifdef CONFIG_NO_BOOTMEM
+ unsigned long pfn, goal, limit;
+
+ pfn = section_nr_to_pfn(section_nr);
+ goal = pfn << PAGE_SHIFT;
+ limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
+
+ return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
+ SMP_CACHE_BYTES, goal, limit);
+#else
bootmem_data_t *bdata;
unsigned long pfn, goal, limit;
@@ -729,6 +907,7 @@ void * __init alloc_bootmem_section(unsigned long size,
bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
+#endif
}
#endif
@@ -740,11 +919,16 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+#ifdef CONFIG_NO_BOOTMEM
+ ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+ goal, -1ULL);
+#else
ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
if (ptr)
return ptr;
ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
+#endif
if (ptr)
return ptr;
@@ -795,6 +979,11 @@ void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+#ifdef CONFIG_NO_BOOTMEM
+ return __alloc_memory_core_early(pgdat->node_id, size, align,
+ goal, ARCH_LOW_ADDRESS_LIMIT);
+#else
return ___alloc_bootmem_node(pgdat->bdata, size, align,
goal, ARCH_LOW_ADDRESS_LIMIT);
+#endif
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 698ea80..148b52a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1117,7 +1117,7 @@ readpage:
if (!PageUptodate(page)) {
if (page->mapping == NULL) {
/*
- * invalidate_inode_pages got it
+ * invalidate_mapping_pages got it
*/
unlock_page(page);
page_cache_release(page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8deb9d0..a6b17aa 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1009,10 +1009,10 @@ static void drain_pages(unsigned int cpu)
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
- pset = zone_pcp(zone, cpu);
+ local_irq_save(flags);
+ pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
- local_irq_save(flags);
free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
local_irq_restore(flags);
@@ -1096,7 +1096,6 @@ static void free_hot_cold_page(struct page *page, int cold)
arch_free_page(page, 0);
kernel_map_pages(page, 1, 0);
- pcp = &zone_pcp(zone, get_cpu())->pcp;
migratetype = get_pageblock_migratetype(page);
set_page_private(page, migratetype);
local_irq_save(flags);
@@ -1119,6 +1118,7 @@ static void free_hot_cold_page(struct page *page, int cold)
migratetype = MIGRATE_MOVABLE;
}
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
if (cold)
list_add_tail(&page->lru, &pcp->lists[migratetype]);
else
@@ -1131,7 +1131,6 @@ static void free_hot_cold_page(struct page *page, int cold)
out:
local_irq_restore(flags);
- put_cpu();
}
void free_hot_page(struct page *page)
@@ -1181,17 +1180,15 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
unsigned long flags;
struct page *page;
int cold = !!(gfp_flags & __GFP_COLD);
- int cpu;
again:
- cpu = get_cpu();
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
struct list_head *list;
- pcp = &zone_pcp(zone, cpu)->pcp;
- list = &pcp->lists[migratetype];
local_irq_save(flags);
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ list = &pcp->lists[migratetype];
if (list_empty(list)) {
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
@@ -1232,7 +1229,6 @@ again:
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone);
local_irq_restore(flags);
- put_cpu();
VM_BUG_ON(bad_range(zone, page));
if (prep_new_page(page, order, gfp_flags))
@@ -1241,7 +1237,6 @@ again:
failed:
local_irq_restore(flags);
- put_cpu();
return NULL;
}
@@ -2180,7 +2175,7 @@ void show_free_areas(void)
for_each_online_cpu(cpu) {
struct per_cpu_pageset *pageset;
- pageset = zone_pcp(zone, cpu);
+ pageset = per_cpu_ptr(zone->pageset, cpu);
printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
cpu, pageset->pcp.high,
@@ -2745,10 +2740,29 @@ static void build_zonelist_cache(pg_data_t *pgdat)
#endif /* CONFIG_NUMA */
+/*
+ * Boot pageset table. One per cpu which is going to be used for all
+ * zones and all nodes. The parameters will be set in such a way
+ * that an item put on a list will immediately be handed over to
+ * the buddy list. This is safe since pageset manipulation is done
+ * with interrupts disabled.
+ *
+ * The boot_pagesets must be kept even after bootup is complete for
+ * unused processors and/or zones. They do play a role for bootstrapping
+ * hotplugged processors.
+ *
+ * zoneinfo_show() and maybe other functions do
+ * not check if the processor is online before following the pageset pointer.
+ * Other parts of the kernel may not check if the zone is available.
+ */
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
+static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
+
/* the int return value is just for stop_machine() */
static int __build_all_zonelists(void *dummy)
{
int nid;
+ int cpu;
#ifdef CONFIG_NUMA
memset(node_load, 0, sizeof(node_load));
@@ -2759,6 +2773,23 @@ static int __build_all_zonelists(void *dummy)
build_zonelists(pgdat);
build_zonelist_cache(pgdat);
}
+
+ /*
+ * Initialize the boot_pagesets that are going to be used
+ * for bootstrapping processors. The real pagesets for
+ * each zone will be allocated later when the per cpu
+ * allocator is available.
+ *
+	 * boot_pagesets are also used for bootstrapping offline
+	 * cpus if the system is already booted because the pagesets
+	 * are needed to initialize allocators on a specific cpu too.
+	 * E.g. the percpu allocator needs the page allocator which
+ * needs the percpu allocator in order to allocate its pagesets
+ * (a chicken-egg dilemma).
+ */
+ for_each_possible_cpu(cpu)
+ setup_pageset(&per_cpu(boot_pageset, cpu), 0);
+
return 0;
}
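
The comment in this hunk describes a classic bootstrap cycle: the percpu allocator needs pages, the page allocator needs pagesets, and real pagesets come from the percpu allocator. The static boot_pageset breaks the cycle by being valid before any allocator runs. A self-contained userspace model of the same handover, with hypothetical names (pageset, zone_init_early/zone_init_late):

#include <stdlib.h>

struct pageset { int batch; };
struct zone { struct pageset *pageset; };

static struct pageset boot_pageset = { .batch = 0 };	/* always valid; drains at once */

void zone_init_early(struct zone *z)
{
	z->pageset = &boot_pageset;	/* usable before any allocator exists */
}

int zone_init_late(struct zone *z)	/* once the real allocator is up */
{
	struct pageset *p = malloc(sizeof(*p));

	if (!p)
		return -1;		/* keep the boot fallback on failure */
	p->batch = 64;
	z->pageset = p;
	return 0;
}
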
@@ -3096,121 +3127,33 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
pcp->batch = PAGE_SHIFT * 8;
}
-
-#ifdef CONFIG_NUMA
-/*
- * Boot pageset table. One per cpu which is going to be used for all
- * zones and all nodes. The parameters will be set in such a way
- * that an item put on a list will immediately be handed over to
- * the buddy list. This is safe since pageset manipulation is done
- * with interrupts disabled.
- *
- * Some NUMA counter updates may also be caught by the boot pagesets.
- *
- * The boot_pagesets must be kept even after bootup is complete for
- * unused processors and/or zones. They do play a role for bootstrapping
- * hotplugged processors.
- *
- * zoneinfo_show() and maybe other functions do
- * not check if the processor is online before following the pageset pointer.
- * Other parts of the kernel may not check if the zone is available.
- */
-static struct per_cpu_pageset boot_pageset[NR_CPUS];
-
/*
- * Dynamically allocate memory for the
- * per cpu pageset array in struct zone.
+ * Allocate per cpu pagesets and initialize them.
+ * Before this call only boot pagesets were available.
+ * Boot pagesets will no longer be used by this processor
+ * after setup_per_cpu_pageset().
*/
-static int __cpuinit process_zones(int cpu)
+void __init setup_per_cpu_pageset(void)
{
- struct zone *zone, *dzone;
- int node = cpu_to_node(cpu);
-
- node_set_state(node, N_CPU); /* this node has a cpu */
+ struct zone *zone;
+ int cpu;
for_each_populated_zone(zone) {
- zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
- GFP_KERNEL, node);
- if (!zone_pcp(zone, cpu))
- goto bad;
+ zone->pageset = alloc_percpu(struct per_cpu_pageset);
- setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
+ for_each_possible_cpu(cpu) {
+ struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
- if (percpu_pagelist_fraction)
- setup_pagelist_highmark(zone_pcp(zone, cpu),
- (zone->present_pages / percpu_pagelist_fraction));
- }
+ setup_pageset(pcp, zone_batchsize(zone));
- return 0;
-bad:
- for_each_zone(dzone) {
- if (!populated_zone(dzone))
- continue;
- if (dzone == zone)
- break;
- kfree(zone_pcp(dzone, cpu));
- zone_pcp(dzone, cpu) = &boot_pageset[cpu];
- }
- return -ENOMEM;
-}
-
-static inline void free_zone_pagesets(int cpu)
-{
- struct zone *zone;
-
- for_each_zone(zone) {
- struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
-
- /* Free per_cpu_pageset if it is slab allocated */
- if (pset != &boot_pageset[cpu])
- kfree(pset);
- zone_pcp(zone, cpu) = &boot_pageset[cpu];
- }
-}
-
-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
-{
- int cpu = (long)hcpu;
- int ret = NOTIFY_OK;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- if (process_zones(cpu))
- ret = NOTIFY_BAD;
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- free_zone_pagesets(cpu);
- break;
- default:
- break;
+ if (percpu_pagelist_fraction)
+ setup_pagelist_highmark(pcp,
+ (zone->present_pages /
+ percpu_pagelist_fraction));
+ }
}
- return ret;
-}
-
-static struct notifier_block __cpuinitdata pageset_notifier =
- { &pageset_cpuup_callback, NULL, 0 };
-
-void __init setup_per_cpu_pageset(void)
-{
- int err;
-
- /* Initialize per_cpu_pageset for cpu 0.
- * A cpuup callback will do this for every cpu
- * as it comes online
- */
- err = process_zones(smp_processor_id());
- BUG_ON(err);
- register_cpu_notifier(&pageset_notifier);
}
-#endif
-
static noinline __init_refok
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
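
setup_per_cpu_pageset() now does one alloc_percpu() per zone instead of NR_CPUS kmalloc_node() calls plus a hotplug notifier. The general idiom, sketched with a hypothetical struct (alloc_percpu() returns zeroed memory; the loop is shown for the per_cpu_ptr() pattern):

struct foo_stats {
	unsigned long hits;
};

static struct foo_stats __percpu *stats;

static int foo_init(void)
{
	int cpu;

	stats = alloc_percpu(struct foo_stats);		/* one slot per possible CPU */
	if (!stats)
		return -ENOMEM;
	for_each_possible_cpu(cpu)
		per_cpu_ptr(stats, cpu)->hits = 0;	/* remote init is fine here */
	return 0;
}
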
@@ -3264,7 +3207,7 @@ static int __zone_pcp_update(void *data)
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
- pset = zone_pcp(zone, cpu);
+ pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
local_irq_save(flags);
@@ -3282,21 +3225,17 @@ void zone_pcp_update(struct zone *zone)
static __meminit void zone_pcp_init(struct zone *zone)
{
- int cpu;
- unsigned long batch = zone_batchsize(zone);
+ /*
+ * per cpu subsystem is not up at this point. The following code
+ * relies on the ability of the linker to provide the
+ * offset of a (static) per cpu variable into the per cpu area.
+ */
+ zone->pageset = &boot_pageset;
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
-#ifdef CONFIG_NUMA
- /* Early boot. Slab allocator not functional yet */
- zone_pcp(zone, cpu) = &boot_pageset[cpu];
- setup_pageset(&boot_pageset[cpu],0);
-#else
- setup_pageset(zone_pcp(zone,cpu), batch);
-#endif
- }
if (zone->present_pages)
- printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
- zone->name, zone->present_pages, batch);
+ printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
+ zone->name, zone->present_pages,
+ zone_batchsize(zone));
}
__meminit int init_currently_empty_zone(struct zone *zone,
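
The zone_pcp_init() rewrite relies on the subtlety spelled out in its new comment: taking the address of a DEFINE_PER_CPU variable yields that variable's offset within the per-cpu area, so the assignment is safe before the percpu subsystem is up, and per_cpu_ptr()/this_cpu_ptr() later relocate the offset per processor. A sketch of both halves:

static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);

static void early_init(struct zone *zone)
{
	/* &boot_pageset is a link-time per-cpu offset, not a real address */
	zone->pageset = &boot_pageset;
}

static struct per_cpu_pageset *lookup(struct zone *zone, int cpu)
{
	return per_cpu_ptr(zone->pageset, cpu);	/* relocates the offset for @cpu */
}
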
@@ -3435,6 +3374,61 @@ void __init free_bootmem_with_active_regions(int nid,
}
}
+int __init add_from_early_node_map(struct range *range, int az,
+ int nr_range, int nid)
+{
+ int i;
+ u64 start, end;
+
+ /* need to go over early_node_map to find out good range for node */
+ for_each_active_range_index_in_nid(i, nid) {
+ start = early_node_map[i].start_pfn;
+ end = early_node_map[i].end_pfn;
+ nr_range = add_range(range, az, nr_range, start, end);
+ }
+ return nr_range;
+}
+
+#ifdef CONFIG_NO_BOOTMEM
+void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
+ u64 goal, u64 limit)
+{
+ int i;
+ void *ptr;
+
+ /* need to go over early_node_map to find out good range for node */
+ for_each_active_range_index_in_nid(i, nid) {
+ u64 addr;
+ u64 ei_start, ei_last;
+
+ ei_last = early_node_map[i].end_pfn;
+ ei_last <<= PAGE_SHIFT;
+ ei_start = early_node_map[i].start_pfn;
+ ei_start <<= PAGE_SHIFT;
+ addr = find_early_area(ei_start, ei_last,
+ goal, limit, size, align);
+
+ if (addr == -1ULL)
+ continue;
+
+#if 0
+ printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n",
+ nid,
+ ei_start, ei_last, goal, limit, size,
+ align, addr);
+#endif
+
+ ptr = phys_to_virt(addr);
+ memset(ptr, 0, size);
+ reserve_early_without_check(addr, addr + size, "BOOTMEM");
+ return ptr;
+ }
+
+ return NULL;
+}
+#endif
+
+
void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
{
int i;
@@ -4467,7 +4461,11 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
+struct pglist_data __refdata contig_page_data = {
+#ifndef CONFIG_NO_BOOTMEM
+ .bdata = &bootmem_node_data[0]
+#endif
+ };
EXPORT_SYMBOL(contig_page_data);
#endif
@@ -4810,10 +4808,11 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
if (!write || (ret == -EINVAL))
return ret;
for_each_populated_zone(zone) {
- for_each_online_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
unsigned long high;
high = zone->present_pages / percpu_pagelist_fraction;
- setup_pagelist_highmark(zone_pcp(zone, cpu), high);
+ setup_pagelist_highmark(
+ per_cpu_ptr(zone->pageset, cpu), high);
}
}
return 0;
diff --git a/mm/percpu.c b/mm/percpu.c
index 083e7c9..768419d 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -80,13 +80,15 @@
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr) \
- (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \
- + (unsigned long)__per_cpu_start)
+ (void __percpu *)((unsigned long)(addr) - \
+ (unsigned long)pcpu_base_addr + \
+ (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr) \
- (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \
- - (unsigned long)__per_cpu_start)
+ (void __force *)((unsigned long)(ptr) + \
+ (unsigned long)pcpu_base_addr - \
+ (unsigned long)__per_cpu_start)
#endif
struct pcpu_chunk {
@@ -913,11 +915,10 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
int rs, re;
/* quick path, check whether it's empty already */
- pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
- if (rs == page_start && re == page_end)
- return;
- break;
- }
+ rs = page_start;
+ pcpu_next_unpop(chunk, &rs, &re, page_end);
+ if (rs == page_start && re == page_end)
+ return;
/* immutable chunks can't be depopulated */
WARN_ON(chunk->immutable);
@@ -968,11 +969,10 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
int rs, re, rc;
/* quick path, check whether all pages are already there */
- pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
- if (rs == page_start && re == page_end)
- goto clear;
- break;
- }
+ rs = page_start;
+ pcpu_next_pop(chunk, &rs, &re, page_end);
+ if (rs == page_start && re == page_end)
+ goto clear;
/* need to allocate and map pages, this chunk can't be immutable */
WARN_ON(chunk->immutable);
@@ -1067,7 +1067,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
* RETURNS:
* Percpu pointer to the allocated area on success, NULL on failure.
*/
-static void *pcpu_alloc(size_t size, size_t align, bool reserved)
+static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
{
static int warn_limit = 10;
struct pcpu_chunk *chunk;
@@ -1196,7 +1196,7 @@ fail_unlock_mutex:
* RETURNS:
* Percpu pointer to the allocated area on success, NULL on failure.
*/
-void *__alloc_percpu(size_t size, size_t align)
+void __percpu *__alloc_percpu(size_t size, size_t align)
{
return pcpu_alloc(size, align, false);
}
@@ -1217,7 +1217,7 @@ EXPORT_SYMBOL_GPL(__alloc_percpu);
* RETURNS:
* Percpu pointer to the allocated area on success, NULL on failure.
*/
-void *__alloc_reserved_percpu(size_t size, size_t align)
+void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
return pcpu_alloc(size, align, true);
}
@@ -1269,7 +1269,7 @@ static void pcpu_reclaim(struct work_struct *work)
* CONTEXT:
* Can be called from atomic context.
*/
-void free_percpu(void *ptr)
+void free_percpu(void __percpu *ptr)
{
void *addr;
struct pcpu_chunk *chunk;
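
The __percpu annotations added throughout mm/percpu.c put percpu pointers in their own sparse address space, so dereferencing one without going through per_cpu_ptr()/this_cpu_ptr() becomes a static-analysis warning. A sketch of correct versus flagged usage, with a hypothetical struct:

struct counter {
	long val;
};

static struct counter __percpu *c;

static void bump(void)
{
	get_cpu();			/* pin: this_cpu_ptr() needs preemption off */
	this_cpu_ptr(c)->val++;		/* OK: converted to a real address first */
	put_cpu();
	/* c->val++;  <-- sparse: dereference of noderef expression */
}
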
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index d9714bd..392b9bb 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -40,9 +40,11 @@ static void * __init_refok __earlyonly_bootmem_alloc(int node,
unsigned long align,
unsigned long goal)
{
- return __alloc_bootmem_node(NODE_DATA(node), size, align, goal);
+ return __alloc_bootmem_node_high(NODE_DATA(node), size, align, goal);
}
+static void *vmemmap_buf;
+static void *vmemmap_buf_end;
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
@@ -64,6 +66,24 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
__pa(MAX_DMA_ADDRESS));
}
+/* need to make sure size is all the same during early stage */
+void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
+{
+ void *ptr;
+
+ if (!vmemmap_buf)
+ return vmemmap_alloc_block(size, node);
+
+	/* take the allocation from the buffer */
+ ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
+ if (ptr + size > vmemmap_buf_end)
+ return vmemmap_alloc_block(size, node);
+
+ vmemmap_buf = ptr + size;
+
+ return ptr;
+}
+
void __meminit vmemmap_verify(pte_t *pte, int node,
unsigned long start, unsigned long end)
{
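
vmemmap_alloc_block_buf() is a bump-pointer carve-out of one large early allocation; the ALIGN() on the request size works because every caller in this phase asks for the same power-of-two size (see the comment above it). A self-contained userspace model of the same scheme:

#include <stdint.h>
#include <stddef.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

static char pool[1 << 20];
static char *buf = pool;
static char *buf_end = pool + sizeof(pool);

/* size must be a power of two, identical across calls in this phase */
static void *bump_alloc(size_t size)
{
	char *ptr = (char *)ALIGN_UP((uintptr_t)buf, size);

	if (ptr + size > buf_end)
		return NULL;	/* caller falls back to the regular allocator */
	buf = ptr + size;
	return ptr;
}
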
@@ -80,7 +100,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
pte_t *pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte)) {
pte_t entry;
- void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+ void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
if (!p)
return NULL;
entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
@@ -163,3 +183,55 @@ struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
return map;
}
+
+void __init sparse_mem_maps_populate_node(struct page **map_map,
+ unsigned long pnum_begin,
+ unsigned long pnum_end,
+ unsigned long map_count, int nodeid)
+{
+ unsigned long pnum;
+ unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
+ void *vmemmap_buf_start;
+
+ size = ALIGN(size, PMD_SIZE);
+ vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
+ PMD_SIZE, __pa(MAX_DMA_ADDRESS));
+
+ if (vmemmap_buf_start) {
+ vmemmap_buf = vmemmap_buf_start;
+ vmemmap_buf_end = vmemmap_buf_start + size * map_count;
+ }
+
+ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+ struct mem_section *ms;
+
+ if (!present_section_nr(pnum))
+ continue;
+
+ map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+ if (map_map[pnum])
+ continue;
+ ms = __nr_to_section(pnum);
+	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
+	       "some memory will not be available.\n", __func__);
+ ms->section_mem_map = 0;
+ }
+
+ if (vmemmap_buf_start) {
+ /* need to free left buf */
+#ifdef CONFIG_NO_BOOTMEM
+ free_early(__pa(vmemmap_buf_start), __pa(vmemmap_buf_end));
+ if (vmemmap_buf_start < vmemmap_buf) {
+ char name[15];
+
+ snprintf(name, sizeof(name), "MEMMAP %d", nodeid);
+ reserve_early_without_check(__pa(vmemmap_buf_start),
+ __pa(vmemmap_buf), name);
+ }
+#else
+ free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
+#endif
+ vmemmap_buf = NULL;
+ vmemmap_buf_end = NULL;
+ }
+}
diff --git a/mm/sparse.c b/mm/sparse.c
index 6ce4aab..22896d5 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -271,7 +271,8 @@ static unsigned long *__kmalloc_section_usemap(void)
#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
-sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
+sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
+ unsigned long count)
{
unsigned long section_nr;
@@ -286,7 +287,7 @@ sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
* this problem.
*/
section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
- return alloc_bootmem_section(usemap_size(), section_nr);
+ return alloc_bootmem_section(usemap_size() * count, section_nr);
}
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -329,7 +330,8 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
}
#else
static unsigned long * __init
-sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
+sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
+ unsigned long count)
{
return NULL;
}
@@ -339,27 +341,40 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
-static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
+static void __init sparse_early_usemaps_alloc_node(unsigned long **usemap_map,
+ unsigned long pnum_begin,
+ unsigned long pnum_end,
+ unsigned long usemap_count, int nodeid)
{
- unsigned long *usemap;
- struct mem_section *ms = __nr_to_section(pnum);
- int nid = sparse_early_nid(ms);
-
- usemap = sparse_early_usemap_alloc_pgdat_section(NODE_DATA(nid));
- if (usemap)
- return usemap;
+ void *usemap;
+ unsigned long pnum;
+ int size = usemap_size();
- usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
+ usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
+ usemap_count);
if (usemap) {
- check_usemap_section_nr(nid, usemap);
- return usemap;
+ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+ if (!present_section_nr(pnum))
+ continue;
+ usemap_map[pnum] = usemap;
+ usemap += size;
+ }
+ return;
}
- /* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
- nid = 0;
+ usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
+ if (usemap) {
+ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+ if (!present_section_nr(pnum))
+ continue;
+ usemap_map[pnum] = usemap;
+ usemap += size;
+ check_usemap_section_nr(nodeid, usemap_map[pnum]);
+ }
+ return;
+ }
printk(KERN_WARNING "%s: allocation failed\n", __func__);
- return NULL;
}
#ifndef CONFIG_SPARSEMEM_VMEMMAP
@@ -375,8 +390,65 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
return map;
}
+void __init sparse_mem_maps_populate_node(struct page **map_map,
+ unsigned long pnum_begin,
+ unsigned long pnum_end,
+ unsigned long map_count, int nodeid)
+{
+ void *map;
+ unsigned long pnum;
+ unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
+
+ map = alloc_remap(nodeid, size * map_count);
+ if (map) {
+ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+ if (!present_section_nr(pnum))
+ continue;
+ map_map[pnum] = map;
+ map += size;
+ }
+ return;
+ }
+
+ size = PAGE_ALIGN(size);
+ map = alloc_bootmem_pages_node(NODE_DATA(nodeid), size * map_count);
+ if (map) {
+ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+ if (!present_section_nr(pnum))
+ continue;
+ map_map[pnum] = map;
+ map += size;
+ }
+ return;
+ }
+
+ /* fallback */
+ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+ struct mem_section *ms;
+
+ if (!present_section_nr(pnum))
+ continue;
+ map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+ if (map_map[pnum])
+ continue;
+ ms = __nr_to_section(pnum);
+	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
+	       "some memory will not be available.\n", __func__);
+ ms->section_mem_map = 0;
+ }
+}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
+ unsigned long pnum_begin,
+ unsigned long pnum_end,
+ unsigned long map_count, int nodeid)
+{
+ sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
+ map_count, nodeid);
+}
+#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
struct page *map;
@@ -392,10 +464,12 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
ms->section_mem_map = 0;
return NULL;
}
+#endif
void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}
+
/*
* Allocate the accumulated non-linear sections, allocate a mem_map
* for each and record the physical to section mapping.
@@ -407,6 +481,14 @@ void __init sparse_init(void)
unsigned long *usemap;
unsigned long **usemap_map;
int size;
+ int nodeid_begin = 0;
+ unsigned long pnum_begin = 0;
+ unsigned long usemap_count;
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+ unsigned long map_count;
+ int size2;
+ struct page **map_map;
+#endif
/*
* map is using big page (aka 2M in x86 64 bit)
@@ -425,10 +507,81 @@ void __init sparse_init(void)
panic("can not allocate usemap_map\n");
for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+ struct mem_section *ms;
+
if (!present_section_nr(pnum))
continue;
- usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
+ ms = __nr_to_section(pnum);
+ nodeid_begin = sparse_early_nid(ms);
+ pnum_begin = pnum;
+ break;
}
+ usemap_count = 1;
+ for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
+ struct mem_section *ms;
+ int nodeid;
+
+ if (!present_section_nr(pnum))
+ continue;
+ ms = __nr_to_section(pnum);
+ nodeid = sparse_early_nid(ms);
+ if (nodeid == nodeid_begin) {
+ usemap_count++;
+ continue;
+ }
+		/* ok, we need to take care of pnum_begin to pnum - 1 */
+ sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
+ usemap_count, nodeid_begin);
+		/* new start, update count etc. */
+ nodeid_begin = nodeid;
+ pnum_begin = pnum;
+ usemap_count = 1;
+ }
+ /* ok, last chunk */
+ sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
+ usemap_count, nodeid_begin);
+
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+ size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
+ map_map = alloc_bootmem(size2);
+ if (!map_map)
+ panic("can not allocate map_map\n");
+
+ for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+ struct mem_section *ms;
+
+ if (!present_section_nr(pnum))
+ continue;
+ ms = __nr_to_section(pnum);
+ nodeid_begin = sparse_early_nid(ms);
+ pnum_begin = pnum;
+ break;
+ }
+ map_count = 1;
+ for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
+ struct mem_section *ms;
+ int nodeid;
+
+ if (!present_section_nr(pnum))
+ continue;
+ ms = __nr_to_section(pnum);
+ nodeid = sparse_early_nid(ms);
+ if (nodeid == nodeid_begin) {
+ map_count++;
+ continue;
+ }
+		/* ok, we need to take care of pnum_begin to pnum - 1 */
+ sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
+ map_count, nodeid_begin);
+		/* new start, update count etc. */
+ nodeid_begin = nodeid;
+ pnum_begin = pnum;
+ map_count = 1;
+ }
+ /* ok, last chunk */
+ sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
+ map_count, nodeid_begin);
+#endif
for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
if (!present_section_nr(pnum))
@@ -438,7 +591,11 @@ void __init sparse_init(void)
if (!usemap)
continue;
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+ map = map_map[pnum];
+#else
map = sparse_early_mem_map_alloc(pnum);
+#endif
if (!map)
continue;
@@ -448,6 +605,9 @@ void __init sparse_init(void)
vmemmap_populate_print_last();
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+ free_bootmem(__pa(map_map), size2);
+#endif
free_bootmem(__pa(usemap_map), size);
}
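
sparse_init() now makes two passes of identical shape: walk the present sections, detect runs that live on the same node, and hand each run to a batched allocator (sparse_early_usemaps_alloc_node, then sparse_early_mem_maps_alloc_node). The run-detection skeleton, as a self-contained model that ignores absent sections:

static void alloc_run(int begin, int end, int count, int node)
{
	/* batched allocation for sections [begin, end) on @node */
}

static void for_each_node_run(const int *node_of, int nr)
{
	int i, begin = 0, count = 1, cur;

	if (nr <= 0)
		return;
	cur = node_of[0];
	for (i = 1; i < nr; i++) {
		if (node_of[i] == cur) {
			count++;
			continue;
		}
		alloc_run(begin, i, count, cur);	/* flush finished run */
		cur = node_of[i];
		begin = i;
		count = 1;
	}
	alloc_run(begin, nr, count, cur);		/* last run */
}
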
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6051fba..fc5aa18 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -139,7 +139,8 @@ static void refresh_zone_stat_thresholds(void)
threshold = calculate_threshold(zone);
for_each_online_cpu(cpu)
- zone_pcp(zone, cpu)->stat_threshold = threshold;
+ per_cpu_ptr(zone->pageset, cpu)->stat_threshold
+ = threshold;
}
}
@@ -149,7 +150,8 @@ static void refresh_zone_stat_thresholds(void)
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
int delta)
{
- struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+ struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
+
s8 *p = pcp->vm_stat_diff + item;
long x;
@@ -202,7 +204,7 @@ EXPORT_SYMBOL(mod_zone_page_state);
*/
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
- struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+ struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
s8 *p = pcp->vm_stat_diff + item;
(*p)++;
@@ -223,7 +225,7 @@ EXPORT_SYMBOL(__inc_zone_page_state);
void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
- struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+ struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
s8 *p = pcp->vm_stat_diff + item;
(*p)--;
@@ -300,7 +302,7 @@ void refresh_cpu_vm_stats(int cpu)
for_each_populated_zone(zone) {
struct per_cpu_pageset *p;
- p = zone_pcp(zone, cpu);
+ p = per_cpu_ptr(zone->pageset, cpu);
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
if (p->vm_stat_diff[i]) {
@@ -741,7 +743,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
for_each_online_cpu(i) {
struct per_cpu_pageset *pageset;
- pageset = zone_pcp(zone, i);
+ pageset = per_cpu_ptr(zone->pageset, i);
seq_printf(m,
"\n cpu: %i"
"\n count: %i"
@@ -906,6 +908,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
start_cpu_timer(cpu);
+ node_set_state(cpu_to_node(cpu), N_CPU);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9ea4538..8d63f8f 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -999,19 +999,14 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
inode = rpc_get_inode(sb, S_IFDIR | 0755);
if (!inode)
return -ENOMEM;
- root = d_alloc_root(inode);
+ sb->s_root = root = d_alloc_root(inode);
if (!root) {
iput(inode);
return -ENOMEM;
}
if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
- goto out;
- sb->s_root = root;
+ return -ENOMEM;
return 0;
-out:
- d_genocide(root);
- dput(root);
- return -ENOMEM;
}
static int
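
The rpc_fill_super() change works because assigning sb->s_root before rpc_populate() lets the generic unmount path (deactivate_locked_super() into kill_sb()) tear down any partially populated tree, which makes the hand-rolled d_genocide()/dput() error path unnecessary. The resulting fill_super shape, sketched with hypothetical helpers:

static int example_fill_super(struct super_block *sb)
{
	struct inode *inode = example_get_root_inode(sb);	/* hypothetical */

	if (!inode)
		return -ENOMEM;
	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);
		return -ENOMEM;
	}
	/* from here on, failures just return: kill_sb() cleans up s_root */
	return example_populate(sb->s_root) ? -ENOMEM : 0;	/* hypothetical */
}
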
diff --git a/security/capability.c b/security/capability.c
index 5c700e1..4875142 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -906,10 +906,6 @@ static void cap_audit_rule_free(void *lsmrule)
}
#endif /* CONFIG_AUDIT */
-struct security_operations default_security_ops = {
- .name = "default",
-};
-
#define set_to_cap_if_null(ops, function) \
do { \
if (!ops->function) { \
diff --git a/security/commoncap.c b/security/commoncap.c
index f800fdb..6166973 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/prctl.h>
#include <linux/securebits.h>
+#include <linux/syslog.h>
/*
* If a non-root user executes a setuid-root binary in
@@ -888,13 +889,17 @@ error:
/**
* cap_syslog - Determine whether syslog function is permitted
* @type: Function requested
+ * @from_file: Whether this request came from an open file (i.e. /proc)
*
* Determine whether the current process is permitted to use a particular
* syslog function, returning 0 if permission is granted, -ve if not.
*/
-int cap_syslog(int type)
+int cap_syslog(int type, bool from_file)
{
- if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
+ if (type != SYSLOG_ACTION_OPEN && from_file)
+ return 0;
+ if ((type != SYSLOG_ACTION_READ_ALL &&
+ type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
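
Two things change in cap_syslog(): the magic numbers become SYSLOG_ACTION_* constants from the new <linux/syslog.h>, and requests arriving through an already opened file (from_file, i.e. /proc/kmsg) skip the capability check, since permission was established at open time. The decision, restated as a sketch:

	if (type != SYSLOG_ACTION_OPEN && from_file)
		return 0;			/* checked when the file was opened */
	switch (type) {
	case SYSLOG_ACTION_READ_ALL:		/* was the literal 3 */
	case SYSLOG_ACTION_SIZE_BUFFER:		/* was the literal 10 */
		return 0;			/* readable without privilege */
	default:
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
	}
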
diff --git a/security/security.c b/security/security.c
index 122b748..687c6fd 100644
--- a/security/security.c
+++ b/security/security.c
@@ -23,10 +23,12 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
CONFIG_DEFAULT_SECURITY;
/* things that live in capability.c */
-extern struct security_operations default_security_ops;
extern void security_fixup_ops(struct security_operations *ops);
-struct security_operations *security_ops; /* Initialized to NULL */
+static struct security_operations *security_ops;
+static struct security_operations default_security_ops = {
+ .name = "default",
+};
static inline int verify(struct security_operations *ops)
{
@@ -63,6 +65,11 @@ int __init security_init(void)
return 0;
}
+void reset_security_ops(void)
+{
+ security_ops = &default_security_ops;
+}
+
/* Save user chosen LSM */
static int __init choose_lsm(char *str)
{
@@ -203,9 +210,9 @@ int security_quota_on(struct dentry *dentry)
return security_ops->quota_on(dentry);
}
-int security_syslog(int type)
+int security_syslog(int type, bool from_file)
{
- return security_ops->syslog(type);
+ return security_ops->syslog(type, from_file);
}
int security_settime(struct timespec *ts, struct timezone *tz)
@@ -389,42 +396,42 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
EXPORT_SYMBOL(security_inode_init_security);
#ifdef CONFIG_SECURITY_PATH
-int security_path_mknod(struct path *path, struct dentry *dentry, int mode,
+int security_path_mknod(struct path *dir, struct dentry *dentry, int mode,
unsigned int dev)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_mknod(path, dentry, mode, dev);
+ return security_ops->path_mknod(dir, dentry, mode, dev);
}
EXPORT_SYMBOL(security_path_mknod);
-int security_path_mkdir(struct path *path, struct dentry *dentry, int mode)
+int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_mkdir(path, dentry, mode);
+ return security_ops->path_mkdir(dir, dentry, mode);
}
-int security_path_rmdir(struct path *path, struct dentry *dentry)
+int security_path_rmdir(struct path *dir, struct dentry *dentry)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_rmdir(path, dentry);
+ return security_ops->path_rmdir(dir, dentry);
}
-int security_path_unlink(struct path *path, struct dentry *dentry)
+int security_path_unlink(struct path *dir, struct dentry *dentry)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_unlink(path, dentry);
+ return security_ops->path_unlink(dir, dentry);
}
-int security_path_symlink(struct path *path, struct dentry *dentry,
+int security_path_symlink(struct path *dir, struct dentry *dentry,
const char *old_name)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_symlink(path, dentry, old_name);
+ return security_ops->path_symlink(dir, dentry, old_name);
}
int security_path_link(struct dentry *old_dentry, struct path *new_dir,
@@ -630,14 +637,14 @@ int security_inode_killpriv(struct dentry *dentry)
int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
{
if (unlikely(IS_PRIVATE(inode)))
- return 0;
+ return -EOPNOTSUPP;
return security_ops->inode_getsecurity(inode, name, buffer, alloc);
}
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
if (unlikely(IS_PRIVATE(inode)))
- return 0;
+ return -EOPNOTSUPP;
return security_ops->inode_setsecurity(inode, name, value, size, flags);
}
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index f2dde26..db0fd9f 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -489,17 +489,14 @@ void avc_audit(u32 ssid, u32 tsid,
struct common_audit_data stack_data;
u32 denied, audited;
denied = requested & ~avd->allowed;
- if (denied) {
- audited = denied;
- if (!(audited & avd->auditdeny))
- return;
- } else if (result) {
+ if (denied)
+ audited = denied & avd->auditdeny;
+ else if (result)
audited = denied = requested;
- } else {
- audited = requested;
- if (!(audited & avd->auditallow))
- return;
- }
+ else
+ audited = requested & avd->auditallow;
+ if (!audited)
+ return;
if (!a) {
a = &stack_data;
memset(a, 0, sizeof(*a));
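
The avc_audit() rewrite folds three early-return branches into one mask computation followed by a single zero test; note that it also narrows what gets logged to the bits actually selected by auditdeny/auditallow. A worked example:

	u32 requested = 0x6, allowed = 0x2;
	u32 denied  = requested & ~allowed;		/* 0x4: one permission denied */
	u32 audited = denied & 0x5 /* auditdeny */;	/* 0x4: emit an audit record */
	/* with auditdeny = 0x1 instead: audited == 0, so no record is emitted */
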
@@ -746,9 +743,7 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
else
avd = &avd_entry;
- rc = security_compute_av(ssid, tsid, tclass, requested, avd);
- if (rc)
- goto out;
+ security_compute_av(ssid, tsid, tclass, avd);
rcu_read_lock();
node = avc_insert(ssid, tsid, tclass, avd);
} else {
@@ -770,7 +765,6 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
}
rcu_read_unlock();
-out:
return rc;
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 9a2ee84..5feecb4 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -76,6 +76,7 @@
#include <linux/selinux.h>
#include <linux/mutex.h>
#include <linux/posix-timers.h>
+#include <linux/syslog.h>
#include "avc.h"
#include "objsec.h"
@@ -125,13 +126,6 @@ __setup("selinux=", selinux_enabled_setup);
int selinux_enabled = 1;
#endif
-
-/*
- * Minimal support for a secondary security module,
- * just to allow the use of the capability module.
- */
-static struct security_operations *secondary_ops;
-
/* Lists of inode and superblock security structures initialized
before the policy was loaded. */
static LIST_HEAD(superblock_security_head);
@@ -2049,29 +2043,30 @@ static int selinux_quota_on(struct dentry *dentry)
return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON);
}
-static int selinux_syslog(int type)
+static int selinux_syslog(int type, bool from_file)
{
int rc;
- rc = cap_syslog(type);
+ rc = cap_syslog(type, from_file);
if (rc)
return rc;
switch (type) {
- case 3: /* Read last kernel messages */
- case 10: /* Return size of the log buffer */
+ case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
+ case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
rc = task_has_system(current, SYSTEM__SYSLOG_READ);
break;
- case 6: /* Disable logging to console */
- case 7: /* Enable logging to console */
- case 8: /* Set level of messages printed to console */
+ case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
+ case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
+ /* Set level of messages printed to console */
+ case SYSLOG_ACTION_CONSOLE_LEVEL:
rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
break;
- case 0: /* Close log */
- case 1: /* Open log */
- case 2: /* Read from log */
- case 4: /* Read/clear last kernel messages */
- case 5: /* Clear ring buffer */
+ case SYSLOG_ACTION_CLOSE: /* Close log */
+ case SYSLOG_ACTION_OPEN: /* Open log */
+ case SYSLOG_ACTION_READ: /* Read from log */
+ case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
+ case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
default:
rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
break;
@@ -3334,7 +3329,7 @@ static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
if (ret == 0)
tsec->create_sid = isec->sid;
- return 0;
+ return ret;
}
static int selinux_kernel_module_request(char *kmod_name)
@@ -5672,9 +5667,6 @@ static __init int selinux_init(void)
0, SLAB_PANIC, NULL);
avc_init();
- secondary_ops = security_ops;
- if (!secondary_ops)
- panic("SELinux: No initial security operations\n");
if (register_security(&selinux_ops))
panic("SELinux: Unable to register with kernel.\n");
@@ -5835,8 +5827,7 @@ int selinux_disable(void)
selinux_disabled = 1;
selinux_enabled = 0;
- /* Reset security_ops to the secondary module, dummy or capability. */
- security_ops = secondary_ops;
+ reset_security_ops();
/* Try to destroy the avc node cache */
avc_disable();
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 2553266..1f7c249 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -57,7 +57,6 @@
struct netlbl_lsm_secattr;
extern int selinux_enabled;
-extern int selinux_mls_enabled;
/* Policy capabilities */
enum {
@@ -80,6 +79,8 @@ extern int selinux_policycap_openperm;
/* limitation of boundary depth */
#define POLICYDB_BOUNDS_MAXDEPTH 4
+int security_mls_enabled(void);
+
int security_load_policy(void *data, size_t len);
int security_policycap_supported(unsigned int req_cap);
@@ -96,13 +97,11 @@ struct av_decision {
/* definitions of av_decision.flags */
#define AVD_FLAGS_PERMISSIVE 0x0001
-int security_compute_av(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct av_decision *avd);
+void security_compute_av(u32 ssid, u32 tsid,
+ u16 tclass, struct av_decision *avd);
-int security_compute_av_user(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct av_decision *avd);
+void security_compute_av_user(u32 ssid, u32 tsid,
+ u16 tclass, struct av_decision *avd);
int security_transition_sid(u32 ssid, u32 tsid,
u16 tclass, u32 *out_sid);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index fab36fd..cd191bb 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -282,7 +282,8 @@ static ssize_t sel_read_mls(struct file *filp, char __user *buf,
char tmpbuf[TMPBUFLEN];
ssize_t length;
- length = scnprintf(tmpbuf, TMPBUFLEN, "%d", selinux_mls_enabled);
+ length = scnprintf(tmpbuf, TMPBUFLEN, "%d",
+ security_mls_enabled());
return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
}
@@ -494,7 +495,6 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
char *scon, *tcon;
u32 ssid, tsid;
u16 tclass;
- u32 req;
struct av_decision avd;
ssize_t length;
@@ -512,7 +512,7 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
goto out;
length = -EINVAL;
- if (sscanf(buf, "%s %s %hu %x", scon, tcon, &tclass, &req) != 4)
+ if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out2;
length = security_context_to_sid(scon, strlen(scon)+1, &ssid);
@@ -522,9 +522,7 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
if (length < 0)
goto out2;
- length = security_compute_av_user(ssid, tsid, tclass, req, &avd);
- if (length < 0)
- goto out2;
+ security_compute_av_user(ssid, tsid, tclass, &avd);
length = scnprintf(buf, SIMPLE_TRANSACTION_LIMIT,
"%x %x %x %x %u %x",
@@ -979,6 +977,8 @@ static int sel_make_bools(void)
u32 sid;
/* remove any existing files */
+ for (i = 0; i < bool_num; i++)
+ kfree(bool_pending_names[i]);
kfree(bool_pending_names);
kfree(bool_pending_values);
bool_pending_names = NULL;
diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h
index d9dd7a2..45e8fb0 100644
--- a/security/selinux/ss/context.h
+++ b/security/selinux/ss/context.h
@@ -41,9 +41,6 @@ static inline int mls_context_cpy(struct context *dst, struct context *src)
{
int rc;
- if (!selinux_mls_enabled)
- return 0;
-
dst->range.level[0].sens = src->range.level[0].sens;
rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
if (rc)
@@ -64,9 +61,6 @@ static inline int mls_context_cpy_low(struct context *dst, struct context *src)
{
int rc;
- if (!selinux_mls_enabled)
- return 0;
-
dst->range.level[0].sens = src->range.level[0].sens;
rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
if (rc)
@@ -82,9 +76,6 @@ out:
static inline int mls_context_cmp(struct context *c1, struct context *c2)
{
- if (!selinux_mls_enabled)
- return 1;
-
return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
ebitmap_cmp(&c1->range.level[0].cat, &c2->range.level[0].cat) &&
(c1->range.level[1].sens == c2->range.level[1].sens) &&
@@ -93,9 +84,6 @@ static inline int mls_context_cmp(struct context *c1, struct context *c2)
static inline void mls_context_destroy(struct context *c)
{
- if (!selinux_mls_enabled)
- return;
-
ebitmap_destroy(&c->range.level[0].cat);
ebitmap_destroy(&c->range.level[1].cat);
mls_context_init(c);
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 3f2b270..372b773 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -39,7 +39,7 @@ int mls_compute_context_len(struct context *context)
struct ebitmap *e;
struct ebitmap_node *node;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return 0;
len = 1; /* for the beginning ":" */
@@ -93,7 +93,7 @@ void mls_sid_to_context(struct context *context,
struct ebitmap *e;
struct ebitmap_node *node;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return;
scontextp = *scontext;
@@ -200,7 +200,7 @@ int mls_context_isvalid(struct policydb *p, struct context *c)
{
struct user_datum *usrdatum;
- if (!selinux_mls_enabled)
+ if (!p->mls_enabled)
return 1;
if (!mls_range_isvalid(p, &c->range))
@@ -253,7 +253,7 @@ int mls_context_to_sid(struct policydb *pol,
struct cat_datum *catdatum, *rngdatum;
int l, rc = -EINVAL;
- if (!selinux_mls_enabled) {
+ if (!pol->mls_enabled) {
if (def_sid != SECSID_NULL && oldc)
*scontext += strlen(*scontext)+1;
return 0;
@@ -387,7 +387,7 @@ int mls_from_string(char *str, struct context *context, gfp_t gfp_mask)
char *tmpstr, *freestr;
int rc;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return -EINVAL;
/* we need freestr because mls_context_to_sid will change
@@ -407,7 +407,7 @@ int mls_from_string(char *str, struct context *context, gfp_t gfp_mask)
/*
* Copies the MLS range `range' into `context'.
*/
-static inline int mls_range_set(struct context *context,
+int mls_range_set(struct context *context,
struct mls_range *range)
{
int l, rc = 0;
@@ -427,7 +427,7 @@ static inline int mls_range_set(struct context *context,
int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
struct context *usercon)
{
- if (selinux_mls_enabled) {
+ if (policydb.mls_enabled) {
struct mls_level *fromcon_sen = &(fromcon->range.level[0]);
struct mls_level *fromcon_clr = &(fromcon->range.level[1]);
struct mls_level *user_low = &(user->range.level[0]);
@@ -477,7 +477,7 @@ int mls_convert_context(struct policydb *oldp,
struct ebitmap_node *node;
int l, i;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return 0;
for (l = 0; l < 2; l++) {
@@ -513,23 +513,21 @@ int mls_compute_sid(struct context *scontext,
u32 specified,
struct context *newcontext)
{
- struct range_trans *rtr;
+ struct range_trans rtr;
+ struct mls_range *r;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return 0;
switch (specified) {
case AVTAB_TRANSITION:
/* Look for a range transition rule. */
- for (rtr = policydb.range_tr; rtr; rtr = rtr->next) {
- if (rtr->source_type == scontext->type &&
- rtr->target_type == tcontext->type &&
- rtr->target_class == tclass) {
- /* Set the range from the rule */
- return mls_range_set(newcontext,
- &rtr->target_range);
- }
- }
+ rtr.source_type = scontext->type;
+ rtr.target_type = tcontext->type;
+ rtr.target_class = tclass;
+ r = hashtab_search(policydb.range_tr, &rtr);
+ if (r)
+ return mls_range_set(newcontext, r);
/* Fallthrough */
case AVTAB_CHANGE:
if (tclass == policydb.process_class)
@@ -541,8 +539,8 @@ int mls_compute_sid(struct context *scontext,
case AVTAB_MEMBER:
/* Use the process effective MLS attributes. */
return mls_context_cpy_low(newcontext, scontext);
- default:
- return -EINVAL;
+
+ /* fall through */
}
return -EINVAL;
}
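
mls_compute_sid() drops the O(n) walk of the range_trans list for an O(1) hashtab_search(). The key is built on the stack, which is safe because hashtab_search() only reads the key fields and never stores the pointer. The lookup idiom:

	struct range_trans rtr = {
		.source_type  = scontext->type,
		.target_type  = tcontext->type,
		.target_class = tclass,
	};
	struct mls_range *r = hashtab_search(policydb.range_tr, &rtr);

	if (r)
		return mls_range_set(newcontext, r);	/* rule found */
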
@@ -561,7 +559,7 @@ int mls_compute_sid(struct context *scontext,
void mls_export_netlbl_lvl(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return;
secattr->attr.mls.lvl = context->range.level[0].sens - 1;
@@ -581,7 +579,7 @@ void mls_export_netlbl_lvl(struct context *context,
void mls_import_netlbl_lvl(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return;
context->range.level[0].sens = secattr->attr.mls.lvl + 1;
@@ -603,7 +601,7 @@ int mls_export_netlbl_cat(struct context *context,
{
int rc;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return 0;
rc = ebitmap_netlbl_export(&context->range.level[0].cat,
@@ -631,7 +629,7 @@ int mls_import_netlbl_cat(struct context *context,
{
int rc;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return 0;
rc = ebitmap_netlbl_import(&context->range.level[0].cat,
diff --git a/security/selinux/ss/mls.h b/security/selinux/ss/mls.h
index 1276715..cd91526 100644
--- a/security/selinux/ss/mls.h
+++ b/security/selinux/ss/mls.h
@@ -39,6 +39,8 @@ int mls_context_to_sid(struct policydb *p,
int mls_from_string(char *str, struct context *context, gfp_t gfp_mask);
+int mls_range_set(struct context *context, struct mls_range *range);
+
int mls_convert_context(struct policydb *oldp,
struct policydb *newp,
struct context *context);
diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h
index b6e943a..03bed52 100644
--- a/security/selinux/ss/mls_types.h
+++ b/security/selinux/ss/mls_types.h
@@ -15,6 +15,7 @@
#define _SS_MLS_TYPES_H_
#include "security.h"
+#include "ebitmap.h"
struct mls_level {
u32 sens; /* sensitivity */
@@ -27,18 +28,12 @@ struct mls_range {
static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
{
- if (!selinux_mls_enabled)
- return 1;
-
return ((l1->sens == l2->sens) &&
ebitmap_cmp(&l1->cat, &l2->cat));
}
static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
{
- if (!selinux_mls_enabled)
- return 1;
-
return ((l1->sens >= l2->sens) &&
ebitmap_contains(&l1->cat, &l2->cat));
}
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index f036672..23c6e53 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -52,8 +52,6 @@ static char *symtab_name[SYM_NUM] = {
};
#endif
-int selinux_mls_enabled;
-
static unsigned int symtab_sizes[SYM_NUM] = {
2,
32,
@@ -177,6 +175,21 @@ out_free_role:
goto out;
}
+static u32 rangetr_hash(struct hashtab *h, const void *k)
+{
+ const struct range_trans *key = k;
+ return (key->source_type + (key->target_type << 3) +
+ (key->target_class << 5)) & (h->size - 1);
+}
+
+static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
+{
+ const struct range_trans *key1 = k1, *key2 = k2;
+ return (key1->source_type != key2->source_type ||
+ key1->target_type != key2->target_type ||
+ key1->target_class != key2->target_class);
+}
+
/*
* Initialize a policy database structure.
*/
@@ -204,6 +217,10 @@ static int policydb_init(struct policydb *p)
if (rc)
goto out_free_symtab;
+ p->range_tr = hashtab_create(rangetr_hash, rangetr_cmp, 256);
+ if (!p->range_tr)
+ goto out_free_symtab;
+
ebitmap_init(&p->policycaps);
ebitmap_init(&p->permissive_map);
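
rangetr_hash() mixes the three key fields with shifts and masks the result with h->size - 1, which only yields a valid bucket index because the table size (256, from the hashtab_create() call in policydb_init() above) is a power of two. A worked example:

	/* key = { .source_type = 5, .target_type = 3, .target_class = 2 } */
	u32 bucket = (5 + (3 << 3) + (2 << 5)) & (256 - 1);
	/* = (5 + 24 + 64) & 255 = 93 */
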
@@ -408,6 +425,20 @@ static void symtab_hash_eval(struct symtab *s)
info.slots_used, h->size, info.max_chain_len);
}
}
+
+static void rangetr_hash_eval(struct hashtab *h)
+{
+ struct hashtab_info info;
+
+ hashtab_stat(h, &info);
+ printk(KERN_DEBUG "SELinux: rangetr: %d entries and %d/%d buckets used, "
+ "longest chain length %d\n", h->nel,
+ info.slots_used, h->size, info.max_chain_len);
+}
+#else
+static inline void rangetr_hash_eval(struct hashtab *h)
+{
+}
#endif
/*
@@ -422,7 +453,7 @@ static int policydb_index_others(struct policydb *p)
printk(KERN_DEBUG "SELinux: %d users, %d roles, %d types, %d bools",
p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim);
- if (selinux_mls_enabled)
+ if (p->mls_enabled)
printk(", %d sens, %d cats", p->p_levels.nprim,
p->p_cats.nprim);
printk("\n");
@@ -612,6 +643,17 @@ static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) =
cat_destroy,
};
+static int range_tr_destroy(void *key, void *datum, void *p)
+{
+ struct mls_range *rt = datum;
+ kfree(key);
+ ebitmap_destroy(&rt->level[0].cat);
+ ebitmap_destroy(&rt->level[1].cat);
+ kfree(datum);
+ cond_resched();
+ return 0;
+}
+
static void ocontext_destroy(struct ocontext *c, int i)
{
context_destroy(&c->context[0]);
@@ -632,7 +674,6 @@ void policydb_destroy(struct policydb *p)
int i;
struct role_allow *ra, *lra = NULL;
struct role_trans *tr, *ltr = NULL;
- struct range_trans *rt, *lrt = NULL;
for (i = 0; i < SYM_NUM; i++) {
cond_resched();
@@ -693,20 +734,8 @@ void policydb_destroy(struct policydb *p)
}
kfree(lra);
- for (rt = p->range_tr; rt; rt = rt->next) {
- cond_resched();
- if (lrt) {
- ebitmap_destroy(&lrt->target_range.level[0].cat);
- ebitmap_destroy(&lrt->target_range.level[1].cat);
- kfree(lrt);
- }
- lrt = rt;
- }
- if (lrt) {
- ebitmap_destroy(&lrt->target_range.level[0].cat);
- ebitmap_destroy(&lrt->target_range.level[1].cat);
- kfree(lrt);
- }
+ hashtab_map(p->range_tr, range_tr_destroy, NULL);
+ hashtab_destroy(p->range_tr);
if (p->type_attr_map) {
for (i = 0; i < p->p_types.nprim; i++)
@@ -1686,12 +1715,11 @@ int policydb_read(struct policydb *p, void *fp)
int i, j, rc;
__le32 buf[4];
u32 nodebuf[8];
- u32 len, len2, config, nprim, nel, nel2;
+ u32 len, len2, nprim, nel, nel2;
char *policydb_str;
struct policydb_compat_info *info;
- struct range_trans *rt, *lrt;
-
- config = 0;
+ struct range_trans *rt;
+ struct mls_range *r;
rc = policydb_init(p);
if (rc)
@@ -1740,7 +1768,7 @@ int policydb_read(struct policydb *p, void *fp)
kfree(policydb_str);
policydb_str = NULL;
- /* Read the version, config, and table sizes. */
+ /* Read the version and table sizes. */
rc = next_entry(buf, fp, sizeof(u32)*4);
if (rc < 0)
goto bad;
@@ -1755,13 +1783,7 @@ int policydb_read(struct policydb *p, void *fp)
}
if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_MLS)) {
- if (ss_initialized && !selinux_mls_enabled) {
- printk(KERN_ERR "SELinux: Cannot switch between non-MLS"
- " and MLS policies\n");
- goto bad;
- }
- selinux_mls_enabled = 1;
- config |= POLICYDB_CONFIG_MLS;
+ p->mls_enabled = 1;
if (p->policyvers < POLICYDB_VERSION_MLS) {
printk(KERN_ERR "SELinux: security policydb version %d "
@@ -1769,12 +1791,6 @@ int policydb_read(struct policydb *p, void *fp)
p->policyvers);
goto bad;
}
- } else {
- if (ss_initialized && selinux_mls_enabled) {
- printk(KERN_ERR "SELinux: Cannot switch between MLS and"
- " non-MLS policies\n");
- goto bad;
- }
}
p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN);
p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN);
@@ -2122,44 +2138,61 @@ int policydb_read(struct policydb *p, void *fp)
if (rc < 0)
goto bad;
nel = le32_to_cpu(buf[0]);
- lrt = NULL;
for (i = 0; i < nel; i++) {
rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (!rt) {
rc = -ENOMEM;
goto bad;
}
- if (lrt)
- lrt->next = rt;
- else
- p->range_tr = rt;
rc = next_entry(buf, fp, (sizeof(u32) * 2));
- if (rc < 0)
+ if (rc < 0) {
+ kfree(rt);
goto bad;
+ }
rt->source_type = le32_to_cpu(buf[0]);
rt->target_type = le32_to_cpu(buf[1]);
if (new_rangetr) {
rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
+ if (rc < 0) {
+ kfree(rt);
goto bad;
+ }
rt->target_class = le32_to_cpu(buf[0]);
} else
rt->target_class = p->process_class;
if (!policydb_type_isvalid(p, rt->source_type) ||
!policydb_type_isvalid(p, rt->target_type) ||
!policydb_class_isvalid(p, rt->target_class)) {
+ kfree(rt);
rc = -EINVAL;
goto bad;
}
- rc = mls_read_range_helper(&rt->target_range, fp);
- if (rc)
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r) {
+ kfree(rt);
+ rc = -ENOMEM;
goto bad;
- if (!mls_range_isvalid(p, &rt->target_range)) {
+ }
+ rc = mls_read_range_helper(r, fp);
+ if (rc) {
+ kfree(rt);
+ kfree(r);
+ goto bad;
+ }
+ if (!mls_range_isvalid(p, r)) {
printk(KERN_WARNING "SELinux: rangetrans: invalid range\n");
+ kfree(rt);
+ kfree(r);
+ goto bad;
+ }
+ rc = hashtab_insert(p->range_tr, rt, r);
+ if (rc) {
+ kfree(rt);
+ kfree(r);
goto bad;
}
- lrt = rt;
}
+ rangetr_hash_eval(p->range_tr);
}
p->type_attr_map = kmalloc(p->p_types.nprim*sizeof(struct ebitmap), GFP_KERNEL);
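
The reworked range-transition loader follows a strict ownership rule: rt and r belong to the loop until hashtab_insert() succeeds, so every failure path frees both before jumping to bad, and the table's contents are reclaimed later via hashtab_map(..., range_tr_destroy, ...). The pattern, reduced to a skeleton with a hypothetical fill helper (embedded ebitmaps would need their own cleanup):

	key = kzalloc(sizeof(*key), GFP_KERNEL);
	val = kzalloc(sizeof(*val), GFP_KERNEL);
	if (!key || !val || fill_from_image(key, val, fp) ||
	    hashtab_insert(tab, key, val)) {
		kfree(key);		/* still ours on any failure */
		kfree(val);
		goto bad;
	}
	/* success: the table owns key/val from here on */
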
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index cdcc570..26d9adf 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -27,6 +27,8 @@
#include "symtab.h"
#include "avtab.h"
#include "sidtab.h"
+#include "ebitmap.h"
+#include "mls_types.h"
#include "context.h"
#include "constraint.h"
@@ -113,8 +115,6 @@ struct range_trans {
u32 source_type;
u32 target_type;
u32 target_class;
- struct mls_range target_range;
- struct range_trans *next;
};
/* Boolean data type */
@@ -187,6 +187,8 @@ struct genfs {
/* The policy database */
struct policydb {
+ int mls_enabled;
+
/* symbol tables */
struct symtab symtab[SYM_NUM];
#define p_commons symtab[SYM_COMMONS]
@@ -240,8 +242,8 @@ struct policydb {
fixed labeling behavior. */
struct genfs *genfs;
- /* range transitions */
- struct range_trans *range_tr;
+ /* range transitions table (range_trans_key -> mls_range) */
+ struct hashtab *range_tr;
/* type -> attribute reverse mapping */
struct ebitmap *type_attr_map;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index b3efae2..cf27b3e 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -26,6 +26,10 @@
*
 * Added support for bounds domain and audit messages on masked permissions
*
+ * Updated: Guido Trentalancia <guido@trentalancia.com>
+ *
+ * Added support for runtime switching of the policy type
+ *
* Copyright (C) 2008, 2009 NEC Corporation
* Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P.
* Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
@@ -87,11 +91,10 @@ static u32 latest_granting;
static int context_struct_to_string(struct context *context, char **scontext,
u32 *scontext_len);
-static int context_struct_compute_av(struct context *scontext,
- struct context *tcontext,
- u16 tclass,
- u32 requested,
- struct av_decision *avd);
+static void context_struct_compute_av(struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ struct av_decision *avd);
struct selinux_mapping {
u16 value; /* policy value */
@@ -196,23 +199,6 @@ static u16 unmap_class(u16 tclass)
return tclass;
}
-static u32 unmap_perm(u16 tclass, u32 tperm)
-{
- if (tclass < current_mapping_size) {
- unsigned i;
- u32 kperm = 0;
-
- for (i = 0; i < current_mapping[tclass].num_perms; i++)
- if (tperm & (1<<i)) {
- kperm |= current_mapping[tclass].perms[i];
- tperm &= ~(1<<i);
- }
- return kperm;
- }
-
- return tperm;
-}
-
static void map_decision(u16 tclass, struct av_decision *avd,
int allow_unknown)
{
@@ -250,6 +236,10 @@ static void map_decision(u16 tclass, struct av_decision *avd,
}
}
+int security_mls_enabled(void)
+{
+ return policydb.mls_enabled;
+}
/*
* Return the boolean value of a constraint expression
@@ -465,7 +455,8 @@ static void security_dump_masked_av(struct context *scontext,
char *scontext_name = NULL;
char *tcontext_name = NULL;
char *permission_names[32];
- int index, length;
+ int index;
+ u32 length;
bool need_comma = false;
if (!permissions)
@@ -532,7 +523,6 @@ out:
static void type_attribute_bounds_av(struct context *scontext,
struct context *tcontext,
u16 tclass,
- u32 requested,
struct av_decision *avd)
{
struct context lo_scontext;
@@ -553,7 +543,6 @@ static void type_attribute_bounds_av(struct context *scontext,
context_struct_compute_av(&lo_scontext,
tcontext,
tclass,
- requested,
&lo_avd);
if ((lo_avd.allowed & avd->allowed) == avd->allowed)
return; /* no masked permission */
@@ -569,7 +558,6 @@ static void type_attribute_bounds_av(struct context *scontext,
context_struct_compute_av(scontext,
&lo_tcontext,
tclass,
- requested,
&lo_avd);
if ((lo_avd.allowed & avd->allowed) == avd->allowed)
return; /* no masked permission */
@@ -586,7 +574,6 @@ static void type_attribute_bounds_av(struct context *scontext,
context_struct_compute_av(&lo_scontext,
&lo_tcontext,
tclass,
- requested,
&lo_avd);
if ((lo_avd.allowed & avd->allowed) == avd->allowed)
return; /* no masked permission */
@@ -607,11 +594,10 @@ static void type_attribute_bounds_av(struct context *scontext,
* Compute access vectors based on a context structure pair for
* the permissions in a particular class.
*/
-static int context_struct_compute_av(struct context *scontext,
- struct context *tcontext,
- u16 tclass,
- u32 requested,
- struct av_decision *avd)
+static void context_struct_compute_av(struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ struct av_decision *avd)
{
struct constraint_node *constraint;
struct role_allow *ra;
@@ -622,19 +608,14 @@ static int context_struct_compute_av(struct context *scontext,
struct ebitmap_node *snode, *tnode;
unsigned int i, j;
- /*
- * Initialize the access vectors to the default values.
- */
avd->allowed = 0;
avd->auditallow = 0;
avd->auditdeny = 0xffffffff;
- avd->seqno = latest_granting;
- avd->flags = 0;
if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
if (printk_ratelimit())
printk(KERN_WARNING "SELinux: Invalid class %hu\n", tclass);
- return -EINVAL;
+ return;
}
tclass_datum = policydb.class_val_to_struct[tclass - 1];
@@ -705,9 +686,7 @@ static int context_struct_compute_av(struct context *scontext,
* permission and notice it to userspace via audit.
*/
type_attribute_bounds_av(scontext, tcontext,
- tclass, requested, avd);
-
- return 0;
+ tclass, avd);
}
static int security_validtrans_handle_fail(struct context *ocontext,
@@ -864,7 +843,7 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
if (rc) {
char *old_name = NULL;
char *new_name = NULL;
- int length;
+ u32 length;
if (!context_struct_to_string(old_context,
&old_name, &length) &&
@@ -886,110 +865,116 @@ out:
return rc;
}
-
-static int security_compute_av_core(u32 ssid,
- u32 tsid,
- u16 tclass,
- u32 requested,
- struct av_decision *avd)
+static void avd_init(struct av_decision *avd)
{
- struct context *scontext = NULL, *tcontext = NULL;
- int rc = 0;
-
- scontext = sidtab_search(&sidtab, ssid);
- if (!scontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
- __func__, ssid);
- return -EINVAL;
- }
- tcontext = sidtab_search(&sidtab, tsid);
- if (!tcontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
- __func__, tsid);
- return -EINVAL;
- }
-
- rc = context_struct_compute_av(scontext, tcontext, tclass,
- requested, avd);
-
- /* permissive domain? */
- if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
- avd->flags |= AVD_FLAGS_PERMISSIVE;
-
- return rc;
+ avd->allowed = 0;
+ avd->auditallow = 0;
+ avd->auditdeny = 0xffffffff;
+ avd->seqno = latest_granting;
+ avd->flags = 0;
}
+
/**
* security_compute_av - Compute access vector decisions.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
- * @requested: requested permissions
* @avd: access vector decisions
*
* Compute a set of access vector decisions based on the
* SID pair (@ssid, @tsid) for the permissions in @tclass.
- * Return -%EINVAL if any of the parameters are invalid or %0
- * if the access vector decisions were computed successfully.
*/
-int security_compute_av(u32 ssid,
- u32 tsid,
- u16 orig_tclass,
- u32 orig_requested,
- struct av_decision *avd)
+void security_compute_av(u32 ssid,
+ u32 tsid,
+ u16 orig_tclass,
+ struct av_decision *avd)
{
u16 tclass;
- u32 requested;
- int rc;
+ struct context *scontext = NULL, *tcontext = NULL;
read_lock(&policy_rwlock);
-
+ avd_init(avd);
if (!ss_initialized)
goto allow;
- requested = unmap_perm(orig_tclass, orig_requested);
+ scontext = sidtab_search(&sidtab, ssid);
+ if (!scontext) {
+ printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ __func__, ssid);
+ goto out;
+ }
+
+ /* permissive domain? */
+ if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
+ avd->flags |= AVD_FLAGS_PERMISSIVE;
+
+ tcontext = sidtab_search(&sidtab, tsid);
+ if (!tcontext) {
+ printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ __func__, tsid);
+ goto out;
+ }
+
tclass = unmap_class(orig_tclass);
if (unlikely(orig_tclass && !tclass)) {
if (policydb.allow_unknown)
goto allow;
- rc = -EINVAL;
goto out;
}
- rc = security_compute_av_core(ssid, tsid, tclass, requested, avd);
+ context_struct_compute_av(scontext, tcontext, tclass, avd);
map_decision(orig_tclass, avd, policydb.allow_unknown);
out:
read_unlock(&policy_rwlock);
- return rc;
+ return;
allow:
avd->allowed = 0xffffffff;
- avd->auditallow = 0;
- avd->auditdeny = 0xffffffff;
- avd->seqno = latest_granting;
- avd->flags = 0;
- rc = 0;
goto out;
}
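With the switch to a void API, callers no longer branch on an error code: an unrecognized SID or invalid class now simply leaves avd->allowed empty, which is deny-by-default. A minimal caller-side sketch (the function names are from this patch; the check itself is illustrative):

	struct av_decision avd;

	security_compute_av(ssid, tsid, tclass, &avd);
	if (!(avd.allowed & requested))
		return -EACCES;	/* denied: policy forbids it, or a SID/class was invalid */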
-int security_compute_av_user(u32 ssid,
- u32 tsid,
- u16 tclass,
- u32 requested,
- struct av_decision *avd)
+void security_compute_av_user(u32 ssid,
+ u32 tsid,
+ u16 tclass,
+ struct av_decision *avd)
{
- int rc;
+ struct context *scontext = NULL, *tcontext = NULL;
- if (!ss_initialized) {
- avd->allowed = 0xffffffff;
- avd->auditallow = 0;
- avd->auditdeny = 0xffffffff;
- avd->seqno = latest_granting;
- return 0;
+ read_lock(&policy_rwlock);
+ avd_init(avd);
+ if (!ss_initialized)
+ goto allow;
+
+ scontext = sidtab_search(&sidtab, ssid);
+ if (!scontext) {
+ printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ __func__, ssid);
+ goto out;
}
- read_lock(&policy_rwlock);
- rc = security_compute_av_core(ssid, tsid, tclass, requested, avd);
+ /* permissive domain? */
+ if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
+ avd->flags |= AVD_FLAGS_PERMISSIVE;
+
+ tcontext = sidtab_search(&sidtab, tsid);
+ if (!tcontext) {
+ printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ __func__, tsid);
+ goto out;
+ }
+
+ if (unlikely(!tclass)) {
+ if (policydb.allow_unknown)
+ goto allow;
+ goto out;
+ }
+
+ context_struct_compute_av(scontext, tcontext, tclass, avd);
+ out:
read_unlock(&policy_rwlock);
- return rc;
+ return;
+allow:
+ avd->allowed = 0xffffffff;
+ goto out;
}
/*
@@ -1565,7 +1550,10 @@ static int clone_sid(u32 sid,
{
struct sidtab *s = arg;
- return sidtab_insert(s, sid, context);
+ if (sid > SECINITSID_NUM)
+ return sidtab_insert(s, sid, context);
+ else
+ return 0;
}
static inline int convert_context_handle_invalid_context(struct context *context)
@@ -1606,12 +1594,17 @@ static int convert_context(u32 key,
{
struct convert_context_args *args;
struct context oldc;
+ struct ocontext *oc;
+ struct mls_range *range;
struct role_datum *role;
struct type_datum *typdatum;
struct user_datum *usrdatum;
char *s;
u32 len;
- int rc;
+ int rc = 0;
+
+ if (key <= SECINITSID_NUM)
+ goto out;
args = p;
@@ -1673,9 +1666,39 @@ static int convert_context(u32 key,
goto bad;
c->type = typdatum->value;
- rc = mls_convert_context(args->oldp, args->newp, c);
- if (rc)
- goto bad;
+ /* Convert the MLS fields if dealing with MLS policies */
+ if (args->oldp->mls_enabled && args->newp->mls_enabled) {
+ rc = mls_convert_context(args->oldp, args->newp, c);
+ if (rc)
+ goto bad;
+ } else if (args->oldp->mls_enabled && !args->newp->mls_enabled) {
+ /*
+ * Switching between MLS and non-MLS policy:
+ * free any storage used by the MLS fields in the
+ * context for all existing entries in the sidtab.
+ */
+ mls_context_destroy(c);
+ } else if (!args->oldp->mls_enabled && args->newp->mls_enabled) {
+ /*
+ * Switching between non-MLS and MLS policy:
+ * ensure that the MLS fields of the context for all
+ * existing entries in the sidtab are filled in with a
+ * suitable default value, likely taken from one of the
+ * initial SIDs.
+ */
+ oc = args->newp->ocontexts[OCON_ISID];
+ while (oc && oc->sid[0] != SECINITSID_UNLABELED)
+ oc = oc->next;
+ if (!oc) {
+ printk(KERN_ERR "SELinux: unable to look up"
+ " the initial SIDs list\n");
+ goto bad;
+ }
+ range = &oc->context[0].range;
+ rc = mls_range_set(c, range);
+ if (rc)
+ goto bad;
+ }
/* Check the validity of the new context. */
if (!policydb_context_isvalid(args->newp, c)) {
@@ -1771,9 +1794,17 @@ int security_load_policy(void *data, size_t len)
if (policydb_read(&newpolicydb, fp))
return -EINVAL;
- if (sidtab_init(&newsidtab)) {
+ /* If switching between different policy types, log MLS status */
+ if (policydb.mls_enabled && !newpolicydb.mls_enabled)
+ printk(KERN_INFO "SELinux: Disabling MLS support...\n");
+ else if (!policydb.mls_enabled && newpolicydb.mls_enabled)
+ printk(KERN_INFO "SELinux: Enabling MLS support...\n");
+
+ rc = policydb_load_isids(&newpolicydb, &newsidtab);
+ if (rc) {
+ printk(KERN_ERR "SELinux: unable to load the initial SIDs\n");
policydb_destroy(&newpolicydb);
- return -ENOMEM;
+ return rc;
}
if (selinux_set_mapping(&newpolicydb, secclass_map,
@@ -1800,8 +1831,12 @@ int security_load_policy(void *data, size_t len)
args.oldp = &policydb;
args.newp = &newpolicydb;
rc = sidtab_map(&newsidtab, convert_context, &args);
- if (rc)
+ if (rc) {
+ printk(KERN_ERR "SELinux: unable to convert the internal"
+ " representation of contexts in the new SID"
+ " table\n");
goto err;
+ }
/* Save the old policydb and SID table to free later. */
memcpy(&oldpolicydb, &policydb, sizeof policydb);
@@ -2397,7 +2432,7 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
u32 len;
int rc = 0;
- if (!ss_initialized || !selinux_mls_enabled) {
+ if (!ss_initialized || !policydb.mls_enabled) {
*new_sid = sid;
goto out;
}
@@ -2498,7 +2533,7 @@ int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
/* we don't need to check ss_initialized here since the only way both
* nlbl_sid and xfrm_sid are not equal to SECSID_NULL would be if the
* security server was initialized and ss_initialized was true */
- if (!selinux_mls_enabled) {
+ if (!policydb.mls_enabled) {
*peer_sid = SECSID_NULL;
return 0;
}
@@ -2555,7 +2590,7 @@ int security_get_classes(char ***classes, int *nclasses)
read_lock(&policy_rwlock);
*nclasses = policydb.p_classes.nprim;
- *classes = kcalloc(*nclasses, sizeof(*classes), GFP_ATOMIC);
+ *classes = kcalloc(*nclasses, sizeof(**classes), GFP_ATOMIC);
if (!*classes)
goto out;
@@ -2602,7 +2637,7 @@ int security_get_permissions(char *class, char ***perms, int *nperms)
}
*nperms = match->permissions.nprim;
- *perms = kcalloc(*nperms, sizeof(*perms), GFP_ATOMIC);
+ *perms = kcalloc(*nperms, sizeof(**perms), GFP_ATOMIC);
if (!*perms)
goto out;
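The two kcalloc() fixes above are worth a second look: *classes has type char **, so the element size of the array being allocated is sizeof(**classes) (a char *), not sizeof(*classes) (a char **). Both happen to be pointer-sized on common ABIs, which is why the old code worked by accident. An illustrative reading:

	char ***classes;  /* out-parameter, as in security_get_classes() */
	/* sizeof(*classes)  == sizeof(char **): the pointer to the array  */
	/* sizeof(**classes) == sizeof(char *) : one element of the array  */
	*classes = kcalloc(*nclasses, sizeof(**classes), GFP_ATOMIC);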
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 529c9ca..5225e66 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -157,12 +157,12 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
*
* Returns 0 on success, error code otherwise.
*/
-static int smack_syslog(int type)
+static int smack_syslog(int type, bool from_file)
{
int rc;
char *sp = current_security();
- rc = cap_syslog(type);
+ rc = cap_syslog(type, from_file);
if (rc != 0)
return rc;
@@ -387,7 +387,7 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags)
struct smk_audit_info ad;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
- smk_ad_setfield_u_fs_path_dentry(&ad, mnt->mnt_mountpoint);
+ smk_ad_setfield_u_fs_path_dentry(&ad, mnt->mnt_root);
smk_ad_setfield_u_fs_path_mnt(&ad, mnt);
sbp = mnt->mnt_sb->s_security;
diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
index 10ccd68..60a9e20 100644
--- a/security/tomoyo/Makefile
+++ b/security/tomoyo/Makefile
@@ -1 +1 @@
-obj-y = common.o realpath.o tomoyo.o domain.o file.o
+obj-y = common.o realpath.o tomoyo.o domain.o file.o gc.o
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index e0d0354..ff51f10 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -12,9 +12,10 @@
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/hardirq.h>
-#include "realpath.h"
#include "common.h"
-#include "tomoyo.h"
+
+/* Lock for protecting policy. */
+DEFINE_MUTEX(tomoyo_policy_lock);
/* Has policy loading been done? */
bool tomoyo_policy_loaded;
@@ -178,14 +179,12 @@ static void tomoyo_normalize_line(unsigned char *buffer)
* 1 = must / -1 = must not / 0 = don't care
* @end_type: Should the pathname end with '/'?
* 1 = must / -1 = must not / 0 = don't care
- * @function: The name of function calling me.
*
* Check whether the given filename follows the naming rules.
* Returns true if @filename follows the naming rules, false otherwise.
*/
bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
- const s8 pattern_type, const s8 end_type,
- const char *function)
+ const s8 pattern_type, const s8 end_type)
{
const char *const start = filename;
bool in_repetition = false;
@@ -193,7 +192,6 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
unsigned char c;
unsigned char d;
unsigned char e;
- const char *original_filename = filename;
if (!filename)
goto out;
@@ -282,25 +280,20 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
goto out;
return true;
out:
- printk(KERN_DEBUG "%s: Invalid pathname '%s'\n", function,
- original_filename);
return false;
}
/**
* tomoyo_is_correct_domain - Check whether the given domainname follows the naming rules.
* @domainname: The domainname to check.
- * @function: The name of function calling me.
*
* Returns true if @domainname follows the naming rules, false otherwise.
*/
-bool tomoyo_is_correct_domain(const unsigned char *domainname,
- const char *function)
+bool tomoyo_is_correct_domain(const unsigned char *domainname)
{
unsigned char c;
unsigned char d;
unsigned char e;
- const char *org_domainname = domainname;
if (!domainname || strncmp(domainname, TOMOYO_ROOT_NAME,
TOMOYO_ROOT_NAME_LEN))
@@ -343,8 +336,6 @@ bool tomoyo_is_correct_domain(const unsigned char *domainname,
} while (*domainname);
return true;
out:
- printk(KERN_DEBUG "%s: Invalid domainname '%s'\n", function,
- org_domainname);
return false;
}
@@ -365,10 +356,9 @@ bool tomoyo_is_domain_def(const unsigned char *buffer)
*
* @domainname: The domainname to find.
*
- * Caller must call down_read(&tomoyo_domain_list_lock); or
- * down_write(&tomoyo_domain_list_lock); .
- *
* Returns pointer to "struct tomoyo_domain_info" if found, NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
{
@@ -377,7 +367,7 @@ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
name.name = domainname;
tomoyo_fill_path_info(&name);
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
if (!domain->is_deleted &&
!tomoyo_pathcmp(&name, domain->domainname))
return domain;
@@ -748,7 +738,7 @@ bool tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...)
*
* Returns the tomoyo_realpath() of current process on success, NULL otherwise.
*
- * This function uses tomoyo_alloc(), so the caller must call tomoyo_free()
+ * This function uses kzalloc(), so the caller must call kfree()
* if this function didn't return NULL.
*/
static const char *tomoyo_get_exe(void)
@@ -829,6 +819,8 @@ bool tomoyo_verbose_mode(const struct tomoyo_domain_info *domain)
* @domain: Pointer to "struct tomoyo_domain_info".
*
* Returns true if the domain is not exceeded quota, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain)
{
@@ -837,61 +829,29 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain)
if (!domain)
return true;
- down_read(&tomoyo_domain_acl_info_list_lock);
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- if (ptr->type & TOMOYO_ACL_DELETED)
- continue;
- switch (tomoyo_acl_type2(ptr)) {
- struct tomoyo_single_path_acl_record *acl1;
- struct tomoyo_double_path_acl_record *acl2;
- u16 perm;
- case TOMOYO_TYPE_SINGLE_PATH_ACL:
- acl1 = container_of(ptr,
- struct tomoyo_single_path_acl_record,
- head);
- perm = acl1->perm;
- if (perm & (1 << TOMOYO_TYPE_EXECUTE_ACL))
- count++;
- if (perm &
- ((1 << TOMOYO_TYPE_READ_ACL) |
- (1 << TOMOYO_TYPE_WRITE_ACL)))
- count++;
- if (perm & (1 << TOMOYO_TYPE_CREATE_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_UNLINK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKDIR_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_RMDIR_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKFIFO_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKSOCK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKBLOCK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKCHAR_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_TRUNCATE_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_SYMLINK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_REWRITE_ACL))
- count++;
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+ switch (ptr->type) {
+ struct tomoyo_path_acl *acl;
+ u32 perm;
+ u8 i;
+ case TOMOYO_TYPE_PATH_ACL:
+ acl = container_of(ptr, struct tomoyo_path_acl, head);
+ perm = acl->perm | (((u32) acl->perm_high) << 16);
+ for (i = 0; i < TOMOYO_MAX_PATH_OPERATION; i++)
+ if (perm & (1 << i))
+ count++;
+ if (perm & (1 << TOMOYO_TYPE_READ_WRITE))
+ count -= 2;
break;
- case TOMOYO_TYPE_DOUBLE_PATH_ACL:
- acl2 = container_of(ptr,
- struct tomoyo_double_path_acl_record,
- head);
- perm = acl2->perm;
- if (perm & (1 << TOMOYO_TYPE_LINK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_RENAME_ACL))
- count++;
+ case TOMOYO_TYPE_PATH2_ACL:
+ perm = container_of(ptr, struct tomoyo_path2_acl, head)
+ ->perm;
+ for (i = 0; i < TOMOYO_MAX_PATH2_OPERATION; i++)
+ if (perm & (1 << i))
+ count++;
break;
}
}
- up_read(&tomoyo_domain_acl_info_list_lock);
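The count -= 2 above compensates for the READ_WRITE invariant documented in common.h: a combined read/write grant sets three bits (READ, WRITE and READ_WRITE), so the bit-counting loop would otherwise charge one logical entry three times. Worked example with a hypothetical value:

	perm = (1 << TOMOYO_TYPE_READ) | (1 << TOMOYO_TYPE_WRITE) |
	       (1 << TOMOYO_TYPE_READ_WRITE);
	/* the loop counts 3 set bits; count -= 2 leaves 1 logical entry */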
if (count < tomoyo_check_flags(domain, TOMOYO_MAX_ACCEPT_ENTRY))
return true;
if (!domain->quota_warned) {
@@ -923,9 +883,11 @@ static struct tomoyo_profile *tomoyo_find_or_assign_new_profile(const unsigned
ptr = tomoyo_profile_ptr[profile];
if (ptr)
goto ok;
- ptr = tomoyo_alloc_element(sizeof(*ptr));
- if (!ptr)
+ ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
+ if (!tomoyo_memory_ok(ptr)) {
+ kfree(ptr);
goto ok;
+ }
for (i = 0; i < TOMOYO_MAX_CONTROL_INDEX; i++)
ptr->value[i] = tomoyo_control_array[i].current_value;
mb(); /* Avoid out-of-order execution. */
@@ -966,7 +928,9 @@ static int tomoyo_write_profile(struct tomoyo_io_buffer *head)
return -EINVAL;
*cp = '\0';
if (!strcmp(data, "COMMENT")) {
- profile->comment = tomoyo_save_name(cp + 1);
+ const struct tomoyo_path_info *old_comment = profile->comment;
+ profile->comment = tomoyo_get_name(cp + 1);
+ tomoyo_put_name(old_comment);
return 0;
}
for (i = 0; i < TOMOYO_MAX_CONTROL_INDEX; i++) {
@@ -1061,27 +1025,6 @@ static int tomoyo_read_profile(struct tomoyo_io_buffer *head)
}
/*
- * tomoyo_policy_manager_entry is a structure which is used for holding list of
- * domainnames or programs which are permitted to modify configuration via
- * /sys/kernel/security/tomoyo/ interface.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_policy_manager_list .
- * (2) "manager" is a domainname or a program's pathname.
- * (3) "is_domain" is a bool which is true if "manager" is a domainname, false
- * otherwise.
- * (4) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- */
-struct tomoyo_policy_manager_entry {
- struct list_head list;
- /* A path to program or a domainname. */
- const struct tomoyo_path_info *manager;
- bool is_domain; /* True if manager is a domainname. */
- bool is_deleted; /* True if this entry is deleted. */
-};
-
-/*
* tomoyo_policy_manager_list is used for holding list of domainnames or
* programs which are permitted to modify configuration via
* /sys/kernel/security/tomoyo/ interface.
@@ -1111,8 +1054,7 @@ struct tomoyo_policy_manager_entry {
*
* # cat /sys/kernel/security/tomoyo/manager
*/
-static LIST_HEAD(tomoyo_policy_manager_list);
-static DECLARE_RWSEM(tomoyo_policy_manager_list_lock);
+LIST_HEAD(tomoyo_policy_manager_list);
/**
* tomoyo_update_manager_entry - Add a manager entry.
@@ -1121,48 +1063,50 @@ static DECLARE_RWSEM(tomoyo_policy_manager_list_lock);
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_manager_entry(const char *manager,
const bool is_delete)
{
- struct tomoyo_policy_manager_entry *new_entry;
+ struct tomoyo_policy_manager_entry *entry = NULL;
struct tomoyo_policy_manager_entry *ptr;
const struct tomoyo_path_info *saved_manager;
- int error = -ENOMEM;
+ int error = is_delete ? -ENOENT : -ENOMEM;
bool is_domain = false;
if (tomoyo_is_domain_def(manager)) {
- if (!tomoyo_is_correct_domain(manager, __func__))
+ if (!tomoyo_is_correct_domain(manager))
return -EINVAL;
is_domain = true;
} else {
- if (!tomoyo_is_correct_path(manager, 1, -1, -1, __func__))
+ if (!tomoyo_is_correct_path(manager, 1, -1, -1))
return -EINVAL;
}
- saved_manager = tomoyo_save_name(manager);
+ saved_manager = tomoyo_get_name(manager);
if (!saved_manager)
return -ENOMEM;
- down_write(&tomoyo_policy_manager_list_lock);
- list_for_each_entry(ptr, &tomoyo_policy_manager_list, list) {
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list, list) {
if (ptr->manager != saved_manager)
continue;
ptr->is_deleted = is_delete;
error = 0;
- goto out;
+ break;
}
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->manager = saved_manager;
+ saved_manager = NULL;
+ entry->is_domain = is_domain;
+ list_add_tail_rcu(&entry->list, &tomoyo_policy_manager_list);
+ entry = NULL;
+ error = 0;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->manager = saved_manager;
- new_entry->is_domain = is_domain;
- list_add_tail(&new_entry->list, &tomoyo_policy_manager_list);
- error = 0;
- out:
- up_write(&tomoyo_policy_manager_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
+ tomoyo_put_name(saved_manager);
+ kfree(entry);
return error;
}
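The rewritten update helper follows a pattern used throughout this patch: preallocate the candidate entry outside the lock, publish it with list_add_tail_rcu() under tomoyo_policy_lock, and unconditionally release whatever was not consumed. In miniature (variable and list names are generic, not from the patch):

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	mutex_lock(&tomoyo_policy_lock);
	if (!found && tomoyo_memory_ok(entry)) {
		list_add_tail_rcu(&entry->list, &some_policy_list);
		entry = NULL;			/* ownership transferred */
	}
	mutex_unlock(&tomoyo_policy_lock);
	kfree(entry);				/* no-op when inserted */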
@@ -1172,6 +1116,8 @@ static int tomoyo_update_manager_entry(const char *manager,
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_manager_policy(struct tomoyo_io_buffer *head)
{
@@ -1191,6 +1137,8 @@ static int tomoyo_write_manager_policy(struct tomoyo_io_buffer *head)
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_read_manager_policy(struct tomoyo_io_buffer *head)
{
@@ -1199,7 +1147,6 @@ static int tomoyo_read_manager_policy(struct tomoyo_io_buffer *head)
if (head->read_eof)
return 0;
- down_read(&tomoyo_policy_manager_list_lock);
list_for_each_cookie(pos, head->read_var2,
&tomoyo_policy_manager_list) {
struct tomoyo_policy_manager_entry *ptr;
@@ -1211,7 +1158,6 @@ static int tomoyo_read_manager_policy(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_policy_manager_list_lock);
head->read_eof = done;
return 0;
}
@@ -1221,6 +1167,8 @@ static int tomoyo_read_manager_policy(struct tomoyo_io_buffer *head)
*
* Returns true if the current process is permitted to modify policy
* via /sys/kernel/security/tomoyo/ interface.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_policy_manager(void)
{
@@ -1234,29 +1182,25 @@ static bool tomoyo_is_policy_manager(void)
return true;
if (!tomoyo_manage_by_non_root && (task->cred->uid || task->cred->euid))
return false;
- down_read(&tomoyo_policy_manager_list_lock);
- list_for_each_entry(ptr, &tomoyo_policy_manager_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list, list) {
if (!ptr->is_deleted && ptr->is_domain
&& !tomoyo_pathcmp(domainname, ptr->manager)) {
found = true;
break;
}
}
- up_read(&tomoyo_policy_manager_list_lock);
if (found)
return true;
exe = tomoyo_get_exe();
if (!exe)
return false;
- down_read(&tomoyo_policy_manager_list_lock);
- list_for_each_entry(ptr, &tomoyo_policy_manager_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list, list) {
if (!ptr->is_deleted && !ptr->is_domain
&& !strcmp(exe, ptr->manager->name)) {
found = true;
break;
}
}
- up_read(&tomoyo_policy_manager_list_lock);
if (!found) { /* Reduce error messages. */
static pid_t last_pid;
const pid_t pid = current->pid;
@@ -1266,7 +1210,7 @@ static bool tomoyo_is_policy_manager(void)
last_pid = pid;
}
}
- tomoyo_free(exe);
+ kfree(exe);
return found;
}
@@ -1277,6 +1221,8 @@ static bool tomoyo_is_policy_manager(void)
* @data: String to parse.
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
const char *data)
@@ -1286,17 +1232,16 @@ static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
if (sscanf(data, "pid=%u", &pid) == 1) {
struct task_struct *p;
+ rcu_read_lock();
read_lock(&tasklist_lock);
p = find_task_by_vpid(pid);
if (p)
domain = tomoyo_real_domain(p);
read_unlock(&tasklist_lock);
+ rcu_read_unlock();
} else if (!strncmp(data, "domain=", 7)) {
- if (tomoyo_is_domain_def(data + 7)) {
- down_read(&tomoyo_domain_list_lock);
+ if (tomoyo_is_domain_def(data + 7))
domain = tomoyo_find_domain(data + 7);
- up_read(&tomoyo_domain_list_lock);
- }
} else
return false;
head->write_var1 = domain;
@@ -1310,13 +1255,11 @@ static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
if (domain) {
struct tomoyo_domain_info *d;
head->read_var1 = NULL;
- down_read(&tomoyo_domain_list_lock);
- list_for_each_entry(d, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(d, &tomoyo_domain_list, list) {
if (d == domain)
break;
head->read_var1 = &d->list;
}
- up_read(&tomoyo_domain_list_lock);
head->read_var2 = NULL;
head->read_bit = 0;
head->read_step = 0;
@@ -1332,6 +1275,8 @@ static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
* @domainname: The name of domain.
*
* Returns 0.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_delete_domain(char *domainname)
{
@@ -1340,9 +1285,9 @@ static int tomoyo_delete_domain(char *domainname)
name.name = domainname;
tomoyo_fill_path_info(&name);
- down_write(&tomoyo_domain_list_lock);
+ mutex_lock(&tomoyo_policy_lock);
/* Is there an active domain? */
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
/* Never delete tomoyo_kernel_domain */
if (domain == &tomoyo_kernel_domain)
continue;
@@ -1352,7 +1297,7 @@ static int tomoyo_delete_domain(char *domainname)
domain->is_deleted = true;
break;
}
- up_write(&tomoyo_domain_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
return 0;
}
@@ -1362,6 +1307,8 @@ static int tomoyo_delete_domain(char *domainname)
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_domain_policy(struct tomoyo_io_buffer *head)
{
@@ -1384,11 +1331,9 @@ static int tomoyo_write_domain_policy(struct tomoyo_io_buffer *head)
domain = NULL;
if (is_delete)
tomoyo_delete_domain(data);
- else if (is_select) {
- down_read(&tomoyo_domain_list_lock);
+ else if (is_select)
domain = tomoyo_find_domain(data);
- up_read(&tomoyo_domain_list_lock);
- } else
+ else
domain = tomoyo_find_or_assign_new_domain(data, 0);
head->write_var1 = domain;
return 0;
@@ -1403,43 +1348,39 @@ static int tomoyo_write_domain_policy(struct tomoyo_io_buffer *head)
return 0;
}
if (!strcmp(data, TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ)) {
- tomoyo_set_domain_flag(domain, is_delete,
- TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ);
+ domain->ignore_global_allow_read = !is_delete;
return 0;
}
return tomoyo_write_file_policy(data, domain, is_delete);
}
/**
- * tomoyo_print_single_path_acl - Print a single path ACL entry.
+ * tomoyo_print_path_acl - Print a single path ACL entry.
*
* @head: Pointer to "struct tomoyo_io_buffer".
- * @ptr: Pointer to "struct tomoyo_single_path_acl_record".
+ * @ptr: Pointer to "struct tomoyo_path_acl".
*
* Returns true on success, false otherwise.
*/
-static bool tomoyo_print_single_path_acl(struct tomoyo_io_buffer *head,
- struct tomoyo_single_path_acl_record *
- ptr)
+static bool tomoyo_print_path_acl(struct tomoyo_io_buffer *head,
+ struct tomoyo_path_acl *ptr)
{
int pos;
u8 bit;
const char *atmark = "";
const char *filename;
- const u16 perm = ptr->perm;
+ const u32 perm = ptr->perm | (((u32) ptr->perm_high) << 16);
filename = ptr->filename->name;
- for (bit = head->read_bit; bit < TOMOYO_MAX_SINGLE_PATH_OPERATION;
- bit++) {
+ for (bit = head->read_bit; bit < TOMOYO_MAX_PATH_OPERATION; bit++) {
const char *msg;
if (!(perm & (1 << bit)))
continue;
/* Print "read/write" instead of "read" and "write". */
- if ((bit == TOMOYO_TYPE_READ_ACL ||
- bit == TOMOYO_TYPE_WRITE_ACL)
- && (perm & (1 << TOMOYO_TYPE_READ_WRITE_ACL)))
+ if ((bit == TOMOYO_TYPE_READ || bit == TOMOYO_TYPE_WRITE)
+ && (perm & (1 << TOMOYO_TYPE_READ_WRITE)))
continue;
- msg = tomoyo_sp2keyword(bit);
+ msg = tomoyo_path2keyword(bit);
pos = head->read_avail;
if (!tomoyo_io_printf(head, "allow_%s %s%s\n", msg,
atmark, filename))
@@ -1454,16 +1395,15 @@ static bool tomoyo_print_single_path_acl(struct tomoyo_io_buffer *head,
}
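Note how the 22 single-path operations no longer fit in the original u16: tomoyo_path_acl now carries an extra u8 perm_high, and readers reassemble the full mask as ptr->perm | (((u32) ptr->perm_high) << 16). A hypothetical accessor pair (not part of the patch) makes the layout explicit:

	static inline u32 tomoyo_acl_perm(const struct tomoyo_path_acl *acl)
	{
		/* bits 0-15 come from ->perm, bits 16-21 from ->perm_high */
		return acl->perm | (((u32) acl->perm_high) << 16);
	}

	static inline void tomoyo_acl_set_perm(struct tomoyo_path_acl *acl, u32 perm)
	{
		acl->perm = (u16) perm;
		acl->perm_high = (u8) (perm >> 16);
	}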
/**
- * tomoyo_print_double_path_acl - Print a double path ACL entry.
+ * tomoyo_print_path2_acl - Print a double path ACL entry.
*
* @head: Pointer to "struct tomoyo_io_buffer".
- * @ptr: Pointer to "struct tomoyo_double_path_acl_record".
+ * @ptr: Pointer to "struct tomoyo_path2_acl".
*
* Returns true on success, false otherwise.
*/
-static bool tomoyo_print_double_path_acl(struct tomoyo_io_buffer *head,
- struct tomoyo_double_path_acl_record *
- ptr)
+static bool tomoyo_print_path2_acl(struct tomoyo_io_buffer *head,
+ struct tomoyo_path2_acl *ptr)
{
int pos;
const char *atmark1 = "";
@@ -1475,12 +1415,11 @@ static bool tomoyo_print_double_path_acl(struct tomoyo_io_buffer *head,
filename1 = ptr->filename1->name;
filename2 = ptr->filename2->name;
- for (bit = head->read_bit; bit < TOMOYO_MAX_DOUBLE_PATH_OPERATION;
- bit++) {
+ for (bit = head->read_bit; bit < TOMOYO_MAX_PATH2_OPERATION; bit++) {
const char *msg;
if (!(perm & (1 << bit)))
continue;
- msg = tomoyo_dp2keyword(bit);
+ msg = tomoyo_path22keyword(bit);
pos = head->read_avail;
if (!tomoyo_io_printf(head, "allow_%s %s%s %s%s\n", msg,
atmark1, filename1, atmark2, filename2))
@@ -1505,23 +1444,17 @@ static bool tomoyo_print_double_path_acl(struct tomoyo_io_buffer *head,
static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
struct tomoyo_acl_info *ptr)
{
- const u8 acl_type = tomoyo_acl_type2(ptr);
+ const u8 acl_type = ptr->type;
- if (acl_type & TOMOYO_ACL_DELETED)
- return true;
- if (acl_type == TOMOYO_TYPE_SINGLE_PATH_ACL) {
- struct tomoyo_single_path_acl_record *acl
- = container_of(ptr,
- struct tomoyo_single_path_acl_record,
- head);
- return tomoyo_print_single_path_acl(head, acl);
+ if (acl_type == TOMOYO_TYPE_PATH_ACL) {
+ struct tomoyo_path_acl *acl
+ = container_of(ptr, struct tomoyo_path_acl, head);
+ return tomoyo_print_path_acl(head, acl);
}
- if (acl_type == TOMOYO_TYPE_DOUBLE_PATH_ACL) {
- struct tomoyo_double_path_acl_record *acl
- = container_of(ptr,
- struct tomoyo_double_path_acl_record,
- head);
- return tomoyo_print_double_path_acl(head, acl);
+ if (acl_type == TOMOYO_TYPE_PATH2_ACL) {
+ struct tomoyo_path2_acl *acl
+ = container_of(ptr, struct tomoyo_path2_acl, head);
+ return tomoyo_print_path2_acl(head, acl);
}
BUG(); /* This must not happen. */
return false;
@@ -1533,6 +1466,8 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_read_domain_policy(struct tomoyo_io_buffer *head)
{
@@ -1544,7 +1479,6 @@ static int tomoyo_read_domain_policy(struct tomoyo_io_buffer *head)
return 0;
if (head->read_step == 0)
head->read_step = 1;
- down_read(&tomoyo_domain_list_lock);
list_for_each_cookie(dpos, head->read_var1, &tomoyo_domain_list) {
struct tomoyo_domain_info *domain;
const char *quota_exceeded = "";
@@ -1558,10 +1492,9 @@ static int tomoyo_read_domain_policy(struct tomoyo_io_buffer *head)
/* Print domainname and flags. */
if (domain->quota_warned)
quota_exceeded = "quota_exceeded\n";
- if (domain->flags & TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED)
+ if (domain->transition_failed)
transition_failed = "transition_failed\n";
- if (domain->flags &
- TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ)
+ if (domain->ignore_global_allow_read)
ignore_global_allow_read
= TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ "\n";
done = tomoyo_io_printf(head, "%s\n" TOMOYO_KEYWORD_USE_PROFILE
@@ -1577,7 +1510,6 @@ acl_loop:
if (head->read_step == 3)
goto tail_mark;
/* Print ACL entries in the domain. */
- down_read(&tomoyo_domain_acl_info_list_lock);
list_for_each_cookie(apos, head->read_var2,
&domain->acl_info_list) {
struct tomoyo_acl_info *ptr
@@ -1587,7 +1519,6 @@ acl_loop:
if (!done)
break;
}
- up_read(&tomoyo_domain_acl_info_list_lock);
if (!done)
break;
head->read_step = 3;
@@ -1599,7 +1530,6 @@ tail_mark:
if (head->read_single_domain)
break;
}
- up_read(&tomoyo_domain_list_lock);
head->read_eof = done;
return 0;
}
@@ -1615,6 +1545,8 @@ tail_mark:
*
* ( echo "select " $domainname; echo "use_profile " $profile ) |
* /usr/lib/ccs/loadpolicy -d
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_domain_profile(struct tomoyo_io_buffer *head)
{
@@ -1626,9 +1558,7 @@ static int tomoyo_write_domain_profile(struct tomoyo_io_buffer *head)
if (!cp)
return -EINVAL;
*cp = '\0';
- down_read(&tomoyo_domain_list_lock);
domain = tomoyo_find_domain(cp + 1);
- up_read(&tomoyo_domain_list_lock);
if (strict_strtoul(data, 10, &profile))
return -EINVAL;
if (domain && profile < TOMOYO_MAX_PROFILES
@@ -1650,6 +1580,8 @@ static int tomoyo_write_domain_profile(struct tomoyo_io_buffer *head)
* awk ' { if ( domainname == "" ) { if ( $1 == "<kernel>" )
* domainname = $0; } else if ( $1 == "use_profile" ) {
* print $2 " " domainname; domainname = ""; } } ; '
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_read_domain_profile(struct tomoyo_io_buffer *head)
{
@@ -1658,7 +1590,6 @@ static int tomoyo_read_domain_profile(struct tomoyo_io_buffer *head)
if (head->read_eof)
return 0;
- down_read(&tomoyo_domain_list_lock);
list_for_each_cookie(pos, head->read_var1, &tomoyo_domain_list) {
struct tomoyo_domain_info *domain;
domain = list_entry(pos, struct tomoyo_domain_info, list);
@@ -1669,7 +1600,6 @@ static int tomoyo_read_domain_profile(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_domain_list_lock);
head->read_eof = done;
return 0;
}
@@ -1707,11 +1637,13 @@ static int tomoyo_read_pid(struct tomoyo_io_buffer *head)
const int pid = head->read_step;
struct task_struct *p;
struct tomoyo_domain_info *domain = NULL;
+ rcu_read_lock();
read_lock(&tasklist_lock);
p = find_task_by_vpid(pid);
if (p)
domain = tomoyo_real_domain(p);
read_unlock(&tasklist_lock);
+ rcu_read_unlock();
if (domain)
tomoyo_io_printf(head, "%d %u %s", pid, domain->profile,
domain->domainname->name);
@@ -1726,6 +1658,8 @@ static int tomoyo_read_pid(struct tomoyo_io_buffer *head)
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_exception_policy(struct tomoyo_io_buffer *head)
{
@@ -1760,6 +1694,8 @@ static int tomoyo_write_exception_policy(struct tomoyo_io_buffer *head)
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, -EINVAL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_read_exception_policy(struct tomoyo_io_buffer *head)
{
@@ -1889,15 +1825,13 @@ void tomoyo_load_policy(const char *filename)
tomoyo_policy_loaded = true;
{ /* Check all profiles currently assigned to domains are defined. */
struct tomoyo_domain_info *domain;
- down_read(&tomoyo_domain_list_lock);
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
const u8 profile = domain->profile;
if (tomoyo_profile_ptr[profile])
continue;
panic("Profile %u (used by '%s') not defined.\n",
profile, domain->domainname->name);
}
- up_read(&tomoyo_domain_list_lock);
}
}
@@ -1945,10 +1879,12 @@ static int tomoyo_read_self_domain(struct tomoyo_io_buffer *head)
* @file: Pointer to "struct file".
*
* Associates policy handler and returns 0 on success, -ENOMEM otherwise.
+ *
+ * Caller acquires tomoyo_read_lock().
*/
static int tomoyo_open_control(const u8 type, struct file *file)
{
- struct tomoyo_io_buffer *head = tomoyo_alloc(sizeof(*head));
+ struct tomoyo_io_buffer *head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
return -ENOMEM;
@@ -2009,9 +1945,9 @@ static int tomoyo_open_control(const u8 type, struct file *file)
} else {
if (!head->readbuf_size)
head->readbuf_size = 4096 * 2;
- head->read_buf = tomoyo_alloc(head->readbuf_size);
+ head->read_buf = kzalloc(head->readbuf_size, GFP_KERNEL);
if (!head->read_buf) {
- tomoyo_free(head);
+ kfree(head);
return -ENOMEM;
}
}
@@ -2023,13 +1959,14 @@ static int tomoyo_open_control(const u8 type, struct file *file)
head->write = NULL;
} else if (head->write) {
head->writebuf_size = 4096 * 2;
- head->write_buf = tomoyo_alloc(head->writebuf_size);
+ head->write_buf = kzalloc(head->writebuf_size, GFP_KERNEL);
if (!head->write_buf) {
- tomoyo_free(head->read_buf);
- tomoyo_free(head);
+ kfree(head->read_buf);
+ kfree(head);
return -ENOMEM;
}
}
+ head->reader_idx = tomoyo_read_lock();
file->private_data = head;
/*
* Call the handler now if the file is
@@ -2051,6 +1988,8 @@ static int tomoyo_open_control(const u8 type, struct file *file)
* @buffer_len: Size of @buffer.
*
* Returns bytes read on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_read_control(struct file *file, char __user *buffer,
const int buffer_len)
@@ -2094,6 +2033,8 @@ static int tomoyo_read_control(struct file *file, char __user *buffer,
* @buffer_len: Size of @buffer.
*
* Returns @buffer_len on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_control(struct file *file, const char __user *buffer,
const int buffer_len)
@@ -2144,52 +2085,29 @@ static int tomoyo_write_control(struct file *file, const char __user *buffer,
* @file: Pointer to "struct file".
*
* Releases memory and returns 0.
+ *
+ * Caller releases tomoyo_read_lock().
*/
static int tomoyo_close_control(struct file *file)
{
struct tomoyo_io_buffer *head = file->private_data;
+ const bool is_write = !!head->write_buf;
+ tomoyo_read_unlock(head->reader_idx);
/* Release memory used for policy I/O. */
- tomoyo_free(head->read_buf);
+ kfree(head->read_buf);
head->read_buf = NULL;
- tomoyo_free(head->write_buf);
+ kfree(head->write_buf);
head->write_buf = NULL;
- tomoyo_free(head);
+ kfree(head);
head = NULL;
file->private_data = NULL;
+ if (is_write)
+ tomoyo_run_gc();
return 0;
}
/**
- * tomoyo_alloc_acl_element - Allocate permanent memory for ACL entry.
- *
- * @acl_type: Type of ACL entry.
- *
- * Returns pointer to the ACL entry on success, NULL otherwise.
- */
-void *tomoyo_alloc_acl_element(const u8 acl_type)
-{
- int len;
- struct tomoyo_acl_info *ptr;
-
- switch (acl_type) {
- case TOMOYO_TYPE_SINGLE_PATH_ACL:
- len = sizeof(struct tomoyo_single_path_acl_record);
- break;
- case TOMOYO_TYPE_DOUBLE_PATH_ACL:
- len = sizeof(struct tomoyo_double_path_acl_record);
- break;
- default:
- return NULL;
- }
- ptr = tomoyo_alloc_element(len);
- if (!ptr)
- return NULL;
- ptr->type = acl_type;
- return ptr;
-}
-
-/**
* tomoyo_open - open() for /sys/kernel/security/tomoyo/ interface.
*
* @inode: Pointer to "struct inode".
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 92169d2..67bd22d 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -1,12 +1,9 @@
/*
* security/tomoyo/common.h
*
- * Common functions for TOMOYO.
- *
- * Copyright (C) 2005-2009 NTT DATA CORPORATION
- *
- * Version: 2.2.0 2009/04/01
+ * Header file for TOMOYO.
*
+ * Copyright (C) 2005-2010 NTT DATA CORPORATION
*/
#ifndef _SECURITY_TOMOYO_COMMON_H
@@ -22,9 +19,119 @@
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/list.h>
+#include <linux/cred.h>
+struct linux_binprm;
+
+/********** Constants definitions. **********/
+
+/*
+ * TOMOYO uses this hash only when appending a string into the string
+ * table. Frequency of appending strings is very low. So we don't need
+ * large (e.g. 64k) hash size. 256 will be sufficient.
+ */
+#define TOMOYO_HASH_BITS 8
+#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
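As a sketch of how such a table is typically indexed (the helper below is an assumption for illustration, not an export of this patch; full_name_hash() is the kernel's generic string hash from linux/dcache.h):

	static unsigned int tomoyo_name_bucket(const char *name, unsigned int len)
	{
		/* Fold the generic string hash into TOMOYO_MAX_HASH buckets. */
		return full_name_hash((const unsigned char *) name, len)
			% TOMOYO_MAX_HASH;
	}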
+
+/*
+ * This is the max length of a token.
+ *
+ * A token consists of only ASCII printable characters.
+ * Non-printable characters in a token are represented in \ooo style
+ * octal string. Thus, \ itself is represented as \\.
+ */
+#define TOMOYO_MAX_PATHNAME_LEN 4000
+
+/* Profile number is an integer between 0 and 255. */
+#define TOMOYO_MAX_PROFILES 256
+
+/* Keywords for ACLs. */
+#define TOMOYO_KEYWORD_ALIAS "alias "
+#define TOMOYO_KEYWORD_ALLOW_READ "allow_read "
+#define TOMOYO_KEYWORD_DELETE "delete "
+#define TOMOYO_KEYWORD_DENY_REWRITE "deny_rewrite "
+#define TOMOYO_KEYWORD_FILE_PATTERN "file_pattern "
+#define TOMOYO_KEYWORD_INITIALIZE_DOMAIN "initialize_domain "
+#define TOMOYO_KEYWORD_KEEP_DOMAIN "keep_domain "
+#define TOMOYO_KEYWORD_NO_INITIALIZE_DOMAIN "no_initialize_domain "
+#define TOMOYO_KEYWORD_NO_KEEP_DOMAIN "no_keep_domain "
+#define TOMOYO_KEYWORD_SELECT "select "
+#define TOMOYO_KEYWORD_USE_PROFILE "use_profile "
+#define TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ "ignore_global_allow_read"
+/* A domain definition starts with <kernel>. */
+#define TOMOYO_ROOT_NAME "<kernel>"
+#define TOMOYO_ROOT_NAME_LEN (sizeof(TOMOYO_ROOT_NAME) - 1)
+
+/* Index numbers for Access Controls. */
+enum tomoyo_mac_index {
+ TOMOYO_MAC_FOR_FILE, /* domain_policy.conf */
+ TOMOYO_MAX_ACCEPT_ENTRY,
+ TOMOYO_VERBOSE,
+ TOMOYO_MAX_CONTROL_INDEX
+};
+
+/* Index numbers for Access Controls. */
+enum tomoyo_acl_entry_type_index {
+ TOMOYO_TYPE_PATH_ACL,
+ TOMOYO_TYPE_PATH2_ACL,
+};
+
+/* Index numbers for File Controls. */
+
+/*
+ * TOMOYO_TYPE_READ_WRITE is special. It is automatically set if both
+ * TOMOYO_TYPE_READ and TOMOYO_TYPE_WRITE are set, and both of those bits
+ * are automatically set whenever TOMOYO_TYPE_READ_WRITE is set. Likewise,
+ * TOMOYO_TYPE_READ_WRITE is automatically cleared if either TOMOYO_TYPE_READ
+ * or TOMOYO_TYPE_WRITE is cleared, and both of those bits are automatically
+ * cleared if TOMOYO_TYPE_READ_WRITE is cleared.
+ */
+
+enum tomoyo_path_acl_index {
+ TOMOYO_TYPE_READ_WRITE,
+ TOMOYO_TYPE_EXECUTE,
+ TOMOYO_TYPE_READ,
+ TOMOYO_TYPE_WRITE,
+ TOMOYO_TYPE_CREATE,
+ TOMOYO_TYPE_UNLINK,
+ TOMOYO_TYPE_MKDIR,
+ TOMOYO_TYPE_RMDIR,
+ TOMOYO_TYPE_MKFIFO,
+ TOMOYO_TYPE_MKSOCK,
+ TOMOYO_TYPE_MKBLOCK,
+ TOMOYO_TYPE_MKCHAR,
+ TOMOYO_TYPE_TRUNCATE,
+ TOMOYO_TYPE_SYMLINK,
+ TOMOYO_TYPE_REWRITE,
+ TOMOYO_TYPE_IOCTL,
+ TOMOYO_TYPE_CHMOD,
+ TOMOYO_TYPE_CHOWN,
+ TOMOYO_TYPE_CHGRP,
+ TOMOYO_TYPE_CHROOT,
+ TOMOYO_TYPE_MOUNT,
+ TOMOYO_TYPE_UMOUNT,
+ TOMOYO_MAX_PATH_OPERATION
+};
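The READ_WRITE coupling described above can be expressed compactly; this illustrative helper (an assumption, not code from the patch) encodes the documented set/clear rules:

	static void tomoyo_normalize_rw(u32 *perm)
	{
		/* READ and WRITE together imply READ_WRITE... */
		if ((*perm & (1 << TOMOYO_TYPE_READ)) &&
		    (*perm & (1 << TOMOYO_TYPE_WRITE)))
			*perm |= 1 << TOMOYO_TYPE_READ_WRITE;
		else	/* ...and losing either one clears it. */
			*perm &= ~(1 << TOMOYO_TYPE_READ_WRITE);
	}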
-struct dentry;
-struct vfsmount;
+enum tomoyo_path2_acl_index {
+ TOMOYO_TYPE_LINK,
+ TOMOYO_TYPE_RENAME,
+ TOMOYO_TYPE_PIVOT_ROOT,
+ TOMOYO_MAX_PATH2_OPERATION
+};
+
+enum tomoyo_securityfs_interface_index {
+ TOMOYO_DOMAINPOLICY,
+ TOMOYO_EXCEPTIONPOLICY,
+ TOMOYO_DOMAIN_STATUS,
+ TOMOYO_PROCESS_STATUS,
+ TOMOYO_MEMINFO,
+ TOMOYO_SELFDOMAIN,
+ TOMOYO_VERSION,
+ TOMOYO_PROFILE,
+ TOMOYO_MANAGER
+};
+
+/********** Structure definitions. **********/
/*
* tomoyo_page_buffer is a structure which is used for holding a pathname
@@ -66,13 +173,14 @@ struct tomoyo_path_info {
};
/*
- * This is the max length of a token.
- *
- * A token consists of only ASCII printable characters.
- * Non printable characters in a token is represented in \ooo style
- * octal string. Thus, \ itself is represented as \\.
+ * tomoyo_name_entry is a structure which is used for linking
+ * "struct tomoyo_path_info" into tomoyo_name_list .
*/
-#define TOMOYO_MAX_PATHNAME_LEN 4000
+struct tomoyo_name_entry {
+ struct list_head list;
+ atomic_t users;
+ struct tomoyo_path_info entry;
+};
/*
* tomoyo_path_info_with_data is a structure which is used for holding a
@@ -89,7 +197,7 @@ struct tomoyo_path_info {
* "struct tomoyo_path_info_with_data".
*/
struct tomoyo_path_info_with_data {
- /* Keep "head" first, for this pointer is passed to tomoyo_free(). */
+ /* Keep "head" first, for this pointer is passed to kfree(). */
struct tomoyo_path_info head;
char barrier1[16]; /* Safeguard for overrun. */
char body[TOMOYO_MAX_PATHNAME_LEN];
@@ -101,30 +209,19 @@ struct tomoyo_path_info_with_data {
*
* (1) "list" which is linked to the ->acl_info_list of
* "struct tomoyo_domain_info"
- * (2) "type" which tells
- * (a) type & 0x7F : type of the entry (either
- * "struct tomoyo_single_path_acl_record" or
- * "struct tomoyo_double_path_acl_record")
- * (b) type & 0x80 : whether the entry is marked as "deleted".
+ * (2) "type" which tells type of the entry (either
+ * "struct tomoyo_path_acl" or "struct tomoyo_path2_acl").
*
* Packing "struct tomoyo_acl_info" allows
- * "struct tomoyo_single_path_acl_record" to embed "u16" and
- * "struct tomoyo_double_path_acl_record" to embed "u8"
+ * "struct tomoyo_path_acl" to embed "u8" + "u16" and
+ * "struct tomoyo_path2_acl" to embed "u8"
* without enlarging their structure size.
*/
struct tomoyo_acl_info {
struct list_head list;
- /*
- * Type of this ACL entry.
- *
- * MSB is is_deleted flag.
- */
u8 type;
} __packed;
-/* This ACL entry is deleted. */
-#define TOMOYO_ACL_DELETED 0x80
-
/*
* tomoyo_domain_info is a structure which is used for holding permissions
* (e.g. "allow_read /lib/libc-2.5.so") given to each domain.
@@ -138,7 +235,17 @@ struct tomoyo_acl_info {
* "deleted", false otherwise.
* (6) "quota_warned" is a bool which is used for suppressing warning message
* when learning mode learned too many entries.
- * (7) "flags" which remembers this domain's attributes.
+ * (7) "ignore_global_allow_read" is a bool which is true if this domain
+ * should ignore "allow_read" directive in exception policy.
+ * (8) "transition_failed" is a bool which is set to true when this domain was
+ * unable to create a new domain at tomoyo_find_next_domain() because the
+ * name of the domain to be created was too long or it could not allocate
+ * memory. If set to true, more than one process continued execve()
+ * without domain transition.
+ * (9) "users" is an atomic_t that holds how many "struct cred"->security
+ * refer to this "struct tomoyo_domain_info". If is_deleted == true
+ * and users == 0, this struct will be kfree()d upon next garbage
+ * collection.
*
* A domain's lifecycle is an analogy of files on / directory.
* Multiple domains with the same domainname cannot be created (as with
@@ -155,25 +262,13 @@ struct tomoyo_domain_info {
u8 profile; /* Profile number to use. */
bool is_deleted; /* Delete flag. */
bool quota_warned; /* Quota warning flag. */
- /* DOMAIN_FLAGS_*. Use tomoyo_set_domain_flag() to modify. */
- u8 flags;
+ bool ignore_global_allow_read; /* Ignore "allow_read" flag. */
+ bool transition_failed; /* Domain transition failed flag. */
+ atomic_t users; /* Number of referring credentials. */
};
-/* Profile number is an integer between 0 and 255. */
-#define TOMOYO_MAX_PROFILES 256
-
-/* Ignore "allow_read" directive in exception policy. */
-#define TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ 1
-/*
- * This domain was unable to create a new domain at tomoyo_find_next_domain()
- * because the name of the domain to be created was too long or
- * it could not allocate memory.
- * More than one process continued execve() without domain transition.
- */
-#define TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED 2
-
/*
- * tomoyo_single_path_acl_record is a structure which is used for holding an
+ * tomoyo_path_acl is a structure which is used for holding an
* entry with one pathname operation (e.g. open(), mkdir()).
* It has following fields.
*
@@ -184,18 +279,21 @@ struct tomoyo_domain_info {
* Directives held by this structure are "allow_read/write", "allow_execute",
* "allow_read", "allow_write", "allow_create", "allow_unlink", "allow_mkdir",
* "allow_rmdir", "allow_mkfifo", "allow_mksock", "allow_mkblock",
- * "allow_mkchar", "allow_truncate", "allow_symlink" and "allow_rewrite".
+ * "allow_mkchar", "allow_truncate", "allow_symlink", "allow_rewrite",
+ * "allow_chmod", "allow_chown", "allow_chgrp", "allow_chroot", "allow_mount"
+ * and "allow_unmount".
*/
-struct tomoyo_single_path_acl_record {
- struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_SINGLE_PATH_ACL */
+struct tomoyo_path_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_ACL */
+ u8 perm_high;
u16 perm;
/* Pointer to single pathname. */
const struct tomoyo_path_info *filename;
};
/*
- * tomoyo_double_path_acl_record is a structure which is used for holding an
- * entry with two pathnames operation (i.e. link() and rename()).
+ * tomoyo_path2_acl is a structure which is used for holding an
+ * entry with two pathnames operation (i.e. link(), rename() and pivot_root()).
* It has following fields.
*
* (1) "head" which is a "struct tomoyo_acl_info".
@@ -203,10 +301,11 @@ struct tomoyo_single_path_acl_record {
* (3) "filename1" is the source/old pathname.
* (4) "filename2" is the destination/new pathname.
*
- * Directives held by this structure are "allow_rename" and "allow_link".
+ * Directives held by this structure are "allow_rename", "allow_link" and
+ * "allow_pivot_root".
*/
-struct tomoyo_double_path_acl_record {
- struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_DOUBLE_PATH_ACL */
+struct tomoyo_path2_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH2_ACL */
u8 perm;
/* Pointer to single pathname. */
const struct tomoyo_path_info *filename1;
@@ -214,29 +313,6 @@ struct tomoyo_double_path_acl_record {
const struct tomoyo_path_info *filename2;
};
-/* Keywords for ACLs. */
-#define TOMOYO_KEYWORD_ALIAS "alias "
-#define TOMOYO_KEYWORD_ALLOW_READ "allow_read "
-#define TOMOYO_KEYWORD_DELETE "delete "
-#define TOMOYO_KEYWORD_DENY_REWRITE "deny_rewrite "
-#define TOMOYO_KEYWORD_FILE_PATTERN "file_pattern "
-#define TOMOYO_KEYWORD_INITIALIZE_DOMAIN "initialize_domain "
-#define TOMOYO_KEYWORD_KEEP_DOMAIN "keep_domain "
-#define TOMOYO_KEYWORD_NO_INITIALIZE_DOMAIN "no_initialize_domain "
-#define TOMOYO_KEYWORD_NO_KEEP_DOMAIN "no_keep_domain "
-#define TOMOYO_KEYWORD_SELECT "select "
-#define TOMOYO_KEYWORD_USE_PROFILE "use_profile "
-#define TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ "ignore_global_allow_read"
-/* A domain definition starts with <kernel>. */
-#define TOMOYO_ROOT_NAME "<kernel>"
-#define TOMOYO_ROOT_NAME_LEN (sizeof(TOMOYO_ROOT_NAME) - 1)
-
-/* Index numbers for Access Controls. */
-#define TOMOYO_MAC_FOR_FILE 0 /* domain_policy.conf */
-#define TOMOYO_MAX_ACCEPT_ENTRY 1
-#define TOMOYO_VERBOSE 2
-#define TOMOYO_MAX_CONTROL_INDEX 3
-
/*
* tomoyo_io_buffer is a structure which is used for reading and modifying
* configuration via /sys/kernel/security/tomoyo/ interface.
@@ -265,6 +341,8 @@ struct tomoyo_io_buffer {
int (*write) (struct tomoyo_io_buffer *);
/* Exclusive lock for this structure. */
struct mutex io_sem;
+ /* Index returned by tomoyo_read_lock(). */
+ int reader_idx;
/* The position currently reading from. */
struct list_head *read_var1;
/* Extra variables for reading. */
@@ -293,18 +371,159 @@ struct tomoyo_io_buffer {
int writebuf_size;
};
+/*
+ * tomoyo_globally_readable_file_entry is a structure which is used for holding
+ * "allow_read" entries.
+ * It has following fields.
+ *
+ * (1) "list" which is linked to tomoyo_globally_readable_list .
+ * (2) "filename" is a pathname which is allowed to open(O_RDONLY).
+ * (3) "is_deleted" is a bool which is true if marked as deleted, false
+ * otherwise.
+ */
+struct tomoyo_globally_readable_file_entry {
+ struct list_head list;
+ const struct tomoyo_path_info *filename;
+ bool is_deleted;
+};
+
+/*
+ * tomoyo_pattern_entry is a structure which is used for holding
+ * "tomoyo_pattern_list" entries.
+ * It has following fields.
+ *
+ * (1) "list" which is linked to tomoyo_pattern_list .
+ * (2) "pattern" is a pathname pattern which is used for converting pathnames
+ * to pathname patterns during learning mode.
+ * (3) "is_deleted" is a bool which is true if marked as deleted, false
+ * otherwise.
+ */
+struct tomoyo_pattern_entry {
+ struct list_head list;
+ const struct tomoyo_path_info *pattern;
+ bool is_deleted;
+};
+
+/*
+ * tomoyo_no_rewrite_entry is a structure which is used for holding
+ * "deny_rewrite" entries.
+ * It has following fields.
+ *
+ * (1) "list" which is linked to tomoyo_no_rewrite_list .
+ * (2) "pattern" is a pathname which is by default not permitted to modify
+ * already existing content.
+ * (3) "is_deleted" is a bool which is true if marked as deleted, false
+ * otherwise.
+ */
+struct tomoyo_no_rewrite_entry {
+ struct list_head list;
+ const struct tomoyo_path_info *pattern;
+ bool is_deleted;
+};
+
+/*
+ * tomoyo_domain_initializer_entry is a structure which is used for holding
+ * "initialize_domain" and "no_initialize_domain" entries.
+ * It has following fields.
+ *
+ * (1) "list" which is linked to tomoyo_domain_initializer_list .
+ * (2) "domainname" which is "a domainname" or "the last component of a
+ * domainname". This field is NULL if "from" clause is not specified.
+ * (3) "program" which is a program's pathname.
+ * (4) "is_deleted" is a bool which is true if marked as deleted, false
+ * otherwise.
+ * (5) "is_not" is a bool which is true if "no_initialize_domain", false
+ * otherwise.
+ * (6) "is_last_name" is a bool which is true if "domainname" is "the last
+ * component of a domainname", false otherwise.
+ */
+struct tomoyo_domain_initializer_entry {
+ struct list_head list;
+ const struct tomoyo_path_info *domainname; /* This may be NULL */
+ const struct tomoyo_path_info *program;
+ bool is_deleted;
+ bool is_not; /* True if this entry is "no_initialize_domain". */
+ /* True if the domainname is tomoyo_get_last_name(). */
+ bool is_last_name;
+};
+
+/*
+ * tomoyo_domain_keeper_entry is a structure which is used for holding
+ * "keep_domain" and "no_keep_domain" entries.
+ * It has following fields.
+ *
+ * (1) "list" which is linked to tomoyo_domain_keeper_list .
+ * (2) "domainname" which is "a domainname" or "the last component of a
+ * domainname".
+ * (3) "program" which is a program's pathname.
+ * This field is NULL if "from" clause is not specified.
+ * (4) "is_deleted" is a bool which is true if marked as deleted, false
+ * otherwise.
+ * (5) "is_not" is a bool which is true if "no_initialize_domain", false
+ * otherwise.
+ * (6) "is_last_name" is a bool which is true if "domainname" is "the last
+ * component of a domainname", false otherwise.
+ */
+struct tomoyo_domain_keeper_entry {
+ struct list_head list;
+ const struct tomoyo_path_info *domainname;
+ const struct tomoyo_path_info *program; /* This may be NULL */
+ bool is_deleted;
+ bool is_not; /* True if this entry is "no_keep_domain". */
+ /* True if the domainname is tomoyo_get_last_name(). */
+ bool is_last_name;
+};
+
+/*
+ * tomoyo_alias_entry is a structure which is used for holding "alias" entries.
+ * It has following fields.
+ *
+ * (1) "list" which is linked to tomoyo_alias_list .
+ * (2) "original_name" which is a dereferenced pathname.
+ * (3) "aliased_name" which is a symlink's pathname.
+ * (4) "is_deleted" is a bool which is true if marked as deleted, false
+ * otherwise.
+ */
+struct tomoyo_alias_entry {
+ struct list_head list;
+ const struct tomoyo_path_info *original_name;
+ const struct tomoyo_path_info *aliased_name;
+ bool is_deleted;
+};
+
+/*
+ * tomoyo_policy_manager_entry is a structure which is used for holding list of
+ * domainnames or programs which are permitted to modify configuration via
+ * /sys/kernel/security/tomoyo/ interface.
+ * It has following fields.
+ *
+ * (1) "list" which is linked to tomoyo_policy_manager_list .
+ * (2) "manager" is a domainname or a program's pathname.
+ * (3) "is_domain" is a bool which is true if "manager" is a domainname, false
+ * otherwise.
+ * (4) "is_deleted" is a bool which is true if marked as deleted, false
+ * otherwise.
+ */
+struct tomoyo_policy_manager_entry {
+ struct list_head list;
+ /* A path to program or a domainname. */
+ const struct tomoyo_path_info *manager;
+ bool is_domain; /* True if manager is a domainname. */
+ bool is_deleted; /* True if this entry is deleted. */
+};
+
+/********** Function prototypes. **********/
+
/* Check whether the domain has too many ACL entries to hold. */
bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain);
/* Transactional sprintf() for policy dump. */
bool tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...)
__attribute__ ((format(printf, 2, 3)));
/* Check whether the domainname is correct. */
-bool tomoyo_is_correct_domain(const unsigned char *domainname,
- const char *function);
+bool tomoyo_is_correct_domain(const unsigned char *domainname);
/* Check whether the token is correct. */
bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
- const s8 pattern_type, const s8 end_type,
- const char *function);
+ const s8 pattern_type, const s8 end_type);
/* Check whether the token can be a domainname. */
bool tomoyo_is_domain_def(const unsigned char *buffer);
/* Check whether the given filename matches the given pattern. */
@@ -328,13 +547,13 @@ bool tomoyo_read_no_rewrite_policy(struct tomoyo_io_buffer *head);
/* Write domain policy violation warning message to console? */
bool tomoyo_verbose_mode(const struct tomoyo_domain_info *domain);
/* Convert double path operation to operation name. */
-const char *tomoyo_dp2keyword(const u8 operation);
+const char *tomoyo_path22keyword(const u8 operation);
/* Get the last component of the given domainname. */
const char *tomoyo_get_last_name(const struct tomoyo_domain_info *domain);
/* Get warning message. */
const char *tomoyo_get_msg(const bool is_enforce);
/* Convert single path operation to operation name. */
-const char *tomoyo_sp2keyword(const u8 operation);
+const char *tomoyo_path2keyword(const u8 operation);
/* Create "alias" entry in exception policy. */
int tomoyo_write_alias_policy(char *data, const bool is_delete);
/*
@@ -370,33 +589,107 @@ struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
/* Check mode for specified functionality. */
unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain,
const u8 index);
-/* Allocate memory for structures. */
-void *tomoyo_alloc_acl_element(const u8 acl_type);
/* Fill in "struct tomoyo_path_info" members. */
void tomoyo_fill_path_info(struct tomoyo_path_info *ptr);
/* Run policy loader when /sbin/init starts. */
void tomoyo_load_policy(const char *filename);
-/* Change "struct tomoyo_domain_info"->flags. */
-void tomoyo_set_domain_flag(struct tomoyo_domain_info *domain,
- const bool is_delete, const u8 flags);
-/* strcmp() for "struct tomoyo_path_info" structure. */
-static inline bool tomoyo_pathcmp(const struct tomoyo_path_info *a,
- const struct tomoyo_path_info *b)
+/* Convert binary string to ascii string. */
+int tomoyo_encode(char *buffer, int buflen, const char *str);
+
+/* Returns realpath(3) of the given pathname but ignores chroot'ed root. */
+int tomoyo_realpath_from_path2(struct path *path, char *newname,
+ int newname_len);
+
+/*
+ * Returns realpath(3) of the given pathname but ignores chroot'ed root.
+ * These functions use kzalloc(), so the caller must call kfree()
+ * if these functions didn't return NULL.
+ */
+char *tomoyo_realpath(const char *pathname);
+/*
+ * Same with tomoyo_realpath() except that it doesn't follow the final symlink.
+ */
+char *tomoyo_realpath_nofollow(const char *pathname);
+/* Same with tomoyo_realpath() except that the pathname is already solved. */
+char *tomoyo_realpath_from_path(struct path *path);
+
+/* Check memory quota. */
+bool tomoyo_memory_ok(void *ptr);
+
+/*
+ * Keep the given name in RAM.
+ * The storage is shared, so NEVER try to modify or kfree() the returned name.
+ */
+const struct tomoyo_path_info *tomoyo_get_name(const char *name);
+
+/* Check for memory usage. */
+int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head);
+
+/* Set memory quota. */
+int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head);
+
+/* Initialize realpath related code. */
+void __init tomoyo_realpath_init(void);
+int tomoyo_check_exec_perm(struct tomoyo_domain_info *domain,
+ const struct tomoyo_path_info *filename);
+int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
+ struct path *path, const int flag);
+int tomoyo_path_perm(const u8 operation, struct path *path);
+int tomoyo_path2_perm(const u8 operation, struct path *path1,
+ struct path *path2);
+int tomoyo_check_rewrite_permission(struct file *filp);
+int tomoyo_find_next_domain(struct linux_binprm *bprm);
+
+/* Run garbage collector. */
+void tomoyo_run_gc(void);
+
+void tomoyo_memory_free(void *ptr);
+
+/********** External variable definitions. **********/
+
+/* Lock for GC. */
+extern struct srcu_struct tomoyo_ss;
+
+/* The list for "struct tomoyo_domain_info". */
+extern struct list_head tomoyo_domain_list;
+
+extern struct list_head tomoyo_domain_initializer_list;
+extern struct list_head tomoyo_domain_keeper_list;
+extern struct list_head tomoyo_alias_list;
+extern struct list_head tomoyo_globally_readable_list;
+extern struct list_head tomoyo_pattern_list;
+extern struct list_head tomoyo_no_rewrite_list;
+extern struct list_head tomoyo_policy_manager_list;
+extern struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
+extern struct mutex tomoyo_name_list_lock;
+
+/* Lock for protecting policy. */
+extern struct mutex tomoyo_policy_lock;
+
+/* Has /sbin/init started? */
+extern bool tomoyo_policy_loaded;
+
+/* The kernel's domain. */
+extern struct tomoyo_domain_info tomoyo_kernel_domain;
+
+/********** Inlined functions. **********/
+
+static inline int tomoyo_read_lock(void)
{
- return a->hash != b->hash || strcmp(a->name, b->name);
+ return srcu_read_lock(&tomoyo_ss);
}
-/* Get type of an ACL entry. */
-static inline u8 tomoyo_acl_type1(struct tomoyo_acl_info *ptr)
+static inline void tomoyo_read_unlock(int idx)
{
- return ptr->type & ~TOMOYO_ACL_DELETED;
+ srcu_read_unlock(&tomoyo_ss, idx);
}
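
These wrappers give every policy reader a single SRCU section that pins list entries while they are traversed. A minimal caller sketch (the function name here is illustrative, not part of the patch):

static void example_reader(void)
{
	struct tomoyo_domain_info *domain;
	int idx = tomoyo_read_lock();	/* srcu_read_lock(&tomoyo_ss) */

	list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
		/* entries seen here cannot be freed until the unlock */
	}
	tomoyo_read_unlock(idx);	/* srcu_read_unlock(&tomoyo_ss, idx) */
}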
-/* Get type of an ACL entry. */
-static inline u8 tomoyo_acl_type2(struct tomoyo_acl_info *ptr)
+/* strcmp() for "struct tomoyo_path_info" structure. */
+static inline bool tomoyo_pathcmp(const struct tomoyo_path_info *a,
+ const struct tomoyo_path_info *b)
{
- return ptr->type;
+ return a->hash != b->hash || strcmp(a->name, b->name);
}
/**
@@ -423,18 +716,25 @@ static inline bool tomoyo_is_invalid(const unsigned char c)
return c && (c <= ' ' || c >= 127);
}
-/* The list for "struct tomoyo_domain_info". */
-extern struct list_head tomoyo_domain_list;
-extern struct rw_semaphore tomoyo_domain_list_lock;
-
-/* Lock for domain->acl_info_list. */
-extern struct rw_semaphore tomoyo_domain_acl_info_list_lock;
+static inline void tomoyo_put_name(const struct tomoyo_path_info *name)
+{
+ if (name) {
+ struct tomoyo_name_entry *ptr =
+ container_of(name, struct tomoyo_name_entry, entry);
+ atomic_dec(&ptr->users);
+ }
+}
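
Every reference obtained from tomoyo_get_name() must eventually be dropped through this helper. A minimal sketch of the pairing (error handling elided; the pathname is illustrative):

	const struct tomoyo_path_info *name = tomoyo_get_name("/bin/true");

	if (name) {
		/* use name->name and name->hash while the reference is held */
		tomoyo_put_name(name);	/* decrement the shared use count */
	}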
-/* Has /sbin/init started? */
-extern bool tomoyo_policy_loaded;
+static inline struct tomoyo_domain_info *tomoyo_domain(void)
+{
+ return current_cred()->security;
+}
-/* The kernel's domain. */
-extern struct tomoyo_domain_info tomoyo_kernel_domain;
+static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct
+ *task)
+{
+ return task_cred_xxx(task, security);
+}
/**
* list_for_each_cookie - iterate over a list with cookie.
@@ -442,16 +742,16 @@ extern struct tomoyo_domain_info tomoyo_kernel_domain;
* @cookie: the &struct list_head to use as a cookie.
* @head: the head for your list.
*
- * Same with list_for_each() except that this primitive uses @cookie
+ * Same as list_for_each_rcu() except that this primitive uses @cookie
* so that we can continue iteration.
* @cookie must be NULL when iteration starts, and @cookie will become
* NULL when iteration finishes.
*/
-#define list_for_each_cookie(pos, cookie, head) \
- for (({ if (!cookie) \
- cookie = head; }), \
- pos = (cookie)->next; \
- prefetch(pos->next), pos != (head) || ((cookie) = NULL); \
- (cookie) = pos, pos = pos->next)
+#define list_for_each_cookie(pos, cookie, head) \
+ for (({ if (!cookie) \
+ cookie = head; }), \
+ pos = rcu_dereference((cookie)->next); \
+ prefetch(pos->next), pos != (head) || ((cookie) = NULL); \
+ (cookie) = pos, pos = rcu_dereference(pos->next))
#endif /* !defined(_SECURITY_TOMOYO_COMMON_H) */
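
The added rcu_dereference() calls make the cookie walk safe under tomoyo_read_lock() alone. A minimal sketch of the resumable-read pattern the macro supports, as the tomoyo_read_*_policy() handlers use it (the list name is a placeholder):

	struct list_head *pos;

	list_for_each_cookie(pos, head->read_var2, &example_list) {
		if (!tomoyo_io_printf(head, "...\n"))
			break;	/* cookie keeps the position for the next read() */
	}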
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index fcf52ac..66caaa1 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -10,8 +10,6 @@
*/
#include "common.h"
-#include "tomoyo.h"
-#include "realpath.h"
#include <linux/binfmts.h>
/* Variable definitions. */
@@ -58,99 +56,6 @@ struct tomoyo_domain_info tomoyo_kernel_domain;
* exceptions.
*/
LIST_HEAD(tomoyo_domain_list);
-DECLARE_RWSEM(tomoyo_domain_list_lock);
-
-/*
- * tomoyo_domain_initializer_entry is a structure which is used for holding
- * "initialize_domain" and "no_initialize_domain" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_domain_initializer_list .
- * (2) "domainname" which is "a domainname" or "the last component of a
- * domainname". This field is NULL if "from" clause is not specified.
- * (3) "program" which is a program's pathname.
- * (4) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- * (5) "is_not" is a bool which is true if "no_initialize_domain", false
- * otherwise.
- * (6) "is_last_name" is a bool which is true if "domainname" is "the last
- * component of a domainname", false otherwise.
- */
-struct tomoyo_domain_initializer_entry {
- struct list_head list;
- const struct tomoyo_path_info *domainname; /* This may be NULL */
- const struct tomoyo_path_info *program;
- bool is_deleted;
- bool is_not; /* True if this entry is "no_initialize_domain". */
- /* True if the domainname is tomoyo_get_last_name(). */
- bool is_last_name;
-};
-
-/*
- * tomoyo_domain_keeper_entry is a structure which is used for holding
- * "keep_domain" and "no_keep_domain" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_domain_keeper_list .
- * (2) "domainname" which is "a domainname" or "the last component of a
- * domainname".
- * (3) "program" which is a program's pathname.
- * This field is NULL if "from" clause is not specified.
- * (4) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- * (5) "is_not" is a bool which is true if "no_initialize_domain", false
- * otherwise.
- * (6) "is_last_name" is a bool which is true if "domainname" is "the last
- * component of a domainname", false otherwise.
- */
-struct tomoyo_domain_keeper_entry {
- struct list_head list;
- const struct tomoyo_path_info *domainname;
- const struct tomoyo_path_info *program; /* This may be NULL */
- bool is_deleted;
- bool is_not; /* True if this entry is "no_keep_domain". */
- /* True if the domainname is tomoyo_get_last_name(). */
- bool is_last_name;
-};
-
-/*
- * tomoyo_alias_entry is a structure which is used for holding "alias" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_alias_list .
- * (2) "original_name" which is a dereferenced pathname.
- * (3) "aliased_name" which is a symlink's pathname.
- * (4) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- */
-struct tomoyo_alias_entry {
- struct list_head list;
- const struct tomoyo_path_info *original_name;
- const struct tomoyo_path_info *aliased_name;
- bool is_deleted;
-};
-
-/**
- * tomoyo_set_domain_flag - Set or clear domain's attribute flags.
- *
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @is_delete: True if it is a delete request.
- * @flags: Flags to set or clear.
- *
- * Returns nothing.
- */
-void tomoyo_set_domain_flag(struct tomoyo_domain_info *domain,
- const bool is_delete, const u8 flags)
-{
- /* We need to serialize because this is bitfield operation. */
- static DEFINE_SPINLOCK(lock);
- spin_lock(&lock);
- if (!is_delete)
- domain->flags |= flags;
- else
- domain->flags &= ~flags;
- spin_unlock(&lock);
-}
/**
* tomoyo_get_last_name - Get last component of a domainname.
@@ -205,8 +110,7 @@ const char *tomoyo_get_last_name(const struct tomoyo_domain_info *domain)
* will cause "/usr/sbin/httpd" to belong to "<kernel> /usr/sbin/httpd" domain
* unless executed from "<kernel> /etc/rc.d/init.d/httpd" domain.
*/
-static LIST_HEAD(tomoyo_domain_initializer_list);
-static DECLARE_RWSEM(tomoyo_domain_initializer_list_lock);
+LIST_HEAD(tomoyo_domain_initializer_list);
/**
* tomoyo_update_domain_initializer_entry - Update "struct tomoyo_domain_initializer_entry" list.
@@ -217,59 +121,65 @@ static DECLARE_RWSEM(tomoyo_domain_initializer_list_lock);
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_domain_initializer_entry(const char *domainname,
const char *program,
const bool is_not,
const bool is_delete)
{
- struct tomoyo_domain_initializer_entry *new_entry;
+ struct tomoyo_domain_initializer_entry *entry = NULL;
struct tomoyo_domain_initializer_entry *ptr;
- const struct tomoyo_path_info *saved_program;
+ const struct tomoyo_path_info *saved_program = NULL;
const struct tomoyo_path_info *saved_domainname = NULL;
- int error = -ENOMEM;
+ int error = is_delete ? -ENOENT : -ENOMEM;
bool is_last_name = false;
- if (!tomoyo_is_correct_path(program, 1, -1, -1, __func__))
+ if (!tomoyo_is_correct_path(program, 1, -1, -1))
return -EINVAL; /* No patterns allowed. */
if (domainname) {
if (!tomoyo_is_domain_def(domainname) &&
- tomoyo_is_correct_path(domainname, 1, -1, -1, __func__))
+ tomoyo_is_correct_path(domainname, 1, -1, -1))
is_last_name = true;
- else if (!tomoyo_is_correct_domain(domainname, __func__))
+ else if (!tomoyo_is_correct_domain(domainname))
return -EINVAL;
- saved_domainname = tomoyo_save_name(domainname);
+ saved_domainname = tomoyo_get_name(domainname);
if (!saved_domainname)
- return -ENOMEM;
+ goto out;
}
- saved_program = tomoyo_save_name(program);
+ saved_program = tomoyo_get_name(program);
if (!saved_program)
- return -ENOMEM;
- down_write(&tomoyo_domain_initializer_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_initializer_list, list) {
+ goto out;
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_initializer_list, list) {
if (ptr->is_not != is_not ||
ptr->domainname != saved_domainname ||
ptr->program != saved_program)
continue;
ptr->is_deleted = is_delete;
error = 0;
- goto out;
+ break;
}
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->domainname = saved_domainname;
+ saved_domainname = NULL;
+ entry->program = saved_program;
+ saved_program = NULL;
+ entry->is_not = is_not;
+ entry->is_last_name = is_last_name;
+ list_add_tail_rcu(&entry->list,
+ &tomoyo_domain_initializer_list);
+ entry = NULL;
+ error = 0;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->domainname = saved_domainname;
- new_entry->program = saved_program;
- new_entry->is_not = is_not;
- new_entry->is_last_name = is_last_name;
- list_add_tail(&new_entry->list, &tomoyo_domain_initializer_list);
- error = 0;
+ mutex_unlock(&tomoyo_policy_lock);
out:
- up_write(&tomoyo_domain_initializer_list_lock);
+ tomoyo_put_name(saved_domainname);
+ tomoyo_put_name(saved_program);
+ kfree(entry);
return error;
}
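
The rewritten updater has the shape every tomoyo_update_*_entry() in this patch now follows: allocate outside the lock, toggle is_deleted in place on a match, otherwise splice in the preallocated entry. A condensed sketch of that pattern (names abbreviated):

	entry = is_delete ? NULL : kmalloc(sizeof(*entry), GFP_KERNEL);
	mutex_lock(&tomoyo_policy_lock);
	list_for_each_entry_rcu(ptr, &the_list, list) {
		if (/* ptr matches the request */) {
			ptr->is_deleted = is_delete;
			error = 0;
			break;
		}
	}
	if (!is_delete && error && tomoyo_memory_ok(entry)) {
		/* move the held name references into entry */
		list_add_tail_rcu(&entry->list, &the_list);
		entry = NULL;	/* ownership passed to the list */
		error = 0;
	}
	mutex_unlock(&tomoyo_policy_lock);
	tomoyo_put_name(saved_name);	/* NULL-safe; drops untransferred refs */
	kfree(entry);			/* no-op when the entry was inserted */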
@@ -279,13 +189,14 @@ static int tomoyo_update_domain_initializer_entry(const char *domainname,
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_domain_initializer_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_domain_initializer_list_lock);
list_for_each_cookie(pos, head->read_var2,
&tomoyo_domain_initializer_list) {
const char *no;
@@ -308,7 +219,6 @@ bool tomoyo_read_domain_initializer_policy(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_domain_initializer_list_lock);
return done;
}
@@ -320,6 +230,8 @@ bool tomoyo_read_domain_initializer_policy(struct tomoyo_io_buffer *head)
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_domain_initializer_policy(char *data, const bool is_not,
const bool is_delete)
@@ -345,6 +257,8 @@ int tomoyo_write_domain_initializer_policy(char *data, const bool is_not,
*
* Returns true if executing @program reinitializes domain transition,
* false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_domain_initializer(const struct tomoyo_path_info *
domainname,
@@ -355,8 +269,7 @@ static bool tomoyo_is_domain_initializer(const struct tomoyo_path_info *
struct tomoyo_domain_initializer_entry *ptr;
bool flag = false;
- down_read(&tomoyo_domain_initializer_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_initializer_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_initializer_list, list) {
if (ptr->is_deleted)
continue;
if (ptr->domainname) {
@@ -376,7 +289,6 @@ static bool tomoyo_is_domain_initializer(const struct tomoyo_path_info *
}
flag = true;
}
- up_read(&tomoyo_domain_initializer_list_lock);
return flag;
}
@@ -418,8 +330,7 @@ static bool tomoyo_is_domain_initializer(const struct tomoyo_path_info *
* "<kernel> /usr/sbin/sshd /bin/bash /usr/bin/passwd" domain, unless
* explicitly specified by "initialize_domain".
*/
-static LIST_HEAD(tomoyo_domain_keeper_list);
-static DECLARE_RWSEM(tomoyo_domain_keeper_list_lock);
+LIST_HEAD(tomoyo_domain_keeper_list);
/**
* tomoyo_update_domain_keeper_entry - Update "struct tomoyo_domain_keeper_entry" list.
@@ -430,59 +341,64 @@ static DECLARE_RWSEM(tomoyo_domain_keeper_list_lock);
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_domain_keeper_entry(const char *domainname,
const char *program,
const bool is_not,
const bool is_delete)
{
- struct tomoyo_domain_keeper_entry *new_entry;
+ struct tomoyo_domain_keeper_entry *entry = NULL;
struct tomoyo_domain_keeper_entry *ptr;
- const struct tomoyo_path_info *saved_domainname;
+ const struct tomoyo_path_info *saved_domainname = NULL;
const struct tomoyo_path_info *saved_program = NULL;
- int error = -ENOMEM;
+ int error = is_delete ? -ENOENT : -ENOMEM;
bool is_last_name = false;
if (!tomoyo_is_domain_def(domainname) &&
- tomoyo_is_correct_path(domainname, 1, -1, -1, __func__))
+ tomoyo_is_correct_path(domainname, 1, -1, -1))
is_last_name = true;
- else if (!tomoyo_is_correct_domain(domainname, __func__))
+ else if (!tomoyo_is_correct_domain(domainname))
return -EINVAL;
if (program) {
- if (!tomoyo_is_correct_path(program, 1, -1, -1, __func__))
+ if (!tomoyo_is_correct_path(program, 1, -1, -1))
return -EINVAL;
- saved_program = tomoyo_save_name(program);
+ saved_program = tomoyo_get_name(program);
if (!saved_program)
- return -ENOMEM;
+ goto out;
}
- saved_domainname = tomoyo_save_name(domainname);
+ saved_domainname = tomoyo_get_name(domainname);
if (!saved_domainname)
- return -ENOMEM;
- down_write(&tomoyo_domain_keeper_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_keeper_list, list) {
+ goto out;
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_keeper_list, list) {
if (ptr->is_not != is_not ||
ptr->domainname != saved_domainname ||
ptr->program != saved_program)
continue;
ptr->is_deleted = is_delete;
error = 0;
- goto out;
+ break;
}
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->domainname = saved_domainname;
+ saved_domainname = NULL;
+ entry->program = saved_program;
+ saved_program = NULL;
+ entry->is_not = is_not;
+ entry->is_last_name = is_last_name;
+ list_add_tail_rcu(&entry->list, &tomoyo_domain_keeper_list);
+ entry = NULL;
+ error = 0;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->domainname = saved_domainname;
- new_entry->program = saved_program;
- new_entry->is_not = is_not;
- new_entry->is_last_name = is_last_name;
- list_add_tail(&new_entry->list, &tomoyo_domain_keeper_list);
- error = 0;
+ mutex_unlock(&tomoyo_policy_lock);
out:
- up_write(&tomoyo_domain_keeper_list_lock);
+ tomoyo_put_name(saved_domainname);
+ tomoyo_put_name(saved_program);
+ kfree(entry);
return error;
}
@@ -493,6 +409,7 @@ static int tomoyo_update_domain_keeper_entry(const char *domainname,
* @is_not: True if it is "no_keep_domain" entry.
* @is_delete: True if it is a delete request.
*
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_domain_keeper_policy(char *data, const bool is_not,
const bool is_delete)
@@ -513,13 +430,14 @@ int tomoyo_write_domain_keeper_policy(char *data, const bool is_not,
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_domain_keeper_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_domain_keeper_list_lock);
list_for_each_cookie(pos, head->read_var2,
&tomoyo_domain_keeper_list) {
struct tomoyo_domain_keeper_entry *ptr;
@@ -542,7 +460,6 @@ bool tomoyo_read_domain_keeper_policy(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_domain_keeper_list_lock);
return done;
}
@@ -555,6 +472,8 @@ bool tomoyo_read_domain_keeper_policy(struct tomoyo_io_buffer *head)
*
 * Returns true if executing @program suppresses domain transition,
* false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_domain_keeper(const struct tomoyo_path_info *domainname,
const struct tomoyo_path_info *program,
@@ -563,8 +482,7 @@ static bool tomoyo_is_domain_keeper(const struct tomoyo_path_info *domainname,
struct tomoyo_domain_keeper_entry *ptr;
bool flag = false;
- down_read(&tomoyo_domain_keeper_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_keeper_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_keeper_list, list) {
if (ptr->is_deleted)
continue;
if (!ptr->is_last_name) {
@@ -582,7 +500,6 @@ static bool tomoyo_is_domain_keeper(const struct tomoyo_path_info *domainname,
}
flag = true;
}
- up_read(&tomoyo_domain_keeper_list_lock);
return flag;
}
@@ -616,8 +533,7 @@ static bool tomoyo_is_domain_keeper(const struct tomoyo_path_info *domainname,
* /bin/busybox and domainname which the current process will belong to after
* execve() succeeds is calculated using /bin/cat rather than /bin/busybox .
*/
-static LIST_HEAD(tomoyo_alias_list);
-static DECLARE_RWSEM(tomoyo_alias_list_lock);
+LIST_HEAD(tomoyo_alias_list);
/**
* tomoyo_update_alias_entry - Update "struct tomoyo_alias_entry" list.
@@ -627,46 +543,51 @@ static DECLARE_RWSEM(tomoyo_alias_list_lock);
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_alias_entry(const char *original_name,
const char *aliased_name,
const bool is_delete)
{
- struct tomoyo_alias_entry *new_entry;
+ struct tomoyo_alias_entry *entry = NULL;
struct tomoyo_alias_entry *ptr;
const struct tomoyo_path_info *saved_original_name;
const struct tomoyo_path_info *saved_aliased_name;
- int error = -ENOMEM;
+ int error = is_delete ? -ENOENT : -ENOMEM;
- if (!tomoyo_is_correct_path(original_name, 1, -1, -1, __func__) ||
- !tomoyo_is_correct_path(aliased_name, 1, -1, -1, __func__))
+ if (!tomoyo_is_correct_path(original_name, 1, -1, -1) ||
+ !tomoyo_is_correct_path(aliased_name, 1, -1, -1))
return -EINVAL; /* No patterns allowed. */
- saved_original_name = tomoyo_save_name(original_name);
- saved_aliased_name = tomoyo_save_name(aliased_name);
+ saved_original_name = tomoyo_get_name(original_name);
+ saved_aliased_name = tomoyo_get_name(aliased_name);
if (!saved_original_name || !saved_aliased_name)
- return -ENOMEM;
- down_write(&tomoyo_alias_list_lock);
- list_for_each_entry(ptr, &tomoyo_alias_list, list) {
+ goto out;
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_alias_list, list) {
if (ptr->original_name != saved_original_name ||
ptr->aliased_name != saved_aliased_name)
continue;
ptr->is_deleted = is_delete;
error = 0;
- goto out;
+ break;
}
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->original_name = saved_original_name;
+ saved_original_name = NULL;
+ entry->aliased_name = saved_aliased_name;
+ saved_aliased_name = NULL;
+ list_add_tail_rcu(&entry->list, &tomoyo_alias_list);
+ entry = NULL;
+ error = 0;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->original_name = saved_original_name;
- new_entry->aliased_name = saved_aliased_name;
- list_add_tail(&new_entry->list, &tomoyo_alias_list);
- error = 0;
+ mutex_unlock(&tomoyo_policy_lock);
out:
- up_write(&tomoyo_alias_list_lock);
+ tomoyo_put_name(saved_original_name);
+ tomoyo_put_name(saved_aliased_name);
+ kfree(entry);
return error;
}
@@ -676,13 +597,14 @@ static int tomoyo_update_alias_entry(const char *original_name,
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_alias_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_alias_list_lock);
list_for_each_cookie(pos, head->read_var2, &tomoyo_alias_list) {
struct tomoyo_alias_entry *ptr;
@@ -695,7 +617,6 @@ bool tomoyo_read_alias_policy(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_alias_list_lock);
return done;
}
@@ -706,6 +627,8 @@ bool tomoyo_read_alias_policy(struct tomoyo_io_buffer *head)
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_alias_policy(char *data, const bool is_delete)
{
@@ -724,63 +647,46 @@ int tomoyo_write_alias_policy(char *data, const bool is_delete)
* @profile: Profile number to assign if the domain was newly created.
*
* Returns pointer to "struct tomoyo_domain_info" on success, NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
domainname,
const u8 profile)
{
- struct tomoyo_domain_info *domain = NULL;
+ struct tomoyo_domain_info *entry;
+ struct tomoyo_domain_info *domain;
const struct tomoyo_path_info *saved_domainname;
+ bool found = false;
- down_write(&tomoyo_domain_list_lock);
- domain = tomoyo_find_domain(domainname);
- if (domain)
- goto out;
- if (!tomoyo_is_correct_domain(domainname, __func__))
- goto out;
- saved_domainname = tomoyo_save_name(domainname);
+ if (!tomoyo_is_correct_domain(domainname))
+ return NULL;
+ saved_domainname = tomoyo_get_name(domainname);
if (!saved_domainname)
- goto out;
- /* Can I reuse memory of deleted domain? */
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
- struct task_struct *p;
- struct tomoyo_acl_info *ptr;
- bool flag;
- if (!domain->is_deleted ||
- domain->domainname != saved_domainname)
+ return NULL;
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+ if (domain->is_deleted ||
+ tomoyo_pathcmp(saved_domainname, domain->domainname))
continue;
- flag = false;
- read_lock(&tasklist_lock);
- for_each_process(p) {
- if (tomoyo_real_domain(p) != domain)
- continue;
- flag = true;
- break;
- }
- read_unlock(&tasklist_lock);
- if (flag)
- continue;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- ptr->type |= TOMOYO_ACL_DELETED;
- }
- tomoyo_set_domain_flag(domain, true, domain->flags);
- domain->profile = profile;
- domain->quota_warned = false;
- mb(); /* Avoid out-of-order execution. */
- domain->is_deleted = false;
- goto out;
+ found = true;
+ break;
}
- /* No memory reusable. Create using new memory. */
- domain = tomoyo_alloc_element(sizeof(*domain));
- if (domain) {
- INIT_LIST_HEAD(&domain->acl_info_list);
- domain->domainname = saved_domainname;
- domain->profile = profile;
- list_add_tail(&domain->list, &tomoyo_domain_list);
+ if (!found && tomoyo_memory_ok(entry)) {
+ INIT_LIST_HEAD(&entry->acl_info_list);
+ entry->domainname = saved_domainname;
+ saved_domainname = NULL;
+ entry->profile = profile;
+ list_add_tail_rcu(&entry->list, &tomoyo_domain_list);
+ domain = entry;
+ entry = NULL;
+ found = true;
}
- out:
- up_write(&tomoyo_domain_list_lock);
- return domain;
+ mutex_unlock(&tomoyo_policy_lock);
+ tomoyo_put_name(saved_domainname);
+ kfree(entry);
+ return found ? domain : NULL;
}
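
Note that the kzalloc() happens before tomoyo_policy_lock is taken, so the mutex is never held across a GFP_KERNEL allocation. A minimal usage sketch (the domainname is illustrative):

	struct tomoyo_domain_info *domain =
		tomoyo_find_or_assign_new_domain("<kernel> /sbin/init", 0);

	if (!domain)
		return -ENOMEM;	/* malformed name or out of memory */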
/**
@@ -789,6 +695,8 @@ struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
* @bprm: Pointer to "struct linux_binprm".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_find_next_domain(struct linux_binprm *bprm)
{
@@ -796,7 +704,7 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
* This function assumes that the size of buffer returned by
* tomoyo_realpath() = TOMOYO_MAX_PATHNAME_LEN.
*/
- struct tomoyo_page_buffer *tmp = tomoyo_alloc(sizeof(*tmp));
+ struct tomoyo_page_buffer *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
struct tomoyo_domain_info *old_domain = tomoyo_domain();
struct tomoyo_domain_info *domain = NULL;
const char *old_domain_name = old_domain->domainname->name;
@@ -849,8 +757,7 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
if (tomoyo_pathcmp(&r, &s)) {
struct tomoyo_alias_entry *ptr;
/* Is this program allowed to be called via symbolic links? */
- down_read(&tomoyo_alias_list_lock);
- list_for_each_entry(ptr, &tomoyo_alias_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_alias_list, list) {
if (ptr->is_deleted ||
tomoyo_pathcmp(&r, ptr->original_name) ||
tomoyo_pathcmp(&s, ptr->aliased_name))
@@ -861,7 +768,6 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
tomoyo_fill_path_info(&r);
break;
}
- up_read(&tomoyo_alias_list_lock);
}
/* Check execute permission. */
@@ -892,9 +798,7 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
}
if (domain || strlen(new_domain_name) >= TOMOYO_MAX_PATHNAME_LEN)
goto done;
- down_read(&tomoyo_domain_list_lock);
domain = tomoyo_find_domain(new_domain_name);
- up_read(&tomoyo_domain_list_lock);
if (domain)
goto done;
if (is_enforce)
@@ -909,14 +813,15 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
if (is_enforce)
retval = -EPERM;
else
- tomoyo_set_domain_flag(old_domain, false,
- TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED);
+ old_domain->transition_failed = true;
out:
if (!domain)
domain = old_domain;
+ /* Update reference count on "struct tomoyo_domain_info". */
+ atomic_inc(&domain->users);
bprm->cred->security = domain;
- tomoyo_free(real_program_name);
- tomoyo_free(symlink_program_name);
- tomoyo_free(tmp);
+ kfree(real_program_name);
+ kfree(symlink_program_name);
+ kfree(tmp);
return retval;
}
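
The new atomic_inc() is what ties domains into the garbage collector: a domain stays alive while any cred points at it. A sketch of the release side, assuming the cred teardown hook drops the count (that hook is outside this hunk):

	struct tomoyo_domain_info *domain = cred->security;

	if (domain)
		atomic_dec(&domain->users);	/* tomoyo_run_gc() may then
						   reclaim the domain */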
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 9a6c588..1b24304 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -10,108 +10,64 @@
*/
#include "common.h"
-#include "tomoyo.h"
-#include "realpath.h"
-
-/*
- * tomoyo_globally_readable_file_entry is a structure which is used for holding
- * "allow_read" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_globally_readable_list .
- * (2) "filename" is a pathname which is allowed to open(O_RDONLY).
- * (3) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- */
-struct tomoyo_globally_readable_file_entry {
- struct list_head list;
- const struct tomoyo_path_info *filename;
- bool is_deleted;
-};
-
-/*
- * tomoyo_pattern_entry is a structure which is used for holding
- * "tomoyo_pattern_list" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_pattern_list .
- * (2) "pattern" is a pathname pattern which is used for converting pathnames
- * to pathname patterns during learning mode.
- * (3) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- */
-struct tomoyo_pattern_entry {
- struct list_head list;
- const struct tomoyo_path_info *pattern;
- bool is_deleted;
-};
-
-/*
- * tomoyo_no_rewrite_entry is a structure which is used for holding
- * "deny_rewrite" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_no_rewrite_list .
- * (2) "pattern" is a pathname which is by default not permitted to modify
- * already existing content.
- * (3) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- */
-struct tomoyo_no_rewrite_entry {
- struct list_head list;
- const struct tomoyo_path_info *pattern;
- bool is_deleted;
-};
/* Keyword array for single path operations. */
-static const char *tomoyo_sp_keyword[TOMOYO_MAX_SINGLE_PATH_OPERATION] = {
- [TOMOYO_TYPE_READ_WRITE_ACL] = "read/write",
- [TOMOYO_TYPE_EXECUTE_ACL] = "execute",
- [TOMOYO_TYPE_READ_ACL] = "read",
- [TOMOYO_TYPE_WRITE_ACL] = "write",
- [TOMOYO_TYPE_CREATE_ACL] = "create",
- [TOMOYO_TYPE_UNLINK_ACL] = "unlink",
- [TOMOYO_TYPE_MKDIR_ACL] = "mkdir",
- [TOMOYO_TYPE_RMDIR_ACL] = "rmdir",
- [TOMOYO_TYPE_MKFIFO_ACL] = "mkfifo",
- [TOMOYO_TYPE_MKSOCK_ACL] = "mksock",
- [TOMOYO_TYPE_MKBLOCK_ACL] = "mkblock",
- [TOMOYO_TYPE_MKCHAR_ACL] = "mkchar",
- [TOMOYO_TYPE_TRUNCATE_ACL] = "truncate",
- [TOMOYO_TYPE_SYMLINK_ACL] = "symlink",
- [TOMOYO_TYPE_REWRITE_ACL] = "rewrite",
+static const char *tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION] = {
+ [TOMOYO_TYPE_READ_WRITE] = "read/write",
+ [TOMOYO_TYPE_EXECUTE] = "execute",
+ [TOMOYO_TYPE_READ] = "read",
+ [TOMOYO_TYPE_WRITE] = "write",
+ [TOMOYO_TYPE_CREATE] = "create",
+ [TOMOYO_TYPE_UNLINK] = "unlink",
+ [TOMOYO_TYPE_MKDIR] = "mkdir",
+ [TOMOYO_TYPE_RMDIR] = "rmdir",
+ [TOMOYO_TYPE_MKFIFO] = "mkfifo",
+ [TOMOYO_TYPE_MKSOCK] = "mksock",
+ [TOMOYO_TYPE_MKBLOCK] = "mkblock",
+ [TOMOYO_TYPE_MKCHAR] = "mkchar",
+ [TOMOYO_TYPE_TRUNCATE] = "truncate",
+ [TOMOYO_TYPE_SYMLINK] = "symlink",
+ [TOMOYO_TYPE_REWRITE] = "rewrite",
+ [TOMOYO_TYPE_IOCTL] = "ioctl",
+ [TOMOYO_TYPE_CHMOD] = "chmod",
+ [TOMOYO_TYPE_CHOWN] = "chown",
+ [TOMOYO_TYPE_CHGRP] = "chgrp",
+ [TOMOYO_TYPE_CHROOT] = "chroot",
+ [TOMOYO_TYPE_MOUNT] = "mount",
+ [TOMOYO_TYPE_UMOUNT] = "unmount",
};
/* Keyword array for double path operations. */
-static const char *tomoyo_dp_keyword[TOMOYO_MAX_DOUBLE_PATH_OPERATION] = {
- [TOMOYO_TYPE_LINK_ACL] = "link",
- [TOMOYO_TYPE_RENAME_ACL] = "rename",
+static const char *tomoyo_path2_keyword[TOMOYO_MAX_PATH2_OPERATION] = {
+ [TOMOYO_TYPE_LINK] = "link",
+ [TOMOYO_TYPE_RENAME] = "rename",
+ [TOMOYO_TYPE_PIVOT_ROOT] = "pivot_root",
};
/**
- * tomoyo_sp2keyword - Get the name of single path operation.
+ * tomoyo_path2keyword - Get the name of single path operation.
*
* @operation: Type of operation.
*
* Returns the name of single path operation.
*/
-const char *tomoyo_sp2keyword(const u8 operation)
+const char *tomoyo_path2keyword(const u8 operation)
{
- return (operation < TOMOYO_MAX_SINGLE_PATH_OPERATION)
- ? tomoyo_sp_keyword[operation] : NULL;
+ return (operation < TOMOYO_MAX_PATH_OPERATION)
+ ? tomoyo_path_keyword[operation] : NULL;
}
/**
- * tomoyo_dp2keyword - Get the name of double path operation.
+ * tomoyo_path22keyword - Get the name of double path operation.
*
* @operation: Type of operation.
*
* Returns the name of double path operation.
*/
-const char *tomoyo_dp2keyword(const u8 operation)
+const char *tomoyo_path22keyword(const u8 operation)
{
- return (operation < TOMOYO_MAX_DOUBLE_PATH_OPERATION)
- ? tomoyo_dp_keyword[operation] : NULL;
+ return (operation < TOMOYO_MAX_PATH2_OPERATION)
+ ? tomoyo_path2_keyword[operation] : NULL;
}
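
Both lookups are plain bounds-checked table reads. A usage sketch matching how tomoyo_check_file_perm2() builds its message below:

	const char *msg = tomoyo_path2keyword(TOMOYO_TYPE_READ_WRITE);

	/* msg is "read/write"; out-of-range operations yield NULL */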
/**
@@ -142,7 +98,8 @@ static bool tomoyo_strendswith(const char *name, const char *tail)
static struct tomoyo_path_info *tomoyo_get_path(struct path *path)
{
int error;
- struct tomoyo_path_info_with_data *buf = tomoyo_alloc(sizeof(*buf));
+ struct tomoyo_path_info_with_data *buf = kzalloc(sizeof(*buf),
+ GFP_KERNEL);
if (!buf)
return NULL;
@@ -154,20 +111,17 @@ static struct tomoyo_path_info *tomoyo_get_path(struct path *path)
tomoyo_fill_path_info(&buf->head);
return &buf->head;
}
- tomoyo_free(buf);
+ kfree(buf);
return NULL;
}
-/* Lock for domain->acl_info_list. */
-DECLARE_RWSEM(tomoyo_domain_acl_info_list_lock);
-
-static int tomoyo_update_double_path_acl(const u8 type, const char *filename1,
- const char *filename2,
- struct tomoyo_domain_info *
- const domain, const bool is_delete);
-static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
- struct tomoyo_domain_info *
- const domain, const bool is_delete);
+static int tomoyo_update_path2_acl(const u8 type, const char *filename1,
+ const char *filename2,
+ struct tomoyo_domain_info *const domain,
+ const bool is_delete);
+static int tomoyo_update_path_acl(const u8 type, const char *filename,
+ struct tomoyo_domain_info *const domain,
+ const bool is_delete);
/*
* tomoyo_globally_readable_list is used for holding list of pathnames which
@@ -194,8 +148,7 @@ static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
* given "allow_read /lib/libc-2.5.so" to the domain which current process
* belongs to.
*/
-static LIST_HEAD(tomoyo_globally_readable_list);
-static DECLARE_RWSEM(tomoyo_globally_readable_list_lock);
+LIST_HEAD(tomoyo_globally_readable_list);
/**
* tomoyo_update_globally_readable_entry - Update "struct tomoyo_globally_readable_file_entry" list.
@@ -204,40 +157,42 @@ static DECLARE_RWSEM(tomoyo_globally_readable_list_lock);
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_globally_readable_entry(const char *filename,
const bool is_delete)
{
- struct tomoyo_globally_readable_file_entry *new_entry;
+ struct tomoyo_globally_readable_file_entry *entry = NULL;
struct tomoyo_globally_readable_file_entry *ptr;
const struct tomoyo_path_info *saved_filename;
- int error = -ENOMEM;
+ int error = is_delete ? -ENOENT : -ENOMEM;
- if (!tomoyo_is_correct_path(filename, 1, 0, -1, __func__))
+ if (!tomoyo_is_correct_path(filename, 1, 0, -1))
return -EINVAL;
- saved_filename = tomoyo_save_name(filename);
+ saved_filename = tomoyo_get_name(filename);
if (!saved_filename)
return -ENOMEM;
- down_write(&tomoyo_globally_readable_list_lock);
- list_for_each_entry(ptr, &tomoyo_globally_readable_list, list) {
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_globally_readable_list, list) {
if (ptr->filename != saved_filename)
continue;
ptr->is_deleted = is_delete;
error = 0;
- goto out;
+ break;
}
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->filename = saved_filename;
+ saved_filename = NULL;
+ list_add_tail_rcu(&entry->list, &tomoyo_globally_readable_list);
+ entry = NULL;
+ error = 0;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->filename = saved_filename;
- list_add_tail(&new_entry->list, &tomoyo_globally_readable_list);
- error = 0;
- out:
- up_write(&tomoyo_globally_readable_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
+ tomoyo_put_name(saved_filename);
+ kfree(entry);
return error;
}
@@ -247,21 +202,22 @@ static int tomoyo_update_globally_readable_entry(const char *filename,
* @filename: The filename to check.
*
* Returns true if any domain can open @filename for reading, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_globally_readable_file(const struct tomoyo_path_info *
filename)
{
struct tomoyo_globally_readable_file_entry *ptr;
bool found = false;
- down_read(&tomoyo_globally_readable_list_lock);
- list_for_each_entry(ptr, &tomoyo_globally_readable_list, list) {
+
+ list_for_each_entry_rcu(ptr, &tomoyo_globally_readable_list, list) {
if (!ptr->is_deleted &&
tomoyo_path_matches_pattern(filename, ptr->filename)) {
found = true;
break;
}
}
- up_read(&tomoyo_globally_readable_list_lock);
return found;
}
@@ -272,6 +228,8 @@ static bool tomoyo_is_globally_readable_file(const struct tomoyo_path_info *
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_globally_readable_policy(char *data, const bool is_delete)
{
@@ -284,13 +242,14 @@ int tomoyo_write_globally_readable_policy(char *data, const bool is_delete)
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_globally_readable_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_globally_readable_list_lock);
list_for_each_cookie(pos, head->read_var2,
&tomoyo_globally_readable_list) {
struct tomoyo_globally_readable_file_entry *ptr;
@@ -304,7 +263,6 @@ bool tomoyo_read_globally_readable_policy(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_globally_readable_list_lock);
return done;
}
@@ -337,8 +295,7 @@ bool tomoyo_read_globally_readable_policy(struct tomoyo_io_buffer *head)
* which pretends as if /proc/self/ is not a symlink; so that we can forbid
* current process from accessing other process's information.
*/
-static LIST_HEAD(tomoyo_pattern_list);
-static DECLARE_RWSEM(tomoyo_pattern_list_lock);
+LIST_HEAD(tomoyo_pattern_list);
/**
* tomoyo_update_file_pattern_entry - Update "struct tomoyo_pattern_entry" list.
@@ -347,40 +304,43 @@ static DECLARE_RWSEM(tomoyo_pattern_list_lock);
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_file_pattern_entry(const char *pattern,
const bool is_delete)
{
- struct tomoyo_pattern_entry *new_entry;
+ struct tomoyo_pattern_entry *entry = NULL;
struct tomoyo_pattern_entry *ptr;
const struct tomoyo_path_info *saved_pattern;
- int error = -ENOMEM;
+ int error = is_delete ? -ENOENT : -ENOMEM;
- if (!tomoyo_is_correct_path(pattern, 0, 1, 0, __func__))
- return -EINVAL;
- saved_pattern = tomoyo_save_name(pattern);
+ saved_pattern = tomoyo_get_name(pattern);
if (!saved_pattern)
- return -ENOMEM;
- down_write(&tomoyo_pattern_list_lock);
- list_for_each_entry(ptr, &tomoyo_pattern_list, list) {
+ return error;
+ if (!saved_pattern->is_patterned)
+ goto out;
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_pattern_list, list) {
if (saved_pattern != ptr->pattern)
continue;
ptr->is_deleted = is_delete;
error = 0;
- goto out;
+ break;
}
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->pattern = saved_pattern;
+ saved_pattern = NULL;
+ list_add_tail_rcu(&entry->list, &tomoyo_pattern_list);
+ entry = NULL;
+ error = 0;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->pattern = saved_pattern;
- list_add_tail(&new_entry->list, &tomoyo_pattern_list);
- error = 0;
+ mutex_unlock(&tomoyo_policy_lock);
out:
- up_write(&tomoyo_pattern_list_lock);
+ kfree(entry);
+ tomoyo_put_name(saved_pattern);
return error;
}
@@ -390,6 +350,8 @@ static int tomoyo_update_file_pattern_entry(const char *pattern,
* @filename: The filename to find patterned pathname.
*
* Returns pointer to pathname pattern if matched, @filename otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static const struct tomoyo_path_info *
tomoyo_get_file_pattern(const struct tomoyo_path_info *filename)
@@ -397,8 +359,7 @@ tomoyo_get_file_pattern(const struct tomoyo_path_info *filename)
struct tomoyo_pattern_entry *ptr;
const struct tomoyo_path_info *pattern = NULL;
- down_read(&tomoyo_pattern_list_lock);
- list_for_each_entry(ptr, &tomoyo_pattern_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_pattern_list, list) {
if (ptr->is_deleted)
continue;
if (!tomoyo_path_matches_pattern(filename, ptr->pattern))
@@ -411,7 +372,6 @@ tomoyo_get_file_pattern(const struct tomoyo_path_info *filename)
break;
}
}
- up_read(&tomoyo_pattern_list_lock);
if (pattern)
filename = pattern;
return filename;
@@ -424,6 +384,8 @@ tomoyo_get_file_pattern(const struct tomoyo_path_info *filename)
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_pattern_policy(char *data, const bool is_delete)
{
@@ -436,13 +398,14 @@ int tomoyo_write_pattern_policy(char *data, const bool is_delete)
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_file_pattern(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_pattern_list_lock);
list_for_each_cookie(pos, head->read_var2, &tomoyo_pattern_list) {
struct tomoyo_pattern_entry *ptr;
ptr = list_entry(pos, struct tomoyo_pattern_entry, list);
@@ -453,7 +416,6 @@ bool tomoyo_read_file_pattern(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_pattern_list_lock);
return done;
}
@@ -486,8 +448,7 @@ bool tomoyo_read_file_pattern(struct tomoyo_io_buffer *head)
* " (deleted)" suffix if the file is already unlink()ed; so that we don't
* need to worry whether the file is already unlink()ed or not.
*/
-static LIST_HEAD(tomoyo_no_rewrite_list);
-static DECLARE_RWSEM(tomoyo_no_rewrite_list_lock);
+LIST_HEAD(tomoyo_no_rewrite_list);
/**
* tomoyo_update_no_rewrite_entry - Update "struct tomoyo_no_rewrite_entry" list.
@@ -496,39 +457,42 @@ static DECLARE_RWSEM(tomoyo_no_rewrite_list_lock);
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_no_rewrite_entry(const char *pattern,
const bool is_delete)
{
- struct tomoyo_no_rewrite_entry *new_entry, *ptr;
+ struct tomoyo_no_rewrite_entry *entry = NULL;
+ struct tomoyo_no_rewrite_entry *ptr;
const struct tomoyo_path_info *saved_pattern;
- int error = -ENOMEM;
+ int error = is_delete ? -ENOENT : -ENOMEM;
- if (!tomoyo_is_correct_path(pattern, 0, 0, 0, __func__))
+ if (!tomoyo_is_correct_path(pattern, 0, 0, 0))
return -EINVAL;
- saved_pattern = tomoyo_save_name(pattern);
+ saved_pattern = tomoyo_get_name(pattern);
if (!saved_pattern)
- return -ENOMEM;
- down_write(&tomoyo_no_rewrite_list_lock);
- list_for_each_entry(ptr, &tomoyo_no_rewrite_list, list) {
+ return error;
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_no_rewrite_list, list) {
if (ptr->pattern != saved_pattern)
continue;
ptr->is_deleted = is_delete;
error = 0;
- goto out;
+ break;
}
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->pattern = saved_pattern;
+ saved_pattern = NULL;
+ list_add_tail_rcu(&entry->list, &tomoyo_no_rewrite_list);
+ entry = NULL;
+ error = 0;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->pattern = saved_pattern;
- list_add_tail(&new_entry->list, &tomoyo_no_rewrite_list);
- error = 0;
- out:
- up_write(&tomoyo_no_rewrite_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
+ tomoyo_put_name(saved_pattern);
+ kfree(entry);
return error;
}
@@ -539,14 +503,15 @@ static int tomoyo_update_no_rewrite_entry(const char *pattern,
*
* Returns true if @filename is specified by "deny_rewrite" directive,
* false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_no_rewrite_file(const struct tomoyo_path_info *filename)
{
struct tomoyo_no_rewrite_entry *ptr;
bool found = false;
- down_read(&tomoyo_no_rewrite_list_lock);
- list_for_each_entry(ptr, &tomoyo_no_rewrite_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_no_rewrite_list, list) {
if (ptr->is_deleted)
continue;
if (!tomoyo_path_matches_pattern(filename, ptr->pattern))
@@ -554,7 +519,6 @@ static bool tomoyo_is_no_rewrite_file(const struct tomoyo_path_info *filename)
found = true;
break;
}
- up_read(&tomoyo_no_rewrite_list_lock);
return found;
}
@@ -565,6 +529,8 @@ static bool tomoyo_is_no_rewrite_file(const struct tomoyo_path_info *filename)
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_no_rewrite_policy(char *data, const bool is_delete)
{
@@ -577,13 +543,14 @@ int tomoyo_write_no_rewrite_policy(char *data, const bool is_delete)
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_no_rewrite_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_no_rewrite_list_lock);
list_for_each_cookie(pos, head->read_var2, &tomoyo_no_rewrite_list) {
struct tomoyo_no_rewrite_entry *ptr;
ptr = list_entry(pos, struct tomoyo_no_rewrite_entry, list);
@@ -594,7 +561,6 @@ bool tomoyo_read_no_rewrite_policy(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_no_rewrite_list_lock);
return done;
}
@@ -612,6 +578,8 @@ bool tomoyo_read_no_rewrite_policy(struct tomoyo_io_buffer *head)
* Current policy syntax uses "allow_read/write" instead of "6",
* "allow_read" instead of "4", "allow_write" instead of "2",
* "allow_execute" instead of "1".
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_file_acl(const char *filename, u8 perm,
struct tomoyo_domain_info * const domain,
@@ -629,19 +597,19 @@ static int tomoyo_update_file_acl(const char *filename, u8 perm,
*/
return 0;
if (perm & 4)
- tomoyo_update_single_path_acl(TOMOYO_TYPE_READ_ACL, filename,
- domain, is_delete);
+ tomoyo_update_path_acl(TOMOYO_TYPE_READ, filename, domain,
+ is_delete);
if (perm & 2)
- tomoyo_update_single_path_acl(TOMOYO_TYPE_WRITE_ACL, filename,
- domain, is_delete);
+ tomoyo_update_path_acl(TOMOYO_TYPE_WRITE, filename, domain,
+ is_delete);
if (perm & 1)
- tomoyo_update_single_path_acl(TOMOYO_TYPE_EXECUTE_ACL,
- filename, domain, is_delete);
+ tomoyo_update_path_acl(TOMOYO_TYPE_EXECUTE, filename, domain,
+ is_delete);
return 0;
}
/**
- * tomoyo_check_single_path_acl2 - Check permission for single path operation.
+ * tomoyo_path_acl2 - Check permission for single path operation.
*
* @domain: Pointer to "struct tomoyo_domain_info".
* @filename: Filename to check.
@@ -649,26 +617,28 @@ static int tomoyo_update_file_acl(const char *filename, u8 perm,
* @may_use_pattern: True if patterned ACL is permitted.
*
* Returns 0 on success, -EPERM otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_check_single_path_acl2(const struct tomoyo_domain_info *
- domain,
- const struct tomoyo_path_info *
- filename,
- const u16 perm,
- const bool may_use_pattern)
+static int tomoyo_path_acl2(const struct tomoyo_domain_info *domain,
+ const struct tomoyo_path_info *filename,
+ const u32 perm, const bool may_use_pattern)
{
struct tomoyo_acl_info *ptr;
int error = -EPERM;
- down_read(&tomoyo_domain_acl_info_list_lock);
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- struct tomoyo_single_path_acl_record *acl;
- if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_SINGLE_PATH_ACL)
- continue;
- acl = container_of(ptr, struct tomoyo_single_path_acl_record,
- head);
- if (!(acl->perm & perm))
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+ struct tomoyo_path_acl *acl;
+ if (ptr->type != TOMOYO_TYPE_PATH_ACL)
continue;
+ acl = container_of(ptr, struct tomoyo_path_acl, head);
+ if (perm <= 0xFFFF) {
+ if (!(acl->perm & perm))
+ continue;
+ } else {
+ if (!(acl->perm_high & (perm >> 16)))
+ continue;
+ }
if (may_use_pattern || !acl->filename->is_patterned) {
if (!tomoyo_path_matches_pattern(filename,
acl->filename))
@@ -679,7 +649,6 @@ static int tomoyo_check_single_path_acl2(const struct tomoyo_domain_info *
error = 0;
break;
}
- up_read(&tomoyo_domain_acl_info_list_lock);
return error;
}
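
The perm/perm_high split exists because the keyword table now holds more than 16 single-path operations, so "1 << type" no longer fits the old u16 bitmask. The test above reduces to this sketch:

	const u32 perm = 1 << type;	/* type may now be >= 16 */
	bool matched;

	if (perm <= 0xFFFF)
		matched = acl->perm & perm;		/* low 16 operations */
	else
		matched = acl->perm_high & (perm >> 16);	/* the rest */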
@@ -691,27 +660,28 @@ static int tomoyo_check_single_path_acl2(const struct tomoyo_domain_info *
* @operation: Mode ("read" or "write" or "read/write" or "execute").
*
* Returns 0 on success, -EPERM otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_check_file_acl(const struct tomoyo_domain_info *domain,
const struct tomoyo_path_info *filename,
const u8 operation)
{
- u16 perm = 0;
+ u32 perm = 0;
if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE))
return 0;
if (operation == 6)
- perm = 1 << TOMOYO_TYPE_READ_WRITE_ACL;
+ perm = 1 << TOMOYO_TYPE_READ_WRITE;
else if (operation == 4)
- perm = 1 << TOMOYO_TYPE_READ_ACL;
+ perm = 1 << TOMOYO_TYPE_READ;
else if (operation == 2)
- perm = 1 << TOMOYO_TYPE_WRITE_ACL;
+ perm = 1 << TOMOYO_TYPE_WRITE;
else if (operation == 1)
- perm = 1 << TOMOYO_TYPE_EXECUTE_ACL;
+ perm = 1 << TOMOYO_TYPE_EXECUTE;
else
BUG();
- return tomoyo_check_single_path_acl2(domain, filename, perm,
- operation != 1);
+ return tomoyo_path_acl2(domain, filename, perm, operation != 1);
}
/**
@@ -724,6 +694,8 @@ static int tomoyo_check_file_acl(const struct tomoyo_domain_info *domain,
* @mode: Access control mode.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_check_file_perm2(struct tomoyo_domain_info * const domain,
const struct tomoyo_path_info *filename,
@@ -737,18 +709,17 @@ static int tomoyo_check_file_perm2(struct tomoyo_domain_info * const domain,
if (!filename)
return 0;
error = tomoyo_check_file_acl(domain, filename, perm);
- if (error && perm == 4 &&
- (domain->flags & TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ) == 0
+ if (error && perm == 4 && !domain->ignore_global_allow_read
&& tomoyo_is_globally_readable_file(filename))
error = 0;
if (perm == 6)
- msg = tomoyo_sp2keyword(TOMOYO_TYPE_READ_WRITE_ACL);
+ msg = tomoyo_path2keyword(TOMOYO_TYPE_READ_WRITE);
else if (perm == 4)
- msg = tomoyo_sp2keyword(TOMOYO_TYPE_READ_ACL);
+ msg = tomoyo_path2keyword(TOMOYO_TYPE_READ);
else if (perm == 2)
- msg = tomoyo_sp2keyword(TOMOYO_TYPE_WRITE_ACL);
+ msg = tomoyo_path2keyword(TOMOYO_TYPE_WRITE);
else if (perm == 1)
- msg = tomoyo_sp2keyword(TOMOYO_TYPE_EXECUTE_ACL);
+ msg = tomoyo_path2keyword(TOMOYO_TYPE_EXECUTE);
else
BUG();
if (!error)
@@ -777,6 +748,8 @@ static int tomoyo_check_file_perm2(struct tomoyo_domain_info * const domain,
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_file_policy(char *data, struct tomoyo_domain_info *domain,
const bool is_delete)
@@ -795,28 +768,28 @@ int tomoyo_write_file_policy(char *data, struct tomoyo_domain_info *domain,
if (strncmp(data, "allow_", 6))
goto out;
data += 6;
- for (type = 0; type < TOMOYO_MAX_SINGLE_PATH_OPERATION; type++) {
- if (strcmp(data, tomoyo_sp_keyword[type]))
+ for (type = 0; type < TOMOYO_MAX_PATH_OPERATION; type++) {
+ if (strcmp(data, tomoyo_path_keyword[type]))
continue;
- return tomoyo_update_single_path_acl(type, filename,
- domain, is_delete);
+ return tomoyo_update_path_acl(type, filename, domain,
+ is_delete);
}
filename2 = strchr(filename, ' ');
if (!filename2)
goto out;
*filename2++ = '\0';
- for (type = 0; type < TOMOYO_MAX_DOUBLE_PATH_OPERATION; type++) {
- if (strcmp(data, tomoyo_dp_keyword[type]))
+ for (type = 0; type < TOMOYO_MAX_PATH2_OPERATION; type++) {
+ if (strcmp(data, tomoyo_path2_keyword[type]))
continue;
- return tomoyo_update_double_path_acl(type, filename, filename2,
- domain, is_delete);
+ return tomoyo_update_path2_acl(type, filename, filename2,
+ domain, is_delete);
}
out:
return -EINVAL;
}
/**
- * tomoyo_update_single_path_acl - Update "struct tomoyo_single_path_acl_record" list.
+ * tomoyo_update_path_acl - Update "struct tomoyo_path_acl" list.
*
* @type: Type of operation.
* @filename: Filename.
@@ -824,85 +797,82 @@ int tomoyo_write_file_policy(char *data, struct tomoyo_domain_info *domain,
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
- struct tomoyo_domain_info *
- const domain, const bool is_delete)
+static int tomoyo_update_path_acl(const u8 type, const char *filename,
+ struct tomoyo_domain_info *const domain,
+ const bool is_delete)
{
- static const u16 rw_mask =
- (1 << TOMOYO_TYPE_READ_ACL) | (1 << TOMOYO_TYPE_WRITE_ACL);
+ static const u32 rw_mask =
+ (1 << TOMOYO_TYPE_READ) | (1 << TOMOYO_TYPE_WRITE);
const struct tomoyo_path_info *saved_filename;
struct tomoyo_acl_info *ptr;
- struct tomoyo_single_path_acl_record *acl;
- int error = -ENOMEM;
- const u16 perm = 1 << type;
+ struct tomoyo_path_acl *entry = NULL;
+ int error = is_delete ? -ENOENT : -ENOMEM;
+ const u32 perm = 1 << type;
if (!domain)
return -EINVAL;
- if (!tomoyo_is_correct_path(filename, 0, 0, 0, __func__))
+ if (!tomoyo_is_correct_path(filename, 0, 0, 0))
return -EINVAL;
- saved_filename = tomoyo_save_name(filename);
+ saved_filename = tomoyo_get_name(filename);
if (!saved_filename)
return -ENOMEM;
- down_write(&tomoyo_domain_acl_info_list_lock);
- if (is_delete)
- goto delete;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- if (tomoyo_acl_type1(ptr) != TOMOYO_TYPE_SINGLE_PATH_ACL)
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+ struct tomoyo_path_acl *acl =
+ container_of(ptr, struct tomoyo_path_acl, head);
+ if (ptr->type != TOMOYO_TYPE_PATH_ACL)
continue;
- acl = container_of(ptr, struct tomoyo_single_path_acl_record,
- head);
if (acl->filename != saved_filename)
continue;
- /* Special case. Clear all bits if marked as deleted. */
- if (ptr->type & TOMOYO_ACL_DELETED)
- acl->perm = 0;
- acl->perm |= perm;
- if ((acl->perm & rw_mask) == rw_mask)
- acl->perm |= 1 << TOMOYO_TYPE_READ_WRITE_ACL;
- else if (acl->perm & (1 << TOMOYO_TYPE_READ_WRITE_ACL))
- acl->perm |= rw_mask;
- ptr->type &= ~TOMOYO_ACL_DELETED;
+ if (is_delete) {
+ if (perm <= 0xFFFF)
+ acl->perm &= ~perm;
+ else
+ acl->perm_high &= ~(perm >> 16);
+ if ((acl->perm & rw_mask) != rw_mask)
+ acl->perm &= ~(1 << TOMOYO_TYPE_READ_WRITE);
+ else if (!(acl->perm & (1 << TOMOYO_TYPE_READ_WRITE)))
+ acl->perm &= ~rw_mask;
+ } else {
+ if (perm <= 0xFFFF)
+ acl->perm |= perm;
+ else
+ acl->perm_high |= (perm >> 16);
+ if ((acl->perm & rw_mask) == rw_mask)
+ acl->perm |= 1 << TOMOYO_TYPE_READ_WRITE;
+ else if (acl->perm & (1 << TOMOYO_TYPE_READ_WRITE))
+ acl->perm |= rw_mask;
+ }
error = 0;
- goto out;
+ break;
}
- /* Not found. Append it to the tail. */
- acl = tomoyo_alloc_acl_element(TOMOYO_TYPE_SINGLE_PATH_ACL);
- if (!acl)
- goto out;
- acl->perm = perm;
- if (perm == (1 << TOMOYO_TYPE_READ_WRITE_ACL))
- acl->perm |= rw_mask;
- acl->filename = saved_filename;
- list_add_tail(&acl->head.list, &domain->acl_info_list);
- error = 0;
- goto out;
- delete:
- error = -ENOENT;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_SINGLE_PATH_ACL)
- continue;
- acl = container_of(ptr, struct tomoyo_single_path_acl_record,
- head);
- if (acl->filename != saved_filename)
- continue;
- acl->perm &= ~perm;
- if ((acl->perm & rw_mask) != rw_mask)
- acl->perm &= ~(1 << TOMOYO_TYPE_READ_WRITE_ACL);
- else if (!(acl->perm & (1 << TOMOYO_TYPE_READ_WRITE_ACL)))
- acl->perm &= ~rw_mask;
- if (!acl->perm)
- ptr->type |= TOMOYO_ACL_DELETED;
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->head.type = TOMOYO_TYPE_PATH_ACL;
+ if (perm <= 0xFFFF)
+ entry->perm = perm;
+ else
+ entry->perm_high = (perm >> 16);
+ if (perm == (1 << TOMOYO_TYPE_READ_WRITE))
+ entry->perm |= rw_mask;
+ entry->filename = saved_filename;
+ saved_filename = NULL;
+ list_add_tail_rcu(&entry->head.list, &domain->acl_info_list);
+ entry = NULL;
error = 0;
- break;
}
- out:
- up_write(&tomoyo_domain_acl_info_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
+ kfree(entry);
+ tomoyo_put_name(saved_filename);
return error;
}
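
A quick way to see the invariant tomoyo_update_path_acl() maintains is a small user-space sketch of the read/write bit coupling. This is not part of the patch; the enum values are assumptions mirroring the old TOMOYO_TYPE_*_ACL indices (0, 2 and 3):

	#include <stdio.h>

	/* Assumed values, mirroring TOMOYO_TYPE_READ_WRITE/READ/WRITE. */
	enum { TYPE_READ_WRITE = 0, TYPE_READ = 2, TYPE_WRITE = 3 };

	static unsigned int tomoyo_grant(unsigned int perm, unsigned int type)
	{
		const unsigned int rw_mask =
			(1 << TYPE_READ) | (1 << TYPE_WRITE);

		perm |= 1 << type;
		if ((perm & rw_mask) == rw_mask)
			perm |= 1 << TYPE_READ_WRITE;	/* read + write => read_write */
		else if (perm & (1 << TYPE_READ_WRITE))
			perm |= rw_mask;		/* read_write => read + write */
		return perm;
	}

	int main(void)
	{
		unsigned int perm = tomoyo_grant(0, TYPE_READ);

		perm = tomoyo_grant(perm, TYPE_WRITE);
		printf("perm=%#x\n", perm);	/* prints perm=0xd: bits 0, 2, 3 */
		return 0;
	}
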
/**
- * tomoyo_update_double_path_acl - Update "struct tomoyo_double_path_acl_record" list.
+ * tomoyo_update_path2_acl - Update "struct tomoyo_path2_acl" list.
*
* @type: Type of operation.
* @filename1: First filename.
@@ -911,98 +881,88 @@ static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_update_double_path_acl(const u8 type, const char *filename1,
- const char *filename2,
- struct tomoyo_domain_info *
- const domain, const bool is_delete)
+static int tomoyo_update_path2_acl(const u8 type, const char *filename1,
+ const char *filename2,
+ struct tomoyo_domain_info *const domain,
+ const bool is_delete)
{
const struct tomoyo_path_info *saved_filename1;
const struct tomoyo_path_info *saved_filename2;
struct tomoyo_acl_info *ptr;
- struct tomoyo_double_path_acl_record *acl;
- int error = -ENOMEM;
+ struct tomoyo_path2_acl *entry = NULL;
+ int error = is_delete ? -ENOENT : -ENOMEM;
const u8 perm = 1 << type;
if (!domain)
return -EINVAL;
- if (!tomoyo_is_correct_path(filename1, 0, 0, 0, __func__) ||
- !tomoyo_is_correct_path(filename2, 0, 0, 0, __func__))
+ if (!tomoyo_is_correct_path(filename1, 0, 0, 0) ||
+ !tomoyo_is_correct_path(filename2, 0, 0, 0))
return -EINVAL;
- saved_filename1 = tomoyo_save_name(filename1);
- saved_filename2 = tomoyo_save_name(filename2);
+ saved_filename1 = tomoyo_get_name(filename1);
+ saved_filename2 = tomoyo_get_name(filename2);
if (!saved_filename1 || !saved_filename2)
- return -ENOMEM;
- down_write(&tomoyo_domain_acl_info_list_lock);
- if (is_delete)
- goto delete;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- if (tomoyo_acl_type1(ptr) != TOMOYO_TYPE_DOUBLE_PATH_ACL)
- continue;
- acl = container_of(ptr, struct tomoyo_double_path_acl_record,
- head);
- if (acl->filename1 != saved_filename1 ||
- acl->filename2 != saved_filename2)
- continue;
- /* Special case. Clear all bits if marked as deleted. */
- if (ptr->type & TOMOYO_ACL_DELETED)
- acl->perm = 0;
- acl->perm |= perm;
- ptr->type &= ~TOMOYO_ACL_DELETED;
- error = 0;
goto out;
- }
- /* Not found. Append it to the tail. */
- acl = tomoyo_alloc_acl_element(TOMOYO_TYPE_DOUBLE_PATH_ACL);
- if (!acl)
- goto out;
- acl->perm = perm;
- acl->filename1 = saved_filename1;
- acl->filename2 = saved_filename2;
- list_add_tail(&acl->head.list, &domain->acl_info_list);
- error = 0;
- goto out;
- delete:
- error = -ENOENT;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_DOUBLE_PATH_ACL)
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+ struct tomoyo_path2_acl *acl =
+ container_of(ptr, struct tomoyo_path2_acl, head);
+ if (ptr->type != TOMOYO_TYPE_PATH2_ACL)
continue;
- acl = container_of(ptr, struct tomoyo_double_path_acl_record,
- head);
if (acl->filename1 != saved_filename1 ||
acl->filename2 != saved_filename2)
continue;
- acl->perm &= ~perm;
- if (!acl->perm)
- ptr->type |= TOMOYO_ACL_DELETED;
+ if (is_delete)
+ acl->perm &= ~perm;
+ else
+ acl->perm |= perm;
error = 0;
break;
}
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->head.type = TOMOYO_TYPE_PATH2_ACL;
+ entry->perm = perm;
+ entry->filename1 = saved_filename1;
+ saved_filename1 = NULL;
+ entry->filename2 = saved_filename2;
+ saved_filename2 = NULL;
+ list_add_tail_rcu(&entry->head.list, &domain->acl_info_list);
+ entry = NULL;
+ error = 0;
+ }
+ mutex_unlock(&tomoyo_policy_lock);
out:
- up_write(&tomoyo_domain_acl_info_list_lock);
+ tomoyo_put_name(saved_filename1);
+ tomoyo_put_name(saved_filename2);
+ kfree(entry);
return error;
}
/**
- * tomoyo_check_single_path_acl - Check permission for single path operation.
+ * tomoyo_path_acl - Check permission for single path operation.
*
* @domain: Pointer to "struct tomoyo_domain_info".
* @type: Type of operation.
* @filename: Filename to check.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_check_single_path_acl(struct tomoyo_domain_info *domain,
- const u8 type,
- const struct tomoyo_path_info *filename)
+static int tomoyo_path_acl(struct tomoyo_domain_info *domain, const u8 type,
+ const struct tomoyo_path_info *filename)
{
if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE))
return 0;
- return tomoyo_check_single_path_acl2(domain, filename, 1 << type, 1);
+ return tomoyo_path_acl2(domain, filename, 1 << type, 1);
}
/**
- * tomoyo_check_double_path_acl - Check permission for double path operation.
+ * tomoyo_path2_acl - Check permission for double path operation.
*
* @domain: Pointer to "struct tomoyo_domain_info".
* @type: Type of operation.
@@ -1010,13 +970,13 @@ static int tomoyo_check_single_path_acl(struct tomoyo_domain_info *domain,
* @filename2: Second filename to check.
*
* Returns 0 on success, -EPERM otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_check_double_path_acl(const struct tomoyo_domain_info *domain,
- const u8 type,
- const struct tomoyo_path_info *
- filename1,
- const struct tomoyo_path_info *
- filename2)
+static int tomoyo_path2_acl(const struct tomoyo_domain_info *domain,
+ const u8 type,
+ const struct tomoyo_path_info *filename1,
+ const struct tomoyo_path_info *filename2)
{
struct tomoyo_acl_info *ptr;
const u8 perm = 1 << type;
@@ -1024,13 +984,11 @@ static int tomoyo_check_double_path_acl(const struct tomoyo_domain_info *domain,
if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE))
return 0;
- down_read(&tomoyo_domain_acl_info_list_lock);
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- struct tomoyo_double_path_acl_record *acl;
- if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_DOUBLE_PATH_ACL)
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+ struct tomoyo_path2_acl *acl;
+ if (ptr->type != TOMOYO_TYPE_PATH2_ACL)
continue;
- acl = container_of(ptr, struct tomoyo_double_path_acl_record,
- head);
+ acl = container_of(ptr, struct tomoyo_path2_acl, head);
if (!(acl->perm & perm))
continue;
if (!tomoyo_path_matches_pattern(filename1, acl->filename1))
@@ -1040,12 +998,11 @@ static int tomoyo_check_double_path_acl(const struct tomoyo_domain_info *domain,
error = 0;
break;
}
- up_read(&tomoyo_domain_acl_info_list_lock);
return error;
}
/**
- * tomoyo_check_single_path_permission2 - Check permission for single path operation.
+ * tomoyo_path_permission2 - Check permission for single path operation.
*
* @domain: Pointer to "struct tomoyo_domain_info".
* @operation: Type of operation.
@@ -1053,11 +1010,13 @@ static int tomoyo_check_double_path_acl(const struct tomoyo_domain_info *domain,
* @mode: Access control mode.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_check_single_path_permission2(struct tomoyo_domain_info *
- const domain, u8 operation,
- const struct tomoyo_path_info *
- filename, const u8 mode)
+static int tomoyo_path_permission2(struct tomoyo_domain_info *const domain,
+ u8 operation,
+ const struct tomoyo_path_info *filename,
+ const u8 mode)
{
const char *msg;
int error;
@@ -1066,8 +1025,8 @@ static int tomoyo_check_single_path_permission2(struct tomoyo_domain_info *
if (!mode)
return 0;
next:
- error = tomoyo_check_single_path_acl(domain, operation, filename);
- msg = tomoyo_sp2keyword(operation);
+ error = tomoyo_path_acl(domain, operation, filename);
+ msg = tomoyo_path2keyword(operation);
if (!error)
goto ok;
if (tomoyo_verbose_mode(domain))
@@ -1076,7 +1035,7 @@ static int tomoyo_check_single_path_permission2(struct tomoyo_domain_info *
tomoyo_get_last_name(domain));
if (mode == 1 && tomoyo_domain_quota_is_ok(domain)) {
const char *name = tomoyo_get_file_pattern(filename)->name;
- tomoyo_update_single_path_acl(operation, name, domain, false);
+ tomoyo_update_path_acl(operation, name, domain, false);
}
if (!is_enforce)
error = 0;
@@ -1086,9 +1045,9 @@ static int tomoyo_check_single_path_permission2(struct tomoyo_domain_info *
* we need to check "allow_rewrite" permission if the filename is
* specified by "deny_rewrite" keyword.
*/
- if (!error && operation == TOMOYO_TYPE_TRUNCATE_ACL &&
+ if (!error && operation == TOMOYO_TYPE_TRUNCATE &&
tomoyo_is_no_rewrite_file(filename)) {
- operation = TOMOYO_TYPE_REWRITE_ACL;
+ operation = TOMOYO_TYPE_REWRITE;
goto next;
}
return error;
@@ -1101,6 +1060,8 @@ static int tomoyo_check_single_path_permission2(struct tomoyo_domain_info *
* @filename: Check permission for "execute".
*
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_check_exec_perm(struct tomoyo_domain_info *domain,
const struct tomoyo_path_info *filename)
@@ -1129,6 +1090,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
struct tomoyo_path_info *buf;
const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
const bool is_enforce = (mode == 3);
+ int idx;
if (!mode || !path->mnt)
return 0;
@@ -1140,6 +1102,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
* don't call me.
*/
return 0;
+ idx = tomoyo_read_lock();
buf = tomoyo_get_path(path);
if (!buf)
goto out;
@@ -1152,49 +1115,50 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
if ((acc_mode & MAY_WRITE) &&
((flag & O_TRUNC) || !(flag & O_APPEND)) &&
(tomoyo_is_no_rewrite_file(buf))) {
- error = tomoyo_check_single_path_permission2(domain,
- TOMOYO_TYPE_REWRITE_ACL,
- buf, mode);
+ error = tomoyo_path_permission2(domain, TOMOYO_TYPE_REWRITE,
+ buf, mode);
}
if (!error)
error = tomoyo_check_file_perm2(domain, buf, acc_mode, "open",
mode);
if (!error && (flag & O_TRUNC))
- error = tomoyo_check_single_path_permission2(domain,
- TOMOYO_TYPE_TRUNCATE_ACL,
- buf, mode);
+ error = tomoyo_path_permission2(domain, TOMOYO_TYPE_TRUNCATE,
+ buf, mode);
out:
- tomoyo_free(buf);
+ kfree(buf);
+ tomoyo_read_unlock(idx);
if (!is_enforce)
error = 0;
return error;
}
/**
- * tomoyo_check_1path_perm - Check permission for "create", "unlink", "mkdir", "rmdir", "mkfifo", "mksock", "mkblock", "mkchar", "truncate" and "symlink".
+ * tomoyo_path_perm - Check permission for "create", "unlink", "mkdir", "rmdir", "mkfifo", "mksock", "mkblock", "mkchar", "truncate", "symlink", "ioctl", "chmod", "chown", "chgrp", "chroot", "mount" and "unmount".
*
- * @domain: Pointer to "struct tomoyo_domain_info".
* @operation: Type of operation.
* @path: Pointer to "struct path".
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain,
- const u8 operation, struct path *path)
+int tomoyo_path_perm(const u8 operation, struct path *path)
{
int error = -ENOMEM;
struct tomoyo_path_info *buf;
+ struct tomoyo_domain_info *domain = tomoyo_domain();
const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
const bool is_enforce = (mode == 3);
+ int idx;
if (!mode || !path->mnt)
return 0;
+ idx = tomoyo_read_lock();
buf = tomoyo_get_path(path);
if (!buf)
goto out;
switch (operation) {
- case TOMOYO_TYPE_MKDIR_ACL:
- case TOMOYO_TYPE_RMDIR_ACL:
+ case TOMOYO_TYPE_MKDIR:
+ case TOMOYO_TYPE_RMDIR:
+ case TOMOYO_TYPE_CHROOT:
if (!buf->is_dir) {
/*
* tomoyo_get_path() reserves space for appending "/."
@@ -1203,10 +1167,10 @@ int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain,
tomoyo_fill_path_info(buf);
}
}
- error = tomoyo_check_single_path_permission2(domain, operation, buf,
- mode);
+ error = tomoyo_path_permission2(domain, operation, buf, mode);
out:
- tomoyo_free(buf);
+ kfree(buf);
+ tomoyo_read_unlock(idx);
if (!is_enforce)
error = 0;
return error;
@@ -1215,21 +1179,23 @@ int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain,
/**
* tomoyo_check_rewrite_permission - Check permission for "rewrite".
*
- * @domain: Pointer to "struct tomoyo_domain_info".
* @filp: Pointer to "struct file".
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
- struct file *filp)
+int tomoyo_check_rewrite_permission(struct file *filp)
{
int error = -ENOMEM;
+ struct tomoyo_domain_info *domain = tomoyo_domain();
const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
const bool is_enforce = (mode == 3);
struct tomoyo_path_info *buf;
+ int idx;
if (!mode || !filp->f_path.mnt)
return 0;
+
+ idx = tomoyo_read_lock();
buf = tomoyo_get_path(&filp->f_path);
if (!buf)
goto out;
@@ -1237,38 +1203,38 @@ int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
error = 0;
goto out;
}
- error = tomoyo_check_single_path_permission2(domain,
- TOMOYO_TYPE_REWRITE_ACL,
- buf, mode);
+ error = tomoyo_path_permission2(domain, TOMOYO_TYPE_REWRITE, buf, mode);
out:
- tomoyo_free(buf);
+ kfree(buf);
+ tomoyo_read_unlock(idx);
if (!is_enforce)
error = 0;
return error;
}
/**
- * tomoyo_check_2path_perm - Check permission for "rename" and "link".
+ * tomoyo_path2_perm - Check permission for "rename", "link" and "pivot_root".
*
- * @domain: Pointer to "struct tomoyo_domain_info".
* @operation: Type of operation.
* @path1: Pointer to "struct path".
* @path2: Pointer to "struct path".
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_check_2path_perm(struct tomoyo_domain_info * const domain,
- const u8 operation, struct path *path1,
- struct path *path2)
+int tomoyo_path2_perm(const u8 operation, struct path *path1,
+ struct path *path2)
{
int error = -ENOMEM;
struct tomoyo_path_info *buf1, *buf2;
+ struct tomoyo_domain_info *domain = tomoyo_domain();
const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
const bool is_enforce = (mode == 3);
const char *msg;
+ int idx;
if (!mode || !path1->mnt || !path2->mnt)
return 0;
+ idx = tomoyo_read_lock();
buf1 = tomoyo_get_path(path1);
buf2 = tomoyo_get_path(path2);
if (!buf1 || !buf2)
@@ -1289,8 +1255,8 @@ int tomoyo_check_2path_perm(struct tomoyo_domain_info * const domain,
}
}
}
- error = tomoyo_check_double_path_acl(domain, operation, buf1, buf2);
- msg = tomoyo_dp2keyword(operation);
+ error = tomoyo_path2_acl(domain, operation, buf1, buf2);
+ msg = tomoyo_path22keyword(operation);
if (!error)
goto out;
if (tomoyo_verbose_mode(domain))
@@ -1301,12 +1267,13 @@ int tomoyo_check_2path_perm(struct tomoyo_domain_info * const domain,
if (mode == 1 && tomoyo_domain_quota_is_ok(domain)) {
const char *name1 = tomoyo_get_file_pattern(buf1)->name;
const char *name2 = tomoyo_get_file_pattern(buf2)->name;
- tomoyo_update_double_path_acl(operation, name1, name2, domain,
- false);
+ tomoyo_update_path2_acl(operation, name1, name2, domain,
+ false);
}
out:
- tomoyo_free(buf1);
- tomoyo_free(buf2);
+ kfree(buf1);
+ kfree(buf2);
+ tomoyo_read_unlock(idx);
if (!is_enforce)
error = 0;
return error;
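
Stepping back, both update functions now share the same lock-ordering shape: allocate while sleeping is still allowed, search and publish under the mutex, then free whatever was left over. A condensed sketch of that pattern, using helpers introduced by this patch (the found flag is illustrative):

	struct tomoyo_path2_acl *entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	bool found = false;

	mutex_lock(&tomoyo_policy_lock);
	list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
		/* ... update a matching entry in place, set found ... */
	}
	if (!found && tomoyo_memory_ok(entry)) {
		/* ... fill in *entry ... */
		list_add_tail_rcu(&entry->head.list, &domain->acl_info_list);
		entry = NULL;		/* ownership moved to the list */
	}
	mutex_unlock(&tomoyo_policy_lock);
	kfree(entry);			/* no-op when the entry was consumed */
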
diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c
new file mode 100644
index 0000000..9645525
--- /dev/null
+++ b/security/tomoyo/gc.c
@@ -0,0 +1,370 @@
+/*
+ * security/tomoyo/gc.c
+ *
+ * Implementation of the Domain-Based Mandatory Access Control.
+ *
+ * Copyright (C) 2005-2010 NTT DATA CORPORATION
+ *
+ */
+
+#include "common.h"
+#include <linux/kthread.h>
+
+enum tomoyo_gc_id {
+ TOMOYO_ID_DOMAIN_INITIALIZER,
+ TOMOYO_ID_DOMAIN_KEEPER,
+ TOMOYO_ID_ALIAS,
+ TOMOYO_ID_GLOBALLY_READABLE,
+ TOMOYO_ID_PATTERN,
+ TOMOYO_ID_NO_REWRITE,
+ TOMOYO_ID_MANAGER,
+ TOMOYO_ID_NAME,
+ TOMOYO_ID_ACL,
+ TOMOYO_ID_DOMAIN
+};
+
+struct tomoyo_gc_entry {
+ struct list_head list;
+ int type;
+ void *element;
+};
+static LIST_HEAD(tomoyo_gc_queue);
+static DEFINE_MUTEX(tomoyo_gc_mutex);
+
+/* Caller holds tomoyo_policy_lock mutex. */
+static bool tomoyo_add_to_gc(const int type, void *element)
+{
+ struct tomoyo_gc_entry *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return false;
+ entry->type = type;
+ entry->element = element;
+ list_add(&entry->list, &tomoyo_gc_queue);
+ return true;
+}
+
+static void tomoyo_del_allow_read
+(struct tomoyo_globally_readable_file_entry *ptr)
+{
+ tomoyo_put_name(ptr->filename);
+}
+
+static void tomoyo_del_file_pattern(struct tomoyo_pattern_entry *ptr)
+{
+ tomoyo_put_name(ptr->pattern);
+}
+
+static void tomoyo_del_no_rewrite(struct tomoyo_no_rewrite_entry *ptr)
+{
+ tomoyo_put_name(ptr->pattern);
+}
+
+static void tomoyo_del_domain_initializer
+(struct tomoyo_domain_initializer_entry *ptr)
+{
+ tomoyo_put_name(ptr->domainname);
+ tomoyo_put_name(ptr->program);
+}
+
+static void tomoyo_del_domain_keeper(struct tomoyo_domain_keeper_entry *ptr)
+{
+ tomoyo_put_name(ptr->domainname);
+ tomoyo_put_name(ptr->program);
+}
+
+static void tomoyo_del_alias(struct tomoyo_alias_entry *ptr)
+{
+ tomoyo_put_name(ptr->original_name);
+ tomoyo_put_name(ptr->aliased_name);
+}
+
+static void tomoyo_del_manager(struct tomoyo_policy_manager_entry *ptr)
+{
+ tomoyo_put_name(ptr->manager);
+}
+
+static void tomoyo_del_acl(struct tomoyo_acl_info *acl)
+{
+ switch (acl->type) {
+ case TOMOYO_TYPE_PATH_ACL:
+ {
+ struct tomoyo_path_acl *entry
+ = container_of(acl, typeof(*entry), head);
+ tomoyo_put_name(entry->filename);
+ }
+ break;
+ case TOMOYO_TYPE_PATH2_ACL:
+ {
+ struct tomoyo_path2_acl *entry
+ = container_of(acl, typeof(*entry), head);
+ tomoyo_put_name(entry->filename1);
+ tomoyo_put_name(entry->filename2);
+ }
+ break;
+ default:
+ printk(KERN_WARNING "Unknown type\n");
+ break;
+ }
+}
+
+static bool tomoyo_del_domain(struct tomoyo_domain_info *domain)
+{
+ struct tomoyo_acl_info *acl;
+ struct tomoyo_acl_info *tmp;
+ /*
+ * Since we don't protect whole execve() operation using SRCU,
+ * we need to recheck domain->users at this point.
+ *
+ * (1) Reader starts SRCU section upon execve().
+ * (2) Reader traverses tomoyo_domain_list and finds this domain.
+ * (3) Writer marks this domain as deleted.
+ * (4) Garbage collector removes this domain from tomoyo_domain_list
+ * because this domain is marked as deleted and used by nobody.
+ * (5) Reader saves reference to this domain into
+ * "struct linux_binprm"->cred->security .
+ * (6) Reader finishes SRCU section, although execve() operation has
+ * not finished yet.
+ * (7) Garbage collector waits for SRCU synchronization.
+ * (8) Garbage collector kfree()s this domain because this domain is
+ * used by nobody.
+ * (9) Reader finishes execve() operation and restores this domain from
+ * "struct linux_binprm"->cred->security.
+ *
+ * By updating domain->users at (5), we can solve this race problem
+ * by rechecking domain->users at (8).
+ */
+ if (atomic_read(&domain->users))
+ return false;
+ list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
+ tomoyo_del_acl(acl);
+ tomoyo_memory_free(acl);
+ }
+ tomoyo_put_name(domain->domainname);
+ return true;
+}
+
+
+static void tomoyo_del_name(const struct tomoyo_name_entry *ptr)
+{
+}
+
+static void tomoyo_collect_entry(void)
+{
+ mutex_lock(&tomoyo_policy_lock);
+ {
+ struct tomoyo_globally_readable_file_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_globally_readable_list,
+ list) {
+ if (!ptr->is_deleted)
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_GLOBALLY_READABLE, ptr))
+ list_del_rcu(&ptr->list);
+ else
+ break;
+ }
+ }
+ {
+ struct tomoyo_pattern_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_pattern_list, list) {
+ if (!ptr->is_deleted)
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_PATTERN, ptr))
+ list_del_rcu(&ptr->list);
+ else
+ break;
+ }
+ }
+ {
+ struct tomoyo_no_rewrite_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_no_rewrite_list, list) {
+ if (!ptr->is_deleted)
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_NO_REWRITE, ptr))
+ list_del_rcu(&ptr->list);
+ else
+ break;
+ }
+ }
+ {
+ struct tomoyo_domain_initializer_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_initializer_list,
+ list) {
+ if (!ptr->is_deleted)
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_DOMAIN_INITIALIZER, ptr))
+ list_del_rcu(&ptr->list);
+ else
+ break;
+ }
+ }
+ {
+ struct tomoyo_domain_keeper_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_keeper_list, list) {
+ if (!ptr->is_deleted)
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_DOMAIN_KEEPER, ptr))
+ list_del_rcu(&ptr->list);
+ else
+ break;
+ }
+ }
+ {
+ struct tomoyo_alias_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_alias_list, list) {
+ if (!ptr->is_deleted)
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_ALIAS, ptr))
+ list_del_rcu(&ptr->list);
+ else
+ break;
+ }
+ }
+ {
+ struct tomoyo_policy_manager_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list,
+ list) {
+ if (!ptr->is_deleted)
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_MANAGER, ptr))
+ list_del_rcu(&ptr->list);
+ else
+ break;
+ }
+ }
+ {
+ struct tomoyo_domain_info *domain;
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+ struct tomoyo_acl_info *acl;
+ list_for_each_entry_rcu(acl, &domain->acl_info_list,
+ list) {
+ switch (acl->type) {
+ case TOMOYO_TYPE_PATH_ACL:
+ if (container_of(acl,
+ struct tomoyo_path_acl,
+ head)->perm ||
+ container_of(acl,
+ struct tomoyo_path_acl,
+ head)->perm_high)
+ continue;
+ break;
+ case TOMOYO_TYPE_PATH2_ACL:
+ if (container_of(acl,
+ struct tomoyo_path2_acl,
+ head)->perm)
+ continue;
+ break;
+ default:
+ continue;
+ }
+ if (tomoyo_add_to_gc(TOMOYO_ID_ACL, acl))
+ list_del_rcu(&acl->list);
+ else
+ break;
+ }
+ if (!domain->is_deleted || atomic_read(&domain->users))
+ continue;
+ /*
+ * Nobody is referring to this domain. But somebody may
+ * refer to this domain after a successful execve().
+ * We recheck domain->users after SRCU synchronization.
+ */
+ if (tomoyo_add_to_gc(TOMOYO_ID_DOMAIN, domain))
+ list_del_rcu(&domain->list);
+ else
+ break;
+ }
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+ mutex_lock(&tomoyo_name_list_lock);
+ {
+ int i;
+ for (i = 0; i < TOMOYO_MAX_HASH; i++) {
+ struct tomoyo_name_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_name_list[i],
+ list) {
+ if (atomic_read(&ptr->users))
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_NAME, ptr))
+ list_del_rcu(&ptr->list);
+ else {
+ i = TOMOYO_MAX_HASH;
+ break;
+ }
+ }
+ }
+ }
+ mutex_unlock(&tomoyo_name_list_lock);
+}
+
+static void tomoyo_kfree_entry(void)
+{
+ struct tomoyo_gc_entry *p;
+ struct tomoyo_gc_entry *tmp;
+
+ list_for_each_entry_safe(p, tmp, &tomoyo_gc_queue, list) {
+ switch (p->type) {
+ case TOMOYO_ID_DOMAIN_INITIALIZER:
+ tomoyo_del_domain_initializer(p->element);
+ break;
+ case TOMOYO_ID_DOMAIN_KEEPER:
+ tomoyo_del_domain_keeper(p->element);
+ break;
+ case TOMOYO_ID_ALIAS:
+ tomoyo_del_alias(p->element);
+ break;
+ case TOMOYO_ID_GLOBALLY_READABLE:
+ tomoyo_del_allow_read(p->element);
+ break;
+ case TOMOYO_ID_PATTERN:
+ tomoyo_del_file_pattern(p->element);
+ break;
+ case TOMOYO_ID_NO_REWRITE:
+ tomoyo_del_no_rewrite(p->element);
+ break;
+ case TOMOYO_ID_MANAGER:
+ tomoyo_del_manager(p->element);
+ break;
+ case TOMOYO_ID_NAME:
+ tomoyo_del_name(p->element);
+ break;
+ case TOMOYO_ID_ACL:
+ tomoyo_del_acl(p->element);
+ break;
+ case TOMOYO_ID_DOMAIN:
+ if (!tomoyo_del_domain(p->element))
+ continue;
+ break;
+ default:
+ printk(KERN_WARNING "Unknown type\n");
+ break;
+ }
+ tomoyo_memory_free(p->element);
+ list_del(&p->list);
+ kfree(p);
+ }
+}
+
+static int tomoyo_gc_thread(void *unused)
+{
+ daemonize("GC for TOMOYO");
+ if (mutex_trylock(&tomoyo_gc_mutex)) {
+ int i;
+ for (i = 0; i < 10; i++) {
+ tomoyo_collect_entry();
+ if (list_empty(&tomoyo_gc_queue))
+ break;
+ synchronize_srcu(&tomoyo_ss);
+ tomoyo_kfree_entry();
+ }
+ mutex_unlock(&tomoyo_gc_mutex);
+ }
+ do_exit(0);
+}
+
+void tomoyo_run_gc(void)
+{
+ struct task_struct *task = kthread_create(tomoyo_gc_thread, NULL,
+ "GC for TOMOYO");
+ if (!IS_ERR(task))
+ wake_up_process(task);
+}
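
The collector above only works because every reader brackets its list walks with SRCU. A condensed sketch of the two sides of the handshake, assuming tomoyo_read_lock()/tomoyo_read_unlock() are thin wrappers around srcu_read_lock()/srcu_read_unlock() on tomoyo_ss:

	/* Reader side (e.g. a permission check): */
	int idx = srcu_read_lock(&tomoyo_ss);

	list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
		/* entries seen here stay allocated until unlock */
	}
	srcu_read_unlock(&tomoyo_ss, idx);

	/* Collector side, as in tomoyo_gc_thread(): */
	list_del_rcu(&entry->list);	/* unpublish under tomoyo_policy_lock */
	synchronize_srcu(&tomoyo_ss);	/* wait for all current readers */
	kfree(entry);			/* no reader can still see it */
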
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 18369d49..cf7d61f 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -14,9 +14,8 @@
#include <linux/mnt_namespace.h>
#include <linux/fs_struct.h>
#include <linux/hash.h>
-
+#include <linux/magic.h>
#include "common.h"
-#include "realpath.h"
/**
* tomoyo_encode: Convert binary string to ascii string.
@@ -89,30 +88,15 @@ int tomoyo_realpath_from_path2(struct path *path, char *newname,
sp = dentry->d_op->d_dname(dentry, newname + offset,
newname_len - offset);
} else {
- /* Taken from d_namespace_path(). */
- struct path root;
- struct path ns_root = { };
- struct path tmp;
+ struct path ns_root = {.mnt = NULL, .dentry = NULL};
- read_lock(&current->fs->lock);
- root = current->fs->root;
- path_get(&root);
- read_unlock(&current->fs->lock);
- spin_lock(&vfsmount_lock);
- if (root.mnt && root.mnt->mnt_ns)
- ns_root.mnt = mntget(root.mnt->mnt_ns->root);
- if (ns_root.mnt)
- ns_root.dentry = dget(ns_root.mnt->mnt_root);
- spin_unlock(&vfsmount_lock);
spin_lock(&dcache_lock);
- tmp = ns_root;
- sp = __d_path(path, &tmp, newname, newname_len);
+ /* go to whatever namespace root we are under */
+ sp = __d_path(path, &ns_root, newname, newname_len);
spin_unlock(&dcache_lock);
- path_put(&root);
- path_put(&ns_root);
/* Prepend "/proc" prefix if using internal proc vfs mount. */
- if (!IS_ERR(sp) && (path->mnt->mnt_parent == path->mnt) &&
- (strcmp(path->mnt->mnt_sb->s_type->name, "proc") == 0)) {
+ if (!IS_ERR(sp) && (path->mnt->mnt_flags & MNT_INTERNAL) &&
+ (path->mnt->mnt_sb->s_magic == PROC_SUPER_MAGIC)) {
sp -= 5;
if (sp >= newname)
memcpy(sp, "/proc", 5);
@@ -149,12 +133,12 @@ int tomoyo_realpath_from_path2(struct path *path, char *newname,
*
* Returns the realpath of the given @path on success, NULL otherwise.
*
- * These functions use tomoyo_alloc(), so the caller must call tomoyo_free()
+ * These functions use kzalloc(), so the caller must call kfree()
* if these functions didn't return NULL.
*/
char *tomoyo_realpath_from_path(struct path *path)
{
- char *buf = tomoyo_alloc(sizeof(struct tomoyo_page_buffer));
+ char *buf = kzalloc(sizeof(struct tomoyo_page_buffer), GFP_KERNEL);
BUILD_BUG_ON(sizeof(struct tomoyo_page_buffer)
<= TOMOYO_MAX_PATHNAME_LEN - 1);
@@ -163,7 +147,7 @@ char *tomoyo_realpath_from_path(struct path *path)
if (tomoyo_realpath_from_path2(path, buf,
TOMOYO_MAX_PATHNAME_LEN - 1) == 0)
return buf;
- tomoyo_free(buf);
+ kfree(buf);
return NULL;
}
@@ -206,98 +190,47 @@ char *tomoyo_realpath_nofollow(const char *pathname)
}
/* Memory allocated for non-string data. */
-static unsigned int tomoyo_allocated_memory_for_elements;
-/* Quota for holding non-string data. */
-static unsigned int tomoyo_quota_for_elements;
+static atomic_t tomoyo_policy_memory_size;
+/* Quota for holding policy. */
+static unsigned int tomoyo_quota_for_policy;
/**
- * tomoyo_alloc_element - Allocate permanent memory for structures.
+ * tomoyo_memory_ok - Check memory quota.
*
- * @size: Size in bytes.
+ * @ptr: Pointer to allocated memory.
*
- * Returns pointer to allocated memory on success, NULL otherwise.
+ * Returns true on success, false otherwise.
*
- * Memory has to be zeroed.
- * The RAM is chunked, so NEVER try to kfree() the returned pointer.
+ * Caller holds tomoyo_policy_lock.
+ * Memory pointed to by @ptr will be zeroed on success.
*/
-void *tomoyo_alloc_element(const unsigned int size)
+bool tomoyo_memory_ok(void *ptr)
{
- static char *buf;
- static DEFINE_MUTEX(lock);
- static unsigned int buf_used_len = PATH_MAX;
- char *ptr = NULL;
- /*Assumes sizeof(void *) >= sizeof(long) is true. */
- const unsigned int word_aligned_size
- = roundup(size, max(sizeof(void *), sizeof(long)));
- if (word_aligned_size > PATH_MAX)
- return NULL;
- mutex_lock(&lock);
- if (buf_used_len + word_aligned_size > PATH_MAX) {
- if (!tomoyo_quota_for_elements ||
- tomoyo_allocated_memory_for_elements
- + PATH_MAX <= tomoyo_quota_for_elements)
- ptr = kzalloc(PATH_MAX, GFP_KERNEL);
- if (!ptr) {
- printk(KERN_WARNING "ERROR: Out of memory "
- "for tomoyo_alloc_element().\n");
- if (!tomoyo_policy_loaded)
- panic("MAC Initialization failed.\n");
- } else {
- buf = ptr;
- tomoyo_allocated_memory_for_elements += PATH_MAX;
- buf_used_len = word_aligned_size;
- ptr = buf;
- }
- } else if (word_aligned_size) {
- int i;
- ptr = buf + buf_used_len;
- buf_used_len += word_aligned_size;
- for (i = 0; i < word_aligned_size; i++) {
- if (!ptr[i])
- continue;
- printk(KERN_ERR "WARNING: Reserved memory was tainted! "
- "The system might go wrong.\n");
- ptr[i] = '\0';
- }
+ int allocated_len = ptr ? ksize(ptr) : 0;
+ atomic_add(allocated_len, &tomoyo_policy_memory_size);
+ if (ptr && (!tomoyo_quota_for_policy ||
+ atomic_read(&tomoyo_policy_memory_size)
+ <= tomoyo_quota_for_policy)) {
+ memset(ptr, 0, allocated_len);
+ return true;
}
- mutex_unlock(&lock);
- return ptr;
+ printk(KERN_WARNING "ERROR: Out of memory "
+ "for tomoyo_alloc_element().\n");
+ if (!tomoyo_policy_loaded)
+ panic("MAC Initialization failed.\n");
+ return false;
}
-/* Memory allocated for string data in bytes. */
-static unsigned int tomoyo_allocated_memory_for_savename;
-/* Quota for holding string data in bytes. */
-static unsigned int tomoyo_quota_for_savename;
-
-/*
- * TOMOYO uses this hash only when appending a string into the string
- * table. Frequency of appending strings is very low. So we don't need
- * large (e.g. 64k) hash size. 256 will be sufficient.
- */
-#define TOMOYO_HASH_BITS 8
-#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
-
-/*
- * tomoyo_name_entry is a structure which is used for linking
- * "struct tomoyo_path_info" into tomoyo_name_list .
+/**
+ * tomoyo_memory_free - Free memory for elements.
*
- * Since tomoyo_name_list manages a list of strings which are shared by
- * multiple processes (whereas "struct tomoyo_path_info" inside
- * "struct tomoyo_path_info_with_data" is not shared), a reference counter will
- * be added to "struct tomoyo_name_entry" rather than "struct tomoyo_path_info"
- * when TOMOYO starts supporting garbage collector.
+ * @ptr: Pointer to allocated memory.
*/
-struct tomoyo_name_entry {
- struct list_head list;
- struct tomoyo_path_info entry;
-};
-
-/* Structure for available memory region. */
-struct tomoyo_free_memory_block_list {
- struct list_head list;
- char *ptr; /* Pointer to a free area. */
- int len; /* Length of the area. */
-};
+void tomoyo_memory_free(void *ptr)
+{
+ atomic_sub(ksize(ptr), &tomoyo_policy_memory_size);
+ kfree(ptr);
+}
/*
* tomoyo_name_list is used for holding string data used by TOMOYO.
@@ -305,87 +238,58 @@ struct tomoyo_free_memory_block_list {
* "/lib/libc-2.5.so"), TOMOYO shares string data in the form of
* "const struct tomoyo_path_info *".
*/
-static struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
+struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
+/* Lock for protecting tomoyo_name_list . */
+DEFINE_MUTEX(tomoyo_name_list_lock);
/**
- * tomoyo_save_name - Allocate permanent memory for string data.
+ * tomoyo_get_name - Allocate permanent memory for string data.
*
 * @name: The string to store into the permanent memory.
*
* Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
- *
- * The RAM is shared, so NEVER try to modify or kfree() the returned name.
*/
-const struct tomoyo_path_info *tomoyo_save_name(const char *name)
+const struct tomoyo_path_info *tomoyo_get_name(const char *name)
{
- static LIST_HEAD(fmb_list);
- static DEFINE_MUTEX(lock);
struct tomoyo_name_entry *ptr;
unsigned int hash;
- /* fmb contains available size in bytes.
- fmb is removed from the fmb_list when fmb->len becomes 0. */
- struct tomoyo_free_memory_block_list *fmb;
int len;
- char *cp;
+ int allocated_len;
struct list_head *head;
if (!name)
return NULL;
len = strlen(name) + 1;
- if (len > TOMOYO_MAX_PATHNAME_LEN) {
- printk(KERN_WARNING "ERROR: Name too long "
- "for tomoyo_save_name().\n");
- return NULL;
- }
hash = full_name_hash((const unsigned char *) name, len - 1);
head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
-
- mutex_lock(&lock);
+ mutex_lock(&tomoyo_name_list_lock);
list_for_each_entry(ptr, head, list) {
- if (hash == ptr->entry.hash && !strcmp(name, ptr->entry.name))
- goto out;
- }
- list_for_each_entry(fmb, &fmb_list, list) {
- if (len <= fmb->len)
- goto ready;
+ if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name))
+ continue;
+ atomic_inc(&ptr->users);
+ goto out;
}
- if (!tomoyo_quota_for_savename ||
- tomoyo_allocated_memory_for_savename + PATH_MAX
- <= tomoyo_quota_for_savename)
- cp = kzalloc(PATH_MAX, GFP_KERNEL);
- else
- cp = NULL;
- fmb = kzalloc(sizeof(*fmb), GFP_KERNEL);
- if (!cp || !fmb) {
- kfree(cp);
- kfree(fmb);
+ ptr = kzalloc(sizeof(*ptr) + len, GFP_KERNEL);
+ allocated_len = ptr ? ksize(ptr) : 0;
+ if (!ptr || (tomoyo_quota_for_policy &&
+ atomic_read(&tomoyo_policy_memory_size) + allocated_len
+ > tomoyo_quota_for_policy)) {
+ kfree(ptr);
printk(KERN_WARNING "ERROR: Out of memory "
- "for tomoyo_save_name().\n");
+ "for tomoyo_get_name().\n");
if (!tomoyo_policy_loaded)
panic("MAC Initialization failed.\n");
ptr = NULL;
goto out;
}
- tomoyo_allocated_memory_for_savename += PATH_MAX;
- list_add(&fmb->list, &fmb_list);
- fmb->ptr = cp;
- fmb->len = PATH_MAX;
- ready:
- ptr = tomoyo_alloc_element(sizeof(*ptr));
- if (!ptr)
- goto out;
- ptr->entry.name = fmb->ptr;
- memmove(fmb->ptr, name, len);
+ atomic_add(allocated_len, &tomoyo_policy_memory_size);
+ ptr->entry.name = ((char *) ptr) + sizeof(*ptr);
+ memmove((char *) ptr->entry.name, name, len);
+ atomic_set(&ptr->users, 1);
tomoyo_fill_path_info(&ptr->entry);
- fmb->ptr += len;
- fmb->len -= len;
list_add_tail(&ptr->list, head);
- if (fmb->len == 0) {
- list_del(&fmb->list);
- kfree(fmb);
- }
out:
- mutex_unlock(&lock);
+ mutex_unlock(&tomoyo_name_list_lock);
return ptr ? &ptr->entry : NULL;
}
@@ -400,45 +304,14 @@ void __init tomoyo_realpath_init(void)
for (i = 0; i < TOMOYO_MAX_HASH; i++)
INIT_LIST_HEAD(&tomoyo_name_list[i]);
INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list);
- tomoyo_kernel_domain.domainname = tomoyo_save_name(TOMOYO_ROOT_NAME);
- list_add_tail(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
- down_read(&tomoyo_domain_list_lock);
+ tomoyo_kernel_domain.domainname = tomoyo_get_name(TOMOYO_ROOT_NAME);
+ /*
+ * tomoyo_read_lock() is not needed because this function is
+ * called before the first "delete" request.
+ */
+ list_add_tail_rcu(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
if (tomoyo_find_domain(TOMOYO_ROOT_NAME) != &tomoyo_kernel_domain)
panic("Can't register tomoyo_kernel_domain");
- up_read(&tomoyo_domain_list_lock);
-}
-
-/* Memory allocated for temporary purpose. */
-static atomic_t tomoyo_dynamic_memory_size;
-
-/**
- * tomoyo_alloc - Allocate memory for temporary purpose.
- *
- * @size: Size in bytes.
- *
- * Returns pointer to allocated memory on success, NULL otherwise.
- */
-void *tomoyo_alloc(const size_t size)
-{
- void *p = kzalloc(size, GFP_KERNEL);
- if (p)
- atomic_add(ksize(p), &tomoyo_dynamic_memory_size);
- return p;
-}
-
-/**
- * tomoyo_free - Release memory allocated by tomoyo_alloc().
- *
- * @p: Pointer returned by tomoyo_alloc(). May be NULL.
- *
- * Returns nothing.
- */
-void tomoyo_free(const void *p)
-{
- if (p) {
- atomic_sub(ksize(p), &tomoyo_dynamic_memory_size);
- kfree(p);
- }
}
/**
@@ -451,32 +324,19 @@ void tomoyo_free(const void *p)
int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head)
{
if (!head->read_eof) {
- const unsigned int shared
- = tomoyo_allocated_memory_for_savename;
- const unsigned int private
- = tomoyo_allocated_memory_for_elements;
- const unsigned int dynamic
- = atomic_read(&tomoyo_dynamic_memory_size);
+ const unsigned int policy
+ = atomic_read(&tomoyo_policy_memory_size);
char buffer[64];
memset(buffer, 0, sizeof(buffer));
- if (tomoyo_quota_for_savename)
- snprintf(buffer, sizeof(buffer) - 1,
- " (Quota: %10u)",
- tomoyo_quota_for_savename);
- else
- buffer[0] = '\0';
- tomoyo_io_printf(head, "Shared: %10u%s\n", shared, buffer);
- if (tomoyo_quota_for_elements)
+ if (tomoyo_quota_for_policy)
snprintf(buffer, sizeof(buffer) - 1,
" (Quota: %10u)",
- tomoyo_quota_for_elements);
+ tomoyo_quota_for_policy);
else
buffer[0] = '\0';
- tomoyo_io_printf(head, "Private: %10u%s\n", private, buffer);
- tomoyo_io_printf(head, "Dynamic: %10u\n", dynamic);
- tomoyo_io_printf(head, "Total: %10u\n",
- shared + private + dynamic);
+ tomoyo_io_printf(head, "Policy: %10u%s\n", policy, buffer);
+ tomoyo_io_printf(head, "Total: %10u\n", policy);
head->read_eof = true;
}
return 0;
@@ -494,9 +354,7 @@ int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head)
char *data = head->write_buf;
unsigned int size;
- if (sscanf(data, "Shared: %u", &size) == 1)
- tomoyo_quota_for_savename = size;
- else if (sscanf(data, "Private: %u", &size) == 1)
- tomoyo_quota_for_elements = size;
+ if (sscanf(data, "Policy: %u", &size) == 1)
+ tomoyo_quota_for_policy = size;
return 0;
}
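
Since names are now reference counted, every successful tomoyo_get_name() must be balanced by exactly one tomoyo_put_name(); the GC reclaims an entry only after its users count drops to zero. A minimal sketch of that discipline (the stored flag and entry are illustrative, not from the patch):

	const struct tomoyo_path_info *name = tomoyo_get_name("/etc/fstab");

	if (!name)
		return -ENOMEM;
	if (stored)
		entry->filename = name;	/* the ACL entry now owns the ref */
	else
		tomoyo_put_name(name);	/* otherwise drop it right away */
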
diff --git a/security/tomoyo/realpath.h b/security/tomoyo/realpath.h
deleted file mode 100644
index 78217a3..0000000
--- a/security/tomoyo/realpath.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * security/tomoyo/realpath.h
- *
- * Get the canonicalized absolute pathnames. The basis for TOMOYO.
- *
- * Copyright (C) 2005-2009 NTT DATA CORPORATION
- *
- * Version: 2.2.0 2009/04/01
- *
- */
-
-#ifndef _SECURITY_TOMOYO_REALPATH_H
-#define _SECURITY_TOMOYO_REALPATH_H
-
-struct path;
-struct tomoyo_path_info;
-struct tomoyo_io_buffer;
-
-/* Convert binary string to ascii string. */
-int tomoyo_encode(char *buffer, int buflen, const char *str);
-
-/* Returns realpath(3) of the given pathname but ignores chroot'ed root. */
-int tomoyo_realpath_from_path2(struct path *path, char *newname,
- int newname_len);
-
-/*
- * Returns realpath(3) of the given pathname but ignores chroot'ed root.
- * These functions use tomoyo_alloc(), so the caller must call tomoyo_free()
- * if these functions didn't return NULL.
- */
-char *tomoyo_realpath(const char *pathname);
-/*
- * Same with tomoyo_realpath() except that it doesn't follow the final symlink.
- */
-char *tomoyo_realpath_nofollow(const char *pathname);
-/* Same with tomoyo_realpath() except that the pathname is already solved. */
-char *tomoyo_realpath_from_path(struct path *path);
-
-/*
- * Allocate memory for ACL entry.
- * The RAM is chunked, so NEVER try to kfree() the returned pointer.
- */
-void *tomoyo_alloc_element(const unsigned int size);
-
-/*
- * Keep the given name on the RAM.
- * The RAM is shared, so NEVER try to modify or kfree() the returned name.
- */
-const struct tomoyo_path_info *tomoyo_save_name(const char *name);
-
-/* Allocate memory for temporary use (e.g. permission checks). */
-void *tomoyo_alloc(const size_t size);
-
-/* Free memory allocated by tomoyo_alloc(). */
-void tomoyo_free(const void *p);
-
-/* Check for memory usage. */
-int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head);
-
-/* Set memory quota. */
-int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head);
-
-/* Initialize realpath related code. */
-void __init tomoyo_realpath_init(void);
-
-#endif /* !defined(_SECURITY_TOMOYO_REALPATH_H) */
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 2aceebf..dedd97d 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -11,8 +11,6 @@
#include <linux/security.h>
#include "common.h"
-#include "tomoyo.h"
-#include "realpath.h"
static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp)
{
@@ -23,21 +21,23 @@ static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp)
static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
- /*
- * Since "struct tomoyo_domain_info *" is a sharable pointer,
- * we don't need to duplicate.
- */
- new->security = old->security;
+ struct tomoyo_domain_info *domain = old->security;
+ new->security = domain;
+ if (domain)
+ atomic_inc(&domain->users);
return 0;
}
static void tomoyo_cred_transfer(struct cred *new, const struct cred *old)
{
- /*
- * Since "struct tomoyo_domain_info *" is a sharable pointer,
- * we don't need to duplicate.
- */
- new->security = old->security;
+ tomoyo_cred_prepare(new, old, 0);
+}
+
+static void tomoyo_cred_free(struct cred *cred)
+{
+ struct tomoyo_domain_info *domain = cred->security;
+ if (domain)
+ atomic_dec(&domain->users);
}
static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
@@ -61,6 +61,14 @@ static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
if (!tomoyo_policy_loaded)
tomoyo_load_policy(bprm->filename);
/*
+ * Release reference to "struct tomoyo_domain_info" stored inside
+ * "bprm->cred->security". New reference to "struct tomoyo_domain_info"
+ * stored inside "bprm->cred->security" will be acquired later inside
+ * tomoyo_find_next_domain().
+ */
+ atomic_dec(&((struct tomoyo_domain_info *)
+ bprm->cred->security)->users);
+ /*
* Tell tomoyo_bprm_check_security() is called for the first time of an
* execve operation.
*/
@@ -76,8 +84,12 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
* Execute permission is checked against pathname passed to do_execve()
* using current domain.
*/
- if (!domain)
- return tomoyo_find_next_domain(bprm);
+ if (!domain) {
+ const int idx = tomoyo_read_lock();
+ const int err = tomoyo_find_next_domain(bprm);
+ tomoyo_read_unlock(idx);
+ return err;
+ }
/*
* Read permission is checked against interpreters using next domain.
*/
@@ -87,67 +99,56 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
static int tomoyo_path_truncate(struct path *path, loff_t length,
unsigned int time_attrs)
{
- return tomoyo_check_1path_perm(tomoyo_domain(),
- TOMOYO_TYPE_TRUNCATE_ACL,
- path);
+ return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path);
}
static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry)
{
struct path path = { parent->mnt, dentry };
- return tomoyo_check_1path_perm(tomoyo_domain(),
- TOMOYO_TYPE_UNLINK_ACL,
- &path);
+ return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path);
}
static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry,
int mode)
{
struct path path = { parent->mnt, dentry };
- return tomoyo_check_1path_perm(tomoyo_domain(),
- TOMOYO_TYPE_MKDIR_ACL,
- &path);
+ return tomoyo_path_perm(TOMOYO_TYPE_MKDIR, &path);
}
static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry)
{
struct path path = { parent->mnt, dentry };
- return tomoyo_check_1path_perm(tomoyo_domain(),
- TOMOYO_TYPE_RMDIR_ACL,
- &path);
+ return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path);
}
static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry,
const char *old_name)
{
struct path path = { parent->mnt, dentry };
- return tomoyo_check_1path_perm(tomoyo_domain(),
- TOMOYO_TYPE_SYMLINK_ACL,
- &path);
+ return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path);
}
static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry,
int mode, unsigned int dev)
{
struct path path = { parent->mnt, dentry };
- int type = TOMOYO_TYPE_CREATE_ACL;
+ int type = TOMOYO_TYPE_CREATE;
switch (mode & S_IFMT) {
case S_IFCHR:
- type = TOMOYO_TYPE_MKCHAR_ACL;
+ type = TOMOYO_TYPE_MKCHAR;
break;
case S_IFBLK:
- type = TOMOYO_TYPE_MKBLOCK_ACL;
+ type = TOMOYO_TYPE_MKBLOCK;
break;
case S_IFIFO:
- type = TOMOYO_TYPE_MKFIFO_ACL;
+ type = TOMOYO_TYPE_MKFIFO;
break;
case S_IFSOCK:
- type = TOMOYO_TYPE_MKSOCK_ACL;
+ type = TOMOYO_TYPE_MKSOCK;
break;
}
- return tomoyo_check_1path_perm(tomoyo_domain(),
- type, &path);
+ return tomoyo_path_perm(type, &path);
}
static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir,
@@ -155,9 +156,7 @@ static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir,
{
struct path path1 = { new_dir->mnt, old_dentry };
struct path path2 = { new_dir->mnt, new_dentry };
- return tomoyo_check_2path_perm(tomoyo_domain(),
- TOMOYO_TYPE_LINK_ACL,
- &path1, &path2);
+ return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2);
}
static int tomoyo_path_rename(struct path *old_parent,
@@ -167,16 +166,14 @@ static int tomoyo_path_rename(struct path *old_parent,
{
struct path path1 = { old_parent->mnt, old_dentry };
struct path path2 = { new_parent->mnt, new_dentry };
- return tomoyo_check_2path_perm(tomoyo_domain(),
- TOMOYO_TYPE_RENAME_ACL,
- &path1, &path2);
+ return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2);
}
static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
unsigned long arg)
{
if (cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND))
- return tomoyo_check_rewrite_permission(tomoyo_domain(), file);
+ return tomoyo_check_rewrite_permission(file);
return 0;
}
@@ -189,6 +186,51 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, flags);
}
+static int tomoyo_file_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return tomoyo_path_perm(TOMOYO_TYPE_IOCTL, &file->f_path);
+}
+
+static int tomoyo_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
+ mode_t mode)
+{
+ struct path path = { mnt, dentry };
+ return tomoyo_path_perm(TOMOYO_TYPE_CHMOD, &path);
+}
+
+static int tomoyo_path_chown(struct path *path, uid_t uid, gid_t gid)
+{
+ int error = 0;
+ if (uid != (uid_t) -1)
+ error = tomoyo_path_perm(TOMOYO_TYPE_CHOWN, path);
+ if (!error && gid != (gid_t) -1)
+ error = tomoyo_path_perm(TOMOYO_TYPE_CHGRP, path);
+ return error;
+}
+
+static int tomoyo_path_chroot(struct path *path)
+{
+ return tomoyo_path_perm(TOMOYO_TYPE_CHROOT, path);
+}
+
+static int tomoyo_sb_mount(char *dev_name, struct path *path,
+ char *type, unsigned long flags, void *data)
+{
+ return tomoyo_path_perm(TOMOYO_TYPE_MOUNT, path);
+}
+
+static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
+{
+ struct path path = { mnt, mnt->mnt_root };
+ return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path);
+}
+
+static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path)
+{
+ return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path);
+}
+
/*
* tomoyo_security_ops is a "struct security_operations" which is used for
* registering TOMOYO.
@@ -198,6 +240,7 @@ static struct security_operations tomoyo_security_ops = {
.cred_alloc_blank = tomoyo_cred_alloc_blank,
.cred_prepare = tomoyo_cred_prepare,
.cred_transfer = tomoyo_cred_transfer,
+ .cred_free = tomoyo_cred_free,
.bprm_set_creds = tomoyo_bprm_set_creds,
.bprm_check_security = tomoyo_bprm_check_security,
.file_fcntl = tomoyo_file_fcntl,
@@ -210,8 +253,18 @@ static struct security_operations tomoyo_security_ops = {
.path_mknod = tomoyo_path_mknod,
.path_link = tomoyo_path_link,
.path_rename = tomoyo_path_rename,
+ .file_ioctl = tomoyo_file_ioctl,
+ .path_chmod = tomoyo_path_chmod,
+ .path_chown = tomoyo_path_chown,
+ .path_chroot = tomoyo_path_chroot,
+ .sb_mount = tomoyo_sb_mount,
+ .sb_umount = tomoyo_sb_umount,
+ .sb_pivotroot = tomoyo_sb_pivotroot,
};
+/* Lock for GC. */
+struct srcu_struct tomoyo_ss;
+
static int __init tomoyo_init(void)
{
struct cred *cred = (struct cred *) current_cred();
@@ -219,7 +272,8 @@ static int __init tomoyo_init(void)
if (!security_module_enable(&tomoyo_security_ops))
return 0;
/* register ourselves with the security framework */
- if (register_security(&tomoyo_security_ops))
+ if (register_security(&tomoyo_security_ops) ||
+ init_srcu_struct(&tomoyo_ss))
panic("Failure registering TOMOYO Linux");
printk(KERN_INFO "TOMOYO Linux initialized\n");
cred->security = &tomoyo_kernel_domain;
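
Taken together, the three cred hooks keep domain->users equal to the number of credentials pointing at the domain, which is exactly the counter tomoyo_del_domain() rechecks. In sketch form:

	/* cred duplicated: tomoyo_cred_prepare()/tomoyo_cred_transfer() */
	atomic_inc(&domain->users);

	/* cred destroyed: tomoyo_cred_free() */
	atomic_dec(&domain->users);

	/* GC: a domain is freed only when is_deleted is set and users == 0 */
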
diff --git a/security/tomoyo/tomoyo.h b/security/tomoyo/tomoyo.h
deleted file mode 100644
index ed75832..0000000
--- a/security/tomoyo/tomoyo.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * security/tomoyo/tomoyo.h
- *
- * Implementation of the Domain-Based Mandatory Access Control.
- *
- * Copyright (C) 2005-2009 NTT DATA CORPORATION
- *
- * Version: 2.2.0 2009/04/01
- *
- */
-
-#ifndef _SECURITY_TOMOYO_TOMOYO_H
-#define _SECURITY_TOMOYO_TOMOYO_H
-
-struct tomoyo_path_info;
-struct path;
-struct inode;
-struct linux_binprm;
-struct pt_regs;
-
-int tomoyo_check_exec_perm(struct tomoyo_domain_info *domain,
- const struct tomoyo_path_info *filename);
-int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
- struct path *path, const int flag);
-int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain,
- const u8 operation, struct path *path);
-int tomoyo_check_2path_perm(struct tomoyo_domain_info *domain,
- const u8 operation, struct path *path1,
- struct path *path2);
-int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
- struct file *filp);
-int tomoyo_find_next_domain(struct linux_binprm *bprm);
-
-/* Index numbers for Access Controls. */
-
-#define TOMOYO_TYPE_SINGLE_PATH_ACL 0
-#define TOMOYO_TYPE_DOUBLE_PATH_ACL 1
-
-/* Index numbers for File Controls. */
-
-/*
- * TYPE_READ_WRITE_ACL is special. TYPE_READ_WRITE_ACL is automatically set
- * if both TYPE_READ_ACL and TYPE_WRITE_ACL are set. Both TYPE_READ_ACL and
- * TYPE_WRITE_ACL are automatically set if TYPE_READ_WRITE_ACL is set.
- * TYPE_READ_WRITE_ACL is automatically cleared if either TYPE_READ_ACL or
- * TYPE_WRITE_ACL is cleared. Both TYPE_READ_ACL and TYPE_WRITE_ACL are
- * automatically cleared if TYPE_READ_WRITE_ACL is cleared.
- */
-
-#define TOMOYO_TYPE_READ_WRITE_ACL 0
-#define TOMOYO_TYPE_EXECUTE_ACL 1
-#define TOMOYO_TYPE_READ_ACL 2
-#define TOMOYO_TYPE_WRITE_ACL 3
-#define TOMOYO_TYPE_CREATE_ACL 4
-#define TOMOYO_TYPE_UNLINK_ACL 5
-#define TOMOYO_TYPE_MKDIR_ACL 6
-#define TOMOYO_TYPE_RMDIR_ACL 7
-#define TOMOYO_TYPE_MKFIFO_ACL 8
-#define TOMOYO_TYPE_MKSOCK_ACL 9
-#define TOMOYO_TYPE_MKBLOCK_ACL 10
-#define TOMOYO_TYPE_MKCHAR_ACL 11
-#define TOMOYO_TYPE_TRUNCATE_ACL 12
-#define TOMOYO_TYPE_SYMLINK_ACL 13
-#define TOMOYO_TYPE_REWRITE_ACL 14
-#define TOMOYO_MAX_SINGLE_PATH_OPERATION 15
-
-#define TOMOYO_TYPE_LINK_ACL 0
-#define TOMOYO_TYPE_RENAME_ACL 1
-#define TOMOYO_MAX_DOUBLE_PATH_OPERATION 2
-
-#define TOMOYO_DOMAINPOLICY 0
-#define TOMOYO_EXCEPTIONPOLICY 1
-#define TOMOYO_DOMAIN_STATUS 2
-#define TOMOYO_PROCESS_STATUS 3
-#define TOMOYO_MEMINFO 4
-#define TOMOYO_SELFDOMAIN 5
-#define TOMOYO_VERSION 6
-#define TOMOYO_PROFILE 7
-#define TOMOYO_MANAGER 8
-
-extern struct tomoyo_domain_info tomoyo_kernel_domain;
-
-static inline struct tomoyo_domain_info *tomoyo_domain(void)
-{
- return current_cred()->security;
-}
-
-static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct
- *task)
-{
- return task_cred_xxx(task, security);
-}
-
-#endif /* !defined(_SECURITY_TOMOYO_TOMOYO_H) */
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index 2de3407..34202b1 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -41,7 +41,8 @@ OPTIONS
-d::
--del=::
- Delete a probe event.
+ Delete probe events. This accepts glob wildcards ('*', '?') and character
+ classes (e.g. [a-z], [!A-Z]).
-l::
--list::
@@ -50,17 +51,29 @@ OPTIONS
-L::
--line=::
Show source code lines which can be probed. This needs an argument
- which specifies a range of the source code.
+ which specifies a range of the source code (see LINE SYNTAX for details).
+
+-f::
+--force::
+ Forcibly add events with an existing name.
PROBE SYNTAX
------------
Probe points are defined by the following syntax.
- "[EVENT=]FUNC[+OFFS|:RLN|%return][@SRC]|SRC:ALN [ARG ...]"
+ 1) Define event based on function name
+ [EVENT=]FUNC[@SRC][:RLN|+OFFS|%return|;PTN] [ARG ...]
+
+ 2) Define event based on source file with line number
+ [EVENT=]SRC:ALN [ARG ...]
+
+ 3) Define event based on source file with lazy pattern
+ [EVENT=]SRC;PTN [ARG ...]
+
 'EVENT' specifies the name of the new event; if omitted, it will be set to the name of the probed function. Currently, the event group name is set to 'probe'.
-'FUNC' specifies a probed function name, and it may have one of the following options; '+OFFS' is the offset from function entry address in bytes, 'RLN' is the relative-line number from function entry line, and '%return' means that it probes function return. In addition, 'SRC' specifies a source file which has that function.
-It is also possible to specify a probe point by the source line number by using 'SRC:ALN' syntax, where 'SRC' is the source file path and 'ALN' is the line number.
+'FUNC' specifies a probed function name, and it may take one of the following options: '+OFFS' is the offset from the function entry address in bytes, ':RLN' is the relative line number from the function entry line, and '%return' means that it probes the function return. ';PTN' means a lazy matching pattern (see LAZY MATCHING); note that ';PTN' must come at the end of the probe point definition. In addition, '@SRC' specifies a source file which contains that function.
+It is also possible to specify a probe point by the source line number or lazy matching by using 'SRC:ALN' or 'SRC;PTN' syntax, where 'SRC' is the source file path, ':ALN' is the line number and ';PTN' is the lazy matching pattern.
 'ARG' specifies the arguments of this probe point. You can use the name of a local variable, or the kprobe-tracer argument format (e.g. $retval, %ax, etc.).
LINE SYNTAX
@@ -76,6 +89,41 @@ and 'ALN2' is end line number in the file. It is also possible to specify how
many lines to show by using 'NUM'.
So, "source.c:100-120" shows lines between 100th to l20th in source.c file. And "func:10+20" shows 20 lines from 10th line of func function.
+LAZY MATCHING
+-------------
+ The lazy line matching is similar to glob matching, but it ignores spaces in both the pattern and the target. So this accepts wildcards ('*', '?') and character classes (e.g. [a-z], [!A-Z]).
+
+e.g.
+ 'a=*' can match 'a=b', 'a = b', 'a == b' and so on.
+
+This provides some flexibility and robustness to probe point definitions against minor code changes. For example, the actual 10th line of schedule() can easily move when schedule() is modified, but the same line matching 'rq=cpu_rq*' may still exist in the function.
+
+
+EXAMPLES
+--------
+Display which lines in schedule() can be probed:
+
+ ./perf probe --line schedule
+
+Add a probe at the 12th line of the schedule() function, recording the 'cpu' local variable:
+
+ ./perf probe schedule:12 cpu
+ or
+ ./perf probe --add='schedule:12 cpu'
+
+ This will add one or more probes whose names start with "schedule".
+
+ Add probes on the lines in the schedule() function which call update_rq_clock():
+
+ ./perf probe 'schedule;update_rq_clock*'
+ or
+ ./perf probe --add='schedule;update_rq_clock*'
+
+Delete all probes on schedule().
+
+ ./perf probe --del='schedule*'
+
+
SEE ALSO
--------
linkperf:perf-trace[1], linkperf:perf-record[1]
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 54a5b50..2d53738 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -500,12 +500,12 @@ else
msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]);
endif
-ifneq ($(shell sh -c "(echo '\#ifndef _MIPS_SZLONG'; echo '\#define _MIPS_SZLONG 0'; echo '\#endif'; echo '\#include <dwarf.h>'; echo '\#include <libdwarf.h>'; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -I/usr/include/libdwarf -ldwarf -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
- msg := $(warning No libdwarf.h found or old libdwarf.h found, disables dwarf support. Please install libdwarf-dev/libdwarf-devel >= 20081231);
- BASIC_CFLAGS += -DNO_LIBDWARF
+ifneq ($(shell sh -c "(echo '\#include <dwarf.h>'; echo '\#include <libdw.h>'; echo 'int main(void) { Dwarf *dbg; dbg = dwarf_begin(0, DWARF_C_READ); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -I/usr/include/elfutils -ldw -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ msg := $(warning No libdw.h found or old libdw.h found, disables dwarf support. Please install elfutils-devel/elfutils-dev);
+ BASIC_CFLAGS += -DNO_DWARF_SUPPORT
else
- BASIC_CFLAGS += -I/usr/include/libdwarf
- EXTLIBS += -lelf -ldwarf
+ BASIC_CFLAGS += -I/usr/include/elfutils
+ EXTLIBS += -lelf -ldw
LIB_OBJS += util/probe-finder.o
endif
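
For reference, the C test program embedded in the shell one-liner above, written out readably (reconstructed from the Makefile line itself):

	#include <dwarf.h>
	#include <libdw.h>

	int main(void)
	{
		Dwarf *dbg;

		/* Links only if elfutils' libdw is present and usable. */
		dbg = dwarf_begin(0, DWARF_C_READ);
		return (long)dbg;
	}
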
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index ad47bd4..c30a335 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -128,7 +128,7 @@ static void evaluate_probe_point(struct probe_point *pp)
pp->function);
}
-#ifndef NO_LIBDWARF
+#ifndef NO_DWARF_SUPPORT
static int open_vmlinux(void)
{
if (map__load(session.kmaps[MAP__FUNCTION], NULL) < 0) {
@@ -156,14 +156,16 @@ static const char * const probe_usage[] = {
"perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
"perf probe [<options>] --del '[GROUP:]EVENT' ...",
"perf probe --list",
+#ifndef NO_DWARF_SUPPORT
"perf probe --line 'LINEDESC'",
+#endif
NULL
};
static const struct option options[] = {
OPT_BOOLEAN('v', "verbose", &verbose,
"be more verbose (show parsed arguments, etc)"),
-#ifndef NO_LIBDWARF
+#ifndef NO_DWARF_SUPPORT
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
#endif
@@ -172,30 +174,32 @@ static const struct option options[] = {
OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.",
opt_del_probe_event),
OPT_CALLBACK('a', "add", NULL,
-#ifdef NO_LIBDWARF
- "[EVENT=]FUNC[+OFFS|%return] [ARG ...]",
+#ifdef NO_DWARF_SUPPORT
+ "[EVENT=]FUNC[+OFF|%return] [ARG ...]",
#else
- "[EVENT=]FUNC[+OFFS|%return|:RLN][@SRC]|SRC:ALN [ARG ...]",
+ "[EVENT=]FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT"
+ " [ARG ...]",
#endif
"probe point definition, where\n"
"\t\tGROUP:\tGroup name (optional)\n"
"\t\tEVENT:\tEvent name\n"
"\t\tFUNC:\tFunction name\n"
- "\t\tOFFS:\tOffset from function entry (in byte)\n"
+ "\t\tOFF:\tOffset from function entry (in byte)\n"
"\t\t%return:\tPut the probe at function return\n"
-#ifdef NO_LIBDWARF
+#ifdef NO_DWARF_SUPPORT
"\t\tARG:\tProbe argument (only \n"
#else
"\t\tSRC:\tSource code path\n"
- "\t\tRLN:\tRelative line number from function entry.\n"
- "\t\tALN:\tAbsolute line number in file.\n"
+ "\t\tRL:\tRelative line number from function entry.\n"
+ "\t\tAL:\tAbsolute line number in file.\n"
+ "\t\tPT:\tLazy expression of line code.\n"
"\t\tARG:\tProbe argument (local variable name or\n"
#endif
"\t\t\tkprobe-tracer argument format.)\n",
opt_add_probe_event),
OPT_BOOLEAN('f', "force", &session.force_add, "forcibly add events"
" with existing name"),
-#ifndef NO_LIBDWARF
+#ifndef NO_DWARF_SUPPORT
OPT_CALLBACK('L', "line", NULL,
"FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]",
"Show source code lines.", opt_show_lines),
@@ -223,7 +227,7 @@ static void init_vmlinux(void)
int cmd_probe(int argc, const char **argv, const char *prefix __used)
{
int i, ret;
-#ifndef NO_LIBDWARF
+#ifndef NO_DWARF_SUPPORT
int fd;
#endif
struct probe_point *pp;
@@ -259,7 +263,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
return 0;
}
-#ifndef NO_LIBDWARF
+#ifndef NO_DWARF_SUPPORT
if (session.show_lines) {
if (session.nr_probe != 0 || session.dellist) {
pr_warning(" Error: Don't use --line with"
@@ -290,9 +294,9 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
init_vmlinux();
if (session.need_dwarf)
-#ifdef NO_LIBDWARF
+#ifdef NO_DWARF_SUPPORT
die("Debuginfo-analysis is not supported");
-#else /* !NO_LIBDWARF */
+#else /* !NO_DWARF_SUPPORT */
pr_debug("Some probes require debuginfo.\n");
fd = open_vmlinux();
@@ -312,7 +316,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
continue;
lseek(fd, SEEK_SET, 0);
- ret = find_probepoint(fd, pp);
+ ret = find_probe_point(fd, pp);
if (ret > 0)
continue;
if (ret == 0) { /* No error but failed to find probe point. */
@@ -333,7 +337,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
close(fd);
end_dwarf:
-#endif /* !NO_LIBDWARF */
+#endif /* !NO_DWARF_SUPPORT */
/* Synthesize probes without dwarf */
for (i = 0; i < session.nr_probe; i++) {
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 8f05688..c971e81 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -119,14 +119,14 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
char c, nc = 0;
/*
* <Syntax>
- * perf probe [EVENT=]SRC:LN
- * perf probe [EVENT=]FUNC[+OFFS|%return][@SRC]
+ * perf probe [EVENT=]SRC[:LN|;PTN]
+ * perf probe [EVENT=]FUNC[@SRC][+OFFS|%return|:LN|;PAT]
*
* TODO:Group name support
*/
- ptr = strchr(arg, '=');
- if (ptr) { /* Event name */
+ ptr = strpbrk(arg, ";=@+%");
+ if (ptr && *ptr == '=') { /* Event name */
*ptr = '\0';
tmp = ptr + 1;
ptr = strchr(arg, ':');
@@ -139,7 +139,7 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
arg = tmp;
}
- ptr = strpbrk(arg, ":+@%");
+ ptr = strpbrk(arg, ";:+@%");
if (ptr) {
nc = *ptr;
*ptr++ = '\0';
@@ -156,7 +156,11 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
while (ptr) {
arg = ptr;
c = nc;
- ptr = strpbrk(arg, ":+@%");
+ if (c == ';') { /* Lazy pattern must be the last part */
+ pp->lazy_line = strdup(arg);
+ break;
+ }
+ ptr = strpbrk(arg, ";:+@%");
if (ptr) {
nc = *ptr;
*ptr++ = '\0';
@@ -165,13 +169,13 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
case ':': /* Line number */
pp->line = strtoul(arg, &tmp, 0);
if (*tmp != '\0')
- semantic_error("There is non-digit charactor"
- " in line number.");
+ semantic_error("There is non-digit char"
+ " in line number.");
break;
case '+': /* Byte offset from a symbol */
pp->offset = strtoul(arg, &tmp, 0);
if (*tmp != '\0')
- semantic_error("There is non-digit charactor"
+ semantic_error("There is non-digit character"
" in offset.");
break;
case '@': /* File name */
@@ -179,9 +183,6 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
semantic_error("SRC@SRC is not allowed.");
pp->file = strdup(arg);
DIE_IF(pp->file == NULL);
- if (ptr)
- semantic_error("@SRC must be the last "
- "option.");
break;
case '%': /* Probe places */
if (strcmp(arg, "return") == 0) {
@@ -196,11 +197,18 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
}
/* Exclusion check */
+ if (pp->lazy_line && pp->line)
+ semantic_error("Lazy pattern can't be used with line number.");
+
+ if (pp->lazy_line && pp->offset)
+ semantic_error("Lazy pattern can't be used with offset.");
+
if (pp->line && pp->offset)
semantic_error("Offset can't be used with line number.");
- if (!pp->line && pp->file && !pp->function)
- semantic_error("File always requires line number.");
+ if (!pp->line && !pp->lazy_line && pp->file && !pp->function)
+ semantic_error("File always requires line number or "
+ "lazy pattern.");
if (pp->offset && !pp->function)
semantic_error("Offset requires an entry function.");
@@ -208,11 +216,13 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
if (pp->retprobe && !pp->function)
semantic_error("Return probe requires an entry function.");
- if ((pp->offset || pp->line) && pp->retprobe)
- semantic_error("Offset/Line can't be used with return probe.");
+ if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe)
+ semantic_error("Offset/Line/Lazy pattern can't be used with "
+ "return probe.");
- pr_debug("symbol:%s file:%s line:%d offset:%d, return:%d\n",
- pp->function, pp->file, pp->line, pp->offset, pp->retprobe);
+ pr_debug("symbol:%s file:%s line:%d offset:%d return:%d lazy:%s\n",
+ pp->function, pp->file, pp->line, pp->offset, pp->retprobe,
+ pp->lazy_line);
}
/* Parse perf-probe event definition */
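
To illustrate the exclusion checks above, here is how this parser treats a
few probe point definitions (the function and file names are placeholders):

	schedule:12		accepted: function-relative line number
	schedule+8		accepted: byte offset from function entry
	schedule;rq=cpu_rq*	accepted: lazy pattern
	schedule:12;pat*	rejected: lazy pattern with a line number
	sched.c			rejected: a file requires a line number or lazy pattern
	schedule:12%return	rejected: a line number with a return probe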
@@ -458,6 +468,8 @@ static void clear_probe_point(struct probe_point *pp)
free(pp->function);
if (pp->file)
free(pp->file);
+ if (pp->lazy_line)
+ free(pp->lazy_line);
for (i = 0; i < pp->nr_args; i++)
free(pp->args[i]);
if (pp->args)
@@ -719,6 +731,7 @@ void del_trace_kprobe_events(struct strlist *dellist)
}
#define LINEBUF_SIZE 256
+#define NR_ADDITIONAL_LINES 2
static void show_one_line(FILE *fp, unsigned int l, bool skip, bool show_num)
{
@@ -779,5 +792,11 @@ void show_line_range(struct line_range *lr)
show_one_line(fp, (l++) - lr->offset, false, false);
show_one_line(fp, (l++) - lr->offset, false, true);
}
+
+ if (lr->end == INT_MAX)
+ lr->end = l + NR_ADDITIONAL_LINES;
+ while (l < lr->end && !feof(fp))
+ show_one_line(fp, (l++) - lr->offset, false, false);
+
fclose(fp);
}
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 1b2124d..e77dc88 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -32,21 +32,13 @@
#include <stdarg.h>
#include <ctype.h>
+#include "string.h"
#include "event.h"
#include "debug.h"
#include "util.h"
#include "probe-finder.h"
-/* Dwarf_Die Linkage to parent Die */
-struct die_link {
- struct die_link *parent; /* Parent die */
- Dwarf_Die die; /* Current die */
-};
-
-static Dwarf_Debug __dw_debug;
-static Dwarf_Error __dw_error;
-
/*
* Generic dwarf analysis helpers
*/
@@ -113,281 +105,190 @@ static int strtailcmp(const char *s1, const char *s2)
return 0;
}
-/* Find the fileno of the target file. */
-static Dwarf_Unsigned cu_find_fileno(Dwarf_Die cu_die, const char *fname)
-{
- Dwarf_Signed cnt, i;
- Dwarf_Unsigned found = 0;
- char **srcs;
- int ret;
+/* Line number list operations */
- if (!fname)
- return 0;
+/* Add a line to line number list */
+static void line_list__add_line(struct list_head *head, unsigned int line)
+{
+ struct line_node *ln;
+ struct list_head *p;
- ret = dwarf_srcfiles(cu_die, &srcs, &cnt, &__dw_error);
- if (ret == DW_DLV_OK) {
- for (i = 0; i < cnt && !found; i++) {
- if (strtailcmp(srcs[i], fname) == 0)
- found = i + 1;
- dwarf_dealloc(__dw_debug, srcs[i], DW_DLA_STRING);
- }
- for (; i < cnt; i++)
- dwarf_dealloc(__dw_debug, srcs[i], DW_DLA_STRING);
- dwarf_dealloc(__dw_debug, srcs, DW_DLA_LIST);
+ /* Reverse search, because new line will be the last one */
+ list_for_each_entry_reverse(ln, head, list) {
+ if (ln->line < line) {
+ p = &ln->list;
+ goto found;
+ } else if (ln->line == line) /* Already exist */
+ return ;
}
- if (found)
- pr_debug("found fno: %d\n", (int)found);
- return found;
+ /* List is empty, or the smallest entry */
+ p = head;
+found:
+ pr_debug("line list: add a line %u\n", line);
+ ln = zalloc(sizeof(struct line_node));
+ DIE_IF(ln == NULL);
+ ln->line = line;
+ INIT_LIST_HEAD(&ln->list);
+ list_add(&ln->list, p);
}
-static int cu_get_filename(Dwarf_Die cu_die, Dwarf_Unsigned fno, char **buf)
+/* Check if the line is in the line number list */
+static int line_list__has_line(struct list_head *head, unsigned int line)
{
- Dwarf_Signed cnt, i;
- char **srcs;
- int ret = 0;
-
- if (!buf || !fno)
- return -EINVAL;
-
- ret = dwarf_srcfiles(cu_die, &srcs, &cnt, &__dw_error);
- if (ret == DW_DLV_OK) {
- if ((Dwarf_Unsigned)cnt > fno - 1) {
- *buf = strdup(srcs[fno - 1]);
- ret = 0;
- pr_debug("found filename: %s\n", *buf);
- } else
- ret = -ENOENT;
- for (i = 0; i < cnt; i++)
- dwarf_dealloc(__dw_debug, srcs[i], DW_DLA_STRING);
- dwarf_dealloc(__dw_debug, srcs, DW_DLA_LIST);
- } else
- ret = -EINVAL;
- return ret;
+ struct line_node *ln;
+
+	/* Linear search over the sorted list */
+ list_for_each_entry(ln, head, list)
+ if (ln->line == line)
+ return 1;
+
+ return 0;
}
-/* Compare diename and tname */
-static int die_compare_name(Dwarf_Die dw_die, const char *tname)
+/* Init line number list */
+static void line_list__init(struct list_head *head)
{
- char *name;
- int ret;
- ret = dwarf_diename(dw_die, &name, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- if (ret == DW_DLV_OK) {
- ret = strcmp(tname, name);
- dwarf_dealloc(__dw_debug, name, DW_DLA_STRING);
- } else
- ret = -1;
- return ret;
+ INIT_LIST_HEAD(head);
}
-/* Check the address is in the subprogram(function). */
-static int die_within_subprogram(Dwarf_Die sp_die, Dwarf_Addr addr,
- Dwarf_Signed *offs)
+/* Free line number list */
+static void line_list__free(struct list_head *head)
{
- Dwarf_Addr lopc, hipc;
- int ret;
-
- /* TODO: check ranges */
- ret = dwarf_lowpc(sp_die, &lopc, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- if (ret == DW_DLV_NO_ENTRY)
- return 0;
- ret = dwarf_highpc(sp_die, &hipc, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- if (lopc <= addr && addr < hipc) {
- *offs = addr - lopc;
- return 1;
- } else
- return 0;
+ struct line_node *ln;
+ while (!list_empty(head)) {
+ ln = list_first_entry(head, struct line_node, list);
+ list_del(&ln->list);
+ free(ln);
+ }
}
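
The helpers above keep the list sorted and free of duplicates; a minimal
usage sketch with made-up line numbers:

	struct list_head lines;

	line_list__init(&lines);
	line_list__add_line(&lines, 42);
	line_list__add_line(&lines, 10);
	line_list__add_line(&lines, 42);	/* duplicate, silently ignored */
	/* the list now holds 10 and 42, in ascending order */
	if (line_list__has_line(&lines, 42))
		pr_debug("line 42 is cached\n");
	line_list__free(&lines);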
-/* Check the die is inlined function */
-static Dwarf_Bool die_inlined_subprogram(Dwarf_Die dw_die)
+/* Dwarf wrappers */
+
+/* Find the realpath of the target file. */
+static const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname)
{
- /* TODO: check strictly */
- Dwarf_Bool inl;
+ Dwarf_Files *files;
+ size_t nfiles, i;
+ const char *src;
int ret;
- ret = dwarf_hasattr(dw_die, DW_AT_inline, &inl, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- return inl;
-}
+ if (!fname)
+ return NULL;
-/* Get the offset of abstruct_origin */
-static Dwarf_Off die_get_abstract_origin(Dwarf_Die dw_die)
-{
- Dwarf_Attribute attr;
- Dwarf_Off cu_offs;
- int ret;
+ ret = dwarf_getsrcfiles(cu_die, &files, &nfiles);
+ if (ret != 0)
+ return NULL;
- ret = dwarf_attr(dw_die, DW_AT_abstract_origin, &attr, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- ret = dwarf_formref(attr, &cu_offs, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR);
- return cu_offs;
+ for (i = 0; i < nfiles; i++) {
+ src = dwarf_filesrc(files, i, NULL, NULL);
+ if (strtailcmp(src, fname) == 0)
+ break;
+ }
+ return src;
}
-/* Get entry pc(or low pc, 1st entry of ranges) of the die */
-static Dwarf_Addr die_get_entrypc(Dwarf_Die dw_die)
+struct __addr_die_search_param {
+ Dwarf_Addr addr;
+ Dwarf_Die *die_mem;
+};
+
+static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
{
- Dwarf_Attribute attr;
- Dwarf_Addr addr;
- Dwarf_Off offs;
- Dwarf_Ranges *ranges;
- Dwarf_Signed cnt;
- int ret;
+ struct __addr_die_search_param *ad = data;
- /* Try to get entry pc */
- ret = dwarf_attr(dw_die, DW_AT_entry_pc, &attr, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- if (ret == DW_DLV_OK) {
- ret = dwarf_formaddr(attr, &addr, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR);
- return addr;
+ if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
+ dwarf_haspc(fn_die, ad->addr)) {
+ memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
+ return DWARF_CB_ABORT;
}
+ return DWARF_CB_OK;
+}
- /* Try to get low pc */
- ret = dwarf_lowpc(dw_die, &addr, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- if (ret == DW_DLV_OK)
- return addr;
-
- /* Try to get ranges */
- ret = dwarf_attr(dw_die, DW_AT_ranges, &attr, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- ret = dwarf_formref(attr, &offs, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- ret = dwarf_get_ranges(__dw_debug, offs, &ranges, &cnt, NULL,
- &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- addr = ranges[0].dwr_addr1;
- dwarf_ranges_dealloc(__dw_debug, ranges, cnt);
- return addr;
+/* Search a real subprogram that includes this address */
+static Dwarf_Die *die_get_real_subprogram(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem)
+{
+ struct __addr_die_search_param ad;
+ ad.addr = addr;
+ ad.die_mem = die_mem;
+ /* dwarf_getscopes can't find subprogram. */
+ if (!dwarf_getfuncs(cu_die, __die_search_func_cb, &ad, 0))
+ return NULL;
+ else
+ return die_mem;
}
-/*
- * Search a Die from Die tree.
- * Note: cur_link->die should be deallocated in this function.
- */
-static int __search_die_tree(struct die_link *cur_link,
- int (*die_cb)(struct die_link *, void *),
- void *data)
+/* Similar to dwarf_getfuncs, but returns inlined_subroutine if exists. */
+static Dwarf_Die *die_get_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem)
{
- Dwarf_Die new_die;
- struct die_link new_link;
+ Dwarf_Die child_die;
int ret;
- if (!die_cb)
- return 0;
-
- /* Check current die */
- while (!(ret = die_cb(cur_link, data))) {
- /* Check child die */
- ret = dwarf_child(cur_link->die, &new_die, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- if (ret == DW_DLV_OK) {
- new_link.parent = cur_link;
- new_link.die = new_die;
- ret = __search_die_tree(&new_link, die_cb, data);
- if (ret)
- break;
- }
+ ret = dwarf_child(sp_die, die_mem);
+ if (ret != 0)
+ return NULL;
- /* Move to next sibling */
- ret = dwarf_siblingof(__dw_debug, cur_link->die, &new_die,
- &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- dwarf_dealloc(__dw_debug, cur_link->die, DW_DLA_DIE);
- cur_link->die = new_die;
- if (ret == DW_DLV_NO_ENTRY)
- return 0;
- }
- dwarf_dealloc(__dw_debug, cur_link->die, DW_DLA_DIE);
- return ret;
-}
+ do {
+ if (dwarf_tag(die_mem) == DW_TAG_inlined_subroutine &&
+ dwarf_haspc(die_mem, addr))
+ return die_mem;
-/* Search a die in its children's die tree */
-static int search_die_from_children(Dwarf_Die parent_die,
- int (*die_cb)(struct die_link *, void *),
- void *data)
-{
- struct die_link new_link;
- int ret;
+ if (die_get_inlinefunc(die_mem, addr, &child_die)) {
+ memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
+ return die_mem;
+ }
+ } while (dwarf_siblingof(die_mem, die_mem) == 0);
- new_link.parent = NULL;
- ret = dwarf_child(parent_die, &new_link.die, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- if (ret == DW_DLV_OK)
- return __search_die_tree(&new_link, die_cb, data);
- else
- return 0;
+ return NULL;
}
-/* Find a locdesc corresponding to the address */
-static int attr_get_locdesc(Dwarf_Attribute attr, Dwarf_Locdesc *desc,
- Dwarf_Addr addr)
+/* Compare diename and tname */
+static bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
{
- Dwarf_Signed lcnt;
- Dwarf_Locdesc **llbuf;
- int ret, i;
-
- ret = dwarf_loclist_n(attr, &llbuf, &lcnt, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- ret = DW_DLV_NO_ENTRY;
- for (i = 0; i < lcnt; ++i) {
- if (llbuf[i]->ld_lopc <= addr &&
- llbuf[i]->ld_hipc > addr) {
- memcpy(desc, llbuf[i], sizeof(Dwarf_Locdesc));
- desc->ld_s =
- malloc(sizeof(Dwarf_Loc) * llbuf[i]->ld_cents);
- DIE_IF(desc->ld_s == NULL);
- memcpy(desc->ld_s, llbuf[i]->ld_s,
- sizeof(Dwarf_Loc) * llbuf[i]->ld_cents);
- ret = DW_DLV_OK;
- break;
- }
- dwarf_dealloc(__dw_debug, llbuf[i]->ld_s, DW_DLA_LOC_BLOCK);
- dwarf_dealloc(__dw_debug, llbuf[i], DW_DLA_LOCDESC);
- }
- /* Releasing loop */
- for (; i < lcnt; ++i) {
- dwarf_dealloc(__dw_debug, llbuf[i]->ld_s, DW_DLA_LOC_BLOCK);
- dwarf_dealloc(__dw_debug, llbuf[i], DW_DLA_LOCDESC);
- }
- dwarf_dealloc(__dw_debug, llbuf, DW_DLA_LIST);
- return ret;
+ const char *name;
+ name = dwarf_diename(dw_die);
+ DIE_IF(name == NULL);
+ return strcmp(tname, name);
}
-/* Get decl_file attribute value (file number) */
-static Dwarf_Unsigned die_get_decl_file(Dwarf_Die sp_die)
+/* Get entry pc(or low pc, 1st entry of ranges) of the die */
+static Dwarf_Addr die_get_entrypc(Dwarf_Die *dw_die)
{
- Dwarf_Attribute attr;
- Dwarf_Unsigned fno;
+ Dwarf_Addr epc;
int ret;
- ret = dwarf_attr(sp_die, DW_AT_decl_file, &attr, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- dwarf_formudata(attr, &fno, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR);
- return fno;
+ ret = dwarf_entrypc(dw_die, &epc);
+ DIE_IF(ret == -1);
+ return epc;
}
-/* Get decl_line attribute value (line number) */
-static Dwarf_Unsigned die_get_decl_line(Dwarf_Die sp_die)
+/* Get a variable die */
+static Dwarf_Die *die_find_variable(Dwarf_Die *sp_die, const char *name,
+ Dwarf_Die *die_mem)
{
- Dwarf_Attribute attr;
- Dwarf_Unsigned lno;
+ Dwarf_Die child_die;
+ int tag;
int ret;
- ret = dwarf_attr(sp_die, DW_AT_decl_line, &attr, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- dwarf_formudata(attr, &lno, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR);
- return lno;
+ ret = dwarf_child(sp_die, die_mem);
+ if (ret != 0)
+ return NULL;
+
+ do {
+ tag = dwarf_tag(die_mem);
+ if ((tag == DW_TAG_formal_parameter ||
+ tag == DW_TAG_variable) &&
+ (die_compare_name(die_mem, name) == 0))
+ return die_mem;
+
+ if (die_find_variable(die_mem, name, &child_die)) {
+ memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
+ return die_mem;
+ }
+ } while (dwarf_siblingof(die_mem, die_mem) == 0);
+
+ return NULL;
}
/*
@@ -395,47 +296,45 @@ static Dwarf_Unsigned die_get_decl_line(Dwarf_Die sp_die)
*/
/* Show a location */
-static void show_location(Dwarf_Loc *loc, struct probe_finder *pf)
+static void show_location(Dwarf_Op *op, struct probe_finder *pf)
{
- Dwarf_Small op;
- Dwarf_Unsigned regn;
- Dwarf_Signed offs;
+ unsigned int regn;
+ Dwarf_Word offs = 0;
int deref = 0, ret;
const char *regs;
- op = loc->lr_atom;
-
+ /* TODO: support CFA */
/* If this is based on frame buffer, set the offset */
- if (op == DW_OP_fbreg) {
+ if (op->atom == DW_OP_fbreg) {
+ if (pf->fb_ops == NULL)
+ die("The attribute of frame base is not supported.\n");
deref = 1;
- offs = (Dwarf_Signed)loc->lr_number;
- op = pf->fbloc.ld_s[0].lr_atom;
- loc = &pf->fbloc.ld_s[0];
- } else
- offs = 0;
+ offs = op->number;
+ op = &pf->fb_ops[0];
+ }
- if (op >= DW_OP_breg0 && op <= DW_OP_breg31) {
- regn = op - DW_OP_breg0;
- offs += (Dwarf_Signed)loc->lr_number;
+ if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) {
+ regn = op->atom - DW_OP_breg0;
+ offs += op->number;
deref = 1;
- } else if (op >= DW_OP_reg0 && op <= DW_OP_reg31) {
- regn = op - DW_OP_reg0;
- } else if (op == DW_OP_bregx) {
- regn = loc->lr_number;
- offs += (Dwarf_Signed)loc->lr_number2;
+ } else if (op->atom >= DW_OP_reg0 && op->atom <= DW_OP_reg31) {
+ regn = op->atom - DW_OP_reg0;
+ } else if (op->atom == DW_OP_bregx) {
+ regn = op->number;
+ offs += op->number2;
deref = 1;
- } else if (op == DW_OP_regx) {
- regn = loc->lr_number;
+ } else if (op->atom == DW_OP_regx) {
+ regn = op->number;
} else
- die("Dwarf_OP %d is not supported.", op);
+ die("DW_OP %d is not supported.", op->atom);
regs = get_arch_regstr(regn);
if (!regs)
- die("%lld exceeds max register number.", regn);
+ die("%u exceeds max register number.", regn);
if (deref)
- ret = snprintf(pf->buf, pf->len,
- " %s=%+lld(%s)", pf->var, offs, regs);
+ ret = snprintf(pf->buf, pf->len, " %s=+%ju(%s)",
+ pf->var, (uintmax_t)offs, regs);
else
ret = snprintf(pf->buf, pf->len, " %s=%s", pf->var, regs);
DIE_IF(ret < 0);
@@ -443,52 +342,37 @@ static void show_location(Dwarf_Loc *loc, struct probe_finder *pf)
}
/* Show a variables in kprobe event format */
-static void show_variable(Dwarf_Die vr_die, struct probe_finder *pf)
+static void show_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
{
Dwarf_Attribute attr;
- Dwarf_Locdesc ld;
+ Dwarf_Op *expr;
+ size_t nexpr;
int ret;
- ret = dwarf_attr(vr_die, DW_AT_location, &attr, &__dw_error);
- if (ret != DW_DLV_OK)
+ if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL)
goto error;
- ret = attr_get_locdesc(attr, &ld, (pf->addr - pf->cu_base));
- if (ret != DW_DLV_OK)
+	/* TODO: handle more than one expression */
+ ret = dwarf_getlocation_addr(&attr, (pf->addr - pf->cu_base),
+ &expr, &nexpr, 1);
+ if (ret <= 0 || nexpr == 0)
goto error;
- /* TODO? */
- DIE_IF(ld.ld_cents != 1);
- show_location(&ld.ld_s[0], pf);
- free(ld.ld_s);
- dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR);
+
+ show_location(expr, pf);
+ /* *expr will be cached in libdw. Don't free it. */
return ;
error:
+ /* TODO: Support const_value */
die("Failed to find the location of %s at this address.\n"
" Perhaps, it has been optimized out.", pf->var);
}
-static int variable_callback(struct die_link *dlink, void *data)
-{
- struct probe_finder *pf = (struct probe_finder *)data;
- Dwarf_Half tag;
- int ret;
-
- ret = dwarf_tag(dlink->die, &tag, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- if ((tag == DW_TAG_formal_parameter ||
- tag == DW_TAG_variable) &&
- (die_compare_name(dlink->die, pf->var) == 0)) {
- show_variable(dlink->die, pf);
- return 1;
- }
- /* TODO: Support struct members and arrays */
- return 0;
-}
-
/* Find a variable in a subprogram die */
-static void find_variable(Dwarf_Die sp_die, struct probe_finder *pf)
+static void find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
{
int ret;
+ Dwarf_Die vr_die;
+ /* TODO: Support struct members and arrays */
if (!is_c_varname(pf->var)) {
/* Output raw parameters */
ret = snprintf(pf->buf, pf->len, " %s", pf->var);
@@ -499,58 +383,51 @@ static void find_variable(Dwarf_Die sp_die, struct probe_finder *pf)
pr_debug("Searching '%s' variable in context.\n", pf->var);
/* Search child die for local variables and parameters. */
- ret = search_die_from_children(sp_die, variable_callback, pf);
- if (!ret)
+ if (!die_find_variable(sp_die, pf->var, &vr_die))
die("Failed to find '%s' in this function.", pf->var);
-}
-
-/* Get a frame base on the address */
-static void get_current_frame_base(Dwarf_Die sp_die, struct probe_finder *pf)
-{
- Dwarf_Attribute attr;
- int ret;
- ret = dwarf_attr(sp_die, DW_AT_frame_base, &attr, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- ret = attr_get_locdesc(attr, &pf->fbloc, (pf->addr - pf->cu_base));
- DIE_IF(ret != DW_DLV_OK);
- dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR);
-}
-
-static void free_current_frame_base(struct probe_finder *pf)
-{
- free(pf->fbloc.ld_s);
- memset(&pf->fbloc, 0, sizeof(Dwarf_Locdesc));
+ show_variable(&vr_die, pf);
}
/* Show a probe point to output buffer */
-static void show_probepoint(Dwarf_Die sp_die, Dwarf_Signed offs,
- struct probe_finder *pf)
+static void show_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
{
struct probe_point *pp = pf->pp;
- char *name;
+ Dwarf_Addr eaddr;
+ Dwarf_Die die_mem;
+ const char *name;
char tmp[MAX_PROBE_BUFFER];
int ret, i, len;
+ Dwarf_Attribute fb_attr;
+ size_t nops;
+
+ /* If no real subprogram, find a real one */
+ if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) {
+ sp_die = die_get_real_subprogram(&pf->cu_die,
+ pf->addr, &die_mem);
+ if (!sp_die)
+ die("Probe point is not found in subprograms.");
+ }
/* Output name of probe point */
- ret = dwarf_diename(sp_die, &name, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- if (ret == DW_DLV_OK) {
- ret = snprintf(tmp, MAX_PROBE_BUFFER, "%s+%u", name,
- (unsigned int)offs);
+ name = dwarf_diename(sp_die);
+ if (name) {
+ dwarf_entrypc(sp_die, &eaddr);
+ ret = snprintf(tmp, MAX_PROBE_BUFFER, "%s+%lu", name,
+ (unsigned long)(pf->addr - eaddr));
/* Copy the function name if possible */
if (!pp->function) {
pp->function = strdup(name);
- pp->offset = offs;
+ pp->offset = (size_t)(pf->addr - eaddr);
}
- dwarf_dealloc(__dw_debug, name, DW_DLA_STRING);
} else {
/* This function has no name. */
- ret = snprintf(tmp, MAX_PROBE_BUFFER, "0x%llx", pf->addr);
+ ret = snprintf(tmp, MAX_PROBE_BUFFER, "0x%jx",
+ (uintmax_t)pf->addr);
if (!pp->function) {
/* TODO: Use _stext */
pp->function = strdup("");
- pp->offset = (int)pf->addr;
+ pp->offset = (size_t)pf->addr;
}
}
DIE_IF(ret < 0);
@@ -558,8 +435,15 @@ static void show_probepoint(Dwarf_Die sp_die, Dwarf_Signed offs,
len = ret;
pr_debug("Probe point found: %s\n", tmp);
+ /* Get the frame base attribute/ops */
+ dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr);
+ ret = dwarf_getlocation_addr(&fb_attr, (pf->addr - pf->cu_base),
+ &pf->fb_ops, &nops, 1);
+ if (ret <= 0 || nops == 0)
+ pf->fb_ops = NULL;
+
/* Find each argument */
- get_current_frame_base(sp_die, pf);
+ /* TODO: use dwarf_cfi_addrframe */
for (i = 0; i < pp->nr_args; i++) {
pf->var = pp->args[i];
pf->buf = &tmp[len];
@@ -567,289 +451,327 @@ static void show_probepoint(Dwarf_Die sp_die, Dwarf_Signed offs,
find_variable(sp_die, pf);
len += strlen(pf->buf);
}
- free_current_frame_base(pf);
+
+ /* *pf->fb_ops will be cached in libdw. Don't free it. */
+ pf->fb_ops = NULL;
pp->probes[pp->found] = strdup(tmp);
pp->found++;
}
-static int probeaddr_callback(struct die_link *dlink, void *data)
+/* Find probe point from its line number */
+static void find_probe_point_by_line(struct probe_finder *pf)
{
- struct probe_finder *pf = (struct probe_finder *)data;
- Dwarf_Half tag;
- Dwarf_Signed offs;
+ Dwarf_Lines *lines;
+ Dwarf_Line *line;
+ size_t nlines, i;
+ Dwarf_Addr addr;
+ int lineno;
int ret;
- ret = dwarf_tag(dlink->die, &tag, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- /* Check the address is in this subprogram */
- if (tag == DW_TAG_subprogram &&
- die_within_subprogram(dlink->die, pf->addr, &offs)) {
- show_probepoint(dlink->die, offs, pf);
- return 1;
+ ret = dwarf_getsrclines(&pf->cu_die, &lines, &nlines);
+ DIE_IF(ret != 0);
+
+ for (i = 0; i < nlines; i++) {
+ line = dwarf_onesrcline(lines, i);
+ dwarf_lineno(line, &lineno);
+ if (lineno != pf->lno)
+ continue;
+
+ /* TODO: Get fileno from line, but how? */
+ if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
+ continue;
+
+ ret = dwarf_lineaddr(line, &addr);
+ DIE_IF(ret != 0);
+ pr_debug("Probe line found: line[%d]:%d addr:0x%jx\n",
+ (int)i, lineno, (uintmax_t)addr);
+ pf->addr = addr;
+
+ show_probe_point(NULL, pf);
+ /* Continuing, because target line might be inlined. */
}
- return 0;
}
-/* Find probe point from its line number */
-static void find_probe_point_by_line(struct probe_finder *pf)
+/* Find lines which match lazy pattern */
+static int find_lazy_match_lines(struct list_head *head,
+ const char *fname, const char *pat)
{
- Dwarf_Signed cnt, i, clm;
- Dwarf_Line *lines;
- Dwarf_Unsigned lineno = 0;
+ char *fbuf, *p1, *p2;
+ int fd, line, nlines = 0;
+ struct stat st;
+
+ fd = open(fname, O_RDONLY);
+ if (fd < 0)
+ die("failed to open %s", fname);
+ DIE_IF(fstat(fd, &st) < 0);
+ fbuf = malloc(st.st_size + 2);
+ DIE_IF(fbuf == NULL);
+ DIE_IF(read(fd, fbuf, st.st_size) < 0);
+ close(fd);
+ fbuf[st.st_size] = '\n'; /* Dummy line */
+ fbuf[st.st_size + 1] = '\0';
+ p1 = fbuf;
+ line = 1;
+ while ((p2 = strchr(p1, '\n')) != NULL) {
+ *p2 = '\0';
+ if (strlazymatch(p1, pat)) {
+ line_list__add_line(head, line);
+ nlines++;
+ }
+ line++;
+ p1 = p2 + 1;
+ }
+ free(fbuf);
+ return nlines;
+}
+
+/* Find probe points from lazy pattern */
+static void find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
+{
+ Dwarf_Lines *lines;
+ Dwarf_Line *line;
+ size_t nlines, i;
Dwarf_Addr addr;
- Dwarf_Unsigned fno;
+ Dwarf_Die die_mem;
+ int lineno;
int ret;
- ret = dwarf_srclines(pf->cu_die, &lines, &cnt, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
+ if (list_empty(&pf->lcache)) {
+ /* Matching lazy line pattern */
+ ret = find_lazy_match_lines(&pf->lcache, pf->fname,
+ pf->pp->lazy_line);
+ if (ret <= 0)
+ die("No matched lines found in %s.", pf->fname);
+ }
+
+ ret = dwarf_getsrclines(&pf->cu_die, &lines, &nlines);
+ DIE_IF(ret != 0);
+ for (i = 0; i < nlines; i++) {
+ line = dwarf_onesrcline(lines, i);
- for (i = 0; i < cnt; i++) {
- ret = dwarf_line_srcfileno(lines[i], &fno, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- if (fno != pf->fno)
+ dwarf_lineno(line, &lineno);
+ if (!line_list__has_line(&pf->lcache, lineno))
continue;
- ret = dwarf_lineno(lines[i], &lineno, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- if (lineno != pf->lno)
+ /* TODO: Get fileno from line, but how? */
+ if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
continue;
- ret = dwarf_lineoff(lines[i], &clm, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
+ ret = dwarf_lineaddr(line, &addr);
+ DIE_IF(ret != 0);
+ if (sp_die) {
+ /* Address filtering 1: does sp_die include addr? */
+ if (!dwarf_haspc(sp_die, addr))
+ continue;
+ /* Address filtering 2: No child include addr? */
+ if (die_get_inlinefunc(sp_die, addr, &die_mem))
+ continue;
+ }
- ret = dwarf_lineaddr(lines[i], &addr, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- pr_debug("Probe line found: line[%d]:%u,%d addr:0x%llx\n",
- (int)i, (unsigned)lineno, (int)clm, addr);
+ pr_debug("Probe line found: line[%d]:%d addr:0x%llx\n",
+ (int)i, lineno, (unsigned long long)addr);
pf->addr = addr;
- /* Search a real subprogram including this line, */
- ret = search_die_from_children(pf->cu_die,
- probeaddr_callback, pf);
- if (ret == 0)
- die("Probe point is not found in subprograms.");
+
+ show_probe_point(sp_die, pf);
/* Continuing, because target line might be inlined. */
}
- dwarf_srclines_dealloc(__dw_debug, lines, cnt);
+ /* TODO: deallocate lines, but how? */
+}
+
+static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
+{
+ struct probe_finder *pf = (struct probe_finder *)data;
+ struct probe_point *pp = pf->pp;
+
+ if (pp->lazy_line)
+ find_probe_point_lazy(in_die, pf);
+ else {
+ /* Get probe address */
+ pf->addr = die_get_entrypc(in_die);
+ pf->addr += pp->offset;
+ pr_debug("found inline addr: 0x%jx\n",
+ (uintmax_t)pf->addr);
+
+ show_probe_point(in_die, pf);
+ }
+
+ return DWARF_CB_OK;
}
/* Search function from function name */
-static int probefunc_callback(struct die_link *dlink, void *data)
+static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
{
struct probe_finder *pf = (struct probe_finder *)data;
struct probe_point *pp = pf->pp;
- struct die_link *lk;
- Dwarf_Signed offs;
- Dwarf_Half tag;
- int ret;
- ret = dwarf_tag(dlink->die, &tag, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- if (tag == DW_TAG_subprogram) {
- if (die_compare_name(dlink->die, pp->function) == 0) {
- if (pp->line) { /* Function relative line */
- pf->fno = die_get_decl_file(dlink->die);
- pf->lno = die_get_decl_line(dlink->die)
- + pp->line;
- find_probe_point_by_line(pf);
- return 1;
- }
- if (die_inlined_subprogram(dlink->die)) {
- /* Inlined function, save it. */
- ret = dwarf_die_CU_offset(dlink->die,
- &pf->inl_offs,
- &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- pr_debug("inline definition offset %lld\n",
- pf->inl_offs);
- return 0; /* Continue to search */
- }
- /* Get probe address */
- pf->addr = die_get_entrypc(dlink->die);
+ /* Check tag and diename */
+ if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
+ die_compare_name(sp_die, pp->function) != 0)
+ return 0;
+
+ pf->fname = dwarf_decl_file(sp_die);
+ if (pp->line) { /* Function relative line */
+ dwarf_decl_line(sp_die, &pf->lno);
+ pf->lno += pp->line;
+ find_probe_point_by_line(pf);
+ } else if (!dwarf_func_inline(sp_die)) {
+ /* Real function */
+ if (pp->lazy_line)
+ find_probe_point_lazy(sp_die, pf);
+ else {
+ pf->addr = die_get_entrypc(sp_die);
pf->addr += pp->offset;
/* TODO: Check the address in this function */
- show_probepoint(dlink->die, pp->offset, pf);
- return 1; /* Exit; no same symbol in this CU. */
- }
- } else if (tag == DW_TAG_inlined_subroutine && pf->inl_offs) {
- if (die_get_abstract_origin(dlink->die) == pf->inl_offs) {
- /* Get probe address */
- pf->addr = die_get_entrypc(dlink->die);
- pf->addr += pp->offset;
- pr_debug("found inline addr: 0x%llx\n", pf->addr);
- /* Inlined function. Get a real subprogram */
- for (lk = dlink->parent; lk != NULL; lk = lk->parent) {
- tag = 0;
- dwarf_tag(lk->die, &tag, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- if (tag == DW_TAG_subprogram &&
- !die_inlined_subprogram(lk->die))
- goto found;
- }
- die("Failed to find real subprogram.");
-found:
- /* Get offset from subprogram */
- ret = die_within_subprogram(lk->die, pf->addr, &offs);
- DIE_IF(!ret);
- show_probepoint(lk->die, offs, pf);
- /* Continue to search */
+ show_probe_point(sp_die, pf);
}
- }
- return 0;
+ } else
+ /* Inlined function: search instances */
+ dwarf_func_inline_instances(sp_die, probe_point_inline_cb, pf);
+
+ return 1; /* Exit; no same symbol in this CU. */
}
static void find_probe_point_by_func(struct probe_finder *pf)
{
- search_die_from_children(pf->cu_die, probefunc_callback, pf);
+ dwarf_getfuncs(&pf->cu_die, probe_point_search_cb, pf, 0);
}
/* Find a probe point */
-int find_probepoint(int fd, struct probe_point *pp)
+int find_probe_point(int fd, struct probe_point *pp)
{
- Dwarf_Half addr_size = 0;
- Dwarf_Unsigned next_cuh = 0;
- int cu_number = 0, ret;
struct probe_finder pf = {.pp = pp};
+ int ret;
+ Dwarf_Off off, noff;
+ size_t cuhl;
+ Dwarf_Die *diep;
+ Dwarf *dbg;
- ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error);
- if (ret != DW_DLV_OK)
+ dbg = dwarf_begin(fd, DWARF_C_READ);
+ if (!dbg)
return -ENOENT;
pp->found = 0;
- while (++cu_number) {
- /* Search CU (Compilation Unit) */
- ret = dwarf_next_cu_header(__dw_debug, NULL, NULL, NULL,
- &addr_size, &next_cuh, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- if (ret == DW_DLV_NO_ENTRY)
- break;
-
+ off = 0;
+ line_list__init(&pf.lcache);
+ /* Loop on CUs (Compilation Unit) */
+ while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) {
/* Get the DIE(Debugging Information Entry) of this CU */
- ret = dwarf_siblingof(__dw_debug, 0, &pf.cu_die, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
+ diep = dwarf_offdie(dbg, off + cuhl, &pf.cu_die);
+ if (!diep)
+ continue;
/* Check if target file is included. */
if (pp->file)
- pf.fno = cu_find_fileno(pf.cu_die, pp->file);
+ pf.fname = cu_find_realpath(&pf.cu_die, pp->file);
+ else
+ pf.fname = NULL;
- if (!pp->file || pf.fno) {
+ if (!pp->file || pf.fname) {
/* Save CU base address (for frame_base) */
- ret = dwarf_lowpc(pf.cu_die, &pf.cu_base, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- if (ret == DW_DLV_NO_ENTRY)
+ ret = dwarf_lowpc(&pf.cu_die, &pf.cu_base);
+ if (ret != 0)
pf.cu_base = 0;
if (pp->function)
find_probe_point_by_func(&pf);
+ else if (pp->lazy_line)
+ find_probe_point_lazy(NULL, &pf);
else {
pf.lno = pp->line;
find_probe_point_by_line(&pf);
}
}
- dwarf_dealloc(__dw_debug, pf.cu_die, DW_DLA_DIE);
+ off = noff;
}
- ret = dwarf_finish(__dw_debug, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
+ line_list__free(&pf.lcache);
+ dwarf_end(dbg);
return pp->found;
}
-
-static void line_range_add_line(struct line_range *lr, unsigned int line)
-{
- struct line_node *ln;
- struct list_head *p;
-
- /* Reverse search, because new line will be the last one */
- list_for_each_entry_reverse(ln, &lr->line_list, list) {
- if (ln->line < line) {
- p = &ln->list;
- goto found;
- } else if (ln->line == line) /* Already exist */
- return ;
- }
- /* List is empty, or the smallest entry */
- p = &lr->line_list;
-found:
- pr_debug("Debug: add a line %u\n", line);
- ln = zalloc(sizeof(struct line_node));
- DIE_IF(ln == NULL);
- ln->line = line;
- INIT_LIST_HEAD(&ln->list);
- list_add(&ln->list, p);
-}
-
/* Find line range from its line number */
-static void find_line_range_by_line(struct line_finder *lf)
+static void find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
{
- Dwarf_Signed cnt, i;
- Dwarf_Line *lines;
- Dwarf_Unsigned lineno = 0;
- Dwarf_Unsigned fno;
+ Dwarf_Lines *lines;
+ Dwarf_Line *line;
+ size_t nlines, i;
Dwarf_Addr addr;
+ int lineno;
int ret;
+ const char *src;
+ Dwarf_Die die_mem;
- ret = dwarf_srclines(lf->cu_die, &lines, &cnt, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
+ line_list__init(&lf->lr->line_list);
+ ret = dwarf_getsrclines(&lf->cu_die, &lines, &nlines);
+ DIE_IF(ret != 0);
- for (i = 0; i < cnt; i++) {
- ret = dwarf_line_srcfileno(lines[i], &fno, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- if (fno != lf->fno)
- continue;
-
- ret = dwarf_lineno(lines[i], &lineno, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
+ for (i = 0; i < nlines; i++) {
+ line = dwarf_onesrcline(lines, i);
+ ret = dwarf_lineno(line, &lineno);
+ DIE_IF(ret != 0);
if (lf->lno_s > lineno || lf->lno_e < lineno)
continue;
- /* Filter line in the function address range */
- if (lf->addr_s && lf->addr_e) {
- ret = dwarf_lineaddr(lines[i], &addr, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
- if (lf->addr_s > addr || lf->addr_e <= addr)
+ if (sp_die) {
+ /* Address filtering 1: does sp_die include addr? */
+ ret = dwarf_lineaddr(line, &addr);
+ DIE_IF(ret != 0);
+ if (!dwarf_haspc(sp_die, addr))
+ continue;
+
+ /* Address filtering 2: No child include addr? */
+ if (die_get_inlinefunc(sp_die, addr, &die_mem))
continue;
}
- line_range_add_line(lf->lr, (unsigned int)lineno);
+
+ /* TODO: Get fileno from line, but how? */
+ src = dwarf_linesrc(line, NULL, NULL);
+ if (strtailcmp(src, lf->fname) != 0)
+ continue;
+
+ /* Copy real path */
+ if (!lf->lr->path)
+ lf->lr->path = strdup(src);
+ line_list__add_line(&lf->lr->line_list, (unsigned int)lineno);
}
- dwarf_srclines_dealloc(__dw_debug, lines, cnt);
+ /* Update status */
if (!list_empty(&lf->lr->line_list))
lf->found = 1;
+ else {
+ free(lf->lr->path);
+ lf->lr->path = NULL;
+ }
+}
+
+static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
+{
+ find_line_range_by_line(in_die, (struct line_finder *)data);
+ return DWARF_CB_ABORT; /* No need to find other instances */
}
/* Search function from function name */
-static int linefunc_callback(struct die_link *dlink, void *data)
+static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
{
struct line_finder *lf = (struct line_finder *)data;
struct line_range *lr = lf->lr;
- Dwarf_Half tag;
- int ret;
- ret = dwarf_tag(dlink->die, &tag, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- if (tag == DW_TAG_subprogram &&
- die_compare_name(dlink->die, lr->function) == 0) {
- /* Get the address range of this function */
- ret = dwarf_highpc(dlink->die, &lf->addr_e, &__dw_error);
- if (ret == DW_DLV_OK)
- ret = dwarf_lowpc(dlink->die, &lf->addr_s, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- if (ret == DW_DLV_NO_ENTRY) {
- lf->addr_s = 0;
- lf->addr_e = 0;
- }
-
- lf->fno = die_get_decl_file(dlink->die);
- lr->offset = die_get_decl_line(dlink->die);;
+ if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
+ die_compare_name(sp_die, lr->function) == 0) {
+ lf->fname = dwarf_decl_file(sp_die);
+ dwarf_decl_line(sp_die, &lr->offset);
+ pr_debug("fname: %s, lineno:%d\n", lf->fname, lr->offset);
lf->lno_s = lr->offset + lr->start;
if (!lr->end)
- lf->lno_e = (Dwarf_Unsigned)-1;
+ lf->lno_e = INT_MAX;
else
lf->lno_e = lr->offset + lr->end;
lr->start = lf->lno_s;
lr->end = lf->lno_e;
- find_line_range_by_line(lf);
- /* If we find a target function, this should be end. */
- lf->found = 1;
+ if (dwarf_func_inline(sp_die))
+ dwarf_func_inline_instances(sp_die,
+ line_range_inline_cb, lf);
+ else
+ find_line_range_by_line(sp_die, lf);
return 1;
}
return 0;
@@ -857,55 +779,55 @@ static int linefunc_callback(struct die_link *dlink, void *data)
static void find_line_range_by_func(struct line_finder *lf)
{
- search_die_from_children(lf->cu_die, linefunc_callback, lf);
+ dwarf_getfuncs(&lf->cu_die, line_range_search_cb, lf, 0);
}
int find_line_range(int fd, struct line_range *lr)
{
- Dwarf_Half addr_size = 0;
- Dwarf_Unsigned next_cuh = 0;
+ struct line_finder lf = {.lr = lr, .found = 0};
int ret;
- struct line_finder lf = {.lr = lr};
+ Dwarf_Off off = 0, noff;
+ size_t cuhl;
+ Dwarf_Die *diep;
+ Dwarf *dbg;
- ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error);
- if (ret != DW_DLV_OK)
+ dbg = dwarf_begin(fd, DWARF_C_READ);
+ if (!dbg)
return -ENOENT;
+ /* Loop on CUs (Compilation Unit) */
while (!lf.found) {
- /* Search CU (Compilation Unit) */
- ret = dwarf_next_cu_header(__dw_debug, NULL, NULL, NULL,
- &addr_size, &next_cuh, &__dw_error);
- DIE_IF(ret == DW_DLV_ERROR);
- if (ret == DW_DLV_NO_ENTRY)
+ ret = dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL);
+ if (ret != 0)
break;
/* Get the DIE(Debugging Information Entry) of this CU */
- ret = dwarf_siblingof(__dw_debug, 0, &lf.cu_die, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
+ diep = dwarf_offdie(dbg, off + cuhl, &lf.cu_die);
+ if (!diep)
+ continue;
/* Check if target file is included. */
if (lr->file)
- lf.fno = cu_find_fileno(lf.cu_die, lr->file);
+ lf.fname = cu_find_realpath(&lf.cu_die, lr->file);
+ else
+ lf.fname = 0;
- if (!lr->file || lf.fno) {
+ if (!lr->file || lf.fname) {
if (lr->function)
find_line_range_by_func(&lf);
else {
lf.lno_s = lr->start;
if (!lr->end)
- lf.lno_e = (Dwarf_Unsigned)-1;
+ lf.lno_e = INT_MAX;
else
lf.lno_e = lr->end;
- find_line_range_by_line(&lf);
+ find_line_range_by_line(NULL, &lf);
}
- /* Get the real file path */
- if (lf.found)
- cu_get_filename(lf.cu_die, lf.fno, &lr->path);
}
- dwarf_dealloc(__dw_debug, lf.cu_die, DW_DLA_DIE);
+ off = noff;
}
- ret = dwarf_finish(__dw_debug, &__dw_error);
- DIE_IF(ret != DW_DLV_OK);
+ pr_debug("path: %lx\n", (unsigned long)lr->path);
+ dwarf_end(dbg);
return lf.found;
}
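
Both find_probe_point() and find_line_range() now walk the compilation
units with the same libdw idiom; a minimal sketch of that loop, with error
handling elided:

	Dwarf_Off off = 0, noff;
	size_t cuhl;
	Dwarf_Die cu_die;

	while (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) == 0) {
		/* the CU DIE is located right behind the CU header */
		if (dwarf_offdie(dbg, off + cuhl, &cu_die) != NULL) {
			/* ... search this CU ... */
		}
		off = noff;
	}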
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 972b386..d1a6517 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -1,6 +1,7 @@
#ifndef _PROBE_FINDER_H
#define _PROBE_FINDER_H
+#include <stdbool.h>
#include "util.h"
#define MAX_PATH_LEN 256
@@ -20,6 +21,7 @@ struct probe_point {
/* Inputs */
char *file; /* File name */
int line; /* Line number */
+ char *lazy_line; /* Lazy line pattern */
char *function; /* Function name */
int offset; /* Offset bytes */
@@ -46,53 +48,46 @@ struct line_range {
char *function; /* Function name */
unsigned int start; /* Start line number */
unsigned int end; /* End line number */
- unsigned int offset; /* Start line offset */
+ int offset; /* Start line offset */
char *path; /* Real path name */
struct list_head line_list; /* Visible lines */
};
-#ifndef NO_LIBDWARF
-extern int find_probepoint(int fd, struct probe_point *pp);
+#ifndef NO_DWARF_SUPPORT
+extern int find_probe_point(int fd, struct probe_point *pp);
extern int find_line_range(int fd, struct line_range *lr);
-/* Workaround for undefined _MIPS_SZLONG bug in libdwarf.h: */
-#ifndef _MIPS_SZLONG
-# define _MIPS_SZLONG 0
-#endif
-
#include <dwarf.h>
-#include <libdwarf.h>
+#include <libdw.h>
struct probe_finder {
- struct probe_point *pp; /* Target probe point */
+ struct probe_point *pp; /* Target probe point */
/* For function searching */
- Dwarf_Addr addr; /* Address */
- Dwarf_Unsigned fno; /* File number */
- Dwarf_Unsigned lno; /* Line number */
- Dwarf_Off inl_offs; /* Inline offset */
- Dwarf_Die cu_die; /* Current CU */
+ Dwarf_Addr addr; /* Address */
+ const char *fname; /* File name */
+ int lno; /* Line number */
+ Dwarf_Die cu_die; /* Current CU */
/* For variable searching */
- Dwarf_Addr cu_base; /* Current CU base address */
- Dwarf_Locdesc fbloc; /* Location of Current Frame Base */
- const char *var; /* Current variable name */
- char *buf; /* Current output buffer */
- int len; /* Length of output buffer */
+ Dwarf_Op *fb_ops; /* Frame base attribute */
+ Dwarf_Addr cu_base; /* Current CU base address */
+ const char *var; /* Current variable name */
+ char *buf; /* Current output buffer */
+ int len; /* Length of output buffer */
+ struct list_head lcache; /* Line cache for lazy match */
};
struct line_finder {
- struct line_range *lr; /* Target line range */
-
- Dwarf_Unsigned fno; /* File number */
- Dwarf_Unsigned lno_s; /* Start line number */
- Dwarf_Unsigned lno_e; /* End line number */
- Dwarf_Addr addr_s; /* Start address */
- Dwarf_Addr addr_e; /* End address */
- Dwarf_Die cu_die; /* Current CU */
+ struct line_range *lr; /* Target line range */
+
+ const char *fname; /* File name */
+ int lno_s; /* Start line number */
+ int lno_e; /* End line number */
+ Dwarf_Die cu_die; /* Current CU */
int found;
};
-#endif /* NO_LIBDWARF */
+#endif /* NO_DWARF_SUPPORT */
#endif /*_PROBE_FINDER_H */
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index c397d4f..a175949 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -265,21 +265,21 @@ error:
return false;
}
-/**
- * strglobmatch - glob expression pattern matching
- * @str: the target string to match
- * @pat: the pattern string to match
- *
- * This returns true if the @str matches @pat. @pat can includes wildcards
- * ('*','?') and character classes ([CHARS], complementation and ranges are
- * also supported). Also, this supports escape character ('\') to use special
- * characters as normal character.
- *
- * Note: if @pat syntax is broken, this always returns false.
- */
-bool strglobmatch(const char *str, const char *pat)
+/* Glob/lazy pattern matching */
+static bool __match_glob(const char *str, const char *pat, bool ignore_space)
{
while (*str && *pat && *pat != '*') {
+ if (ignore_space) {
+ /* Ignore spaces for lazy matching */
+ if (isspace(*str)) {
+ str++;
+ continue;
+ }
+ if (isspace(*pat)) {
+ pat++;
+ continue;
+ }
+ }
if (*pat == '?') { /* Matches any single character */
str++;
pat++;
@@ -308,3 +308,32 @@ bool strglobmatch(const char *str, const char *pat)
return !*str && !*pat;
}
+/**
+ * strglobmatch - glob expression pattern matching
+ * @str: the target string to match
+ * @pat: the pattern string to match
+ *
+ * This returns true if the @str matches @pat. @pat can includes wildcards
+ * ('*','?') and character classes ([CHARS], complementation and ranges are
+ * also supported). Also, this supports escape character ('\') to use special
+ * characters as normal character.
+ *
+ * Note: if @pat syntax is broken, this always returns false.
+ */
+bool strglobmatch(const char *str, const char *pat)
+{
+ return __match_glob(str, pat, false);
+}
+
+/**
+ * strlazymatch - matching pattern strings lazily with glob pattern
+ * @str: the target string to match
+ * @pat: the pattern string to match
+ *
+ * This is similar to strglobmatch, except this ignores spaces in
+ * the target string.
+ */
+bool strlazymatch(const char *str, const char *pat)
+{
+ return __match_glob(str, pat, true);
+}
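
A few illustrative calls with made-up inputs show the difference between
the two matchers:

	strglobmatch("rq=cpu_rq(cpu)", "rq=cpu_rq*");	/* true */
	strglobmatch("rq = cpu_rq(cpu)", "rq=cpu_rq*");	/* false: spaces are significant */
	strlazymatch("rq = cpu_rq(cpu)", "rq=cpu_rq*");	/* true: spaces are skipped */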
diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h
index 02ede58..542e44d 100644
--- a/tools/perf/util/string.h
+++ b/tools/perf/util/string.h
@@ -10,6 +10,7 @@ s64 perf_atoll(const char *str);
char **argv_split(const char *str, int *argcp);
void argv_free(char **argv);
bool strglobmatch(const char *str, const char *pat);
+bool strlazymatch(const char *str, const char *pat);
#define _STR(x) #x
#define STR(x) _STR(x)
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index daece36..7f1178f 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -12,3 +12,6 @@ config HAVE_KVM_EVENTFD
config KVM_APIC_ARCHITECTURE
bool
+
+config KVM_MMIO
+ bool
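
KVM_MMIO is not user-visible; an architecture that wants the coalesced MMIO
code is expected to select it from its own KVM Kconfig entry, roughly like
this sketch (the prompt text is only illustrative):

	config KVM
		tristate "Kernel-based Virtual Machine (KVM) support"
		select KVM_MMIO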
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index f73de63..057e2cc 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -504,12 +504,12 @@ out:
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
struct kvm_assigned_pci_dev *assigned_dev)
{
- int r = 0;
+ int r = 0, idx;
struct kvm_assigned_dev_kernel *match;
struct pci_dev *dev;
mutex_lock(&kvm->lock);
- down_read(&kvm->slots_lock);
+ idx = srcu_read_lock(&kvm->srcu);
match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
assigned_dev->assigned_dev_id);
@@ -526,7 +526,8 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
r = -ENOMEM;
goto out;
}
- dev = pci_get_bus_and_slot(assigned_dev->busnr,
+ dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
+ assigned_dev->busnr,
assigned_dev->devfn);
if (!dev) {
printk(KERN_INFO "%s: host device not found\n", __func__);
@@ -548,6 +549,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
pci_reset_function(dev);
match->assigned_dev_id = assigned_dev->assigned_dev_id;
+ match->host_segnr = assigned_dev->segnr;
match->host_busnr = assigned_dev->busnr;
match->host_devfn = assigned_dev->devfn;
match->flags = assigned_dev->flags;
@@ -573,7 +575,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
}
out:
- up_read(&kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
mutex_unlock(&kvm->lock);
return r;
out_list_del:
@@ -585,7 +587,7 @@ out_put:
pci_dev_put(dev);
out_free:
kfree(match);
- up_read(&kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
mutex_unlock(&kvm->lock);
return r;
}
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 04d69cd..5169736 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -92,41 +92,64 @@ static const struct kvm_io_device_ops coalesced_mmio_ops = {
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
struct kvm_coalesced_mmio_dev *dev;
+ struct page *page;
int ret;
+ ret = -ENOMEM;
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ goto out_err;
+ kvm->coalesced_mmio_ring = page_address(page);
+
+ ret = -ENOMEM;
dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
if (!dev)
- return -ENOMEM;
+ goto out_free_page;
spin_lock_init(&dev->lock);
kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
dev->kvm = kvm;
kvm->coalesced_mmio_dev = dev;
- ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &dev->dev);
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+ mutex_unlock(&kvm->slots_lock);
if (ret < 0)
- kfree(dev);
+ goto out_free_dev;
+
+ return ret;
+out_free_dev:
+ kfree(dev);
+out_free_page:
+ __free_page(page);
+out_err:
return ret;
}
+void kvm_coalesced_mmio_free(struct kvm *kvm)
+{
+ if (kvm->coalesced_mmio_ring)
+ free_page((unsigned long)kvm->coalesced_mmio_ring);
+}
+
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
- struct kvm_coalesced_mmio_zone *zone)
+ struct kvm_coalesced_mmio_zone *zone)
{
struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
if (dev == NULL)
return -EINVAL;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return -ENOBUFS;
}
dev->zone[dev->nb_zones] = *zone;
dev->nb_zones++;
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return 0;
}
@@ -140,10 +163,10 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
if (dev == NULL)
return -EINVAL;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
i = dev->nb_zones;
- while(i) {
+ while (i) {
z = &dev->zone[i - 1];
/* unregister all zones
@@ -158,7 +181,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
i--;
}
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return 0;
}
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index 4b49f27..8a5959e 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -1,3 +1,6 @@
+#ifndef __KVM_COALESCED_MMIO_H__
+#define __KVM_COALESCED_MMIO_H__
+
/*
* KVM coalesced MMIO
*
@@ -7,6 +10,8 @@
*
*/
+#ifdef CONFIG_KVM_MMIO
+
#define KVM_COALESCED_MMIO_ZONE_MAX 100
struct kvm_coalesced_mmio_dev {
@@ -18,7 +23,17 @@ struct kvm_coalesced_mmio_dev {
};
int kvm_coalesced_mmio_init(struct kvm *kvm);
+void kvm_coalesced_mmio_free(struct kvm *kvm);
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone);
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone);
+
+#else
+
+static inline int kvm_coalesced_mmio_init(struct kvm *kvm) { return 0; }
+static inline void kvm_coalesced_mmio_free(struct kvm *kvm) { }
+
+#endif
+
+#endif
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index a9d3fc6..7016319 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -47,7 +47,6 @@ struct _irqfd {
int gsi;
struct list_head list;
poll_table pt;
- wait_queue_head_t *wqh;
wait_queue_t wait;
struct work_struct inject;
struct work_struct shutdown;
@@ -159,8 +158,6 @@ irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
-
- irqfd->wqh = wqh;
add_wait_queue(wqh, &irqfd->wait);
}
@@ -463,7 +460,7 @@ static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
- struct kvm_io_bus *bus = pio ? &kvm->pio_bus : &kvm->mmio_bus;
+ enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
struct _ioeventfd *p;
struct eventfd_ctx *eventfd;
int ret;
@@ -508,7 +505,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
else
p->wildcard = true;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
/* Verify that there isnt a match already */
if (ioeventfd_check_collision(kvm, p)) {
@@ -518,18 +515,18 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
kvm_iodevice_init(&p->dev, &ioeventfd_ops);
- ret = __kvm_io_bus_register_dev(bus, &p->dev);
+ ret = kvm_io_bus_register_dev(kvm, bus_idx, &p->dev);
if (ret < 0)
goto unlock_fail;
list_add_tail(&p->list, &kvm->ioeventfds);
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return 0;
unlock_fail:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
fail:
kfree(p);
@@ -542,7 +539,7 @@ static int
kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
- struct kvm_io_bus *bus = pio ? &kvm->pio_bus : &kvm->mmio_bus;
+ enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
struct _ioeventfd *p, *tmp;
struct eventfd_ctx *eventfd;
int ret = -ENOENT;
@@ -551,7 +548,7 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);
@@ -565,13 +562,13 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
if (!p->wildcard && p->datamatch != args->datamatch)
continue;
- __kvm_io_bus_unregister_dev(bus, &p->dev);
+ kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
ioeventfd_release(p);
ret = 0;
break;
}
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
eventfd_ctx_put(eventfd);
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 38a2d20..3db15a8 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -100,6 +100,19 @@ static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
return injected;
}
+static void update_handled_vectors(struct kvm_ioapic *ioapic)
+{
+ DECLARE_BITMAP(handled_vectors, 256);
+ int i;
+
+ memset(handled_vectors, 0, sizeof(handled_vectors));
+ for (i = 0; i < IOAPIC_NUM_PINS; ++i)
+ __set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
+ memcpy(ioapic->handled_vectors, handled_vectors,
+ sizeof(handled_vectors));
+ smp_wmb();
+}
+
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
unsigned index;
@@ -134,6 +147,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
e->bits |= (u32) val;
e->fields.remote_irr = 0;
}
+ update_handled_vectors(ioapic);
mask_after = e->fields.mask;
if (mask_before != mask_after)
kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
@@ -241,6 +255,9 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+ smp_rmb();
+ if (!test_bit(vector, ioapic->handled_vectors))
+ return;
mutex_lock(&ioapic->lock);
__kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
mutex_unlock(&ioapic->lock);
@@ -352,6 +369,7 @@ void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
ioapic->ioregsel = 0;
ioapic->irr = 0;
ioapic->id = 0;
+ update_handled_vectors(ioapic);
}
static const struct kvm_io_device_ops ioapic_mmio_ops = {
@@ -372,13 +390,28 @@ int kvm_ioapic_init(struct kvm *kvm)
kvm_ioapic_reset(ioapic);
kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
ioapic->kvm = kvm;
- ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &ioapic->dev);
- if (ret < 0)
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
+ mutex_unlock(&kvm->slots_lock);
+ if (ret < 0) {
+ kvm->arch.vioapic = NULL;
kfree(ioapic);
+ }
return ret;
}
+void kvm_ioapic_destroy(struct kvm *kvm)
+{
+ struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+
+ if (ioapic) {
+ kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
+ kvm->arch.vioapic = NULL;
+ kfree(ioapic);
+ }
+}
+
int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
@@ -399,6 +432,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
mutex_lock(&ioapic->lock);
memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
+ update_handled_vectors(ioapic);
mutex_unlock(&ioapic->lock);
return 0;
}
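
The handled_vectors bitmap gives kvm_ioapic_update_eoi() a lockless fast path: every writer rebuilds the bitmap in a local copy under ioapic->lock, copies it into place, and issues smp_wmb(); the EOI path pairs that with smp_rmb() before test_bit(), so an EOI for a vector the IOAPIC never programmed returns without touching the mutex. The pairing, condensed from the hunks above:

/* writer, under ioapic->lock */
memcpy(ioapic->handled_vectors, handled_vectors, sizeof(handled_vectors));
smp_wmb();				/* publish before readers test bits */

/* lockless reader */
smp_rmb();				/* pairs with the writer's smp_wmb() */
if (!test_bit(vector, ioapic->handled_vectors))
	return;				/* fast-path exit, no lock taken */
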
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 419c43b..8a751b7 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -46,6 +46,7 @@ struct kvm_ioapic {
struct kvm *kvm;
void (*ack_notifier)(void *opaque, int irq);
struct mutex lock;
+ DECLARE_BITMAP(handled_vectors, 256);
};
#ifdef DEBUG
@@ -71,6 +72,7 @@ int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
int kvm_ioapic_init(struct kvm *kvm);
+void kvm_ioapic_destroy(struct kvm *kvm);
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 1514758..80fd3ad 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -32,10 +32,10 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
gfn_t base_gfn, unsigned long npages);
-int kvm_iommu_map_pages(struct kvm *kvm,
- gfn_t base_gfn, unsigned long npages)
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
- gfn_t gfn = base_gfn;
+ gfn_t gfn = slot->base_gfn;
+ unsigned long npages = slot->npages;
pfn_t pfn;
int i, r = 0;
struct iommu_domain *domain = kvm->arch.iommu_domain;
@@ -54,7 +54,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
continue;
- pfn = gfn_to_pfn(kvm, gfn);
+ pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
r = iommu_map_range(domain,
gfn_to_gpa(gfn),
pfn_to_hpa(pfn),
@@ -69,17 +69,19 @@ int kvm_iommu_map_pages(struct kvm *kvm,
return 0;
unmap_pages:
- kvm_iommu_put_pages(kvm, base_gfn, i);
+ kvm_iommu_put_pages(kvm, slot->base_gfn, i);
return r;
}
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
int i, r = 0;
+ struct kvm_memslots *slots;
+
+ slots = rcu_dereference(kvm->memslots);
- for (i = 0; i < kvm->nmemslots; i++) {
- r = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn,
- kvm->memslots[i].npages);
+ for (i = 0; i < slots->nmemslots; i++) {
+ r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
if (r)
break;
}
@@ -104,7 +106,8 @@ int kvm_assign_device(struct kvm *kvm,
r = iommu_attach_device(domain, &pdev->dev);
if (r) {
- printk(KERN_ERR "assign device %x:%x.%x failed",
+ printk(KERN_ERR "assign device %x:%x:%x.%x failed",
+ pci_domain_nr(pdev->bus),
pdev->bus->number,
PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn));
@@ -125,7 +128,8 @@ int kvm_assign_device(struct kvm *kvm,
goto out_unmap;
}
- printk(KERN_DEBUG "assign device: host bdf = %x:%x:%x\n",
+ printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
+ assigned_dev->host_segnr,
assigned_dev->host_busnr,
PCI_SLOT(assigned_dev->host_devfn),
PCI_FUNC(assigned_dev->host_devfn));
@@ -152,7 +156,8 @@ int kvm_deassign_device(struct kvm *kvm,
iommu_detach_device(domain, &pdev->dev);
- printk(KERN_DEBUG "deassign device: host bdf = %x:%x:%x\n",
+ printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
+ assigned_dev->host_segnr,
assigned_dev->host_busnr,
PCI_SLOT(assigned_dev->host_devfn),
PCI_FUNC(assigned_dev->host_devfn));
@@ -210,10 +215,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
int i;
+ struct kvm_memslots *slots;
+
+ slots = rcu_dereference(kvm->memslots);
- for (i = 0; i < kvm->nmemslots; i++) {
- kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn,
- kvm->memslots[i].npages);
+ for (i = 0; i < slots->nmemslots; i++) {
+ kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
+ slots->memslots[i].npages);
}
return 0;
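
Both IOMMU walkers now snapshot the RCU-published slot array instead of reading kvm->nmemslots off the kvm struct directly. A sketch of the reader idiom, assuming the caller already sits inside an SRCU read-side section (or holds slots_lock):

struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
int i;

for (i = 0; i < slots->nmemslots; i++) {
	struct kvm_memory_slot *slot = &slots->memslots[i];
	/* operate on slot; the array stays live until the next
	 * synchronize_srcu_expedited() after it is unpublished */
}
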
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a944be3..548f925 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -44,6 +44,8 @@
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
+#include <linux/srcu.h>
+#include <linux/hugetlb.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -51,9 +53,7 @@
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
-#endif
#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>
@@ -86,6 +86,8 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
static int hardware_enable_all(void);
static void hardware_disable_all(void);
+static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+
static bool kvm_rebooting;
static bool largepages_enabled = true;
@@ -136,7 +138,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
zalloc_cpumask_var(&cpus, GFP_ATOMIC);
- spin_lock(&kvm->requests_lock);
+ raw_spin_lock(&kvm->requests_lock);
me = smp_processor_id();
kvm_for_each_vcpu(i, vcpu, kvm) {
if (test_and_set_bit(req, &vcpu->requests))
@@ -151,7 +153,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
smp_call_function_many(cpus, ack_flush, NULL, 1);
else
called = false;
- spin_unlock(&kvm->requests_lock);
+ raw_spin_unlock(&kvm->requests_lock);
free_cpumask_var(cpus);
return called;
}
@@ -215,7 +217,7 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
unsigned long address)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
- int need_tlb_flush;
+ int need_tlb_flush, idx;
/*
* When ->invalidate_page runs, the linux pte has been zapped
@@ -235,10 +237,12 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
* pte after kvm_unmap_hva returned, without noticing the page
* is going to be freed.
*/
+ idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
kvm->mmu_notifier_seq++;
need_tlb_flush = kvm_unmap_hva(kvm, address);
spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
/* we have to flush the TLB before the pages can be freed */
if (need_tlb_flush)
@@ -252,11 +256,14 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
pte_t pte)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int idx;
+ idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
kvm->mmu_notifier_seq++;
kvm_set_spte_hva(kvm, address, pte);
spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
}
static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
@@ -265,8 +272,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
unsigned long end)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
- int need_tlb_flush = 0;
+ int need_tlb_flush = 0, idx;
+ idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
/*
* The count increase must become visible at unlock time as no
@@ -277,6 +285,7 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
for (; start < end; start += PAGE_SIZE)
need_tlb_flush |= kvm_unmap_hva(kvm, start);
spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
/* we have to flush the TLB before the pages can be freed */
if (need_tlb_flush)
@@ -314,11 +323,13 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
unsigned long address)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
- int young;
+ int young, idx;
+ idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
young = kvm_age_hva(kvm, address);
spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
if (young)
kvm_flush_remote_tlbs(kvm);
@@ -341,15 +352,26 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
.change_pte = kvm_mmu_notifier_change_pte,
.release = kvm_mmu_notifier_release,
};
+
+static int kvm_init_mmu_notifier(struct kvm *kvm)
+{
+ kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
+ return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
+}
+
+#else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
+
+static int kvm_init_mmu_notifier(struct kvm *kvm)
+{
+ return 0;
+}
+
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
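
Each MMU-notifier callback above now brackets its work in an SRCU read-side critical section, so memslot teardown (which calls synchronize_srcu_expedited()) waits for any callback still dereferencing the old slot array. The idiom common to all of them:

int idx;

idx = srcu_read_lock(&kvm->srcu);	/* pin the current memslots */
spin_lock(&kvm->mmu_lock);
/* ... rcu_dereference(kvm->memslots) is safe in here ... */
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);	/* let grace periods complete */
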
static struct kvm *kvm_create_vm(void)
{
- int r = 0;
+ int r = 0, i;
struct kvm *kvm = kvm_arch_create_vm();
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- struct page *page;
-#endif
if (IS_ERR(kvm))
goto out;
@@ -363,39 +385,35 @@ static struct kvm *kvm_create_vm(void)
INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page) {
- r = -ENOMEM;
+ r = -ENOMEM;
+ kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!kvm->memslots)
goto out_err;
- }
- kvm->coalesced_mmio_ring =
- (struct kvm_coalesced_mmio_ring *)page_address(page);
-#endif
-
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
- {
- kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
- r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
- if (r) {
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- put_page(page);
-#endif
+ if (init_srcu_struct(&kvm->srcu))
+ goto out_err;
+ for (i = 0; i < KVM_NR_BUSES; i++) {
+ kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
+ GFP_KERNEL);
+ if (!kvm->buses[i]) {
+ cleanup_srcu_struct(&kvm->srcu);
goto out_err;
}
}
-#endif
+
+ r = kvm_init_mmu_notifier(kvm);
+ if (r) {
+ cleanup_srcu_struct(&kvm->srcu);
+ goto out_err;
+ }
kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count);
spin_lock_init(&kvm->mmu_lock);
- spin_lock_init(&kvm->requests_lock);
- kvm_io_bus_init(&kvm->pio_bus);
+ raw_spin_lock_init(&kvm->requests_lock);
kvm_eventfd_init(kvm);
mutex_init(&kvm->lock);
mutex_init(&kvm->irq_lock);
- kvm_io_bus_init(&kvm->mmio_bus);
- init_rwsem(&kvm->slots_lock);
+ mutex_init(&kvm->slots_lock);
atomic_set(&kvm->users_count, 1);
spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
@@ -406,12 +424,12 @@ static struct kvm *kvm_create_vm(void)
out:
return kvm;
-#if defined(KVM_COALESCED_MMIO_PAGE_OFFSET) || \
- (defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER))
out_err:
hardware_disable_all();
-#endif
out_err_nodisable:
+ for (i = 0; i < KVM_NR_BUSES; i++)
+ kfree(kvm->buses[i]);
+ kfree(kvm->memslots);
kfree(kvm);
return ERR_PTR(r);
}
@@ -446,13 +464,17 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
void kvm_free_physmem(struct kvm *kvm)
{
int i;
+ struct kvm_memslots *slots = kvm->memslots;
+
+ for (i = 0; i < slots->nmemslots; ++i)
+ kvm_free_physmem_slot(&slots->memslots[i], NULL);
- for (i = 0; i < kvm->nmemslots; ++i)
- kvm_free_physmem_slot(&kvm->memslots[i], NULL);
+ kfree(kvm->memslots);
}
static void kvm_destroy_vm(struct kvm *kvm)
{
+ int i;
struct mm_struct *mm = kvm->mm;
kvm_arch_sync_events(kvm);
@@ -460,12 +482,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
list_del(&kvm->vm_list);
spin_unlock(&kvm_lock);
kvm_free_irq_routing(kvm);
- kvm_io_bus_destroy(&kvm->pio_bus);
- kvm_io_bus_destroy(&kvm->mmio_bus);
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- if (kvm->coalesced_mmio_ring != NULL)
- free_page((unsigned long)kvm->coalesced_mmio_ring);
-#endif
+ for (i = 0; i < KVM_NR_BUSES; i++)
+ kvm_io_bus_destroy(kvm->buses[i]);
+ kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
@@ -512,12 +531,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
- int r;
+ int r, flush_shadow = 0;
gfn_t base_gfn;
unsigned long npages;
unsigned long i;
struct kvm_memory_slot *memslot;
struct kvm_memory_slot old, new;
+ struct kvm_memslots *slots, *old_memslots;
r = -EINVAL;
/* General sanity checks */
@@ -532,7 +552,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
- memslot = &kvm->memslots[mem->slot];
+ memslot = &kvm->memslots->memslots[mem->slot];
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
@@ -553,7 +573,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
/* Check for overlaps */
r = -EEXIST;
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *s = &kvm->memslots[i];
+ struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
if (s == memslot || !s->npages)
continue;
@@ -579,15 +599,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
memset(new.rmap, 0, npages * sizeof(*new.rmap));
new.user_alloc = user_alloc;
- /*
- * hva_to_rmmap() serialzies with the mmu_lock and to be
- * safe it has to ignore memslots with !user_alloc &&
- * !userspace_addr.
- */
- if (user_alloc)
- new.userspace_addr = mem->userspace_addr;
- else
- new.userspace_addr = 0;
+ new.userspace_addr = mem->userspace_addr;
}
if (!npages)
goto skip_lpage;
@@ -642,8 +654,9 @@ skip_lpage:
if (!new.dirty_bitmap)
goto out_free;
memset(new.dirty_bitmap, 0, dirty_bytes);
+ /* destroy any largepage mappings for dirty tracking */
if (old.npages)
- kvm_arch_flush_shadow(kvm);
+ flush_shadow = 1;
}
#else /* not defined CONFIG_S390 */
new.user_alloc = user_alloc;
@@ -651,36 +664,72 @@ skip_lpage:
new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */
- if (!npages)
+ if (!npages) {
+ r = -ENOMEM;
+ slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!slots)
+ goto out_free;
+ memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+ if (mem->slot >= slots->nmemslots)
+ slots->nmemslots = mem->slot + 1;
+ slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
+
+ old_memslots = kvm->memslots;
+ rcu_assign_pointer(kvm->memslots, slots);
+ synchronize_srcu_expedited(&kvm->srcu);
+ /* From this point no new shadow pages pointing to a deleted
+ * memslot will be created.
+ *
+ * validation of sp->gfn happens in:
+ * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
+ * - kvm_is_visible_gfn (mmu_check_roots)
+ */
kvm_arch_flush_shadow(kvm);
+ kfree(old_memslots);
+ }
- spin_lock(&kvm->mmu_lock);
- if (mem->slot >= kvm->nmemslots)
- kvm->nmemslots = mem->slot + 1;
-
- *memslot = new;
- spin_unlock(&kvm->mmu_lock);
-
- r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
- if (r) {
- spin_lock(&kvm->mmu_lock);
- *memslot = old;
- spin_unlock(&kvm->mmu_lock);
+ r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
+ if (r)
goto out_free;
- }
- kvm_free_physmem_slot(&old, npages ? &new : NULL);
- /* Slot deletion case: we have to update the current slot */
- spin_lock(&kvm->mmu_lock);
- if (!npages)
- *memslot = old;
- spin_unlock(&kvm->mmu_lock);
#ifdef CONFIG_DMAR
/* map the pages in iommu page table */
- r = kvm_iommu_map_pages(kvm, base_gfn, npages);
- if (r)
- goto out;
+ if (npages) {
+ r = kvm_iommu_map_pages(kvm, &new);
+ if (r)
+ goto out_free;
+ }
#endif
+
+ r = -ENOMEM;
+ slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!slots)
+ goto out_free;
+ memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+ if (mem->slot >= slots->nmemslots)
+ slots->nmemslots = mem->slot + 1;
+
+ /* actual memory is freed via old in kvm_free_physmem_slot below */
+ if (!npages) {
+ new.rmap = NULL;
+ new.dirty_bitmap = NULL;
+ for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
+ new.lpage_info[i] = NULL;
+ }
+
+ slots->memslots[mem->slot] = new;
+ old_memslots = kvm->memslots;
+ rcu_assign_pointer(kvm->memslots, slots);
+ synchronize_srcu_expedited(&kvm->srcu);
+
+ kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
+
+ kvm_free_physmem_slot(&old, &new);
+ kfree(old_memslots);
+
+ if (flush_shadow)
+ kvm_arch_flush_shadow(kvm);
+
return 0;
out_free:
@@ -697,9 +746,9 @@ int kvm_set_memory_region(struct kvm *kvm,
{
int r;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
r = __kvm_set_memory_region(kvm, mem, user_alloc);
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
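
Slot deletion in __kvm_set_memory_region() is now two RCU publishes rather than an in-place update under mmu_lock. A condensed sketch of the sequence (each step allocates its own copy of the array; error handling omitted):

/* step 1: publish a copy with the slot marked invalid */
slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);	/* all readers now see INVALID */
kvm_arch_flush_shadow(kvm);		/* no new shadow pages can appear */
kfree(old_memslots);

/* step 2: publish the final array and reclaim the intermediate one */
slots->memslots[mem->slot] = new;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
kfree(old_memslots);
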
@@ -726,7 +775,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_MEMORY_SLOTS)
goto out;
- memslot = &kvm->memslots[log->slot];
+ memslot = &kvm->memslots->memslots[log->slot];
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
@@ -780,9 +829,10 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
int i;
+ struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
- for (i = 0; i < kvm->nmemslots; ++i) {
- struct kvm_memory_slot *memslot = &kvm->memslots[i];
+ for (i = 0; i < slots->nmemslots; ++i) {
+ struct kvm_memory_slot *memslot = &slots->memslots[i];
if (gfn >= memslot->base_gfn
&& gfn < memslot->base_gfn + memslot->npages)
@@ -801,10 +851,14 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
int i;
+ struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
- gfn = unalias_gfn(kvm, gfn);
+ gfn = unalias_gfn_instantiation(kvm, gfn);
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *memslot = &kvm->memslots[i];
+ struct kvm_memory_slot *memslot = &slots->memslots[i];
+
+ if (memslot->flags & KVM_MEMSLOT_INVALID)
+ continue;
if (gfn >= memslot->base_gfn
&& gfn < memslot->base_gfn + memslot->npages)
@@ -814,33 +868,68 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
+unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
+{
+ struct vm_area_struct *vma;
+ unsigned long addr, size;
+
+ size = PAGE_SIZE;
+
+ addr = gfn_to_hva(kvm, gfn);
+ if (kvm_is_error_hva(addr))
+ return PAGE_SIZE;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, addr);
+ if (!vma)
+ goto out;
+
+ size = vma_kernel_pagesize(vma);
+
+out:
+ up_read(&current->mm->mmap_sem);
+
+ return size;
+}
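
kvm_host_page_size() reports the backing page size of the host VMA mapping a gfn, falling back to PAGE_SIZE for a bad hva or a missing VMA; mmap_sem is taken for reading because find_vma() walks the VMA tree. A hypothetical caller using it to decide whether a large guest mapping is safely backed:

static bool host_backs_large_page(struct kvm *kvm, gfn_t gfn)
{
	/* hypothetical helper: true if the host VMA uses >= 2 MiB pages */
	return kvm_host_page_size(kvm, gfn) >= (2UL << 20);
}
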
+
+int memslot_id(struct kvm *kvm, gfn_t gfn)
+{
+ int i;
+ struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
+ struct kvm_memory_slot *memslot = NULL;
+
+ gfn = unalias_gfn(kvm, gfn);
+ for (i = 0; i < slots->nmemslots; ++i) {
+ memslot = &slots->memslots[i];
+
+ if (gfn >= memslot->base_gfn
+ && gfn < memslot->base_gfn + memslot->npages)
+ break;
+ }
+
+ return memslot - slots->memslots;
+}
+
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *slot;
- gfn = unalias_gfn(kvm, gfn);
+ gfn = unalias_gfn_instantiation(kvm, gfn);
slot = gfn_to_memslot_unaliased(kvm, gfn);
- if (!slot)
+ if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
return bad_hva();
return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
-pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
{
struct page *page[1];
- unsigned long addr;
int npages;
pfn_t pfn;
might_sleep();
- addr = gfn_to_hva(kvm, gfn);
- if (kvm_is_error_hva(addr)) {
- get_page(bad_page);
- return page_to_pfn(bad_page);
- }
-
npages = get_user_pages_fast(addr, 1, 1, page);
if (unlikely(npages != 1)) {
@@ -865,8 +954,32 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
return pfn;
}
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+{
+ unsigned long addr;
+
+ addr = gfn_to_hva(kvm, gfn);
+ if (kvm_is_error_hva(addr)) {
+ get_page(bad_page);
+ return page_to_pfn(bad_page);
+ }
+
+ return hva_to_pfn(kvm, addr);
+}
EXPORT_SYMBOL_GPL(gfn_to_pfn);
+static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+ return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
+}
+
+pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn)
+{
+ unsigned long addr = gfn_to_hva_memslot(slot, gfn);
+ return hva_to_pfn(kvm, addr);
+}
+
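
Splitting hva_to_pfn() out lets callers that already hold a slot pointer, such as kvm_iommu_map_pages() above, skip the gfn-to-slot lookup and the KVM_MEMSLOT_INVALID check in gfn_to_hva(). A usage sketch mirroring the IOMMU path:

/* sketch: map every page of a known-valid slot, as the IOMMU path does */
for (gfn = slot->base_gfn; gfn < slot->base_gfn + slot->npages; gfn++) {
	pfn_t pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
	/* ... hand pfn_to_hpa(pfn) to the mapping layer ... */
}
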
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
pfn_t pfn;
@@ -1854,12 +1967,7 @@ static struct notifier_block kvm_reboot_notifier = {
.priority = 0,
};
-void kvm_io_bus_init(struct kvm_io_bus *bus)
-{
- memset(bus, 0, sizeof(*bus));
-}
-
-void kvm_io_bus_destroy(struct kvm_io_bus *bus)
+static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
int i;
@@ -1868,13 +1976,15 @@ void kvm_io_bus_destroy(struct kvm_io_bus *bus)
kvm_iodevice_destructor(pos);
}
+ kfree(bus);
}
/* kvm_io_bus_write - called under kvm->slots_lock */
-int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
+int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val)
{
int i;
+ struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
for (i = 0; i < bus->dev_count; i++)
if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
return 0;
@@ -1882,59 +1992,71 @@ int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
}
/* kvm_io_bus_read - called under kvm->slots_lock */
-int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
+int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ int len, void *val)
{
int i;
+ struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
+
for (i = 0; i < bus->dev_count; i++)
if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
return 0;
return -EOPNOTSUPP;
}
-int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
- struct kvm_io_device *dev)
+/* Caller must hold slots_lock. */
+int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev)
{
- int ret;
-
- down_write(&kvm->slots_lock);
- ret = __kvm_io_bus_register_dev(bus, dev);
- up_write(&kvm->slots_lock);
+ struct kvm_io_bus *new_bus, *bus;
- return ret;
-}
-
-/* An unlocked version. Caller must have write lock on slots_lock. */
-int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
- struct kvm_io_device *dev)
-{
+ bus = kvm->buses[bus_idx];
if (bus->dev_count > NR_IOBUS_DEVS-1)
return -ENOSPC;
- bus->devs[bus->dev_count++] = dev;
+ new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+ if (!new_bus)
+ return -ENOMEM;
+ memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
+ new_bus->devs[new_bus->dev_count++] = dev;
+ rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+ synchronize_srcu_expedited(&kvm->srcu);
+ kfree(bus);
return 0;
}
-void kvm_io_bus_unregister_dev(struct kvm *kvm,
- struct kvm_io_bus *bus,
- struct kvm_io_device *dev)
+/* Caller must hold slots_lock. */
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev)
{
- down_write(&kvm->slots_lock);
- __kvm_io_bus_unregister_dev(bus, dev);
- up_write(&kvm->slots_lock);
-}
+ int i, r;
+ struct kvm_io_bus *new_bus, *bus;
-/* An unlocked version. Caller must have write lock on slots_lock. */
-void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
- struct kvm_io_device *dev)
-{
- int i;
+ new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+ if (!new_bus)
+ return -ENOMEM;
- for (i = 0; i < bus->dev_count; i++)
- if (bus->devs[i] == dev) {
- bus->devs[i] = bus->devs[--bus->dev_count];
+ bus = kvm->buses[bus_idx];
+ memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
+
+ r = -ENOENT;
+ for (i = 0; i < new_bus->dev_count; i++)
+ if (new_bus->devs[i] == dev) {
+ r = 0;
+ new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
break;
}
+
+ if (r) {
+ kfree(new_bus);
+ return r;
+ }
+
+ rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+ synchronize_srcu_expedited(&kvm->srcu);
+ kfree(bus);
+ return r;
}
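
register/unregister never mutate the live bus: they build a complete replacement, publish it with rcu_assign_pointer(), wait out a grace period, and only then free the old array, so kvm_io_bus_write()/kvm_io_bus_read() can walk devs[] with nothing more than rcu_dereference(). The write side boils down to:

/* copy-update a published array: never touch the live copy */
new_bus = kzalloc(sizeof(*new_bus), GFP_KERNEL);
if (!new_bus)
	return -ENOMEM;
memcpy(new_bus, bus, sizeof(*bus));
/* ... edit new_bus->devs[] and new_bus->dev_count ... */
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);	/* drain lockless readers */
kfree(bus);				/* old copy is now unreachable */
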
static struct notifier_block kvm_cpu_notifier = {