Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 6
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/acpi/Kconfig | 40
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/asus_acpi.c | 55
-rw-r--r--  drivers/acpi/battery.c | 5
-rw-r--r--  drivers/acpi/bay.c | 10
-rw-r--r--  drivers/acpi/blacklist.c | 58
-rw-r--r--  drivers/acpi/bus.c | 26
-rw-r--r--  drivers/acpi/debug.c | 57
-rw-r--r--  drivers/acpi/dispatcher/dsopcode.c | 4
-rw-r--r--  drivers/acpi/dock.c | 14
-rw-r--r--  drivers/acpi/ec.c | 19
-rw-r--r--  drivers/acpi/event.c | 28
-rw-r--r--  drivers/acpi/events/evevent.c | 2
-rw-r--r--  drivers/acpi/events/evgpe.c | 27
-rw-r--r--  drivers/acpi/fan.c | 92
-rw-r--r--  drivers/acpi/glue.c | 4
-rw-r--r--  drivers/acpi/hardware/hwsleep.c | 2
-rw-r--r--  drivers/acpi/namespace/nsxfeval.c | 10
-rw-r--r--  drivers/acpi/numa.c | 3
-rw-r--r--  drivers/acpi/osl.c | 372
-rw-r--r--  drivers/acpi/pci_bind.c | 4
-rw-r--r--  drivers/acpi/pci_irq.c | 7
-rw-r--r--  drivers/acpi/pci_link.c | 2
-rw-r--r--  drivers/acpi/power.c | 6
-rw-r--r--  drivers/acpi/processor_core.c | 42
-rw-r--r--  drivers/acpi/processor_idle.c | 65
-rw-r--r--  drivers/acpi/processor_perflib.c | 16
-rw-r--r--  drivers/acpi/processor_thermal.c | 134
-rw-r--r--  drivers/acpi/processor_throttling.c | 346
-rw-r--r--  drivers/acpi/sbs.c | 2
-rw-r--r--  drivers/acpi/sbshc.c | 4
-rw-r--r--  drivers/acpi/scan.c | 112
-rw-r--r--  drivers/acpi/sleep/main.c | 17
-rw-r--r--  drivers/acpi/sleep/proc.c | 46
-rw-r--r--  drivers/acpi/system.c | 208
-rw-r--r--  drivers/acpi/tables/Makefile | 2
-rw-r--r--  drivers/acpi/tables/tbxfroot.c | 4
-rw-r--r--  drivers/acpi/thermal.c | 663
-rw-r--r--  drivers/acpi/utilities/utglobal.c | 2
-rw-r--r--  drivers/acpi/utilities/utresrc.c | 2
-rw-r--r--  drivers/acpi/video.c | 262
-rw-r--r--  drivers/acpi/wmi.c | 710
-rw-r--r--  drivers/ata/ahci.c | 35
-rw-r--r--  drivers/ata/ata_piix.c | 5
-rw-r--r--  drivers/ata/libata-core.c | 4
-rw-r--r--  drivers/ata/pata_at32.c | 2
-rw-r--r--  drivers/ata/pata_efar.c | 2
-rw-r--r--  drivers/ata/pata_it8213.c | 2
-rw-r--r--  drivers/ata/pata_of_platform.c | 2
-rw-r--r--  drivers/ata/pata_platform.c | 2
-rw-r--r--  drivers/ata/pata_sis.c | 4
-rw-r--r--  drivers/ata/sata_fsl.c | 4
-rw-r--r--  drivers/ata/sata_inic162x.c | 25
-rw-r--r--  drivers/ata/sata_mv.c | 366
-rw-r--r--  drivers/ata/sata_nv.c | 78
-rw-r--r--  drivers/ata/sata_via.c | 2
-rw-r--r--  drivers/base/Makefile | 2
-rw-r--r--  drivers/base/core.c | 40
-rw-r--r--  drivers/base/cpu.c | 2
-rw-r--r--  drivers/base/dmapool.c | 481
-rw-r--r--  drivers/base/driver.c | 9
-rw-r--r--  drivers/base/power/main.c | 1
-rw-r--r--  drivers/base/power/power.h | 1
-rw-r--r--  drivers/block/Kconfig | 3
-rw-r--r--  drivers/block/ataflop.c | 16
-rw-r--r--  drivers/block/cciss.c | 10
-rw-r--r--  drivers/block/cciss_scsi.c | 4
-rw-r--r--  drivers/block/loop.c | 8
-rw-r--r--  drivers/block/paride/pt.c | 2
-rw-r--r--  drivers/block/pktcdvd.c | 4
-rw-r--r--  drivers/block/rd.c | 3
-rw-r--r--  drivers/block/virtio_blk.c | 106
-rw-r--r--  drivers/block/xsysace.c | 6
-rw-r--r--  drivers/bluetooth/bpa10x.c | 1
-rw-r--r--  drivers/bluetooth/bt3c_cs.c | 2
-rw-r--r--  drivers/bluetooth/btsdio.c | 4
-rw-r--r--  drivers/bluetooth/btuart_cs.c | 4
-rw-r--r--  drivers/bluetooth/hci_usb.c | 1
-rw-r--r--  drivers/cdrom/cdrom.c | 17
-rw-r--r--  drivers/cdrom/viocd.c | 2
-rw-r--r--  drivers/char/Kconfig | 27
-rw-r--r--  drivers/char/Makefile | 2
-rw-r--r--  drivers/char/agp/agp.h | 6
-rw-r--r--  drivers/char/agp/alpha-agp.c | 17
-rw-r--r--  drivers/char/agp/amd-k7-agp.c | 4
-rw-r--r--  drivers/char/agp/backend.c | 2
-rw-r--r--  drivers/char/agp/compat_ioctl.c | 4
-rw-r--r--  drivers/char/agp/compat_ioctl.h | 2
-rw-r--r--  drivers/char/agp/frontend.c | 13
-rw-r--r--  drivers/char/agp/generic.c | 7
-rw-r--r--  drivers/char/agp/intel-agp.c | 305
-rw-r--r--  drivers/char/drm/Kconfig | 9
-rw-r--r--  drivers/char/drm/Makefile | 2
-rw-r--r--  drivers/char/drm/README.drm | 1
-rw-r--r--  drivers/char/drm/ati_pcigart.c | 6
-rw-r--r--  drivers/char/drm/drm.h | 3
-rw-r--r--  drivers/char/drm/drmP.h | 68
-rw-r--r--  drivers/char/drm/drm_agpsupport.c | 3
-rw-r--r--  drivers/char/drm/drm_bufs.c | 23
-rw-r--r--  drivers/char/drm/drm_context.c | 2
-rw-r--r--  drivers/char/drm/drm_drv.c | 39
-rw-r--r--  drivers/char/drm/drm_hashtab.c | 5
-rw-r--r--  drivers/char/drm/drm_hashtab.h | 1
-rw-r--r--  drivers/char/drm/drm_ioc32.c | 6
-rw-r--r--  drivers/char/drm/drm_ioctl.c | 25
-rw-r--r--  drivers/char/drm/drm_irq.c | 4
-rw-r--r--  drivers/char/drm/drm_memory.c | 1
-rw-r--r--  drivers/char/drm/drm_mm.c | 1
-rw-r--r--  drivers/char/drm/drm_os_linux.h | 4
-rw-r--r--  drivers/char/drm/drm_pciids.h | 97
-rw-r--r--  drivers/char/drm/drm_proc.c | 4
-rw-r--r--  drivers/char/drm/drm_sarea.h | 2
-rw-r--r--  drivers/char/drm/drm_scatter.c | 10
-rw-r--r--  drivers/char/drm/drm_stub.c | 18
-rw-r--r--  drivers/char/drm/drm_sysfs.c | 146
-rw-r--r--  drivers/char/drm/drm_vm.c | 6
-rw-r--r--  drivers/char/drm/i810_dma.c | 24
-rw-r--r--  drivers/char/drm/i810_drv.h | 52
-rw-r--r--  drivers/char/drm/i830_dma.c | 2
-rw-r--r--  drivers/char/drm/i830_drm.h | 8
-rw-r--r--  drivers/char/drm/i830_drv.h | 51
-rw-r--r--  drivers/char/drm/i830_irq.c | 2
-rw-r--r--  drivers/char/drm/i915_dma.c | 124
-rw-r--r--  drivers/char/drm/i915_drv.c | 464
-rw-r--r--  drivers/char/drm/i915_drv.h | 848
-rw-r--r--  drivers/char/drm/i915_irq.c | 26
-rw-r--r--  drivers/char/drm/i915_mem.c | 11
-rw-r--r--  drivers/char/drm/mga_dma.c | 10
-rw-r--r--  drivers/char/drm/mga_drv.h | 123
-rw-r--r--  drivers/char/drm/mga_state.c | 24
-rw-r--r--  drivers/char/drm/r128_cce.c | 6
-rw-r--r--  drivers/char/drm/r128_drv.h | 5
-rw-r--r--  drivers/char/drm/r128_state.c | 43
-rw-r--r--  drivers/char/drm/r300_cmdbuf.c | 75
-rw-r--r--  drivers/char/drm/r300_reg.h | 34
-rw-r--r--  drivers/char/drm/radeon_cp.c | 166
-rw-r--r--  drivers/char/drm/radeon_drm.h | 13
-rw-r--r--  drivers/char/drm/radeon_drv.h | 91
-rw-r--r--  drivers/char/drm/radeon_irq.c | 6
-rw-r--r--  drivers/char/drm/radeon_mem.c | 6
-rw-r--r--  drivers/char/drm/radeon_state.c | 18
-rw-r--r--  drivers/char/drm/savage_state.c | 6
-rw-r--r--  drivers/char/drm/sis_mm.c | 6
-rw-r--r--  drivers/char/drm/via_dma.c | 22
-rw-r--r--  drivers/char/drm/via_dmablit.c | 184
-rw-r--r--  drivers/char/drm/via_dmablit.h | 84
-rw-r--r--  drivers/char/drm/via_drm.h | 4
-rw-r--r--  drivers/char/drm/via_drv.c | 2
-rw-r--r--  drivers/char/drm/via_irq.c | 26
-rw-r--r--  drivers/char/drm/via_map.c | 5
-rw-r--r--  drivers/char/drm/via_mm.c | 6
-rw-r--r--  drivers/char/drm/via_video.c | 4
-rw-r--r--  drivers/char/efirtc.c | 2
-rw-r--r--  drivers/char/epca.c | 4
-rw-r--r--  drivers/char/epca.h | 1
-rw-r--r--  drivers/char/esp.c | 61
-rw-r--r--  drivers/char/hangcheck-timer.c | 2
-rw-r--r--  drivers/char/hvc_console.c | 2
-rw-r--r--  drivers/char/hvcs.c | 4
-rw-r--r--  drivers/char/hw_random/core.c | 10
-rw-r--r--  drivers/char/hw_random/via-rng.c | 14
-rw-r--r--  drivers/char/i8k.c | 43
-rw-r--r--  drivers/char/ip2/i2lib.c | 2
-rw-r--r--  drivers/char/ip2/ip2main.c | 5
-rw-r--r--  drivers/char/ip27-rtc.c | 9
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 6
-rw-r--r--  drivers/char/istallion.c | 23
-rw-r--r--  drivers/char/lp.c | 10
-rw-r--r--  drivers/char/mbcs.c | 19
-rw-r--r--  drivers/char/mbcs.h | 6
-rw-r--r--  drivers/char/misc.c | 13
-rw-r--r--  drivers/char/mspec.c | 2
-rw-r--r--  drivers/char/mxser.c | 3868
-rw-r--r--  drivers/char/mxser.h | 478
-rw-r--r--  drivers/char/mxser_new.c | 2817
-rw-r--r--  drivers/char/mxser_new.h | 293
-rw-r--r--  drivers/char/n_tty.c | 27
-rw-r--r--  drivers/char/nozomi.c | 172
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 17
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 3
-rw-r--r--  drivers/char/random.c | 1
-rw-r--r--  drivers/char/riscom8.c | 198
-rw-r--r--  drivers/char/riscom8.h | 3
-rw-r--r--  drivers/char/rocket.c | 108
-rw-r--r--  drivers/char/rocket_int.h | 24
-rw-r--r--  drivers/char/ser_a2232.c | 2
-rw-r--r--  drivers/char/serial167.c | 78
-rw-r--r--  drivers/char/specialix.c | 72
-rw-r--r--  drivers/char/specialix_io8.h | 3
-rw-r--r--  drivers/char/stallion.c | 78
-rw-r--r--  drivers/char/sx.h | 2
-rw-r--r--  drivers/char/synclink.c | 5
-rw-r--r--  drivers/char/synclink_gt.c | 71
-rw-r--r--  drivers/char/synclinkmp.c | 3
-rw-r--r--  drivers/char/toshiba.c | 2
-rw-r--r--  drivers/char/tpm/tpm.c | 44
-rw-r--r--  drivers/char/tpm/tpm.h | 2
-rw-r--r--  drivers/char/tpm/tpm_infineon.c | 6
-rw-r--r--  drivers/char/tty_io.c | 25
-rw-r--r--  drivers/char/virtio_console.c | 4
-rw-r--r--  drivers/char/vt.c | 8
-rw-r--r--  drivers/char/xilinx_hwicap/Makefile | 7
-rw-r--r--  drivers/char/xilinx_hwicap/buffer_icap.c | 380
-rw-r--r--  drivers/char/xilinx_hwicap/buffer_icap.h | 57
-rw-r--r--  drivers/char/xilinx_hwicap/fifo_icap.c | 381
-rw-r--r--  drivers/char/xilinx_hwicap/fifo_icap.h | 62
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.c | 904
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.h | 193
-rw-r--r--  drivers/cpufreq/Kconfig | 3
-rw-r--r--  drivers/cpufreq/cpufreq.c | 33
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c | 40
-rw-r--r--  drivers/cpufreq/freq_table.c | 2
-rw-r--r--  drivers/cpuidle/Kconfig | 4
-rw-r--r--  drivers/cpuidle/cpuidle.c | 48
-rw-r--r--  drivers/cpuidle/governors/ladder.c | 5
-rw-r--r--  drivers/cpuidle/governors/menu.c | 4
-rw-r--r--  drivers/dio/dio-driver.c | 70
-rw-r--r--  drivers/dio/dio.c | 4
-rw-r--r--  drivers/dma/Kconfig | 1
-rw-r--r--  drivers/dma/dmaengine.c | 49
-rw-r--r--  drivers/dma/ioat_dma.c | 43
-rw-r--r--  drivers/dma/iop-adma.c | 138
-rw-r--r--  drivers/edac/Kconfig | 23
-rw-r--r--  drivers/edac/Makefile | 3
-rw-r--r--  drivers/edac/cell_edac.c | 258
-rw-r--r--  drivers/edac/edac_core.h | 2
-rw-r--r--  drivers/edac/edac_device.c | 8
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 3
-rw-r--r--  drivers/edac/edac_pci.c | 4
-rw-r--r--  drivers/edac/edac_pci_sysfs.c | 12
-rw-r--r--  drivers/edac/i3000_edac.c | 267
-rw-r--r--  drivers/edac/i5000_edac.c | 2
-rw-r--r--  drivers/edac/mpc85xx_edac.c | 1043
-rw-r--r--  drivers/edac/mpc85xx_edac.h | 162
-rw-r--r--  drivers/edac/mv64x60_edac.c | 855
-rw-r--r--  drivers/edac/mv64x60_edac.h | 114
-rw-r--r--  drivers/firmware/dcdbas.c | 3
-rw-r--r--  drivers/firmware/dmi-id.c | 1
-rw-r--r--  drivers/firmware/dmi_scan.c | 9
-rw-r--r--  drivers/firmware/edd.c | 2
-rw-r--r--  drivers/gpio/Kconfig | 73
-rw-r--r--  drivers/gpio/Makefile | 9
-rw-r--r--  drivers/gpio/gpiolib.c | 567
-rw-r--r--  drivers/gpio/mcp23s08.c | 357
-rw-r--r--  drivers/gpio/pca953x.c | 308
-rw-r--r--  drivers/gpio/pcf857x.c | 330
-rw-r--r--  drivers/hwmon/applesmc.c | 2
-rw-r--r--  drivers/i2c/chips/Kconfig | 7
-rw-r--r--  drivers/ide/Kconfig | 9
-rw-r--r--  drivers/ide/arm/Makefile | 1
-rw-r--r--  drivers/ide/arm/icside.c | 2
-rw-r--r--  drivers/ide/arm/palm_bk3710.c | 395
-rw-r--r--  drivers/ide/cris/ide-cris.c | 33
-rw-r--r--  drivers/ide/ide-acpi.c | 2
-rw-r--r--  drivers/ide/ide-cd.c | 9
-rw-r--r--  drivers/ide/ide-dma.c | 3
-rw-r--r--  drivers/ide/ide-floppy.c | 6
-rw-r--r--  drivers/ide/ide-generic.c | 10
-rw-r--r--  drivers/ide/ide-io.c | 14
-rw-r--r--  drivers/ide/ide-iops.c | 45
-rw-r--r--  drivers/ide/ide-lib.c | 2
-rw-r--r--  drivers/ide/ide-probe.c | 53
-rw-r--r--  drivers/ide/ide-proc.c | 1
-rw-r--r--  drivers/ide/ide-tape.c | 2400
-rw-r--r--  drivers/ide/ide-taskfile.c | 35
-rw-r--r--  drivers/ide/ide-timing.h | 2
-rw-r--r--  drivers/ide/ide.c | 54
-rw-r--r--  drivers/ide/legacy/buddha.c | 72
-rw-r--r--  drivers/ide/legacy/falconide.c | 42
-rw-r--r--  drivers/ide/legacy/gayle.c | 39
-rw-r--r--  drivers/ide/legacy/hd.c | 9
-rw-r--r--  drivers/ide/legacy/ide_platform.c | 2
-rw-r--r--  drivers/ide/legacy/macide.c | 57
-rw-r--r--  drivers/ide/legacy/q40ide.c | 9
-rw-r--r--  drivers/ide/pci/Makefile | 3
-rw-r--r--  drivers/ide/pci/generic.c | 13
-rw-r--r--  drivers/ide/pci/siimage.c | 3
-rw-r--r--  drivers/infiniband/Kconfig | 2
-rw-r--r--  drivers/infiniband/Makefile | 1
-rw-r--r--  drivers/infiniband/core/cm.c | 89
-rw-r--r--  drivers/infiniband/core/fmr_pool.c | 7
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h | 1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_iverbs.h | 5
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c | 2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c | 4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_sqp.c | 91
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 10
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c | 11
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c | 5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c | 8
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 22
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 13
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c | 47
-rw-r--r--  drivers/infiniband/hw/nes/Kconfig | 16
-rw-r--r--  drivers/infiniband/hw/nes/Makefile | 3
-rw-r--r--  drivers/infiniband/hw/nes/nes.c | 1152
-rw-r--r--  drivers/infiniband/hw/nes/nes.h | 560
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 3088
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.h | 433
-rw-r--r--  drivers/infiniband/hw/nes/nes_context.h | 193
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 3080
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.h | 1206
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c | 1703
-rw-r--r--  drivers/infiniband/hw/nes/nes_user.h | 112
-rw-r--r--  drivers/infiniband/hw/nes/nes_utils.c | 917
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 3917
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.h | 169
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 19
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 53
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 1
-rw-r--r--  drivers/input/gameport/gameport.c | 1
-rw-r--r--  drivers/input/keyboard/bf54x-keys.c | 1
-rw-r--r--  drivers/input/keyboard/jornada720_kbd.c | 1
-rw-r--r--  drivers/input/misc/wistron_btns.c | 4
-rw-r--r--  drivers/input/serio/gscps2.c | 2
-rw-r--r--  drivers/input/touchscreen/h3600_ts_input.c | 4
-rw-r--r--  drivers/isdn/act2000/module.c | 22
-rw-r--r--  drivers/isdn/gigaset/asyncdata.c | 8
-rw-r--r--  drivers/isdn/gigaset/bas-gigaset.c | 423
-rw-r--r--  drivers/isdn/gigaset/common.c | 135
-rw-r--r--  drivers/isdn/gigaset/ev-layer.c | 106
-rw-r--r--  drivers/isdn/gigaset/gigaset.h | 45
-rw-r--r--  drivers/isdn/gigaset/interface.c | 19
-rw-r--r--  drivers/isdn/gigaset/isocdata.c | 51
-rw-r--r--  drivers/isdn/gigaset/ser-gigaset.c | 20
-rw-r--r--  drivers/isdn/gigaset/usb-gigaset.c | 206
-rw-r--r--  drivers/isdn/hardware/eicon/debug.c | 2
-rw-r--r--  drivers/isdn/hardware/eicon/debuglib.c | 2
-rw-r--r--  drivers/isdn/hardware/eicon/debuglib.h | 2
-rw-r--r--  drivers/isdn/hardware/eicon/di.c | 2
-rw-r--r--  drivers/isdn/hardware/eicon/diva.c | 5
-rw-r--r--  drivers/isdn/hardware/eicon/message.c | 4
-rw-r--r--  drivers/isdn/hisax/avm_pci.c | 8
-rw-r--r--  drivers/isdn/hysdn/hycapi.c | 2
-rw-r--r--  drivers/isdn/i4l/isdn_tty.c | 1
-rw-r--r--  drivers/isdn/i4l/isdn_ttyfax.c | 2
-rw-r--r--  drivers/isdn/icn/icn.c | 22
-rw-r--r--  drivers/isdn/isdnloop/isdnloop.c | 16
-rw-r--r--  drivers/leds/Kconfig | 48
-rw-r--r--  drivers/leds/Makefile | 3
-rw-r--r--  drivers/leds/led-class.c | 13
-rw-r--r--  drivers/leds/leds-ams-delta.c | 12
-rw-r--r--  drivers/leds/leds-clevo-mail.c | 219
-rw-r--r--  drivers/leds/leds-corgi.c | 4
-rw-r--r--  drivers/leds/leds-gpio.c | 2
-rw-r--r--  drivers/leds/leds-hp6xx.c | 120
-rw-r--r--  drivers/leds/leds-ixp4xx-gpio.c | 214
-rw-r--r--  drivers/leds/leds-locomo.c | 4
-rw-r--r--  drivers/leds/leds-net48xx.c | 2
-rw-r--r--  drivers/leds/leds-spitz.c | 8
-rw-r--r--  drivers/leds/leds-tosa.c | 4
-rw-r--r--  drivers/leds/leds-wrap.c | 47
-rw-r--r--  drivers/leds/ledtrig-timer.c | 41
-rw-r--r--  drivers/lguest/lguest_device.c | 146
-rw-r--r--  drivers/macintosh/adb.c | 1
-rw-r--r--  drivers/macintosh/mediabay.c | 46
-rw-r--r--  drivers/macintosh/smu.c | 4
-rw-r--r--  drivers/macintosh/via-macii.c | 2
-rw-r--r--  drivers/md/bitmap.c | 39
-rw-r--r--  drivers/md/faulty.c | 2
-rw-r--r--  drivers/md/linear.c | 2
-rw-r--r--  drivers/md/md.c | 395
-rw-r--r--  drivers/md/mktables.c | 187
-rw-r--r--  drivers/md/multipath.c | 2
-rw-r--r--  drivers/md/raid0.c | 8
-rw-r--r--  drivers/md/raid1.c | 5
-rw-r--r--  drivers/md/raid10.c | 7
-rw-r--r--  drivers/md/raid5.c | 48
-rw-r--r--  drivers/md/raid6test/test.c | 117
-rw-r--r--  drivers/media/common/saa7146_core.c | 2
-rw-r--r--  drivers/media/dvb/dvb-core/dvb_net.c | 2
-rw-r--r--  drivers/media/video/Makefile | 1
-rw-r--r--  drivers/media/video/bt8xx/bttv-cards.c | 2
-rw-r--r--  drivers/media/video/indycam.c | 2
-rw-r--r--  drivers/media/video/mt20xx.c | 2
-rw-r--r--  drivers/media/video/pvrusb2/pvrusb2.h | 2
-rw-r--r--  drivers/media/video/pwc/pwc-if.c | 2
-rw-r--r--  drivers/media/video/tea6420.c | 2
-rw-r--r--  drivers/media/video/tvmixer.c | 336
-rw-r--r--  drivers/media/video/usbvideo/quickcam_messenger.c | 2
-rw-r--r--  drivers/media/video/usbvision/usbvision-video.c | 2
-rw-r--r--  drivers/media/video/vpx3220.c | 2
-rw-r--r--  drivers/media/video/zoran_card.c | 2
-rw-r--r--  drivers/media/video/zr36050.c | 2
-rw-r--r--  drivers/media/video/zr36060.c | 2
-rw-r--r--  drivers/message/fusion/lsi/mpi_log_sas.h | 2
-rw-r--r--  drivers/message/fusion/mptctl.c | 8
-rw-r--r--  drivers/message/fusion/mptscsih.c | 2
-rw-r--r--  drivers/message/i2o/iop.c | 2
-rw-r--r--  drivers/mfd/Kconfig | 7
-rw-r--r--  drivers/mfd/Makefile | 1
-rw-r--r--  drivers/mfd/asic3.c | 588
-rw-r--r--  drivers/misc/Kconfig | 53
-rw-r--r--  drivers/misc/Makefile | 3
-rw-r--r--  drivers/misc/acer-wmi.c | 1109
-rw-r--r--  drivers/misc/asus-laptop.c | 29
-rw-r--r--  drivers/misc/fujitsu-laptop.c | 1
-rw-r--r--  drivers/misc/intel_menlow.c | 526
-rw-r--r--  drivers/misc/lkdtm.c | 24
-rw-r--r--  drivers/misc/msi-laptop.c | 1
-rw-r--r--  drivers/misc/phantom.c | 7
-rw-r--r--  drivers/misc/sony-laptop.c | 445
-rw-r--r--  drivers/misc/tc1100-wmi.c | 290
-rw-r--r--  drivers/misc/thinkpad_acpi.c | 3256
-rw-r--r--  drivers/misc/thinkpad_acpi.h | 606
-rw-r--r--  drivers/mtd/devices/block2mtd.c | 4
-rw-r--r--  drivers/mtd/devices/doc2000.c | 2
-rw-r--r--  drivers/mtd/devices/phram.c | 4
-rw-r--r--  drivers/mtd/maps/mtx-1_flash.c | 2
-rw-r--r--  drivers/mtd/nand/autcpu12.c | 6
-rw-r--r--  drivers/mtd/nand/bf5xx_nand.c | 2
-rw-r--r--  drivers/mtd/nand/cs553x_nand.c | 2
-rw-r--r--  drivers/mtd/nand/edb7312.c | 2
-rw-r--r--  drivers/mtd/nand/nand_base.c | 2
-rw-r--r--  drivers/mtd/nand/nandsim.c | 2
-rw-r--r--  drivers/mtd/nand/s3c2410.c | 2
-rw-r--r--  drivers/mtd/nand/sharpsl.c | 2
-rw-r--r--  drivers/mtd/nftlmount.c | 2
-rw-r--r--  drivers/net/Kconfig | 24
-rw-r--r--  drivers/net/arm/at91_ether.c | 2
-rw-r--r--  drivers/net/ax88796.c | 40
-rw-r--r--  drivers/net/bfin_mac.c | 107
-rw-r--r--  drivers/net/bfin_mac.h | 31
-rw-r--r--  drivers/net/bonding/bond_main.c | 106
-rw-r--r--  drivers/net/bonding/bonding.h | 4
-rw-r--r--  drivers/net/cpmac.c | 55
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c | 4
-rw-r--r--  drivers/net/cxgb3/mc5.c | 2
-rw-r--r--  drivers/net/cxgb3/sge.c | 2
-rw-r--r--  drivers/net/cxgb3/t3_hw.c | 22
-rw-r--r--  drivers/net/e100.c | 18
-rw-r--r--  drivers/net/e1000/e1000_main.c | 2
-rw-r--r--  drivers/net/e1000e/defines.h | 1
-rw-r--r--  drivers/net/e1000e/ethtool.c | 17
-rw-r--r--  drivers/net/e1000e/netdev.c | 12
-rw-r--r--  drivers/net/eexpress.c | 2
-rw-r--r--  drivers/net/ehea/ehea.h | 3
-rw-r--r--  drivers/net/ehea/ehea_ethtool.c | 4
-rw-r--r--  drivers/net/ehea/ehea_hw.h | 8
-rw-r--r--  drivers/net/ehea/ehea_main.c | 124
-rw-r--r--  drivers/net/ehea/ehea_phyp.c | 158
-rw-r--r--  drivers/net/ehea/ehea_phyp.h | 22
-rw-r--r--  drivers/net/ehea/ehea_qmr.c | 32
-rw-r--r--  drivers/net/ehea/ehea_qmr.h | 16
-rw-r--r--  drivers/net/forcedeth.c | 110
-rw-r--r--  drivers/net/gianfar_mii.c | 4
-rw-r--r--  drivers/net/hamradio/dmascc.c | 4
-rw-r--r--  drivers/net/ibmlana.c | 4
-rw-r--r--  drivers/net/igb/igb_main.c | 1
-rw-r--r--  drivers/net/irda/ali-ircc.h | 4
-rw-r--r--  drivers/net/irda/nsc-ircc.h | 4
-rw-r--r--  drivers/net/irda/via-ircc.h | 4
-rw-r--r--  drivers/net/iseries_veth.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe.h | 4
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 91
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 305
-rw-r--r--  drivers/net/lib8390.c | 2
-rw-r--r--  drivers/net/macb.c | 9
-rw-r--r--  drivers/net/mipsnet.c | 203
-rw-r--r--  drivers/net/mipsnet.h | 112
-rw-r--r--  drivers/net/mlx4/fw.c | 6
-rw-r--r--  drivers/net/mlx4/fw.h | 3
-rw-r--r--  drivers/net/mlx4/main.c | 11
-rw-r--r--  drivers/net/mlx4/mr.c | 2
-rw-r--r--  drivers/net/mv643xx_eth.c | 11
-rw-r--r--  drivers/net/natsemi.c | 18
-rw-r--r--  drivers/net/pasemi_mac.c | 259
-rw-r--r--  drivers/net/pasemi_mac.h | 16
-rw-r--r--  drivers/net/pci-skeleton.c | 49
-rw-r--r--  drivers/net/pcmcia/3c574_cs.c | 51
-rw-r--r--  drivers/net/pcmcia/3c589_cs.c | 30
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 34
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 26
-rw-r--r--  drivers/net/pcmcia/nmclan_cs.c | 25
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 62
-rw-r--r--  drivers/net/pcmcia/smc91c92_cs.c | 62
-rw-r--r--  drivers/net/pcmcia/xirc2ps_cs.c | 51
-rw-r--r--  drivers/net/phy/Kconfig | 5
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/broadcom.c | 20
-rw-r--r--  drivers/net/phy/mdio_bus.c | 2
-rw-r--r--  drivers/net/phy/phy.c | 68
-rw-r--r--  drivers/net/phy/phy_device.c | 11
-rw-r--r--  drivers/net/phy/realtek.c | 80
-rw-r--r--  drivers/net/pppol2tp.c | 3
-rw-r--r--  drivers/net/s2io.c | 20
-rw-r--r--  drivers/net/s2io.h | 2
-rw-r--r--  drivers/net/sis190.c | 2
-rw-r--r--  drivers/net/skfp/ess.c | 2
-rw-r--r--  drivers/net/skfp/fplustm.c | 2
-rw-r--r--  drivers/net/skfp/hwmtm.c | 2
-rw-r--r--  drivers/net/sky2.c | 31
-rw-r--r--  drivers/net/sky2.h | 2
-rw-r--r--  drivers/net/sunbmac.c | 2
-rw-r--r--  drivers/net/sunqe.c | 6
-rw-r--r--  drivers/net/sunvnet.c | 2
-rw-r--r--  drivers/net/tlan.c | 25
-rw-r--r--  drivers/net/tokenring/abyss.c | 2
-rw-r--r--  drivers/net/tokenring/abyss.h | 2
-rw-r--r--  drivers/net/tokenring/madgemc.c | 2
-rw-r--r--  drivers/net/tokenring/madgemc.h | 2
-rw-r--r--  drivers/net/tokenring/olympic.c | 2
-rw-r--r--  drivers/net/tokenring/proteon.c | 2
-rw-r--r--  drivers/net/tokenring/skisa.c | 2
-rw-r--r--  drivers/net/tokenring/tms380tr.c | 2
-rw-r--r--  drivers/net/tokenring/tms380tr.h | 2
-rw-r--r--  drivers/net/tokenring/tmspci.c | 2
-rw-r--r--  drivers/net/tulip/xircom_cb.c | 2
-rw-r--r--  drivers/net/tun.c | 4
-rw-r--r--  drivers/net/ucc_geth.c | 37
-rw-r--r--  drivers/net/ucc_geth_mii.c | 4
-rw-r--r--  drivers/net/usb/rtl8150.c | 1
-rw-r--r--  drivers/net/via-rhine.c | 2
-rw-r--r--  drivers/net/via-velocity.c | 70
-rw-r--r--  drivers/net/via-velocity.h | 224
-rw-r--r--  drivers/net/virtio_net.c | 159
-rw-r--r--  drivers/net/wan/cycx_drv.c | 4
-rw-r--r--  drivers/net/wan/hdlc.c | 24
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 5
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 53
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 2
-rw-r--r--  drivers/net/wan/hdlc_raw.c | 2
-rw-r--r--  drivers/net/wan/hdlc_raw_eth.c | 6
-rw-r--r--  drivers/net/wan/hdlc_x25.c | 10
-rw-r--r--  drivers/net/wireless/ath5k/base.c | 6
-rw-r--r--  drivers/net/wireless/b43/b43.h | 1
-rw-r--r--  drivers/net/wireless/b43/dma.c | 137
-rw-r--r--  drivers/net/wireless/b43/dma.h | 20
-rw-r--r--  drivers/net/wireless/b43/leds.c | 13
-rw-r--r--  drivers/net/wireless/b43/main.c | 26
-rw-r--r--  drivers/net/wireless/b43legacy/dma.c | 23
-rw-r--r--  drivers/net/wireless/b43legacy/main.c | 9
-rw-r--r--  drivers/net/wireless/b43legacy/pio.c | 21
-rw-r--r--  drivers/net/wireless/b43legacy/xmit.c | 15
-rw-r--r--  drivers/net/wireless/b43legacy/xmit.h | 2
-rw-r--r--  drivers/net/wireless/ipw2100.c | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 23
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-helpers.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl4965-base.c | 10
-rw-r--r--  drivers/net/wireless/libertas/scan.c | 2
-rw-r--r--  drivers/net/wireless/netwave_cs.c | 24
-rw-r--r--  drivers/net/wireless/wavelan_cs.c | 56
-rw-r--r--  drivers/nubus/Makefile | 1
-rw-r--r--  drivers/nubus/nubus.c | 15
-rw-r--r--  drivers/nubus/nubus_syms.c | 28
-rw-r--r--  drivers/nubus/proc.c | 4
-rw-r--r--  drivers/of/base.c | 25
-rw-r--r--  drivers/of/platform.c | 10
-rw-r--r--  drivers/parisc/ccio-dma.c | 6
-rw-r--r--  drivers/parisc/hppb.c | 2
-rw-r--r--  drivers/parisc/iommu-helpers.h | 7
-rw-r--r--  drivers/parisc/sba_iommu.c | 2
-rw-r--r--  drivers/parport/parport_pc.c | 50
-rw-r--r--  drivers/parport/parport_serial.c | 4
-rw-r--r--  drivers/parport/probe.c | 2
-rw-r--r--  drivers/pci/Makefile | 3
-rw-r--r--  drivers/pci/dmar.c | 1
-rw-r--r--  drivers/pci/hotplug-pci.c | 20
-rw-r--r--  drivers/pci/intel-iommu.c | 4
-rw-r--r--  drivers/pci/intel-iommu.h | 14
-rw-r--r--  drivers/pci/iova.c | 8
-rw-r--r--  drivers/pci/iova.h | 16
-rw-r--r--  drivers/pci/pci-sysfs.c | 5
-rw-r--r--  drivers/pci/pci.c | 20
-rw-r--r--  drivers/pci/pcie/Kconfig | 20
-rw-r--r--  drivers/pci/pcie/Makefile | 3
-rw-r--r--  drivers/pci/pcie/aspm.c | 802
-rw-r--r--  drivers/pci/probe.c | 28
-rw-r--r--  drivers/pci/remove.c | 4
-rw-r--r--  drivers/pci/setup-bus.c | 4
-rw-r--r--  drivers/pcmcia/at91_cf.c | 62
-rw-r--r--  drivers/pcmcia/cardbus.c | 4
-rw-r--r--  drivers/pcmcia/ds.c | 15
-rw-r--r--  drivers/pcmcia/i82092.c | 2
-rw-r--r--  drivers/pcmcia/i82365.c | 18
-rw-r--r--  drivers/pcmcia/m32r_cfc.c | 7
-rw-r--r--  drivers/pcmcia/m32r_pcc.c | 9
-rw-r--r--  drivers/pcmcia/m8xx_pcmcia.c | 8
-rw-r--r--  drivers/pcmcia/pcmcia_resource.c | 14
-rw-r--r--  drivers/pcmcia/rsrc_nonstatic.c | 11
-rw-r--r--  drivers/pcmcia/sa1100_jornada720.c | 2
-rw-r--r--  drivers/pcmcia/tcic.c | 2
-rw-r--r--  drivers/pnp/driver.c | 12
-rw-r--r--  drivers/pnp/interface.c | 13
-rw-r--r--  drivers/pnp/manager.c | 27
-rw-r--r--  drivers/pnp/pnpacpi/core.c | 2
-rw-r--r--  drivers/pnp/pnpacpi/rsparser.c | 44
-rw-r--r--  drivers/pnp/pnpbios/core.c | 2
-rw-r--r--  drivers/pnp/pnpbios/rsparser.c | 33
-rw-r--r--  drivers/pnp/quirks.c | 43
-rw-r--r--  drivers/power/power_supply_sysfs.c | 2
-rw-r--r--  drivers/ps3/ps3av.c | 97
-rw-r--r--  drivers/rtc/Kconfig | 126
-rw-r--r--  drivers/rtc/Makefile | 4
-rw-r--r--  drivers/rtc/rtc-at91sam9.c | 520
-rw-r--r--  drivers/rtc/rtc-bfin.c | 351
-rw-r--r--  drivers/rtc/rtc-cmos.c | 221
-rw-r--r--  drivers/rtc/rtc-dev.c | 9
-rw-r--r--  drivers/rtc/rtc-ds1302.c | 262
-rw-r--r--  drivers/rtc/rtc-ds1307.c | 27
-rw-r--r--  drivers/rtc/rtc-ds1511.c | 656
-rw-r--r--  drivers/rtc/rtc-pcf8583.c | 24
-rw-r--r--  drivers/rtc/rtc-r9701.c | 178
-rw-r--r--  drivers/rtc/rtc-s3c.c | 5
-rw-r--r--  drivers/rtc/rtc-sa1100.c | 16
-rw-r--r--  drivers/rtc/rtc-sysfs.c | 19
-rw-r--r--  drivers/s390/block/dasd.c | 19
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 62
-rw-r--r--  drivers/s390/block/dcssblk.c | 5
-rw-r--r--  drivers/s390/char/sclp_tty.c | 2
-rw-r--r--  drivers/s390/char/sclp_vt220.c | 2
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 12
-rw-r--r--  drivers/s390/cio/chsc.c | 147
-rw-r--r--  drivers/s390/cio/device_id.c | 107
-rw-r--r--  drivers/s390/sysinfo.c | 2
-rw-r--r--  drivers/scsi/NCR53C9x.h | 2
-rw-r--r--  drivers/scsi/a2091.c | 3
-rw-r--r--  drivers/scsi/a3000.c | 3
-rw-r--r--  drivers/scsi/aacraid/linit.c | 9
-rw-r--r--  drivers/scsi/aha1542.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/Makefile | 9
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_inline.h | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_pci.c | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_inline.h | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 2
-rw-r--r--  drivers/scsi/aic7xxx_old.c | 2
-rw-r--r--  drivers/scsi/gvp11.c | 3
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c | 4
-rw-r--r--  drivers/scsi/ide-scsi.c | 4
-rw-r--r--  drivers/scsi/ipr.c | 2
-rw-r--r--  drivers/scsi/ips.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 10
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 1
-rw-r--r--  drivers/scsi/pcmcia/fdomain_stub.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 4
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 2
-rw-r--r--  drivers/serial/21285.c | 8
-rw-r--r--  drivers/serial/68328serial.c | 3
-rw-r--r--  drivers/serial/8250.c | 42
-rw-r--r--  drivers/serial/8250_pci.c | 133
-rw-r--r--  drivers/serial/8250_pnp.c | 10
-rw-r--r--  drivers/serial/Kconfig | 55
-rw-r--r--  drivers/serial/Makefile | 1
-rw-r--r--  drivers/serial/atmel_serial.c | 3
-rw-r--r--  drivers/serial/atmel_serial.h | 127
-rw-r--r--  drivers/serial/cpm_uart/cpm_uart_core.c | 2
-rw-r--r--  drivers/serial/crisv10.c | 5
-rw-r--r--  drivers/serial/dz.c | 543
-rw-r--r--  drivers/serial/dz.h | 11
-rw-r--r--  drivers/serial/icom.h | 2
-rw-r--r--  drivers/serial/imx.c | 4
-rw-r--r--  drivers/serial/mcf.c | 2
-rw-r--r--  drivers/serial/mpc52xx_uart.c | 424
-rw-r--r--  drivers/serial/mpsc.c | 1
-rw-r--r--  drivers/serial/mux.c | 2
-rw-r--r--  drivers/serial/s3c2410.c | 2
-rw-r--r--  drivers/serial/sc26xx.c | 755
-rw-r--r--  drivers/serial/serial_core.c | 20
-rw-r--r--  drivers/serial/serial_cs.c | 6
-rw-r--r--  drivers/serial/uartlite.c | 55
-rw-r--r--  drivers/spi/Kconfig | 13
-rw-r--r--  drivers/spi/Makefile | 1
-rw-r--r--  drivers/spi/atmel_spi.c | 173
-rw-r--r--  drivers/spi/omap2_mcspi.c | 37
-rw-r--r--  drivers/spi/pxa2xx_spi.c | 17
-rw-r--r--  drivers/spi/spi.c | 35
-rw-r--r--  drivers/spi/spi_bfin5xx.c | 131
-rw-r--r--  drivers/spi/spi_imx.c | 13
-rw-r--r--  drivers/spi/spi_s3c24xx.c | 12
-rw-r--r--  drivers/spi/spi_s3c24xx_gpio.c | 12
-rw-r--r--  drivers/spi/spi_sh_sci.c | 205
-rw-r--r--  drivers/ssb/b43_pci_bridge.c | 2
-rw-r--r--  drivers/thermal/Kconfig | 15
-rw-r--r--  drivers/thermal/Makefile | 5
-rw-r--r--  drivers/thermal/thermal.c | 714
-rw-r--r--  drivers/uio/uio.c | 14
-rw-r--r--  drivers/video/atmel_lcdfb.c | 135
-rw-r--r--  drivers/video/aty/radeon_pm.c | 2
-rw-r--r--  drivers/video/backlight/Kconfig | 22
-rw-r--r--  drivers/video/backlight/Makefile | 1
-rw-r--r--  drivers/video/backlight/backlight.c | 12
-rw-r--r--  drivers/video/backlight/omap1_bl.c | 210
-rw-r--r--  drivers/video/bf54x-lq043fb.c | 3
-rw-r--r--  drivers/video/console/bitblit.c | 4
-rw-r--r--  drivers/video/console/fbcon.c | 29
-rw-r--r--  drivers/video/console/fbcon.h | 47
-rw-r--r--  drivers/video/console/fbcon_ccw.c | 4
-rw-r--r--  drivers/video/console/fbcon_cw.c | 4
-rw-r--r--  drivers/video/console/fbcon_ud.c | 4
-rw-r--r--  drivers/video/console/fonts.c | 4
-rw-r--r--  drivers/video/console/tileblit.c | 4
-rw-r--r--  drivers/video/console/vgacon.c | 2
-rw-r--r--  drivers/video/cyblafb.c | 2
-rw-r--r--  drivers/video/fb_defio.c | 17
-rw-r--r--  drivers/video/fb_draw.h | 1
-rw-r--r--  drivers/video/fbmon.c | 118
-rw-r--r--  drivers/video/geode/lxfb_core.c | 2
-rw-r--r--  drivers/video/hpfb.c | 3
-rw-r--r--  drivers/video/i810/i810_main.c | 2
-rw-r--r--  drivers/video/igafb.c | 5
-rw-r--r--  drivers/video/intelfb/intelfb.h | 2
-rw-r--r--  drivers/video/intelfb/intelfbhw.c | 2
-rw-r--r--  drivers/video/neofb.c | 27
-rw-r--r--  drivers/video/nvidia/nvidia.c | 22
-rw-r--r--  drivers/video/omap/lcdc.c | 2
-rw-r--r--  drivers/video/pm2fb.c | 13
-rw-r--r--  drivers/video/pm3fb.c | 2
-rw-r--r--  drivers/video/pmag-aa-fb.c | 2
-rw-r--r--  drivers/video/ps3fb.c | 451
-rw-r--r--  drivers/video/s3c2410fb.c | 88
-rw-r--r--  drivers/video/s3c2410fb.h | 7
-rw-r--r--  drivers/video/sis/sis_main.c | 4
-rw-r--r--  drivers/video/sm501fb.c | 66
-rw-r--r--  drivers/video/tdfxfb.c | 2
-rw-r--r--  drivers/video/uvesafb.c | 8
-rw-r--r--  drivers/video/vermilion/vermilion.c | 5
-rw-r--r--  drivers/video/xilinxfb.c | 4
-rw-r--r--  drivers/virtio/Kconfig | 31
-rw-r--r--  drivers/virtio/Makefile | 2
-rw-r--r--  drivers/virtio/virtio.c | 65
-rw-r--r--  drivers/virtio/virtio_balloon.c | 285
-rw-r--r--  drivers/virtio/virtio_pci.c | 446
-rw-r--r--  drivers/virtio/virtio_ring.c | 51
-rw-r--r--  drivers/w1/masters/Kconfig | 10
-rw-r--r--  drivers/w1/masters/Makefile | 1
-rw-r--r--  drivers/w1/masters/ds1wm.c | 9
-rw-r--r--  drivers/w1/masters/w1-gpio.c | 124
-rw-r--r--  drivers/w1/slaves/w1_therm.c | 3
-rw-r--r--  drivers/w1/w1.c | 4
-rw-r--r--  drivers/watchdog/shwdt.c | 2
743 files changed, 50162 insertions, 18184 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 08d4ae201597..b86877bdc7ac 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -52,12 +52,16 @@ source "drivers/i2c/Kconfig"
source "drivers/spi/Kconfig"
+source "drivers/gpio/Kconfig"
+
source "drivers/w1/Kconfig"
source "drivers/power/Kconfig"
source "drivers/hwmon/Kconfig"
+source "drivers/thermal/Kconfig"
+
source "drivers/watchdog/Kconfig"
source "drivers/ssb/Kconfig"
@@ -91,6 +95,4 @@ source "drivers/dca/Kconfig"
source "drivers/auxdisplay/Kconfig"
source "drivers/uio/Kconfig"
-
-source "drivers/virtio/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 0ee9a8a4095e..30ba97ec5eb5 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -5,6 +5,7 @@
# Rewritten to use lists instead of if-statements.
#
+obj-$(CONFIG_HAVE_GPIO_LIB) += gpio/
obj-$(CONFIG_PCI) += pci/
obj-$(CONFIG_PARISC) += parisc/
obj-$(CONFIG_RAPIDIO) += rapidio/
@@ -64,6 +65,7 @@ obj-y += i2c/
obj-$(CONFIG_W1) += w1/
obj-$(CONFIG_POWER_SUPPLY) += power/
obj-$(CONFIG_HWMON) += hwmon/
+obj-$(CONFIG_THERMAL) += thermal/
obj-$(CONFIG_WATCHDOG) += watchdog/
obj-$(CONFIG_PHONE) += telephony/
obj-$(CONFIG_MD) += md/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ccf6ea95f68c..7ef172c2a1d6 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -68,26 +68,28 @@ config ACPI_PROCFS
Say N to delete /proc/acpi/ files that have moved to /sys/
config ACPI_PROCFS_POWER
- bool "Deprecated power /proc/acpi folders"
+ bool "Deprecated power /proc/acpi directories"
depends on PROC_FS
default y
---help---
For backwards compatibility, this option allows
- deprecated power /proc/acpi/ folders to exist, even when
+ deprecated power /proc/acpi/ directories to exist, even when
they have been replaced by functions in /sys.
- The deprecated folders (and their replacements) include:
+ The deprecated directories (and their replacements) include:
/proc/acpi/battery/* (/sys/class/power_supply/*)
/proc/acpi/ac_adapter/* (sys/class/power_supply/*)
- This option has no effect on /proc/acpi/ folders
+ This option has no effect on /proc/acpi/ directories
and functions, which do not yet exist in /sys
- Say N to delete power /proc/acpi/ folders that have moved to /sys/
+ Say N to delete power /proc/acpi/ directories that have moved to /sys/
+
config ACPI_SYSFS_POWER
bool "Future power /sys interface"
select POWER_SUPPLY
default y
---help---
Say N to disable power /sys interface
+
config ACPI_PROC_EVENT
bool "Deprecated /proc/acpi/event support"
depends on PROC_FS
@@ -186,6 +188,7 @@ config ACPI_HOTPLUG_CPU
config ACPI_THERMAL
tristate "Thermal Zone"
depends on ACPI_PROCESSOR
+ select THERMAL
default y
help
This driver adds support for ACPI thermal zones. Most mobile and
@@ -199,6 +202,16 @@ config ACPI_NUMA
depends on (X86 || IA64)
default y if IA64_GENERIC || IA64_SGI_SN2
+config ACPI_WMI
+ tristate "WMI (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ This driver adds support for the ACPI-WMI mapper device (PNP0C14)
+ found on some systems.
+
+ NOTE: You will need another driver or userspace application on top of
+ this to actually use anything defined in the ACPI-WMI mapper.
+
config ACPI_ASUS
tristate "ASUS/Medion Laptop Extras"
depends on X86
@@ -263,8 +276,10 @@ config ACPI_CUSTOM_DSDT
depends on !STANDALONE
default n
help
- This option is to load a custom ACPI DSDT
- If you don't know what that is, say N.
+ This option supports a custom DSDT by linking it into the kernel.
+ See Documentation/acpi/dsdt-override.txt
+
+ If unsure, say N.
config ACPI_CUSTOM_DSDT_FILE
string "Custom DSDT Table file to include"
@@ -274,6 +289,17 @@ config ACPI_CUSTOM_DSDT_FILE
Enter the full path name to the file which includes the AmlCode
declaration.
+config ACPI_CUSTOM_DSDT_INITRD
+ bool "Read Custom DSDT from initramfs"
+ depends on BLK_DEV_INITRD
+ default n
+ help
+ This option supports a custom DSDT by optionally loading it from initrd.
+ See Documentation/acpi/dsdt-override.txt
+
+ If you are not using this feature now, but may use it later,
+ it is safe to say Y here.
+
config ACPI_BLACKLIST_YEAR
int "Disable ACPI for systems before Jan 1st this year" if X86_32
default 0
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 456446f90077..f29812a86533 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_ACPI_THERMAL) += thermal.o
obj-$(CONFIG_ACPI_SYSTEM) += system.o event.o
obj-$(CONFIG_ACPI_DEBUG) += debug.o
obj-$(CONFIG_ACPI_NUMA) += numa.o
+obj-$(CONFIG_ACPI_WMI) += wmi.o
obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
diff --git a/drivers/acpi/asus_acpi.c b/drivers/acpi/asus_acpi.c
index d915fec9bf63..d25ef961415c 100644
--- a/drivers/acpi/asus_acpi.c
+++ b/drivers/acpi/asus_acpi.c
@@ -142,6 +142,7 @@ struct asus_hotk {
xxN, //M2400N, M3700N, M5200N, M6800N, S1300N, S5200N
A4S, //Z81sp
//(Centrino)
+ F3Sa,
END_MODEL
} model; //Models currently supported
u16 event_count[128]; //count for each event TODO make this better
@@ -405,7 +406,20 @@ static struct model_data model_conf[END_MODEL] = {
.brightness_get = "GPLV",
.mt_bt_switch = "BLED",
.mt_wled = "WLED"
- }
+ },
+
+ {
+ .name = "F3Sa",
+ .mt_bt_switch = "BLED",
+ .mt_wled = "WLED",
+ .mt_mled = "MLED",
+ .brightness_get = "GPLV",
+ .brightness_set = "SPLV",
+ .mt_lcd_switch = "\\_SB.PCI0.SBRG.EC0._Q10",
+ .lcd_status = "\\_SB.PCI0.SBRG.EC0.RPIN",
+ .display_get = "\\ADVG",
+ .display_set = "SDSP",
+ },
};
@@ -710,15 +724,8 @@ static int get_lcd_state(void)
{
int lcd = 0;
- if (hotk->model != L3H) {
- /* We don't have to check anything if we are here */
- if (!read_acpi_int(NULL, hotk->methods->lcd_status, &lcd))
- printk(KERN_WARNING
- "Asus ACPI: Error reading LCD status\n");
-
- if (hotk->model == L2D)
- lcd = ~lcd;
- } else { /* L3H and the like have to be handled differently */
+ if (hotk->model == L3H) {
+ /* L3H and the like have to be handled differently */
acpi_status status = 0;
struct acpi_object_list input;
union acpi_object mt_params[2];
@@ -745,6 +752,32 @@ static int get_lcd_state(void)
if (out_obj.type == ACPI_TYPE_INTEGER)
/* That's what the AML code does */
lcd = out_obj.integer.value >> 8;
+ } else if (hotk->model == F3Sa) {
+ unsigned long tmp;
+ union acpi_object param;
+ struct acpi_object_list input;
+ acpi_status status;
+
+ /* Read pin 11 */
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = 0x11;
+ input.count = 1;
+ input.pointer = &param;
+
+ status = acpi_evaluate_integer(NULL, hotk->methods->lcd_status,
+ &input, &tmp);
+ if (status != AE_OK)
+ return -1;
+
+ lcd = tmp;
+ } else {
+ /* We don't have to check anything if we are here */
+ if (!read_acpi_int(NULL, hotk->methods->lcd_status, &lcd))
+ printk(KERN_WARNING
+ "Asus ACPI: Error reading LCD status\n");
+
+ if (hotk->model == L2D)
+ lcd = ~lcd;
}
return (lcd & 1);
@@ -1134,6 +1167,8 @@ static int asus_model_match(char *model)
return W5A;
else if (strncmp(model, "A4S", 3) == 0)
return A4S;
+ else if (strncmp(model, "F3Sa", 4) == 0)
+ return F3Sa;
else
return END_MODEL;
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index c4a769d1ba85..f6215e809808 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -194,6 +194,9 @@ static int acpi_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_MANUFACTURER:
val->strval = battery->oem_info;
break;
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ val->strval = battery->serial_number;
+ break;
default:
return -EINVAL;
}
@@ -212,6 +215,7 @@ static enum power_supply_property charge_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
static enum power_supply_property energy_battery_props[] = {
@@ -226,6 +230,7 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_ENERGY_NOW,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
#endif
diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c
index 6daf6088ac88..1fa86811b8ee 100644
--- a/drivers/acpi/bay.c
+++ b/drivers/acpi/bay.c
@@ -46,6 +46,12 @@ MODULE_LICENSE("GPL");
printk(KERN_DEBUG PREFIX "%s: %s\n", prefix, s); }
static void bay_notify(acpi_handle handle, u32 event, void *data);
+static const struct acpi_device_id bay_device_ids[] = {
+ {"LNXIOBAY", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, bay_device_ids);
+
struct bay {
acpi_handle handle;
char *name;
@@ -128,7 +134,7 @@ static ssize_t show_present(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", bay_present(bay));
}
-DEVICE_ATTR(present, S_IRUGO, show_present, NULL);
+static DEVICE_ATTR(present, S_IRUGO, show_present, NULL);
/*
* write_eject - write method for "eject" file in sysfs
@@ -144,7 +150,7 @@ static ssize_t write_eject(struct device *dev, struct device_attribute *attr,
eject_device(bay->handle);
return count;
}
-DEVICE_ATTR(eject, S_IWUSR, NULL, write_eject);
+static DEVICE_ATTR(eject, S_IWUSR, NULL, write_eject);
/**
* is_ata - see if a device is an ata device
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 8809654d6cc9..6dbaa2d15fe0 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -70,8 +70,6 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
/* IBM 600E - _ADR should return 7, but it returns 1 */
{"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
"Incorrect _ADR", 1},
- {"ASUS\0\0", "P2B-S ", 0, ACPI_SIG_DSDT, all_versions,
- "Bogus PCI routing", 1},
{""}
};
@@ -208,33 +206,35 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
* Disable OSI(Linux) warnings on all "Acer, inc."
*
* _OSI(Linux) disables the latest Windows BIOS code:
+ * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3100"),
* DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5050"),
+ * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
* DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5580"),
* DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 3010"),
* _OSI(Linux) effect unknown:
* DMI_MATCH(DMI_PRODUCT_NAME, "Ferrari 5000"),
*/
- {
- .callback = dmi_disable_osi_linux,
- .ident = "Acer, inc.",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer, inc."),
- },
- },
+ /*
+ * note that dmi_check_system() uses strstr()
+ * to match sub-strings rather than !strcmp(),
+ * so "Acer" below matches "Acer, inc." above.
+ */
/*
* Disable OSI(Linux) warnings on all "Acer"
*
* _OSI(Linux) effect unknown:
- * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
* DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
* DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720Z"),
* DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5520"),
* DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 6460"),
* DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 7510"),
* DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5220"),
+ *
+ * _OSI(Linux) is a NOP:
+ * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"),
*/
{
- .callback = dmi_unknown_osi_linux,
+ .callback = dmi_disable_osi_linux,
.ident = "Acer",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -242,21 +242,22 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
/*
* Disable OSI(Linux) warnings on all "Apple Computer, Inc."
+ * Disable OSI(Linux) warnings on all "Apple Inc."
*
* _OSI(Linux) confirmed to be a NOP:
* DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
* DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"),
* DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2"),
+ * DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3,1"),
* _OSI(Linux) effect unknown:
* DMI_MATCH(DMI_PRODUCT_NAME, "MacPro2,1"),
* DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,1"),
- * DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3,1"),
*/
{
.callback = dmi_disable_osi_linux,
.ident = "Apple",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
},
},
/*
@@ -294,13 +295,13 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
* DMI_MATCH(DMI_BOARD_NAME, "IFL91"),
*/
{
- .callback = dmi_unknown_osi_linux,
+ .callback = dmi_disable_osi_linux,
.ident = "Compal",
.matches = {
DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
},
},
- { /* OSI(Linux) touches USB, breaks suspend to disk */
+ { /* OSI(Linux) touches USB, unknown side-effect */
.callback = dmi_disable_osi_linux,
.ident = "Dell Dimension 5150",
.matches = {
@@ -310,7 +311,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{ /* OSI(Linux) is a NOP */
.callback = dmi_disable_osi_linux,
- .ident = "Dell",
+ .ident = "Dell i1501",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1501"),
@@ -318,7 +319,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{ /* OSI(Linux) effect unknown */
.callback = dmi_unknown_osi_linux,
- .ident = "Dell",
+ .ident = "Dell Latitude D830",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D830"),
@@ -326,7 +327,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{ /* OSI(Linux) effect unknown */
.callback = dmi_unknown_osi_linux,
- .ident = "Dell",
+ .ident = "Dell OP GX620",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex GX620"),
@@ -334,15 +335,23 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{ /* OSI(Linux) effect unknown */
.callback = dmi_unknown_osi_linux,
- .ident = "Dell",
+ .ident = "Dell PE 1900",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1900"),
},
},
+ { /* OSI(Linux) is a NOP */
+ .callback = dmi_disable_osi_linux,
+ .ident = "Dell PE R200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R200"),
+ },
+ },
{ /* OSI(Linux) touches USB */
.callback = dmi_disable_osi_linux,
- .ident = "Dell",
+ .ident = "Dell PR 390",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation 390"),
@@ -358,7 +367,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{ /* OSI(Linux) effect unknown */
.callback = dmi_unknown_osi_linux,
- .ident = "Dell",
+ .ident = "Dell PE SC440",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge SC440"),
@@ -474,6 +483,11 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
*
* _OSI(Linux) confirmed to be a NOP:
* DMI_MATCH(DMI_PRODUCT_NAME, "P1-J150B"),
+ * with DMI_MATCH(DMI_BOARD_NAME, "ROCKY"),
+ *
+ * unknown:
+ * DMI_MATCH(DMI_PRODUCT_NAME, "S1-MDGDG"),
+ * with DMI_MATCH(DMI_BOARD_NAME, "ROCKY"),
*/
{
.callback = dmi_disable_osi_linux,
@@ -516,7 +530,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
* DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ11M"),
*/
{
- .callback = dmi_unknown_osi_linux,
+ .callback = dmi_disable_osi_linux,
.ident = "Sony",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 1b4cf984b081..8b0d4b7d188a 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -122,6 +122,31 @@ int acpi_bus_get_status(struct acpi_device *device)
EXPORT_SYMBOL(acpi_bus_get_status);
+void acpi_bus_private_data_handler(acpi_handle handle,
+ u32 function, void *context)
+{
+ return;
+}
+EXPORT_SYMBOL(acpi_bus_private_data_handler);
+
+int acpi_bus_get_private_data(acpi_handle handle, void **data)
+{
+ acpi_status status = AE_OK;
+
+ if (!*data)
+ return -EINVAL;
+
+ status = acpi_get_data(handle, acpi_bus_private_data_handler, data);
+ if (ACPI_FAILURE(status) || !*data) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
+ handle));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(acpi_bus_get_private_data);
+
/* --------------------------------------------------------------------------
Power Management
-------------------------------------------------------------------------- */
@@ -366,7 +391,6 @@ int acpi_bus_receive_event(struct acpi_bus_event *event)
return 0;
}
-EXPORT_SYMBOL(acpi_bus_receive_event);
#endif /* CONFIG_ACPI_PROC_EVENT */
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
index bf513e07b773..6df564f4ca6e 100644
--- a/drivers/acpi/debug.c
+++ b/drivers/acpi/debug.c
@@ -130,6 +130,63 @@ static int param_get_debug_level(char *buffer, struct kernel_param *kp) {
module_param_call(debug_layer, param_set_uint, param_get_debug_layer, &acpi_dbg_layer, 0644);
module_param_call(debug_level, param_set_uint, param_get_debug_level, &acpi_dbg_level, 0644);
+static char trace_method_name[6];
+module_param_string(trace_method_name, trace_method_name, 6, 0644);
+static unsigned int trace_debug_layer;
+module_param(trace_debug_layer, uint, 0644);
+static unsigned int trace_debug_level;
+module_param(trace_debug_level, uint, 0644);
+
+static int param_set_trace_state(const char *val, struct kernel_param *kp)
+{
+ int result = 0;
+
+ if (!strncmp(val, "enable", strlen("enable") - 1)) {
+ result = acpi_debug_trace(trace_method_name, trace_debug_level,
+ trace_debug_layer, 0);
+ if (result)
+ result = -EBUSY;
+ goto exit;
+ }
+
+ if (!strncmp(val, "disable", strlen("disable") - 1)) {
+ int name = 0;
+ result = acpi_debug_trace((char *)&name, trace_debug_level,
+ trace_debug_layer, 0);
+ if (result)
+ result = -EBUSY;
+ goto exit;
+ }
+
+ if (!strncmp(val, "1", 1)) {
+ result = acpi_debug_trace(trace_method_name, trace_debug_level,
+ trace_debug_layer, 1);
+ if (result)
+ result = -EBUSY;
+ goto exit;
+ }
+
+ result = -EINVAL;
+exit:
+ return result;
+}
+
+static int param_get_trace_state(char *buffer, struct kernel_param *kp)
+{
+ if (!acpi_gbl_trace_method_name)
+ return sprintf(buffer, "disable");
+ else {
+ if (acpi_gbl_trace_flags & 1)
+ return sprintf(buffer, "1");
+ else
+ return sprintf(buffer, "enable");
+ }
+ return 0;
+}
+
+module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
+ NULL, 0644);
+
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
index fc9da4879cbf..f501e083aac7 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -359,7 +359,9 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
status = acpi_os_validate_address(obj_desc->region.space_id,
obj_desc->region.address,
- (acpi_size) obj_desc->region.length);
+ (acpi_size) obj_desc->region.length,
+ acpi_ut_get_node_name(node));
+
if (ACPI_FAILURE(status)) {
/*
* Invalid address/length. We will emit an error message and mark
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 1dabdf4c07b3..307cef65c247 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -51,6 +51,12 @@ static struct atomic_notifier_head dock_notifier_list;
static struct platform_device *dock_device;
static char dock_device_name[] = "dock";
+static const struct acpi_device_id dock_device_ids[] = {
+ {"LNXDOCK", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, dock_device_ids);
+
struct dock_station {
acpi_handle handle;
unsigned long last_dock_time;
@@ -680,7 +686,7 @@ static ssize_t show_docked(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", dock_present(dock_station));
}
-DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
+static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
/*
* show_flags - read method for flags file in sysfs
@@ -691,7 +697,7 @@ static ssize_t show_flags(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
}
-DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
+static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
/*
* write_undock - write method for "undock" file in sysfs
@@ -707,7 +713,7 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST);
return ret ? ret: count;
}
-DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
+static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
/*
* show_dock_uid - read method for "uid" file in sysfs
@@ -723,7 +729,7 @@ static ssize_t show_dock_uid(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%lx\n", lbuf);
}
-DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
+static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
/**
* dock_add - add a new dock station
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 987b967c7467..7222a18a0319 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -573,7 +573,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
void *handler_context, void *region_context)
{
struct acpi_ec *ec = handler_context;
- int result = 0, i = 0;
+ int result = 0, i;
u8 temp = 0;
if ((address > 0xFF) || !value || !handler_context)
@@ -585,7 +585,18 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
if (bits != 8 && acpi_strict)
return AE_BAD_PARAMETER;
- while (bits - i > 0) {
+ acpi_ec_burst_enable(ec);
+
+ if (function == ACPI_READ) {
+ result = acpi_ec_read(ec, address, &temp);
+ *value = temp;
+ } else {
+ temp = 0xff & (*value);
+ result = acpi_ec_write(ec, address, temp);
+ }
+
+ for (i = 8; unlikely(bits - i > 0); i += 8) {
+ ++address;
if (function == ACPI_READ) {
result = acpi_ec_read(ec, address, &temp);
(*value) |= ((acpi_integer)temp) << i;
@@ -593,10 +604,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
temp = 0xff & ((*value) >> i);
result = acpi_ec_write(ec, address, temp);
}
- i += 8;
- ++address;
}
+ acpi_ec_burst_disable(ec);
+
switch (result) {
case -EINVAL:
return AE_BAD_PARAMETER;
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 5c95863f8fa9..5479dc0eeeec 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -109,6 +109,34 @@ static const struct file_operations acpi_system_event_ops = {
};
#endif /* CONFIG_ACPI_PROC_EVENT */
+/* ACPI notifier chain */
+BLOCKING_NOTIFIER_HEAD(acpi_chain_head);
+
+int acpi_notifier_call_chain(struct acpi_device *dev, u32 type, u32 data)
+{
+ struct acpi_bus_event event;
+
+ strcpy(event.device_class, dev->pnp.device_class);
+ strcpy(event.bus_id, dev->pnp.bus_id);
+ event.type = type;
+ event.data = data;
+ return (blocking_notifier_call_chain(&acpi_chain_head, 0, (void *)&event)
+ == NOTIFY_BAD) ? -EINVAL : 0;
+}
+EXPORT_SYMBOL(acpi_notifier_call_chain);
+
+int register_acpi_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&acpi_chain_head, nb);
+}
+EXPORT_SYMBOL(register_acpi_notifier);
+
+int unregister_acpi_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&acpi_chain_head, nb);
+}
+EXPORT_SYMBOL(unregister_acpi_notifier);
+
#ifdef CONFIG_NET
static unsigned int acpi_event_seqnum;
struct acpi_genl_event {
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
index e41287815ea1..3048801a37b5 100644
--- a/drivers/acpi/events/evevent.c
+++ b/drivers/acpi/events/evevent.c
@@ -259,7 +259,7 @@ u32 acpi_ev_fixed_event_detect(void)
enable_bit_mask)) {
/* Found an active (signalled) event */
-
+ acpi_os_fixed_event_count(i);
int_status |= acpi_ev_fixed_event_dispatch((u32) i);
}
}
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index e22f4a973c0f..0dadd2adc800 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -270,18 +270,18 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
case ACPI_GPE_TYPE_WAKE_RUN:
ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
- /*lint -fallthrough */
+ /* fallthrough */
case ACPI_GPE_TYPE_RUNTIME:
/* Disable the requested runtime GPE */
ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
- status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
- break;
+
+ /* fallthrough */
default:
- return_ACPI_STATUS(AE_BAD_PARAMETER);
+ acpi_hw_write_gpe_enable_reg(gpe_event_info);
}
return_ACPI_STATUS(AE_OK);
@@ -501,6 +501,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
* an interrupt handler.
*
******************************************************************************/
+static void acpi_ev_asynch_enable_gpe(void *context);
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
@@ -576,22 +577,30 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
method_node)));
}
}
+ /* Defer enabling of GPE until all notify handlers are done */
+ acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
+ gpe_event_info);
+ return_VOID;
+}
- if ((local_gpe_event_info.flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+static void acpi_ev_asynch_enable_gpe(void *context)
+{
+ struct acpi_gpe_event_info *gpe_event_info = context;
+ acpi_status status;
+ if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
ACPI_GPE_LEVEL_TRIGGERED) {
/*
* GPE is level-triggered, we clear the GPE status bit after
* handling the event.
*/
- status = acpi_hw_clear_gpe(&local_gpe_event_info);
+ status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
return_VOID;
}
}
/* Enable this GPE */
-
- (void)acpi_hw_write_gpe_enable_reg(&local_gpe_event_info);
+ (void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
return_VOID;
}
@@ -618,7 +627,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
- acpi_gpe_count++;
+ acpi_os_gpe_count(gpe_number);
/*
* If edge-triggered, clear the GPE status bit now. Note that
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index a6e149d692cb..48cb705b274a 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -30,7 +30,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
-
+#include <linux/thermal.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -68,9 +68,55 @@ static struct acpi_driver acpi_fan_driver = {
},
};
+/* thermal cooling device callbacks */
+static int fan_get_max_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ /* ACPI fan device only support two states: ON/OFF */
+ return sprintf(buf, "1\n");
+}
+
+static int fan_get_cur_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ int state;
+ int result;
+
+ if (!device)
+ return -EINVAL;
+
+ result = acpi_bus_get_power(device->handle, &state);
+ if (result)
+ return result;
+
+ return sprintf(buf, "%s\n", state == ACPI_STATE_D3 ? "0" :
+ (state == ACPI_STATE_D0 ? "1" : "unknown"));
+}
+
+static int
+fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned int state)
+{
+ struct acpi_device *device = cdev->devdata;
+ int result;
+
+ if (!device || (state != 0 && state != 1))
+ return -EINVAL;
+
+ result = acpi_bus_set_power(device->handle,
+ state ? ACPI_STATE_D0 : ACPI_STATE_D3);
+
+ return result;
+}
+
+static struct thermal_cooling_device_ops fan_cooling_ops = {
+ .get_max_state = fan_get_max_state,
+ .get_cur_state = fan_get_cur_state,
+ .set_cur_state = fan_set_cur_state,
+};
+
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
+#ifdef CONFIG_ACPI_PROCFS
static struct proc_dir_entry *acpi_fan_dir;
@@ -171,7 +217,17 @@ static int acpi_fan_remove_fs(struct acpi_device *device)
return 0;
}
+#else
+static int acpi_fan_add_fs(struct acpi_device *device)
+{
+ return 0;
+}
+static int acpi_fan_remove_fs(struct acpi_device *device)
+{
+ return 0;
+}
+#endif
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -179,9 +235,8 @@ static int acpi_fan_remove_fs(struct acpi_device *device)
static int acpi_fan_add(struct acpi_device *device)
{
int result = 0;
- struct acpi_fan *fan = NULL;
int state = 0;
-
+ struct thermal_cooling_device *cdev;
if (!device)
return -EINVAL;
@@ -199,6 +254,25 @@ static int acpi_fan_add(struct acpi_device *device)
acpi_bus_set_power(device->handle, state);
device->flags.force_power_state = 0;
+ cdev = thermal_cooling_device_register("Fan", device,
+ &fan_cooling_ops);
+ if (cdev)
+ printk(KERN_INFO PREFIX
+ "%s is registered as cooling_device%d\n",
+ device->dev.bus_id, cdev->id);
+ else
+ goto end;
+ acpi_driver_data(device) = cdev;
+ result = sysfs_create_link(&device->dev.kobj, &cdev->device.kobj,
+ "thermal_cooling");
+ if (result)
+ return result;
+
+ result = sysfs_create_link(&cdev->device.kobj, &device->dev.kobj,
+ "device");
+ if (result)
+ return result;
+
result = acpi_fan_add_fs(device);
if (result)
goto end;
@@ -208,18 +282,20 @@ static int acpi_fan_add(struct acpi_device *device)
!device->power.state ? "on" : "off");
end:
- if (result)
- kfree(fan);
-
return result;
}
static int acpi_fan_remove(struct acpi_device *device, int type)
{
- if (!device || !acpi_driver_data(device))
+ struct thermal_cooling_device *cdev = acpi_driver_data(device);
+
+ if (!device || !cdev)
return -EINVAL;
acpi_fan_remove_fs(device);
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+ sysfs_remove_link(&cdev->device.kobj, "device");
+ thermal_cooling_device_unregister(cdev);
return 0;
}
@@ -261,10 +337,12 @@ static int __init acpi_fan_init(void)
int result = 0;
+#ifdef CONFIG_ACPI_PROCFS
acpi_fan_dir = proc_mkdir(ACPI_FAN_CLASS, acpi_root_dir);
if (!acpi_fan_dir)
return -ENODEV;
acpi_fan_dir->owner = THIS_MODULE;
+#endif
result = acpi_bus_register_driver(&acpi_fan_driver);
if (result < 0) {
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 4893e256e399..eda0978b57c6 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -36,8 +36,6 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
return -ENODEV;
}
-EXPORT_SYMBOL(register_acpi_bus_type);
-
int unregister_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
@@ -53,8 +51,6 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type)
return -ENODEV;
}
-EXPORT_SYMBOL(unregister_acpi_bus_type);
-
static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
{
struct acpi_bus_type *tmp, *ret = NULL;
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index fd1c4ba63367..058d0be5cbe2 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -286,13 +286,13 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
}
/*
+ * 1) Disable/Clear all GPEs
* 2) Enable all wakeup GPEs
*/
status = acpi_hw_disable_all_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
-
acpi_gbl_system_awake_and_running = FALSE;
status = acpi_hw_enable_all_wakeup_gpes();
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index f39fbc6b9237..b92133faf5b7 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -443,6 +443,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
struct acpica_device_id hid;
struct acpi_compatible_id_list *cid;
acpi_native_uint i;
+ int found;
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
@@ -496,16 +497,19 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
/* Walk the CID list */
+ found = 0;
for (i = 0; i < cid->count; i++) {
if (ACPI_STRNCMP(cid->id[i].value, info->hid,
sizeof(struct
- acpi_compatible_id)) !=
+ acpi_compatible_id)) ==
0) {
- ACPI_FREE(cid);
- return (AE_OK);
+ found = 1;
+ break;
}
}
ACPI_FREE(cid);
+ if (!found)
+ return (AE_OK);
}
}
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 0822d9fc1cb4..5d59cb33b1a5 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -78,6 +78,7 @@ int acpi_map_pxm_to_node(int pxm)
return node;
}
+#if 0
void __cpuinit acpi_unmap_pxm_to_node(int node)
{
int pxm = node_to_pxm_map[node];
@@ -85,6 +86,7 @@ void __cpuinit acpi_unmap_pxm_to_node(int node)
node_to_pxm_map[node] = PXM_INVAL;
node_clear(node, nodes_found_map);
}
+#endif /* 0 */
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
@@ -247,7 +249,6 @@ int acpi_get_pxm(acpi_handle h)
} while (ACPI_SUCCESS(status));
return -1;
}
-EXPORT_SYMBOL(acpi_get_pxm);
int acpi_get_node(acpi_handle *handle)
{
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index e53fb516f9d4..27ccd68b8f46 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -44,6 +44,8 @@
#include <asm/uaccess.h>
#include <linux/efi.h>
+#include <linux/ioport.h>
+#include <linux/list.h>
#define _COMPONENT ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");
@@ -74,9 +76,25 @@ static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
+struct acpi_res_list {
+ resource_size_t start;
+ resource_size_t end;
+ acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
+	char name[5]; /* ACPI names are at most 4 chars; keep a copy here
+			 instead of pointing at res->name, so no kmalloc is needed */
+ struct list_head resource_list;
+};
+
+static LIST_HEAD(resource_list_head);
+static DEFINE_SPINLOCK(acpi_res_lock);
+
#define OSI_STRING_LENGTH_MAX 64 /* arbitrary */
static char osi_additional_string[OSI_STRING_LENGTH_MAX];
+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
+static int acpi_no_initrd_override;
+#endif
+
/*
* "Ode to _OSI(Linux)"
*
@@ -120,7 +138,7 @@ static char osi_additional_string[OSI_STRING_LENGTH_MAX];
*/
#define OSI_LINUX_ENABLE 0
-struct osi_linux {
+static struct osi_linux {
unsigned int enable:1;
unsigned int dmi:1;
unsigned int cmdline:1;
@@ -219,8 +237,6 @@ void acpi_os_printf(const char *fmt, ...)
va_end(args);
}
-EXPORT_SYMBOL(acpi_os_printf);
-
void acpi_os_vprintf(const char *fmt, va_list args)
{
static char buffer[512];
@@ -250,11 +266,16 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
"System description tables not found\n");
return 0;
}
- } else
- return acpi_find_rsdp();
+ } else {
+ acpi_physical_address pa = 0;
+
+ acpi_find_root_pointer(&pa);
+ return pa;
+ }
}
-void __iomem *acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
+void __iomem *__init_refok
+acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
if (phys > ULONG_MAX) {
printk(KERN_ERR PREFIX "Cannot map memory that high\n");
@@ -312,6 +333,67 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
return AE_OK;
}
+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
+struct acpi_table_header *acpi_find_dsdt_initrd(void)
+{
+ struct file *firmware_file;
+ mm_segment_t oldfs;
+ unsigned long len, len2;
+ struct acpi_table_header *dsdt_buffer, *ret = NULL;
+ struct kstat stat;
+ char *ramfs_dsdt_name = "/DSDT.aml";
+
+	printk(KERN_INFO PREFIX "Checking initramfs for custom DSDT\n");
+
+ /*
+	 * Never do this at home: normally only user space is allowed to open files.
+ * The clean way would be to use the firmware loader.
+ * But this code must be run before there is any userspace available.
+ * A static/init firmware infrastructure doesn't exist yet...
+ */
+ if (vfs_stat(ramfs_dsdt_name, &stat) < 0)
+ return ret;
+
+ len = stat.size;
+ /* check especially against empty files */
+ if (len <= 4) {
+ printk(KERN_ERR PREFIX "Failed: DSDT only %lu bytes.\n", len);
+ return ret;
+ }
+
+ firmware_file = filp_open(ramfs_dsdt_name, O_RDONLY, 0);
+ if (IS_ERR(firmware_file)) {
+ printk(KERN_ERR PREFIX "Failed to open %s.\n", ramfs_dsdt_name);
+ return ret;
+ }
+
+ dsdt_buffer = kmalloc(len, GFP_ATOMIC);
+ if (!dsdt_buffer) {
+ printk(KERN_ERR PREFIX "Failed to allocate %lu bytes.\n", len);
+ goto err;
+ }
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ len2 = vfs_read(firmware_file, (char __user *)dsdt_buffer, len,
+ &firmware_file->f_pos);
+ set_fs(oldfs);
+ if (len2 < len) {
+ printk(KERN_ERR PREFIX "Failed to read %lu bytes from %s.\n",
+ len, ramfs_dsdt_name);
+ ACPI_FREE(dsdt_buffer);
+ goto err;
+ }
+
+ printk(KERN_INFO PREFIX "Found %lu byte DSDT in %s.\n",
+ len, ramfs_dsdt_name);
+ ret = dsdt_buffer;
+err:
+ filp_close(firmware_file, NULL);
+ return ret;
+}
+#endif
+
acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
struct acpi_table_header ** new_table)
@@ -319,20 +401,52 @@ acpi_os_table_override(struct acpi_table_header * existing_table,
if (!existing_table || !new_table)
return AE_BAD_PARAMETER;
+ *new_table = NULL;
+
#ifdef CONFIG_ACPI_CUSTOM_DSDT
if (strncmp(existing_table->signature, "DSDT", 4) == 0)
*new_table = (struct acpi_table_header *)AmlCode;
- else
- *new_table = NULL;
-#else
- *new_table = NULL;
#endif
+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
+ if ((strncmp(existing_table->signature, "DSDT", 4) == 0) &&
+ !acpi_no_initrd_override) {
+ struct acpi_table_header *initrd_table;
+
+ initrd_table = acpi_find_dsdt_initrd();
+ if (initrd_table)
+ *new_table = initrd_table;
+ }
+#endif
+ if (*new_table != NULL) {
+ printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
+ "this is unsafe: tainting kernel\n",
+ existing_table->signature,
+ existing_table->oem_table_id);
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+ }
return AE_OK;
}
+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
+int __init acpi_no_initrd_override_setup(char *s)
+{
+ acpi_no_initrd_override = 1;
+ return 1;
+}
+__setup("acpi_no_initrd_override", acpi_no_initrd_override_setup);
+#endif
+
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
- return (*acpi_irq_handler) (acpi_irq_context) ? IRQ_HANDLED : IRQ_NONE;
+ u32 handled;
+
+ handled = (*acpi_irq_handler) (acpi_irq_context);
+
+ if (handled) {
+ acpi_irq_handled++;
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
}
acpi_status
@@ -341,6 +455,8 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
{
unsigned int irq;
+ acpi_irq_stats_init();
+
/*
* Ignore the GSI from the core, and use the value in our copy of the
* FADT. It may not be the same if an interrupt source override exists
@@ -384,8 +500,6 @@ void acpi_os_sleep(acpi_integer ms)
schedule_timeout_interruptible(msecs_to_jiffies(ms));
}
-EXPORT_SYMBOL(acpi_os_sleep);
-
void acpi_os_stall(u32 us)
{
while (us) {
@@ -399,8 +513,6 @@ void acpi_os_stall(u32 us)
}
}
-EXPORT_SYMBOL(acpi_os_stall);
-
/*
* Support ACPI 3.0 AML Timer operand
* Returns 64-bit free-running, monotonically increasing timer
@@ -550,8 +662,6 @@ acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
return (result ? AE_ERROR : AE_OK);
}
-EXPORT_SYMBOL(acpi_os_read_pci_configuration);
-
acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
acpi_integer value, u32 width)
@@ -661,25 +771,6 @@ static void acpi_os_execute_deferred(struct work_struct *work)
dpc->function(dpc->context);
kfree(dpc);
- /* Yield cpu to notify thread */
- cond_resched();
-
- return;
-}
-
-static void acpi_os_execute_notify(struct work_struct *work)
-{
- struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
-
- if (!dpc) {
- printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
- return;
- }
-
- dpc->function(dpc->context);
-
- kfree(dpc);
-
return;
}
@@ -703,7 +794,7 @@ acpi_status acpi_os_execute(acpi_execute_type type,
{
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
-
+ struct workqueue_struct *queue;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Scheduling function [%p(%p)] for deferred execution.\n",
function, context));
@@ -727,20 +818,13 @@ acpi_status acpi_os_execute(acpi_execute_type type,
dpc->function = function;
dpc->context = context;
- if (type == OSL_NOTIFY_HANDLER) {
- INIT_WORK(&dpc->work, acpi_os_execute_notify);
- if (!queue_work(kacpi_notify_wq, &dpc->work)) {
- status = AE_ERROR;
- kfree(dpc);
- }
- } else {
- INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- if (!queue_work(kacpid_wq, &dpc->work)) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Call to queue_work() failed.\n"));
- status = AE_ERROR;
- kfree(dpc);
- }
+ INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+ queue = (type == OSL_NOTIFY_HANDLER) ? kacpi_notify_wq : kacpid_wq;
+ if (!queue_work(queue, &dpc->work)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Call to queue_work() failed.\n"));
+ status = AE_ERROR;
+ kfree(dpc);
}
return_ACPI_STATUS(status);
}
@@ -793,8 +877,6 @@ acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
return AE_OK;
}
-EXPORT_SYMBOL(acpi_os_create_semaphore);
-
/*
* TODO: A better way to delete semaphores? Linux doesn't have a
* 'delete_semaphore()' function -- may result in an invalid
@@ -818,8 +900,6 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle)
return AE_OK;
}
-EXPORT_SYMBOL(acpi_os_delete_semaphore);
-
/*
* TODO: The kernel doesn't have a 'down_timeout' function -- had to
* improvise. The process is to sleep for one scheduler quantum
@@ -912,8 +992,6 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
return status;
}
-EXPORT_SYMBOL(acpi_os_wait_semaphore);
-
/*
* TODO: Support for units > 1?
*/
@@ -936,8 +1014,6 @@ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
return AE_OK;
}
-EXPORT_SYMBOL(acpi_os_signal_semaphore);
-
#ifdef ACPI_FUTURE_USAGE
u32 acpi_os_get_line(char *buffer)
{
@@ -981,8 +1057,6 @@ acpi_status acpi_os_signal(u32 function, void *info)
return AE_OK;
}
-EXPORT_SYMBOL(acpi_os_signal);
-
static int __init acpi_os_name_setup(char *str)
{
char *p = acpi_os_name;
@@ -1102,6 +1176,128 @@ static int __init acpi_wake_gpes_always_on_setup(char *str)
__setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
+/* Check for resource conflicts between native drivers and ACPI
+ * OperationRegions (SystemIO and System Memory only).
+ * IO ports and memory declared in ACPI might be used by the ACPI subsystem
+ * in arbitrary AML code and can interfere with legacy drivers.
+ * acpi_enforce_resources= can be set to:
+ *
+ * - strict (2)
+ *   -> a driver that later tries to access these resources will not load
+ * - lax (default) (1)
+ *   -> a driver that later tries to access these resources will load, but
+ *      a warning is printed that something might go wrong...
+ *
+ * - no (0)
+ *   -> ACPI Operation Region resources will not be registered
+ *
+ */
+#define ENFORCE_RESOURCES_STRICT 2
+#define ENFORCE_RESOURCES_LAX 1
+#define ENFORCE_RESOURCES_NO 0
+
+static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
+
+static int __init acpi_enforce_resources_setup(char *str)
+{
+ if (str == NULL || *str == '\0')
+ return 0;
+
+ if (!strcmp("strict", str))
+ acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
+ else if (!strcmp("lax", str))
+ acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
+ else if (!strcmp("no", str))
+ acpi_enforce_resources = ENFORCE_RESOURCES_NO;
+
+ return 1;
+}
+
+__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
+
+/* Check for resource conflicts between ACPI OperationRegions and native
+ * drivers */
+int acpi_check_resource_conflict(struct resource *res)
+{
+ struct acpi_res_list *res_list_elem;
+ int ioport;
+ int clash = 0;
+
+ if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
+ return 0;
+ if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
+ return 0;
+
+ ioport = res->flags & IORESOURCE_IO;
+
+ spin_lock(&acpi_res_lock);
+ list_for_each_entry(res_list_elem, &resource_list_head,
+ resource_list) {
+ if (ioport && (res_list_elem->resource_type
+ != ACPI_ADR_SPACE_SYSTEM_IO))
+ continue;
+ if (!ioport && (res_list_elem->resource_type
+ != ACPI_ADR_SPACE_SYSTEM_MEMORY))
+ continue;
+
+ if (res->end < res_list_elem->start
+ || res_list_elem->end < res->start)
+ continue;
+ clash = 1;
+ break;
+ }
+ spin_unlock(&acpi_res_lock);
+
+ if (clash) {
+ if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
+ printk(KERN_INFO "%sACPI: %s resource %s [0x%llx-0x%llx]"
+ " conflicts with ACPI region %s"
+ " [0x%llx-0x%llx]\n",
+ acpi_enforce_resources == ENFORCE_RESOURCES_LAX
+ ? KERN_WARNING : KERN_ERR,
+ ioport ? "I/O" : "Memory", res->name,
+ (long long) res->start, (long long) res->end,
+ res_list_elem->name,
+ (long long) res_list_elem->start,
+ (long long) res_list_elem->end);
+ printk(KERN_INFO "ACPI: Device needs an ACPI driver\n");
+ }
+ if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
+ return -EBUSY;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(acpi_check_resource_conflict);
+
+int acpi_check_region(resource_size_t start, resource_size_t n,
+ const char *name)
+{
+ struct resource res = {
+ .start = start,
+ .end = start + n - 1,
+ .name = name,
+ .flags = IORESOURCE_IO,
+ };
+
+ return acpi_check_resource_conflict(&res);
+}
+EXPORT_SYMBOL(acpi_check_region);
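
A native driver would typically call acpi_check_region() (or acpi_check_mem_region()) before claiming a range; under acpi_enforce_resources=strict a conflict makes the call return -EBUSY. The conflict test in acpi_check_resource_conflict() treats two ranges as clashing unless one ends before the other begins. A standalone sketch of that overlap check (the range values are hypothetical examples, not from a real platform):

#include <stdio.h>

struct range { unsigned long start, end; };	/* inclusive bounds */

/* Same test as acpi_check_resource_conflict(): no clash only if one
 * range ends before the other begins. */
static int ranges_clash(struct range a, struct range b)
{
	return !(a.end < b.start || b.end < a.start);
}

int main(void)
{
	struct range opregion = { 0x800, 0x80f };	/* e.g. a SystemIO OpRegion */
	struct range driver   = { 0x808, 0x80b };	/* a driver's I/O request */

	printf("clash: %d\n", ranges_clash(opregion, driver));	/* 1 */
	driver.start = 0x810;
	driver.end   = 0x81f;
	printf("clash: %d\n", ranges_clash(opregion, driver));	/* 0 */
	return 0;
}
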
+
+int acpi_check_mem_region(resource_size_t start, resource_size_t n,
+ const char *name)
+{
+ struct resource res = {
+ .start = start,
+ .end = start + n - 1,
+ .name = name,
+ .flags = IORESOURCE_MEM,
+ };
+
+ return acpi_check_resource_conflict(&res);
+
+}
+EXPORT_SYMBOL(acpi_check_mem_region);
+
/*
* Acquire a spinlock.
*
@@ -1213,24 +1409,24 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
*
* Returns 0 on success
*/
-int acpi_dmi_dump(void)
+static int acpi_dmi_dump(void)
{
if (!dmi_available)
return -1;
printk(KERN_NOTICE PREFIX "DMI System Vendor: %s\n",
- dmi_get_slot(DMI_SYS_VENDOR));
+ dmi_get_system_info(DMI_SYS_VENDOR));
printk(KERN_NOTICE PREFIX "DMI Product Name: %s\n",
- dmi_get_slot(DMI_PRODUCT_NAME));
+ dmi_get_system_info(DMI_PRODUCT_NAME));
printk(KERN_NOTICE PREFIX "DMI Product Version: %s\n",
- dmi_get_slot(DMI_PRODUCT_VERSION));
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
printk(KERN_NOTICE PREFIX "DMI Board Name: %s\n",
- dmi_get_slot(DMI_BOARD_NAME));
+ dmi_get_system_info(DMI_BOARD_NAME));
printk(KERN_NOTICE PREFIX "DMI BIOS Vendor: %s\n",
- dmi_get_slot(DMI_BIOS_VENDOR));
+ dmi_get_system_info(DMI_BIOS_VENDOR));
printk(KERN_NOTICE PREFIX "DMI BIOS Date: %s\n",
- dmi_get_slot(DMI_BIOS_DATE));
+ dmi_get_system_info(DMI_BIOS_DATE));
return 0;
}
@@ -1303,10 +1499,46 @@ acpi_status
acpi_os_validate_address (
u8 space_id,
acpi_physical_address address,
- acpi_size length)
+ acpi_size length,
+ char *name)
{
+ struct acpi_res_list *res;
+ if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
+ return AE_OK;
- return AE_OK;
+ switch (space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+		/* Only interference checks against SystemIO and SystemMemory
+		   are needed */
+ res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
+ if (!res)
+ return AE_OK;
+		/* ACPI names are fixed at 4 bytes; use strlcpy to be safe */
+ strlcpy(res->name, name, 5);
+ res->start = address;
+ res->end = address + length - 1;
+ res->resource_type = space_id;
+ spin_lock(&acpi_res_lock);
+ list_add(&res->resource_list, &resource_list_head);
+ spin_unlock(&acpi_res_lock);
+ pr_debug("Added %s resource: start: 0x%llx, end: 0x%llx, "
+ "name: %s\n", (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ ? "SystemIO" : "System Memory",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ res->name);
+ break;
+ case ACPI_ADR_SPACE_PCI_CONFIG:
+ case ACPI_ADR_SPACE_EC:
+ case ACPI_ADR_SPACE_SMBUS:
+ case ACPI_ADR_SPACE_CMOS:
+ case ACPI_ADR_SPACE_PCI_BAR_TARGET:
+ case ACPI_ADR_SPACE_DATA_TABLE:
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ break;
+ }
+ return AE_OK;
}
#endif
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index 388300de005d..4b252ea0e952 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -44,6 +44,8 @@ struct acpi_pci_data {
struct pci_dev *dev;
};
+static int acpi_pci_unbind(struct acpi_device *device);
+
static void acpi_pci_data_handler(acpi_handle handle, u32 function,
void *context)
{
@@ -267,7 +269,7 @@ int acpi_pci_bind(struct acpi_device *device)
return result;
}
-int acpi_pci_unbind(struct acpi_device *device)
+static int acpi_pci_unbind(struct acpi_device *device)
{
int result = 0;
acpi_status status = AE_OK;
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 62010c2481b3..7f19859580c7 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -51,10 +51,8 @@ static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(int segment,
int bus,
int device, int pin)
{
- struct list_head *node = NULL;
struct acpi_prt_entry *entry = NULL;
-
if (!acpi_prt.count)
return NULL;
@@ -64,8 +62,7 @@ static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(int segment,
*
*/
spin_lock(&acpi_prt_lock);
- list_for_each(node, &acpi_prt.entries) {
- entry = list_entry(node, struct acpi_prt_entry, node);
+ list_for_each_entry(entry, &acpi_prt.entries, node) {
if ((segment == entry->id.segment)
&& (bus == entry->id.bus)
&& (device == entry->id.device)
@@ -478,8 +475,6 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return 0;
}
-EXPORT_SYMBOL(acpi_pci_irq_enable);
-
/* FIXME: implement x86/x86_64 version */
void __attribute__ ((weak)) acpi_unregister_gsi(u32 i)
{
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 5400ea173f6f..233c40c51684 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -95,7 +95,7 @@ static struct {
int count;
struct list_head entries;
} acpi_link;
-DEFINE_MUTEX(acpi_link_lock);
+static DEFINE_MUTEX(acpi_link_lock);
/* --------------------------------------------------------------------------
PCI Link Device Management
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index af1769a20c7a..76bf6d90c700 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -458,11 +458,9 @@ int acpi_power_transition(struct acpi_device *device, int state)
}
end:
- if (result) {
+ if (result)
device->power.state = ACPI_STATE_UNKNOWN;
- printk(KERN_WARNING PREFIX "Transitioning device [%s] to D%d\n",
- device->pnp.bus_id, state);
- } else {
+ else {
/* We shouldn't change the state till all above operations succeed */
device->power.state = state;
}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index e48ee4f8749f..75ccf5d18bf4 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -668,6 +668,24 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device)
acpi_processor_power_init(pr, device);
+ pr->cdev = thermal_cooling_device_register("Processor", device,
+ &processor_cooling_ops);
+ if (pr->cdev)
+ printk(KERN_INFO PREFIX
+ "%s is registered as cooling_device%d\n",
+ device->dev.bus_id, pr->cdev->id);
+ else
+ goto end;
+
+ result = sysfs_create_link(&device->dev.kobj, &pr->cdev->device.kobj,
+ "thermal_cooling");
+ if (result)
+ return result;
+ result = sysfs_create_link(&pr->cdev->device.kobj, &device->dev.kobj,
+ "device");
+ if (result)
+ return result;
+
if (pr->flags.throttling) {
printk(KERN_INFO PREFIX "%s [%s] (supports",
acpi_device_name(device), acpi_device_bid(device));
@@ -791,6 +809,11 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
acpi_processor_remove_fs(device);
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+ sysfs_remove_link(&pr->cdev->device.kobj, "device");
+ thermal_cooling_device_unregister(pr->cdev);
+ pr->cdev = NULL;
+
processors[pr->id] = NULL;
kfree(pr);
@@ -812,11 +835,18 @@ static int is_processor_present(acpi_handle handle)
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
- if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) {
- ACPI_EXCEPTION((AE_INFO, status, "Processor Device is not present"));
- return 0;
- }
- return 1;
+ /*
+ * if a processor object does not have an _STA object,
+ * OSPM assumes that the processor is present.
+ */
+ if (status == AE_NOT_FOUND)
+ return 1;
+
+ if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
+ return 1;
+
+ ACPI_EXCEPTION((AE_INFO, status, "Processor Device is not present"));
+ return 0;
}
static
@@ -1061,6 +1091,8 @@ static int __init acpi_processor_init(void)
acpi_processor_ppc_init();
+ acpi_processor_throttling_init();
+
return 0;
out_cpuidle:
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index eb1f82f79153..32003fdc91e8 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -38,7 +38,7 @@
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h> /* need_resched() */
-#include <linux/latency.h>
+#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
@@ -98,6 +98,9 @@ module_param(bm_history, uint, 0644);
static int acpi_processor_set_power_policy(struct acpi_processor *pr);
+#else /* CONFIG_CPU_IDLE */
+static unsigned int latency_factor __read_mostly = 2;
+module_param(latency_factor, uint, 0644);
#endif
/*
@@ -201,6 +204,10 @@ static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}
+/*
+ * Callers should disable interrupts before the call and enable
+ * interrupts after return.
+ */
static void acpi_safe_halt(void)
{
current_thread_info()->status &= ~TS_POLLING;
@@ -261,7 +268,7 @@ static atomic_t c3_cpu_count;
/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
- if (cstate->space_id == ACPI_CSTATE_FFH) {
+ if (cstate->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
acpi_processor_ffh_cstate_enter(cstate);
} else {
@@ -413,6 +420,8 @@ static void acpi_processor_idle(void)
pm_idle_save();
else
acpi_safe_halt();
+
+ local_irq_enable();
return;
}
@@ -521,6 +530,7 @@ static void acpi_processor_idle(void)
* skew otherwise.
*/
sleep_ticks = 0xFFFFFFFF;
+ local_irq_enable();
break;
case ACPI_STATE_C2:
@@ -648,7 +658,8 @@ static void acpi_processor_idle(void)
if (cx->promotion.state &&
((cx->promotion.state - pr->power.states) <= max_cstate)) {
if (sleep_ticks > cx->promotion.threshold.ticks &&
- cx->promotion.state->latency <= system_latency_constraint()) {
+ cx->promotion.state->latency <=
+ pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
cx->promotion.count++;
cx->demotion.count = 0;
if (cx->promotion.count >=
@@ -692,7 +703,8 @@ static void acpi_processor_idle(void)
* or if the latency of the current state is unacceptable
*/
if ((pr->power.state - pr->power.states) > max_cstate ||
- pr->power.state->latency > system_latency_constraint()) {
+ pr->power.state->latency >
+ pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
if (cx->demotion.state)
next_state = cx->demotion.state;
}
@@ -920,20 +932,20 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
cx.address = reg->address;
cx.index = current_count + 1;
- cx.space_id = ACPI_CSTATE_SYSTEMIO;
+ cx.entry_method = ACPI_CSTATE_SYSTEMIO;
if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
if (acpi_processor_ffh_cstate_probe
(pr->id, &cx, reg) == 0) {
- cx.space_id = ACPI_CSTATE_FFH;
- } else if (cx.type != ACPI_STATE_C1) {
+ cx.entry_method = ACPI_CSTATE_FFH;
+ } else if (cx.type == ACPI_STATE_C1) {
/*
* C1 is a special case where FIXED_HARDWARE
* can be handled in non-MWAIT way as well.
* In that case, save this _CST entry info.
- * That is, we retain space_id of SYSTEM_IO for
- * halt based C1.
* Otherwise, ignore this info and continue.
*/
+ cx.entry_method = ACPI_CSTATE_HALT;
+ } else {
continue;
}
}
@@ -1200,7 +1212,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
"maximum allowed latency: %d usec\n",
pr->power.state ? pr->power.state - pr->power.states : 0,
max_cstate, (unsigned)pr->power.bm_activity,
- system_latency_constraint());
+ pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));
seq_puts(seq, "states:\n");
@@ -1367,12 +1379,16 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
/**
* acpi_idle_do_entry - a helper function that does C2 and C3 type entry
* @cx: cstate data
+ *
+ * The caller disables interrupts before the call and enables them after return.
*/
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
- if (cx->space_id == ACPI_CSTATE_FFH) {
+ if (cx->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
acpi_processor_ffh_cstate_enter(cx);
+ } else if (cx->entry_method == ACPI_CSTATE_HALT) {
+ acpi_safe_halt();
} else {
int unused;
/* IO port based C-state */
@@ -1394,21 +1410,27 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
struct cpuidle_state *state)
{
+ u32 t1, t2;
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+
pr = processors[smp_processor_id()];
if (unlikely(!pr))
return 0;
+ local_irq_disable();
if (pr->flags.bm_check)
acpi_idle_update_bm_rld(pr, cx);
- acpi_safe_halt();
+ t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ acpi_idle_do_entry(cx);
+ t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ local_irq_enable();
cx->usage++;
- return 0;
+ return ticks_elapsed_in_us(t1, t2);
}
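
acpi_idle_enter_c1() now brackets the C1 entry with two PM-timer reads and returns the elapsed time, which is why CPUIDLE_FLAG_TIME_VALID is set for C1 later in this patch. A standalone sketch of the elapsed-tick math, based on the wrap-around formula shown earlier for ticks_elapsed_in_us() (assumes a 32-bit PM timer; the kernel helper also distinguishes 24-bit timers, omitted here):

#include <stdio.h>
#include <stdint.h>

/* The ACPI PM timer runs at 3.579545 MHz. */
#define PM_TIMER_FREQUENCY	3579545ULL

static uint64_t ticks_to_us(uint64_t ticks)
{
	return ticks * 1000000ULL / PM_TIMER_FREQUENCY;
}

/* Elapsed time between two reads of a free-running 32-bit PM timer,
 * tolerating a single wrap-around. */
static uint64_t ticks_elapsed_in_us(uint32_t t1, uint32_t t2)
{
	if (t2 >= t1)
		return ticks_to_us(t2 - t1);
	return ticks_to_us((0xFFFFFFFFu - t1) + t2);
}

int main(void)
{
	/* ~1 second worth of ticks, then a wrapped read. */
	printf("%llu us\n", (unsigned long long)ticks_elapsed_in_us(100, 3579645));
	printf("%llu us\n", (unsigned long long)ticks_elapsed_in_us(0xFFFFFF00u, 0x100));
	return 0;
}
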
/**
@@ -1515,7 +1537,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
if (dev->safe_state) {
return dev->safe_state->enter(dev, dev->safe_state);
} else {
+ local_irq_disable();
acpi_safe_halt();
+ local_irq_enable();
return 0;
}
}
@@ -1607,7 +1631,7 @@ struct cpuidle_driver acpi_idle_driver = {
*/
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
- int i, count = 0;
+ int i, count = CPUIDLE_DRIVER_STATE_START;
struct acpi_processor_cx *cx;
struct cpuidle_state *state;
struct cpuidle_device *dev = &pr->power.dev;
@@ -1636,13 +1660,14 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
state->exit_latency = cx->latency;
- state->target_residency = cx->latency * 6;
+ state->target_residency = cx->latency * latency_factor;
state->power_usage = cx->power;
state->flags = 0;
switch (cx->type) {
case ACPI_STATE_C1:
state->flags |= CPUIDLE_FLAG_SHALLOW;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = acpi_idle_enter_c1;
dev->safe_state = state;
break;
@@ -1665,6 +1690,8 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
}
count++;
+ if (count == CPUIDLE_STATE_MAX)
+ break;
}
dev->state_count = count;
@@ -1718,8 +1745,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
"ACPI: processor limited to max C-state %d\n",
max_cstate);
first_run++;
-#if !defined (CONFIG_CPU_IDLE) && defined (CONFIG_SMP)
- register_latency_notifier(&acpi_processor_latency_notifier);
+#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
+ pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
+ &acpi_processor_latency_notifier);
#endif
}
@@ -1806,7 +1834,8 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
*/
cpu_idle_wait();
#ifdef CONFIG_SMP
- unregister_latency_notifier(&acpi_processor_latency_notifier);
+ pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
+ &acpi_processor_latency_notifier);
#endif
}
#endif
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 463b0247cbc5..f32010bee4d5 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -60,6 +60,11 @@ static DEFINE_MUTEX(performance_mutex);
* policy is adjusted accordingly.
*/
+static unsigned int ignore_ppc = 0;
+module_param(ignore_ppc, uint, 0644);
+MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly " \
+ "limited by BIOS, this should help");
+
#define PPC_REGISTERED 1
#define PPC_IN_USE 2
@@ -72,6 +77,9 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
struct acpi_processor *pr;
unsigned int ppc = 0;
+ if (ignore_ppc)
+ return 0;
+
mutex_lock(&performance_mutex);
if (event != CPUFREQ_INCOMPATIBLE)
@@ -130,7 +138,13 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
{
- int ret = acpi_processor_get_platform_limit(pr);
+ int ret;
+
+ if (ignore_ppc)
+ return 0;
+
+ ret = acpi_processor_get_platform_limit(pr);
+
if (ret < 0)
return (ret);
else
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 06e6f3fb8825..9cb43f52f7b6 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -32,6 +32,7 @@
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/sysdev.h>
#include <asm/uaccess.h>
@@ -93,6 +94,9 @@ static int acpi_processor_apply_limit(struct acpi_processor *pr)
* _any_ cpufreq driver and not only the acpi-cpufreq driver.
*/
+#define CPUFREQ_THERMAL_MIN_STEP 0
+#define CPUFREQ_THERMAL_MAX_STEP 3
+
static unsigned int cpufreq_thermal_reduction_pctg[NR_CPUS];
static unsigned int acpi_thermal_cpufreq_is_init = 0;
@@ -109,8 +113,9 @@ static int acpi_thermal_cpufreq_increase(unsigned int cpu)
if (!cpu_has_cpufreq(cpu))
return -ENODEV;
- if (cpufreq_thermal_reduction_pctg[cpu] < 60) {
- cpufreq_thermal_reduction_pctg[cpu] += 20;
+ if (cpufreq_thermal_reduction_pctg[cpu] <
+ CPUFREQ_THERMAL_MAX_STEP) {
+ cpufreq_thermal_reduction_pctg[cpu]++;
cpufreq_update_policy(cpu);
return 0;
}
@@ -123,8 +128,9 @@ static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
if (!cpu_has_cpufreq(cpu))
return -ENODEV;
- if (cpufreq_thermal_reduction_pctg[cpu] > 20)
- cpufreq_thermal_reduction_pctg[cpu] -= 20;
+ if (cpufreq_thermal_reduction_pctg[cpu] >
+ (CPUFREQ_THERMAL_MIN_STEP + 1))
+ cpufreq_thermal_reduction_pctg[cpu]--;
else
cpufreq_thermal_reduction_pctg[cpu] = 0;
cpufreq_update_policy(cpu);
@@ -143,7 +149,7 @@ static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
max_freq =
(policy->cpuinfo.max_freq *
- (100 - cpufreq_thermal_reduction_pctg[policy->cpu])) / 100;
+ (100 - cpufreq_thermal_reduction_pctg[policy->cpu] * 20)) / 100;
cpufreq_verify_within_limits(policy, 0, max_freq);
@@ -155,6 +161,32 @@ static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
.notifier_call = acpi_thermal_cpufreq_notifier,
};
+static int cpufreq_get_max_state(unsigned int cpu)
+{
+ if (!cpu_has_cpufreq(cpu))
+ return 0;
+
+ return CPUFREQ_THERMAL_MAX_STEP;
+}
+
+static int cpufreq_get_cur_state(unsigned int cpu)
+{
+ if (!cpu_has_cpufreq(cpu))
+ return 0;
+
+ return cpufreq_thermal_reduction_pctg[cpu];
+}
+
+static int cpufreq_set_cur_state(unsigned int cpu, int state)
+{
+ if (!cpu_has_cpufreq(cpu))
+ return 0;
+
+ cpufreq_thermal_reduction_pctg[cpu] = state;
+ cpufreq_update_policy(cpu);
+ return 0;
+}
+
void acpi_thermal_cpufreq_init(void)
{
int i;
@@ -179,6 +211,20 @@ void acpi_thermal_cpufreq_exit(void)
}
#else /* ! CONFIG_CPU_FREQ */
+static int cpufreq_get_max_state(unsigned int cpu)
+{
+ return 0;
+}
+
+static int cpufreq_get_cur_state(unsigned int cpu)
+{
+ return 0;
+}
+
+static int cpufreq_set_cur_state(unsigned int cpu, int state)
+{
+ return 0;
+}
static int acpi_thermal_cpufreq_increase(unsigned int cpu)
{
@@ -310,6 +356,84 @@ int acpi_processor_get_limit_info(struct acpi_processor *pr)
return 0;
}
+/* thermal cooling device callbacks */
+static int acpi_processor_max_state(struct acpi_processor *pr)
+{
+ int max_state = 0;
+
+ /*
+	 * There are four states according to
+	 * cpufreq_thermal_reduction_pctg: 0, 1, 2, 3
+ */
+ max_state += cpufreq_get_max_state(pr->id);
+ if (pr->flags.throttling)
+ max_state += (pr->throttling.state_count -1);
+
+ return max_state;
+}
+static int
+processor_get_max_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_processor *pr = acpi_driver_data(device);
+
+ if (!device || !pr)
+ return -EINVAL;
+
+ return sprintf(buf, "%d\n", acpi_processor_max_state(pr));
+}
+
+static int
+processor_get_cur_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_processor *pr = acpi_driver_data(device);
+ int cur_state;
+
+ if (!device || !pr)
+ return -EINVAL;
+
+ cur_state = cpufreq_get_cur_state(pr->id);
+ if (pr->flags.throttling)
+ cur_state += pr->throttling.state;
+
+ return sprintf(buf, "%d\n", cur_state);
+}
+
+static int
+processor_set_cur_state(struct thermal_cooling_device *cdev, unsigned int state)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_processor *pr = acpi_driver_data(device);
+ int result = 0;
+ int max_pstate;
+
+ if (!device || !pr)
+ return -EINVAL;
+
+ max_pstate = cpufreq_get_max_state(pr->id);
+
+ if (state > acpi_processor_max_state(pr))
+ return -EINVAL;
+
+ if (state <= max_pstate) {
+ if (pr->flags.throttling && pr->throttling.state)
+ result = acpi_processor_set_throttling(pr, 0);
+ cpufreq_set_cur_state(pr->id, state);
+ } else {
+ cpufreq_set_cur_state(pr->id, max_pstate);
+ result = acpi_processor_set_throttling(pr,
+ state - max_pstate);
+ }
+ return result;
+}
+
+struct thermal_cooling_device_ops processor_cooling_ops = {
+ .get_max_state = processor_get_max_state,
+ .get_cur_state = processor_get_cur_state,
+ .set_cur_state = processor_set_cur_state,
+};
+
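
Putting the callbacks above together: the processor cooling device concatenates the cpufreq reduction steps (0-3, each step capping another 20% of the maximum frequency per the notifier earlier in this file) with the ACPI T-states. A standalone sketch of the state split and the resulting frequency cap (the CPU frequency and T-state count are example numbers, not from a real platform):

#include <stdio.h>

#define CPUFREQ_THERMAL_MAX_STEP 3

/* Split a combined cooling state into a cpufreq reduction step and a
 * T-state, mirroring processor_set_cur_state(): the cpufreq steps are
 * used up first, anything beyond them becomes throttling. */
static void split_state(unsigned int state, unsigned int *step,
			unsigned int *tstate)
{
	if (state <= CPUFREQ_THERMAL_MAX_STEP) {
		*step = state;
		*tstate = 0;
	} else {
		*step = CPUFREQ_THERMAL_MAX_STEP;
		*tstate = state - CPUFREQ_THERMAL_MAX_STEP;
	}
}

int main(void)
{
	unsigned int max_khz = 2000000;		/* example: a 2 GHz CPU */
	unsigned int tstate_count = 8;		/* example: T0..T7 */
	unsigned int max_state = CPUFREQ_THERMAL_MAX_STEP + (tstate_count - 1);
	unsigned int step, tstate;

	split_state(5, &step, &tstate);
	printf("max_state=%u; state 5 -> cpufreq step %u (cap %u kHz), T%u\n",
	       max_state, step, max_khz * (100 - step * 20) / 100, tstate);
	return 0;
}
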
/* /proc interface */
static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 1685b40abda7..1b8e592a8241 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -45,9 +45,229 @@
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");
+struct throttling_tstate {
+ unsigned int cpu; /* cpu nr */
+ int target_state; /* target T-state */
+};
+
+#define THROTTLING_PRECHANGE (1)
+#define THROTTLING_POSTCHANGE (2)
+
static int acpi_processor_get_throttling(struct acpi_processor *pr);
int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+static int acpi_processor_update_tsd_coord(void)
+{
+ int count, count_target;
+ int retval = 0;
+ unsigned int i, j;
+ cpumask_t covered_cpus;
+ struct acpi_processor *pr, *match_pr;
+ struct acpi_tsd_package *pdomain, *match_pdomain;
+ struct acpi_processor_throttling *pthrottling, *match_pthrottling;
+
+ /*
+ * Now that we have _TSD data from all CPUs, lets setup T-state
+ * coordination between all CPUs.
+ */
+ for_each_possible_cpu(i) {
+ pr = processors[i];
+ if (!pr)
+ continue;
+
+ /* Basic validity check for domain info */
+ pthrottling = &(pr->throttling);
+
+ /*
+		 * If the _TSD package for one CPU is invalid, the coordination
+		 * among all CPUs is treated as invalid.
+		 * This may be overly strict.
+ */
+ if (!pthrottling->tsd_valid_flag) {
+ retval = -EINVAL;
+ break;
+ }
+ }
+ if (retval)
+ goto err_ret;
+
+ cpus_clear(covered_cpus);
+ for_each_possible_cpu(i) {
+ pr = processors[i];
+ if (!pr)
+ continue;
+
+ if (cpu_isset(i, covered_cpus))
+ continue;
+ pthrottling = &pr->throttling;
+
+ pdomain = &(pthrottling->domain_info);
+ cpu_set(i, pthrottling->shared_cpu_map);
+ cpu_set(i, covered_cpus);
+ /*
+		 * If the number of processors in the TSD domain is 1, it is
+ * unnecessary to parse the coordination for this CPU.
+ */
+ if (pdomain->num_processors <= 1)
+ continue;
+
+ /* Validate the Domain info */
+ count_target = pdomain->num_processors;
+ count = 1;
+
+ for_each_possible_cpu(j) {
+ if (i == j)
+ continue;
+
+ match_pr = processors[j];
+ if (!match_pr)
+ continue;
+
+ match_pthrottling = &(match_pr->throttling);
+ match_pdomain = &(match_pthrottling->domain_info);
+ if (match_pdomain->domain != pdomain->domain)
+ continue;
+
+ /* Here i and j are in the same domain.
+ * If two TSD packages have the same domain, they
+			 * should have the same num_processors and
+ * coordination type. Otherwise it will be regarded
+ * as illegal.
+ */
+ if (match_pdomain->num_processors != count_target) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+
+ if (pdomain->coord_type != match_pdomain->coord_type) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+
+ cpu_set(j, covered_cpus);
+ cpu_set(j, pthrottling->shared_cpu_map);
+ count++;
+ }
+ for_each_possible_cpu(j) {
+ if (i == j)
+ continue;
+
+ match_pr = processors[j];
+ if (!match_pr)
+ continue;
+
+ match_pthrottling = &(match_pr->throttling);
+ match_pdomain = &(match_pthrottling->domain_info);
+ if (match_pdomain->domain != pdomain->domain)
+ continue;
+
+ /*
+			 * If some CPUs have the same domain, they
+ * will have the same shared_cpu_map.
+ */
+ match_pthrottling->shared_cpu_map =
+ pthrottling->shared_cpu_map;
+ }
+ }
+
+err_ret:
+ for_each_possible_cpu(i) {
+ pr = processors[i];
+ if (!pr)
+ continue;
+
+ /*
+ * Assume no coordination on any error parsing domain info.
+ * The coordination type will be forced as SW_ALL.
+ */
+ if (retval) {
+ pthrottling = &(pr->throttling);
+ cpus_clear(pthrottling->shared_cpu_map);
+ cpu_set(i, pthrottling->shared_cpu_map);
+ pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
+ }
+ }
+
+ return retval;
+}
+
+/*
+ * Update the T-state coordination after the _TSD
+ * data for all CPUs is obtained.
+ */
+void acpi_processor_throttling_init(void)
+{
+ if (acpi_processor_update_tsd_coord())
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Assume no T-state coordination\n"));
+
+ return;
+}
+
+static int acpi_processor_throttling_notifier(unsigned long event, void *data)
+{
+ struct throttling_tstate *p_tstate = data;
+ struct acpi_processor *pr;
+	unsigned int cpu;
+ int target_state;
+ struct acpi_processor_limit *p_limit;
+ struct acpi_processor_throttling *p_throttling;
+
+ cpu = p_tstate->cpu;
+ pr = processors[cpu];
+ if (!pr) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
+ return 0;
+ }
+ if (!pr->flags.throttling) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
+ "unsupported on CPU %d\n", cpu));
+ return 0;
+ }
+ target_state = p_tstate->target_state;
+ p_throttling = &(pr->throttling);
+ switch (event) {
+ case THROTTLING_PRECHANGE:
+ /*
+ * Prechange event is used to choose one proper t-state,
+ * which meets the limits of thermal, user and _TPC.
+ */
+ p_limit = &pr->limit;
+ if (p_limit->thermal.tx > target_state)
+ target_state = p_limit->thermal.tx;
+ if (p_limit->user.tx > target_state)
+ target_state = p_limit->user.tx;
+ if (pr->throttling_platform_limit > target_state)
+ target_state = pr->throttling_platform_limit;
+ if (target_state >= p_throttling->state_count) {
+ printk(KERN_WARNING
+				"Exceeds the T-state limit\n");
+ target_state = p_throttling->state_count - 1;
+ }
+ p_tstate->target_state = target_state;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:"
+ "target T-state of CPU %d is T%d\n",
+ cpu, target_state));
+ break;
+ case THROTTLING_POSTCHANGE:
+ /*
+ * Postchange event is only used to update the
+ * T-state flag of acpi_processor_throttling.
+ */
+ p_throttling->state = target_state;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:"
+ "CPU %d is switched to T%d\n",
+ cpu, target_state));
+ break;
+ default:
+ printk(KERN_WARNING
+ "Unsupported Throttling notifier event\n");
+ break;
+ }
+
+ return 0;
+}
+
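
The PRECHANGE branch above raises the requested T-state to whatever the thermal limit, the user limit, and the _TPC platform limit demand, and clamps it to the number of available states; POSTCHANGE then just records the state that was actually set. A standalone sketch of the PRECHANGE selection (field names simplified, values are examples):

#include <stdio.h>

static int max2(int a, int b) { return a > b ? a : b; }

/* Pick the effective target T-state, as in the PRECHANGE notifier:
 * never below any of the limits, never beyond the last valid state. */
static int pick_target_tstate(int requested, int thermal_tx, int user_tx,
			      int platform_limit, int state_count)
{
	int t = requested;

	t = max2(t, thermal_tx);
	t = max2(t, user_tx);
	t = max2(t, platform_limit);
	if (t >= state_count)
		t = state_count - 1;
	return t;
}

int main(void)
{
	/* Requested T1, but thermal wants T3 and _TPC wants T2; 8 states. */
	printf("target = T%d\n", pick_target_tstate(1, 3, 2, 2, 8));	/* T3 */
	return 0;
}
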
/*
* _TPC - Throttling Present Capabilities
*/
@@ -293,6 +513,10 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
struct acpi_buffer state = { 0, NULL };
union acpi_object *tsd = NULL;
struct acpi_tsd_package *pdomain;
+ struct acpi_processor_throttling *pthrottling;
+
+ pthrottling = &pr->throttling;
+ pthrottling->tsd_valid_flag = 0;
status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
if (ACPI_FAILURE(status)) {
@@ -340,6 +564,22 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
goto end;
}
+ pthrottling = &pr->throttling;
+ pthrottling->tsd_valid_flag = 1;
+ pthrottling->shared_type = pdomain->coord_type;
+ cpu_set(pr->id, pthrottling->shared_cpu_map);
+ /*
+	 * If the coordination type is not defined in the ACPI spec,
+	 * the tsd_valid_flag will be cleared and the coordination type
+	 * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
+ */
+ if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
+ pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
+ pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
+ pthrottling->tsd_valid_flag = 0;
+ pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
+ }
+
end:
kfree(buffer.pointer);
return result;
@@ -589,6 +829,11 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
cpumask_t saved_mask;
int ret;
+ if (!pr)
+ return -EINVAL;
+
+ if (!pr->flags.throttling)
+ return -ENODEV;
/*
* Migrate task to the cpu pointed by pr.
*/
@@ -742,13 +987,92 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{
cpumask_t saved_mask;
- int ret;
+ int ret = 0;
+ unsigned int i;
+ struct acpi_processor *match_pr;
+ struct acpi_processor_throttling *p_throttling;
+ struct throttling_tstate t_state;
+ cpumask_t online_throttling_cpus;
+
+ if (!pr)
+ return -EINVAL;
+
+ if (!pr->flags.throttling)
+ return -ENODEV;
+
+ if ((state < 0) || (state > (pr->throttling.state_count - 1)))
+ return -EINVAL;
+
+ saved_mask = current->cpus_allowed;
+ t_state.target_state = state;
+ p_throttling = &(pr->throttling);
+ cpus_and(online_throttling_cpus, cpu_online_map,
+ p_throttling->shared_cpu_map);
/*
- * Migrate task to the cpu pointed by pr.
+ * The throttling notifier will be called for every
+ * affected cpu in order to get one proper T-state.
+ * The notifier event is THROTTLING_PRECHANGE.
*/
- saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(pr->id));
- ret = pr->throttling.acpi_processor_set_throttling(pr, state);
+ for_each_cpu_mask(i, online_throttling_cpus) {
+ t_state.cpu = i;
+ acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
+ &t_state);
+ }
+ /*
+	 * The acpi_processor_set_throttling callback is invoked
+	 * to switch the T-state. If the coordination type is SW_ALL or HW_ALL,
+	 * it must be called for every affected CPU. Otherwise
+	 * it only needs to be called for the CPU pointed to by pr.
+ */
+ if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
+ set_cpus_allowed(current, cpumask_of_cpu(pr->id));
+ ret = p_throttling->acpi_processor_set_throttling(pr,
+ t_state.target_state);
+ } else {
+ /*
+ * When the T-state coordination is SW_ALL or HW_ALL,
+		 * it is necessary to set the T-state for every affected
+		 * CPU.
+ */
+ for_each_cpu_mask(i, online_throttling_cpus) {
+ match_pr = processors[i];
+ /*
+ * If the pointer is invalid, we will report the
+ * error message and continue.
+ */
+ if (!match_pr) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Invalid Pointer for CPU %d\n", i));
+ continue;
+ }
+ /*
+ * If the throttling control is unsupported on CPU i,
+ * we will report the error message and continue.
+ */
+ if (!match_pr->flags.throttling) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					"Throttling control is unsupported "
+ "on CPU %d\n", i));
+ continue;
+ }
+ t_state.cpu = i;
+ set_cpus_allowed(current, cpumask_of_cpu(i));
+ ret = match_pr->throttling.
+ acpi_processor_set_throttling(
+ match_pr, t_state.target_state);
+ }
+ }
+ /*
+ * After the set_throttling is called, the
+ * throttling notifier is called for every
+ * affected cpu to update the T-states.
+ * The notifier event is THROTTLING_POSTCHANGE
+ */
+ for_each_cpu_mask(i, online_throttling_cpus) {
+ t_state.cpu = i;
+ acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
+ &t_state);
+ }
/* restore the previous state */
set_cpus_allowed(current, saved_mask);
return ret;
@@ -757,6 +1081,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
int result = 0;
+ struct acpi_processor_throttling *pthrottling;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
@@ -788,7 +1113,16 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
&acpi_processor_set_throttling_ptc;
}
- acpi_processor_get_tsd(pr);
+ /*
+ * If TSD package for one CPU can't be parsed successfully, it means
+ * that this CPU will have no coordination with other CPUs.
+ */
+ if (acpi_processor_get_tsd(pr)) {
+ pthrottling = &pr->throttling;
+ pthrottling->tsd_valid_flag = 0;
+ cpu_set(pr->id, pthrottling->shared_cpu_map);
+ pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
+ }
/*
* PIIX4 Errata: We don't support throttling on the original PIIX4.
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index f136c7d3b3c2..1194105cc3ca 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -888,7 +888,7 @@ static void acpi_charger_remove(struct acpi_sbs *sbs)
#endif
}
-void acpi_sbs_callback(void *context)
+static void acpi_sbs_callback(void *context)
{
int id;
struct acpi_sbs *sbs = context;
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index fd40b6a1d639..ae9a90438e2f 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -111,8 +111,8 @@ static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
return -ETIME;
}
-int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, u8 address,
- u8 command, u8 *data, u8 length)
+static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
+ u8 address, u8 command, u8 *data, u8 length)
{
int ret = -EFAULT, i;
u8 temp, sz = 0;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index cbfe9ae7a9e5..3fac011f9cf9 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -59,7 +59,7 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
count = snprintf(&modalias[len], size, "%s:",
cid_list->id[i].value);
if (count < 0 || count >= size) {
- printk(KERN_ERR "acpi: %s cid[%i] exceeds event buffer size",
+ printk(KERN_ERR PREFIX "%s cid[%i] exceeds event buffer size",
acpi_dev->pnp.device_name, i);
break;
}
@@ -453,7 +453,7 @@ static int acpi_device_register(struct acpi_device *device,
device->dev.release = &acpi_device_release;
result = device_add(&device->dev);
if(result) {
- printk("Error adding device %s", device->dev.bus_id);
+ printk(KERN_ERR PREFIX "Error adding device %s", device->dev.bus_id);
goto end;
}
@@ -830,7 +830,7 @@ static int acpi_bus_get_flags(struct acpi_device *device)
if (ACPI_SUCCESS(status))
device->flags.wake_capable = 1;
- /* TBD: Peformance management */
+ /* TBD: Performance management */
return 0;
}
@@ -941,6 +941,15 @@ static int acpi_bay_match(struct acpi_device *device){
return -ENODEV;
}
+/*
+ * acpi_dock_match - see if a device has a _DCK method
+ */
+static int acpi_dock_match(struct acpi_device *device)
+{
+ acpi_handle tmp;
+ return acpi_get_handle(device->handle, "_DCK", &tmp);
+}
+
static void acpi_device_set_id(struct acpi_device *device,
struct acpi_device *parent, acpi_handle handle,
int type)
@@ -950,13 +959,14 @@ static void acpi_device_set_id(struct acpi_device *device,
char *hid = NULL;
char *uid = NULL;
struct acpi_compatible_id_list *cid_list = NULL;
+ const char *cid_add = NULL;
acpi_status status;
switch (type) {
case ACPI_BUS_TYPE_DEVICE:
status = acpi_get_object_info(handle, &buffer);
if (ACPI_FAILURE(status)) {
- printk("%s: Error reading device info\n", __FUNCTION__);
+ printk(KERN_ERR PREFIX "%s: Error reading device info\n", __FUNCTION__);
return;
}
@@ -972,15 +982,18 @@ static void acpi_device_set_id(struct acpi_device *device,
device->flags.bus_address = 1;
}
- if(!(info->valid & (ACPI_VALID_HID | ACPI_VALID_CID))){
- status = acpi_video_bus_match(device);
- if(ACPI_SUCCESS(status))
- hid = ACPI_VIDEO_HID;
+		/* If we have a video/bay/dock device, add our self-defined
+		   HID to the CID list. That way the video/bay/dock drivers
+ will get autoloaded and the device might still match
+ against another driver.
+ */
+ if (ACPI_SUCCESS(acpi_video_bus_match(device)))
+ cid_add = ACPI_VIDEO_HID;
+ else if (ACPI_SUCCESS(acpi_bay_match(device)))
+ cid_add = ACPI_BAY_HID;
+ else if (ACPI_SUCCESS(acpi_dock_match(device)))
+ cid_add = ACPI_DOCK_HID;
- status = acpi_bay_match(device);
- if (ACPI_SUCCESS(status))
- hid = ACPI_BAY_HID;
- }
break;
case ACPI_BUS_TYPE_POWER:
hid = ACPI_POWER_HID;
@@ -1021,12 +1034,45 @@ static void acpi_device_set_id(struct acpi_device *device,
strcpy(device->pnp.unique_id, uid);
device->flags.unique_id = 1;
}
- if (cid_list) {
- device->pnp.cid_list = kmalloc(cid_list->size, GFP_KERNEL);
- if (device->pnp.cid_list)
- memcpy(device->pnp.cid_list, cid_list, cid_list->size);
- else
- printk(KERN_ERR "Memory allocation error\n");
+ if (cid_list || cid_add) {
+ struct acpi_compatible_id_list *list;
+ int size = 0;
+ int count = 0;
+
+ if (cid_list) {
+ size = cid_list->size;
+ } else if (cid_add) {
+ size = sizeof(struct acpi_compatible_id_list);
+ cid_list = ACPI_ALLOCATE_ZEROED((acpi_size) size);
+ if (!cid_list) {
+ printk(KERN_ERR "Memory allocation error\n");
+ kfree(buffer.pointer);
+ return;
+ } else {
+ cid_list->count = 0;
+ cid_list->size = size;
+ }
+ }
+ if (cid_add)
+ size += sizeof(struct acpi_compatible_id);
+ list = kmalloc(size, GFP_KERNEL);
+
+ if (list) {
+ if (cid_list) {
+ memcpy(list, cid_list, cid_list->size);
+ count = cid_list->count;
+ }
+ if (cid_add) {
+ strncpy(list->id[count].value, cid_add,
+ ACPI_MAX_CID_LENGTH);
+ count++;
+ device->flags.compatible_ids = 1;
+ }
+ list->size = size;
+ list->count = count;
+ device->pnp.cid_list = list;
+ } else
+ printk(KERN_ERR PREFIX "Memory allocation error\n");
}
kfree(buffer.pointer);
@@ -1050,7 +1096,7 @@ static int acpi_device_set_context(struct acpi_device *device, int type)
acpi_bus_data_handler, device);
if (ACPI_FAILURE(status)) {
- printk("Error attaching device data\n");
+ printk(KERN_ERR PREFIX "Error attaching device data\n");
result = -ENODEV;
}
}
@@ -1081,6 +1127,20 @@ static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
}
static int
+acpi_is_child_device(struct acpi_device *device,
+ int (*matcher)(struct acpi_device *))
+{
+ int result = -ENODEV;
+
+ do {
+ if (ACPI_SUCCESS(matcher(device)))
+ return AE_OK;
+ } while ((device = device->parent));
+
+ return result;
+}
+
+static int
acpi_add_single_object(struct acpi_device **child,
struct acpi_device *parent, acpi_handle handle, int type,
struct acpi_bus_ops *ops)
@@ -1131,10 +1191,20 @@ acpi_add_single_object(struct acpi_device **child,
case ACPI_BUS_TYPE_PROCESSOR:
case ACPI_BUS_TYPE_DEVICE:
result = acpi_bus_get_status(device);
- if (ACPI_FAILURE(result) || !device->status.present) {
- result = -ENOENT;
+ if (ACPI_FAILURE(result)) {
+ result = -ENODEV;
goto end;
}
+ if (!device->status.present) {
+ /* Bay and dock should be handled even if absent */
+ if (!ACPI_SUCCESS(
+ acpi_is_child_device(device, acpi_bay_match)) &&
+ !ACPI_SUCCESS(
+ acpi_is_child_device(device, acpi_dock_match))) {
+ result = -ENODEV;
+ goto end;
+ }
+ }
break;
default:
STRUCT_TO_INT(device->status) =
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index 485de1347075..293a1cbb47c0 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -170,7 +170,7 @@ static int acpi_pm_enter(suspend_state_t pm_state)
/* Reprogram control registers and execute _BFS */
acpi_leave_sleep_state_prep(acpi_state);
- /* ACPI 3.0 specs (P62) says that it's the responsabilty
+ /* ACPI 3.0 specs (P62) says that it's the responsibility
* of the OSPM to clear the status bit [ implying that the
* POWER_BUTTON event should not reach userspace ]
*/
@@ -472,11 +472,20 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p)
if (acpi_target_sleep_state == ACPI_STATE_S0 ||
(wake && adev->wakeup.state.enabled &&
adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
+ acpi_status status;
+
acpi_method[3] = 'W';
- acpi_evaluate_integer(handle, acpi_method, NULL, &d_max);
- /* Sanity check */
- if (d_max < d_min)
+ status = acpi_evaluate_integer(handle, acpi_method, NULL,
+ &d_max);
+ if (ACPI_FAILURE(status)) {
+ d_max = d_min;
+ } else if (d_max < d_min) {
+ /* Warn the user of the broken DSDT */
+ printk(KERN_WARNING "ACPI: Wrong value from %s\n",
+ acpi_method);
+ /* Sanitize it */
d_min = d_max;
+ }
}
if (d_min_p)
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 1538355c266b..f8df5217d477 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -178,6 +178,9 @@ static int get_date_field(char **p, u32 * value)
 * Try to find delimiter, only to insert null. The end of the
* string won't have one, but is still valid.
*/
+ if (*p == NULL)
+ return result;
+
next = strpbrk(*p, "- :");
if (next)
*next++ = '\0';
@@ -190,6 +193,8 @@ static int get_date_field(char **p, u32 * value)
if (next)
*p = next;
+ else
+ *p = NULL;
return result;
}
@@ -251,27 +256,6 @@ acpi_system_write_alarm(struct file *file,
if ((result = get_date_field(&p, &sec)))
goto end;
- if (sec > 59) {
- min += 1;
- sec -= 60;
- }
- if (min > 59) {
- hr += 1;
- min -= 60;
- }
- if (hr > 23) {
- day += 1;
- hr -= 24;
- }
- if (day > 31) {
- mo += 1;
- day -= 31;
- }
- if (mo > 12) {
- yr += 1;
- mo -= 12;
- }
-
spin_lock_irq(&rtc_lock);
rtc_control = CMOS_READ(RTC_CONTROL);
@@ -288,24 +272,24 @@ acpi_system_write_alarm(struct file *file,
spin_unlock_irq(&rtc_lock);
if (sec > 59) {
- min++;
- sec -= 60;
+ min += sec/60;
+ sec = sec%60;
}
if (min > 59) {
- hr++;
- min -= 60;
+ hr += min/60;
+ min = min%60;
}
if (hr > 23) {
- day++;
- hr -= 24;
+ day += hr/24;
+ hr = hr%24;
}
if (day > 31) {
- mo++;
- day -= 31;
+ mo += day/32;
+ day = day%32;
}
if (mo > 12) {
- yr++;
- mo -= 12;
+ yr += mo/13;
+ mo = mo%13;
}
spin_lock_irq(&rtc_lock);
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 5ffe0ea18967..55cf4c05bb74 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -40,6 +40,8 @@ ACPI_MODULE_NAME("system");
#define ACPI_SYSTEM_CLASS "system"
#define ACPI_SYSTEM_DEVICE_NAME "System"
+u32 acpi_irq_handled;
+
/*
* Make ACPICA version work as module param
*/
@@ -166,6 +168,212 @@ static int acpi_system_sysfs_init(void)
return 0;
}
+/*
+ * Detailed ACPI IRQ counters in /sys/firmware/acpi/interrupts/
+ * See Documentation/ABI/testing/sysfs-firmware-acpi
+ */
+
+#define COUNT_GPE 0
+#define COUNT_SCI 1 /* acpi_irq_handled */
+#define COUNT_ERROR 2 /* other */
+#define NUM_COUNTERS_EXTRA 3
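+
+/*
+ * all_counters[] holds one entry per GPE, followed by the fixed-event
+ * counters, followed by the three totals above (gpe_all, sci, error).
+ */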
+
+static u32 *all_counters;
+static u32 num_gpes;
+static u32 num_counters;
+static struct attribute **all_attrs;
+static u32 acpi_gpe_count;
+
+static struct attribute_group interrupt_stats_attr_group = {
+ .name = "interrupts",
+};
+static struct kobj_attribute *counter_attrs;
+
+static int count_num_gpes(void)
+{
+ int count = 0;
+ struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+ struct acpi_gpe_block_info *gpe_block;
+ acpi_cpu_flags flags;
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_info) {
+ gpe_block = gpe_xrupt_info->gpe_block_list_head;
+ while (gpe_block) {
+ count += gpe_block->register_count *
+ ACPI_GPE_REGISTER_WIDTH;
+ gpe_block = gpe_block->next;
+ }
+ gpe_xrupt_info = gpe_xrupt_info->next;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ return count;
+}
+
+static void delete_gpe_attr_array(void)
+{
+ u32 *tmp = all_counters;
+
+ all_counters = NULL;
+ kfree(tmp);
+
+ if (counter_attrs) {
+ int i;
+
+ for (i = 0; i < num_gpes; i++)
+ kfree(counter_attrs[i].attr.name);
+
+ kfree(counter_attrs);
+ }
+ kfree(all_attrs);
+
+ return;
+}
+
+void acpi_os_gpe_count(u32 gpe_number)
+{
+ acpi_gpe_count++;
+
+ if (!all_counters)
+ return;
+
+ if (gpe_number < num_gpes)
+ all_counters[gpe_number]++;
+ else
+ all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]++;
+
+ return;
+}
+
+void acpi_os_fixed_event_count(u32 event_number)
+{
+ if (!all_counters)
+ return;
+
+ if (event_number < ACPI_NUM_FIXED_EVENTS)
+ all_counters[num_gpes + event_number]++;
+ else
+ all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]++;
+
+ return;
+}
+
+static ssize_t counter_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI] =
+ acpi_irq_handled;
+ all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE] =
+ acpi_gpe_count;
+
+ return sprintf(buf, "%d\n", all_counters[attr - counter_attrs]);
+}
+
+/*
+ * counter_set() sets the specified counter.
+ * setting the total "sci" file to any value clears all counters.
+ */
+static ssize_t counter_set(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t size)
+{
+ int index = attr - counter_attrs;
+
+ if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
+ int i;
+ for (i = 0; i < num_counters; ++i)
+ all_counters[i] = 0;
+ acpi_gpe_count = 0;
+ acpi_irq_handled = 0;
+
+ } else
+ all_counters[index] = strtoul(buf, NULL, 0);
+
+ return size;
+}
+
+void acpi_irq_stats_init(void)
+{
+ int i;
+
+ if (all_counters)
+ return;
+
+ num_gpes = count_num_gpes();
+ num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
+
+ all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1),
+ GFP_KERNEL);
+ if (all_attrs == NULL)
+ return;
+
+ all_counters = kzalloc(sizeof(u32) * (num_counters), GFP_KERNEL);
+ if (all_counters == NULL)
+ goto fail;
+
+ counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
+ GFP_KERNEL);
+ if (counter_attrs == NULL)
+ goto fail;
+
+ for (i = 0; i < num_counters; ++i) {
+ char buffer[12]; /* longest name, "ff_gbl_lock", needs 12 bytes */
+ char *name;
+
+ if (i < num_gpes)
+ sprintf(buffer, "gpe%02X", i);
+ else if (i == num_gpes + ACPI_EVENT_PMTIMER)
+ sprintf(buffer, "ff_pmtimer");
+ else if (i == num_gpes + ACPI_EVENT_GLOBAL)
+ sprintf(buffer, "ff_gbl_lock");
+ else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
+ sprintf(buffer, "ff_pwr_btn");
+ else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
+ sprintf(buffer, "ff_slp_btn");
+ else if (i == num_gpes + ACPI_EVENT_RTC)
+ sprintf(buffer, "ff_rt_clk");
+ else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
+ sprintf(buffer, "gpe_all");
+ else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
+ sprintf(buffer, "sci");
+ else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
+ sprintf(buffer, "error");
+ else
+ sprintf(buffer, "bug%02X", i);
+
+ name = kzalloc(strlen(buffer) + 1, GFP_KERNEL);
+ if (name == NULL)
+ goto fail;
+ strncpy(name, buffer, strlen(buffer) + 1);
+
+ counter_attrs[i].attr.name = name;
+ counter_attrs[i].attr.mode = 0644;
+ counter_attrs[i].show = counter_show;
+ counter_attrs[i].store = counter_set;
+
+ all_attrs[i] = &counter_attrs[i].attr;
+ }
+
+ interrupt_stats_attr_group.attrs = all_attrs;
+ if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
+ return;
+
+fail:
+ delete_gpe_attr_array();
+ return;
+}
+
+static void __exit interrupt_stats_exit(void)
+{
+ sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
+
+ delete_gpe_attr_array();
+
+ return;
+}
+
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/tables/Makefile b/drivers/acpi/tables/Makefile
index 0a7d7afac255..7385efa61622 100644
--- a/drivers/acpi/tables/Makefile
+++ b/drivers/acpi/tables/Makefile
@@ -2,6 +2,6 @@
# Makefile for all Linux ACPI interpreter subdirectories
#
-obj-y := tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o
+obj-y := tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/tables/tbxfroot.c
index cf8fa514189f..9ecb4b6c1e7d 100644
--- a/drivers/acpi/tables/tbxfroot.c
+++ b/drivers/acpi/tables/tbxfroot.c
@@ -100,7 +100,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
/*******************************************************************************
*
- * FUNCTION: acpi_tb_find_rsdp
+ * FUNCTION: acpi_find_root_pointer
*
* PARAMETERS: table_address - Where the table pointer is returned
*
@@ -219,8 +219,6 @@ acpi_status acpi_find_root_pointer(acpi_native_uint * table_address)
return_ACPI_STATUS(AE_NOT_FOUND);
}
-ACPI_EXPORT_SYMBOL(acpi_find_root_pointer)
-
/*******************************************************************************
*
* FUNCTION: acpi_tb_scan_memory_for_rsdp
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 5f79b4451212..8d4b79b4f933 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -43,7 +43,7 @@
#include <linux/seq_file.h>
#include <linux/reboot.h>
#include <asm/uaccess.h>
-
+#include <linux/thermal.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -65,9 +65,6 @@
#define ACPI_THERMAL_MAX_ACTIVE 10
#define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65
-#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732>=0) ? ((long)t-2732+5)/10 : ((long)t-2732-5)/10)
-#define CELSIUS_TO_KELVIN(t) ((t+273)*10)
-
#define _COMPONENT ACPI_THERMAL_COMPONENT
ACPI_MODULE_NAME("thermal");
@@ -195,6 +192,8 @@ struct acpi_thermal {
struct acpi_thermal_trips trips;
struct acpi_handle_list devices;
struct timer_list timer;
+ struct thermal_zone_device *thermal_zone;
+ int tz_enabled;
struct mutex lock;
};
@@ -321,178 +320,226 @@ static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
return 0;
}
-static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
-{
- acpi_status status = AE_OK;
- int i = 0;
+#define ACPI_TRIPS_CRITICAL 0x01
+#define ACPI_TRIPS_HOT 0x02
+#define ACPI_TRIPS_PASSIVE 0x04
+#define ACPI_TRIPS_ACTIVE 0x08
+#define ACPI_TRIPS_DEVICES 0x10
+#define ACPI_TRIPS_REFRESH_THRESHOLDS (ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE)
+#define ACPI_TRIPS_REFRESH_DEVICES ACPI_TRIPS_DEVICES
- if (!tz)
- return -EINVAL;
+#define ACPI_TRIPS_INIT (ACPI_TRIPS_CRITICAL | ACPI_TRIPS_HOT | \
+ ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE | \
+ ACPI_TRIPS_DEVICES)
- /* Critical Shutdown (required) */
-
- status = acpi_evaluate_integer(tz->device->handle, "_CRT", NULL,
- &tz->trips.critical.temperature);
- if (ACPI_FAILURE(status)) {
- tz->trips.critical.flags.valid = 0;
- ACPI_EXCEPTION((AE_INFO, status, "No critical threshold"));
- return -ENODEV;
- } else {
- tz->trips.critical.flags.valid = 1;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Found critical threshold [%lu]\n",
- tz->trips.critical.temperature));
- }
+/*
+ * This exception is reported in two cases:
+ * 1. An invalid trip point becomes valid, or a valid trip point becomes
+ *    invalid, when re-evaluating the AML code.
+ * 2. TODO: Devices listed in _PSL, _ALx, _TZD may change.
+ *    We need to re-bind the cooling devices of a thermal zone when this occurs.
+ */
+#define ACPI_THERMAL_TRIPS_EXCEPTION(flags, str) \
+do { \
+ if (flags != ACPI_TRIPS_INIT) \
+ ACPI_EXCEPTION((AE_INFO, AE_ERROR, \
+ "ACPI thermal trip point %s changed\n" \
+ "Please send acpidump to linux-acpi@vger.kernel.org\n", str)); \
+} while (0)
+
+static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
+{
+ acpi_status status = AE_OK;
+ struct acpi_handle_list devices;
+ int valid = 0;
+ int i;
- if (tz->trips.critical.flags.valid == 1) {
- if (crt == -1) {
+ /* Critical Shutdown (required) */
+ if (flag & ACPI_TRIPS_CRITICAL) {
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_CRT", NULL, &tz->trips.critical.temperature);
+ if (ACPI_FAILURE(status)) {
tz->trips.critical.flags.valid = 0;
- } else if (crt > 0) {
- unsigned long crt_k = CELSIUS_TO_KELVIN(crt);
-
- /*
- * Allow override to lower critical threshold
- */
- if (crt_k < tz->trips.critical.temperature)
- tz->trips.critical.temperature = crt_k;
+ ACPI_EXCEPTION((AE_INFO, status,
+ "No critical threshold"));
+ return -ENODEV;
+ } else {
+ tz->trips.critical.flags.valid = 1;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Found critical threshold [%lu]\n",
+ tz->trips.critical.temperature));
+ }
+ if (tz->trips.critical.flags.valid == 1) {
+ if (crt == -1) {
+ tz->trips.critical.flags.valid = 0;
+ } else if (crt > 0) {
+ unsigned long crt_k = CELSIUS_TO_KELVIN(crt);
+ /*
+ * Allow override to lower critical threshold
+ */
+ if (crt_k < tz->trips.critical.temperature)
+ tz->trips.critical.temperature = crt_k;
+ }
}
}
/* Critical Sleep (optional) */
-
- status =
- acpi_evaluate_integer(tz->device->handle, "_HOT", NULL,
- &tz->trips.hot.temperature);
- if (ACPI_FAILURE(status)) {
- tz->trips.hot.flags.valid = 0;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No hot threshold\n"));
- } else {
- tz->trips.hot.flags.valid = 1;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found hot threshold [%lu]\n",
- tz->trips.hot.temperature));
- }
-
- /* Passive: Processors (optional) */
-
- if (psv == -1) {
- status = AE_SUPPORT;
- } else if (psv > 0) {
- tz->trips.passive.temperature = CELSIUS_TO_KELVIN(psv);
- status = AE_OK;
- } else {
+ if (flag & ACPI_TRIPS_HOT) {
status = acpi_evaluate_integer(tz->device->handle,
- "_PSV", NULL, &tz->trips.passive.temperature);
+ "_HOT", NULL, &tz->trips.hot.temperature);
+ if (ACPI_FAILURE(status)) {
+ tz->trips.hot.flags.valid = 0;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "No hot threshold\n"));
+ } else {
+ tz->trips.hot.flags.valid = 1;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Found hot threshold [%lu]\n",
+ tz->trips.hot.temperature));
+ }
}
- if (ACPI_FAILURE(status)) {
- tz->trips.passive.flags.valid = 0;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No passive threshold\n"));
- } else {
- tz->trips.passive.flags.valid = 1;
-
- status =
- acpi_evaluate_integer(tz->device->handle, "_TC1", NULL,
- &tz->trips.passive.tc1);
- if (ACPI_FAILURE(status))
- tz->trips.passive.flags.valid = 0;
-
- status =
- acpi_evaluate_integer(tz->device->handle, "_TC2", NULL,
- &tz->trips.passive.tc2);
- if (ACPI_FAILURE(status))
- tz->trips.passive.flags.valid = 0;
+ /* Passive (optional) */
+ if (flag & ACPI_TRIPS_PASSIVE) {
+ valid = tz->trips.passive.flags.valid;
+ if (psv == -1) {
+ status = AE_SUPPORT;
+ } else if (psv > 0) {
+ tz->trips.passive.temperature = CELSIUS_TO_KELVIN(psv);
+ status = AE_OK;
+ } else {
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_PSV", NULL, &tz->trips.passive.temperature);
+ }
- status =
- acpi_evaluate_integer(tz->device->handle, "_TSP", NULL,
- &tz->trips.passive.tsp);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
-
- status =
- acpi_evaluate_reference(tz->device->handle, "_PSL", NULL,
- &tz->trips.passive.devices);
+ else {
+ tz->trips.passive.flags.valid = 1;
+ if (flag == ACPI_TRIPS_INIT) {
+ status = acpi_evaluate_integer(
+ tz->device->handle, "_TC1",
+ NULL, &tz->trips.passive.tc1);
+ if (ACPI_FAILURE(status))
+ tz->trips.passive.flags.valid = 0;
+ status = acpi_evaluate_integer(
+ tz->device->handle, "_TC2",
+ NULL, &tz->trips.passive.tc2);
+ if (ACPI_FAILURE(status))
+ tz->trips.passive.flags.valid = 0;
+ status = acpi_evaluate_integer(
+ tz->device->handle, "_TSP",
+ NULL, &tz->trips.passive.tsp);
+ if (ACPI_FAILURE(status))
+ tz->trips.passive.flags.valid = 0;
+ }
+ }
+ }
+ if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.flags.valid) {
+ memset(&devices, 0, sizeof(struct acpi_handle_list));
+ status = acpi_evaluate_reference(tz->device->handle, "_PSL",
+ NULL, &devices);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
-
- if (!tz->trips.passive.flags.valid)
- printk(KERN_WARNING PREFIX "Invalid passive threshold\n");
else
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Found passive threshold [%lu]\n",
- tz->trips.passive.temperature));
- }
+ tz->trips.passive.flags.valid = 1;
- /* Active: Fans, etc. (optional) */
+ if (memcmp(&tz->trips.passive.devices, &devices,
+ sizeof(struct acpi_handle_list))) {
+ memcpy(&tz->trips.passive.devices, &devices,
+ sizeof(struct acpi_handle_list));
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
+ }
+ }
+ if ((flag & ACPI_TRIPS_PASSIVE) || (flag & ACPI_TRIPS_DEVICES)) {
+ if (valid != tz->trips.passive.flags.valid)
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, "state");
+ }
+ /* Active (optional) */
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
-
char name[5] = { '_', 'A', 'C', ('0' + i), '\0' };
+ valid = tz->trips.active[i].flags.valid;
if (act == -1)
- break; /* disable all active trip points */
-
- status = acpi_evaluate_integer(tz->device->handle,
- name, NULL, &tz->trips.active[i].temperature);
-
- if (ACPI_FAILURE(status)) {
- if (i == 0) /* no active trip points */
+ break; /* disable all active trip points */
+
+ if (flag & ACPI_TRIPS_ACTIVE) {
+ status = acpi_evaluate_integer(tz->device->handle,
+ name, NULL, &tz->trips.active[i].temperature);
+ if (ACPI_FAILURE(status)) {
+ tz->trips.active[i].flags.valid = 0;
+ if (i == 0)
+ break;
+ if (act <= 0)
+ break;
+ if (i == 1)
+ tz->trips.active[0].temperature =
+ CELSIUS_TO_KELVIN(act);
+ else
+ /*
+ * Don't allow override higher than
+ * the next higher trip point
+ */
+ tz->trips.active[i - 1].temperature =
+ (tz->trips.active[i - 2].temperature <
+ CELSIUS_TO_KELVIN(act) ?
+ tz->trips.active[i - 2].temperature :
+ CELSIUS_TO_KELVIN(act));
break;
- if (act <= 0) /* no override requested */
- break;
- if (i == 1) { /* 1 trip point */
- tz->trips.active[0].temperature =
- CELSIUS_TO_KELVIN(act);
- } else { /* multiple trips */
- /*
- * Don't allow override higher than
- * the next higher trip point
- */
- tz->trips.active[i - 1].temperature =
- (tz->trips.active[i - 2].temperature <
- CELSIUS_TO_KELVIN(act) ?
- tz->trips.active[i - 2].temperature :
- CELSIUS_TO_KELVIN(act));
- }
- break;
+ } else
+ tz->trips.active[i].flags.valid = 1;
}
name[2] = 'L';
- status =
- acpi_evaluate_reference(tz->device->handle, name, NULL,
- &tz->trips.active[i].devices);
- if (ACPI_SUCCESS(status)) {
- tz->trips.active[i].flags.valid = 1;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Found active threshold [%d]:[%lu]\n",
- i, tz->trips.active[i].temperature));
- } else
- ACPI_EXCEPTION((AE_INFO, status,
- "Invalid active threshold [%d]", i));
+ if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].flags.valid) {
+ memset(&devices, 0, sizeof(struct acpi_handle_list));
+ status = acpi_evaluate_reference(tz->device->handle,
+ name, NULL, &devices);
+ if (ACPI_FAILURE(status))
+ tz->trips.active[i].flags.valid = 0;
+ else
+ tz->trips.active[i].flags.valid = 1;
+
+ if (memcmp(&tz->trips.active[i].devices, &devices,
+ sizeof(struct acpi_handle_list))) {
+ memcpy(&tz->trips.active[i].devices, &devices,
+ sizeof(struct acpi_handle_list));
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
+ }
+ }
+ if ((flag & ACPI_TRIPS_ACTIVE) || (flag & ACPI_TRIPS_DEVICES))
+ if (valid != tz->trips.active[i].flags.valid)
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, "state");
+
+ if (!tz->trips.active[i].flags.valid)
+ break;
+ }
+
+ if (flag & ACPI_TRIPS_DEVICES) {
+ memset(&devices, 0, sizeof(struct acpi_handle_list));
+ status = acpi_evaluate_reference(tz->device->handle, "_TZD",
+ NULL, &devices);
+ if (memcmp(&tz->devices, &devices,
+ sizeof(struct acpi_handle_list))) {
+ memcpy(&tz->devices, &devices,
+ sizeof(struct acpi_handle_list));
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
+ }
}
return 0;
}
-static int acpi_thermal_get_devices(struct acpi_thermal *tz)
+static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
{
- acpi_status status = AE_OK;
-
-
- if (!tz)
- return -EINVAL;
-
- status =
- acpi_evaluate_reference(tz->device->handle, "_TZD", NULL, &tz->devices);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- return 0;
+ return acpi_thermal_trips_update(tz, ACPI_TRIPS_INIT);
}
static int acpi_thermal_critical(struct acpi_thermal *tz)
{
- if (!tz || !tz->trips.critical.flags.valid || nocrt)
+ if (!tz || !tz->trips.critical.flags.valid)
return -EINVAL;
if (tz->temperature >= tz->trips.critical.temperature) {
@@ -501,9 +548,6 @@ static int acpi_thermal_critical(struct acpi_thermal *tz)
} else if (tz->trips.critical.flags.enabled)
tz->trips.critical.flags.enabled = 0;
- printk(KERN_EMERG
- "Critical temperature reached (%ld C), shutting down.\n",
- KELVIN_TO_CELSIUS(tz->temperature));
acpi_bus_generate_proc_event(tz->device, ACPI_THERMAL_NOTIFY_CRITICAL,
tz->trips.critical.flags.enabled);
acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
@@ -511,14 +555,20 @@ static int acpi_thermal_critical(struct acpi_thermal *tz)
ACPI_THERMAL_NOTIFY_CRITICAL,
tz->trips.critical.flags.enabled);
- orderly_poweroff(true);
+ /* take no action if nocrt is set */
+ if (!nocrt) {
+ printk(KERN_EMERG
+ "Critical temperature reached (%ld C), shutting down.\n",
+ KELVIN_TO_CELSIUS(tz->temperature));
+ orderly_poweroff(true);
+ }
return 0;
}
static int acpi_thermal_hot(struct acpi_thermal *tz)
{
- if (!tz || !tz->trips.hot.flags.valid || nocrt)
+ if (!tz || !tz->trips.hot.flags.valid)
return -EINVAL;
if (tz->temperature >= tz->trips.hot.temperature) {
@@ -534,7 +584,7 @@ static int acpi_thermal_hot(struct acpi_thermal *tz)
ACPI_THERMAL_NOTIFY_HOT,
tz->trips.hot.flags.enabled);
- /* TBD: Call user-mode "sleep(S4)" function */
+ /* TBD: Call user-mode "sleep(S4)" function if nocrt is cleared */
return 0;
}
@@ -732,6 +782,9 @@ static void acpi_thermal_check(void *data)
if (result)
goto unlock;
+ if (!tz->tz_enabled)
+ goto unlock;
+
memset(&tz->state, 0, sizeof(tz->state));
/*
@@ -825,6 +878,290 @@ static void acpi_thermal_check(void *data)
mutex_unlock(&tz->lock);
}
+/* sys I/F for generic thermal sysfs support */
+static int thermal_get_temp(struct thermal_zone_device *thermal, char *buf)
+{
+ struct acpi_thermal *tz = thermal->devdata;
+
+ if (!tz)
+ return -EINVAL;
+
+ return sprintf(buf, "%ld\n", KELVIN_TO_CELSIUS(tz->temperature));
+}
+
+static const char enabled[] = "kernel";
+static const char disabled[] = "user";
+static int thermal_get_mode(struct thermal_zone_device *thermal,
+ char *buf)
+{
+ struct acpi_thermal *tz = thermal->devdata;
+
+ if (!tz)
+ return -EINVAL;
+
+ return sprintf(buf, "%s\n", tz->tz_enabled ?
+ enabled : disabled);
+}
+
+static int thermal_set_mode(struct thermal_zone_device *thermal,
+ const char *buf)
+{
+ struct acpi_thermal *tz = thermal->devdata;
+ int enable;
+
+ if (!tz)
+ return -EINVAL;
+
+ /*
+ * enable/disable thermal management from ACPI thermal driver
+ */
+ if (!strncmp(buf, enabled, sizeof enabled - 1))
+ enable = 1;
+ else if (!strncmp(buf, disabled, sizeof disabled - 1))
+ enable = 0;
+ else
+ return -EINVAL;
+
+ if (enable != tz->tz_enabled) {
+ tz->tz_enabled = enable;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "%s ACPI thermal control\n",
+ tz->tz_enabled ? enabled : disabled));
+ acpi_thermal_check(tz);
+ }
+ return 0;
+}
+
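+/*
+ * Trip points are exported to the thermal sysfs I/F in a fixed order:
+ * critical, hot, passive, then active[0..N]. The bind/unbind callbacks
+ * further below rely on the same ordering.
+ */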
+static int thermal_get_trip_type(struct thermal_zone_device *thermal,
+ int trip, char *buf)
+{
+ struct acpi_thermal *tz = thermal->devdata;
+ int i;
+
+ if (!tz || trip < 0)
+ return -EINVAL;
+
+ if (tz->trips.critical.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "critical\n");
+ trip--;
+ }
+
+ if (tz->trips.hot.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "hot\n");
+ trip--;
+ }
+
+ if (tz->trips.passive.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "passive\n");
+ trip--;
+ }
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
+ tz->trips.active[i].flags.valid; i++) {
+ if (!trip)
+ return sprintf(buf, "active%d\n", i);
+ trip--;
+ }
+
+ return -EINVAL;
+}
+
+static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
+ int trip, char *buf)
+{
+ struct acpi_thermal *tz = thermal->devdata;
+ int i;
+
+ if (!tz || trip < 0)
+ return -EINVAL;
+
+ if (tz->trips.critical.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "%ld\n", KELVIN_TO_CELSIUS(
+ tz->trips.critical.temperature));
+ trip--;
+ }
+
+ if (tz->trips.hot.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "%ld\n", KELVIN_TO_CELSIUS(
+ tz->trips.hot.temperature));
+ trip--;
+ }
+
+ if (tz->trips.passive.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "%ld\n", KELVIN_TO_CELSIUS(
+ tz->trips.passive.temperature));
+ trip--;
+ }
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
+ tz->trips.active[i].flags.valid; i++) {
+ if (!trip)
+ return sprintf(buf, "%ld\n", KELVIN_TO_CELSIUS(
+ tz->trips.active[i].temperature));
+ trip--;
+ }
+
+ return -EINVAL;
+}
+
+typedef int (*cb)(struct thermal_zone_device *, int,
+ struct thermal_cooling_device *);
+static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev,
+ cb action)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_thermal *tz = thermal->devdata;
+ struct acpi_device *dev;
+ acpi_status status;
+ acpi_handle handle;
+ int i;
+ int j;
+ int trip = -1;
+ int result = 0;
+
+ if (tz->trips.critical.flags.valid)
+ trip++;
+
+ if (tz->trips.hot.flags.valid)
+ trip++;
+
+ if (tz->trips.passive.flags.valid) {
+ trip++;
+ for (i = 0; i < tz->trips.passive.devices.count;
+ i++) {
+ handle = tz->trips.passive.devices.handles[i];
+ status = acpi_bus_get_device(handle, &dev);
+ if (ACPI_SUCCESS(status) && (dev == device)) {
+ result = action(thermal, trip, cdev);
+ if (result)
+ goto failed;
+ }
+ }
+ }
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
+ if (!tz->trips.active[i].flags.valid)
+ break;
+ trip++;
+ for (j = 0;
+ j < tz->trips.active[i].devices.count;
+ j++) {
+ handle = tz->trips.active[i].devices.handles[j];
+ status = acpi_bus_get_device(handle, &dev);
+ if (ACPI_SUCCESS(status) && (dev == device)) {
+ result = action(thermal, trip, cdev);
+ if (result)
+ goto failed;
+ }
+ }
+ }
+
+ for (i = 0; i < tz->devices.count; i++) {
+ handle = tz->devices.handles[i];
+ status = acpi_bus_get_device(handle, &dev);
+ if (ACPI_SUCCESS(status) && (dev == device)) {
+ result = action(thermal, -1, cdev);
+ if (result)
+ goto failed;
+ }
+ }
+
+failed:
+ return result;
+}
+
+static int
+acpi_thermal_bind_cooling_device(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ return acpi_thermal_cooling_device_cb(thermal, cdev,
+ thermal_zone_bind_cooling_device);
+}
+
+static int
+acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ return acpi_thermal_cooling_device_cb(thermal, cdev,
+ thermal_zone_unbind_cooling_device);
+}
+
+static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
+ .bind = acpi_thermal_bind_cooling_device,
+ .unbind = acpi_thermal_unbind_cooling_device,
+ .get_temp = thermal_get_temp,
+ .get_mode = thermal_get_mode,
+ .set_mode = thermal_set_mode,
+ .get_trip_type = thermal_get_trip_type,
+ .get_trip_temp = thermal_get_trip_temp,
+};
+
+static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
+{
+ int trips = 0;
+ int result;
+ acpi_status status;
+ int i;
+
+ if (tz->trips.critical.flags.valid)
+ trips++;
+
+ if (tz->trips.hot.flags.valid)
+ trips++;
+
+ if (tz->trips.passive.flags.valid)
+ trips++;
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
+ tz->trips.active[i].flags.valid; i++, trips++);
+ tz->thermal_zone = thermal_zone_device_register("ACPI thermal zone",
+ trips, tz, &acpi_thermal_zone_ops);
+ if (!tz->thermal_zone)
+ return -ENODEV;
+
+ result = sysfs_create_link(&tz->device->dev.kobj,
+ &tz->thermal_zone->device.kobj, "thermal_zone");
+ if (result)
+ return result;
+
+ result = sysfs_create_link(&tz->thermal_zone->device.kobj,
+ &tz->device->dev.kobj, "device");
+ if (result)
+ return result;
+
+ status = acpi_attach_data(tz->device->handle,
+ acpi_bus_private_data_handler,
+ tz->thermal_zone);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Error attaching device data\n"));
+ return -ENODEV;
+ }
+
+ tz->tz_enabled = 1;
+
+ printk(KERN_INFO PREFIX "%s is registered as thermal_zone%d\n",
+ tz->device->dev.bus_id, tz->thermal_zone->id);
+ return 0;
+}
+
+static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
+{
+ sysfs_remove_link(&tz->device->dev.kobj, "thermal_zone");
+ sysfs_remove_link(&tz->thermal_zone->device.kobj, "device");
+ thermal_zone_device_unregister(tz->thermal_zone);
+ tz->thermal_zone = NULL;
+ acpi_detach_data(tz->device->handle, acpi_bus_private_data_handler);
+}
+
+
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
@@ -1181,15 +1518,15 @@ static void acpi_thermal_notify(acpi_handle handle, u32 event, void *data)
acpi_thermal_check(tz);
break;
case ACPI_THERMAL_NOTIFY_THRESHOLDS:
- acpi_thermal_get_trip_points(tz);
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
acpi_thermal_check(tz);
acpi_bus_generate_proc_event(device, event, 0);
acpi_bus_generate_netlink_event(device->pnp.device_class,
device->dev.bus_id, event, 0);
break;
case ACPI_THERMAL_NOTIFY_DEVICES:
- if (tz->flags.devices)
- acpi_thermal_get_devices(tz);
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
+ acpi_thermal_check(tz);
acpi_bus_generate_proc_event(device, event, 0);
acpi_bus_generate_netlink_event(device->pnp.device_class,
device->dev.bus_id, event, 0);
@@ -1232,11 +1569,6 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz)
else
acpi_thermal_get_polling_frequency(tz);
- /* Get devices in this thermal zone [_TZD] (optional) */
- result = acpi_thermal_get_devices(tz);
- if (!result)
- tz->flags.devices = 1;
-
return 0;
}
@@ -1260,13 +1592,19 @@ static int acpi_thermal_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
acpi_driver_data(device) = tz;
mutex_init(&tz->lock);
+
result = acpi_thermal_get_info(tz);
if (result)
- goto end;
+ goto free_memory;
+
+ result = acpi_thermal_register_thermal_zone(tz);
+ if (result)
+ goto free_memory;
result = acpi_thermal_add_fs(device);
if (result)
- goto end;
+ goto unregister_thermal_zone;
init_timer(&tz->timer);
@@ -1277,19 +1615,21 @@ static int acpi_thermal_add(struct acpi_device *device)
acpi_thermal_notify, tz);
if (ACPI_FAILURE(status)) {
result = -ENODEV;
- goto end;
+ goto remove_fs;
}
printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
acpi_device_name(device), acpi_device_bid(device),
KELVIN_TO_CELSIUS(tz->temperature));
+ goto end;
- end:
- if (result) {
- acpi_thermal_remove_fs(device);
- kfree(tz);
- }
-
+remove_fs:
+ acpi_thermal_remove_fs(device);
+unregister_thermal_zone:
+ thermal_zone_device_unregister(tz->thermal_zone);
+free_memory:
+ kfree(tz);
+end:
return result;
}
@@ -1329,6 +1669,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
}
acpi_thermal_remove_fs(device);
+ acpi_thermal_unregister_thermal_zone(tz);
mutex_destroy(&tz->lock);
kfree(tz);
return 0;
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
index 93ea8290b4f7..630c9a2c5b7b 100644
--- a/drivers/acpi/utilities/utglobal.c
+++ b/drivers/acpi/utilities/utglobal.c
@@ -671,7 +671,6 @@ void acpi_ut_init_globals(void)
/* GPE support */
- acpi_gpe_count = 0;
acpi_gbl_gpe_xrupt_list_head = NULL;
acpi_gbl_gpe_fadt_blocks[0] = NULL;
acpi_gbl_gpe_fadt_blocks[1] = NULL;
@@ -735,4 +734,3 @@ void acpi_ut_init_globals(void)
ACPI_EXPORT_SYMBOL(acpi_dbg_level)
ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
- ACPI_EXPORT_SYMBOL(acpi_gpe_count)
diff --git a/drivers/acpi/utilities/utresrc.c b/drivers/acpi/utilities/utresrc.c
index cbbd3315a1e2..b630ee137ee1 100644
--- a/drivers/acpi/utilities/utresrc.c
+++ b/drivers/acpi/utilities/utresrc.c
@@ -1,6 +1,6 @@
/*******************************************************************************
*
- * Module Name: utresrc - Resource managment utilities
+ * Module Name: utresrc - Resource management utilities
*
******************************************************************************/
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index bd77e81e81c1..7f714fa2a454 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -34,6 +34,7 @@
#include <linux/seq_file.h>
#include <linux/input.h>
#include <linux/backlight.h>
+#include <linux/thermal.h>
#include <linux/video_output.h>
#include <asm/uaccess.h>
@@ -72,8 +73,12 @@ MODULE_AUTHOR("Bruno Ducrot");
MODULE_DESCRIPTION("ACPI Video Driver");
MODULE_LICENSE("GPL");
+static int brightness_switch_enabled = 1;
+module_param(brightness_switch_enabled, bool, 0644);
+
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device, int type);
+static int acpi_video_resume(struct acpi_device *device);
static const struct acpi_device_id video_device_ids[] = {
{ACPI_VIDEO_HID, 0},
@@ -88,6 +93,7 @@ static struct acpi_driver acpi_video_bus = {
.ops = {
.add = acpi_video_bus_add,
.remove = acpi_video_bus_remove,
+ .resume = acpi_video_resume,
},
};
@@ -179,6 +185,7 @@ struct acpi_video_device {
struct acpi_device *dev;
struct acpi_video_device_brightness *brightness;
struct backlight_device *backlight;
+ struct thermal_cooling_device *cdev;
struct output_device *output_dev;
};
@@ -273,7 +280,6 @@ static void acpi_video_device_rebind(struct acpi_video_bus *video);
static void acpi_video_device_bind(struct acpi_video_bus *video,
struct acpi_video_device *device);
static int acpi_video_device_enumerate(struct acpi_video_bus *video);
-static int acpi_video_switch_output(struct acpi_video_bus *video, int event);
static int acpi_video_device_lcd_set_level(struct acpi_video_device *device,
int level);
static int acpi_video_device_lcd_get_level_current(
@@ -292,18 +298,26 @@ static int acpi_video_device_set_state(struct acpi_video_device *device, int sta
static int acpi_video_get_brightness(struct backlight_device *bd)
{
unsigned long cur_level;
+ int i;
struct acpi_video_device *vd =
(struct acpi_video_device *)bl_get_data(bd);
acpi_video_device_lcd_get_level_current(vd, &cur_level);
- return (int) cur_level;
+ for (i = 2; i < vd->brightness->count; i++) {
+ if (vd->brightness->levels[i] == cur_level)
+ /* The first two entries are special - see page 575
+ of the ACPI spec 3.0 */
+ return i-2;
+ }
+ return 0;
}
static int acpi_video_set_brightness(struct backlight_device *bd)
{
- int request_level = bd->props.brightness;
+ int request_level = bd->props.brightness+2;
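+ /* +2 skips the two special _BCL entries (full-power and battery defaults) */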
struct acpi_video_device *vd =
(struct acpi_video_device *)bl_get_data(bd);
- acpi_video_device_lcd_set_level(vd, request_level);
+ acpi_video_device_lcd_set_level(vd,
+ vd->brightness->levels[request_level]);
return 0;
}
@@ -334,6 +348,54 @@ static struct output_properties acpi_output_properties = {
.set_state = acpi_video_output_set,
.get_status = acpi_video_output_get,
};
+
+
+/* thermal cooling device callbacks */
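+/*
+ * Cooling state 0 selects the last entry in the _BCL table (normally the
+ * brightest level); each higher state steps one entry back down the table.
+ */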
+static int video_get_max_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_video_device *video = acpi_driver_data(device);
+
+ return sprintf(buf, "%d\n", video->brightness->count - 3);
+}
+
+static int video_get_cur_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_video_device *video = acpi_driver_data(device);
+ unsigned long level;
+ int state;
+
+ acpi_video_device_lcd_get_level_current(video, &level);
+ for (state = 2; state < video->brightness->count; state++)
+ if (level == video->brightness->levels[state])
+ return sprintf(buf, "%d\n",
+ video->brightness->count - state - 1);
+
+ return -EINVAL;
+}
+
+static int
+video_set_cur_state(struct thermal_cooling_device *cdev, unsigned int state)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_video_device *video = acpi_driver_data(device);
+ int level;
+
+ if (state >= video->brightness->count - 2)
+ return -EINVAL;
+
+ state = video->brightness->count - state;
+ level = video->brightness->levels[state - 1];
+ return acpi_video_device_lcd_set_level(video, level);
+}
+
+static struct thermal_cooling_device_ops video_cooling_ops = {
+ .get_max_state = video_get_max_state,
+ .get_cur_state = video_get_cur_state,
+ .set_cur_state = video_set_cur_state,
+};
+
/* --------------------------------------------------------------------------
Video Management
-------------------------------------------------------------------------- */
@@ -652,7 +714,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
kfree(obj);
if (device->cap._BCL && device->cap._BCM && device->cap._BQC && max_level > 0){
- unsigned long tmp;
+ int result;
static int count = 0;
char *name;
name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
@@ -660,14 +722,30 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
return;
sprintf(name, "acpi_video%d", count++);
- acpi_video_device_lcd_get_level_current(device, &tmp);
device->backlight = backlight_device_register(name,
NULL, device, &acpi_backlight_ops);
- device->backlight->props.max_brightness = max_level;
- device->backlight->props.brightness = (int)tmp;
+ device->backlight->props.max_brightness = device->brightness->count-3;
+ device->backlight->props.brightness = acpi_video_get_brightness(device->backlight);
backlight_update_status(device->backlight);
-
kfree(name);
+
+ device->cdev = thermal_cooling_device_register("LCD",
+ device->dev, &video_cooling_ops);
+ if (device->cdev) {
+ printk(KERN_INFO PREFIX
+ "%s is registered as cooling_device%d\n",
+ device->dev->dev.bus_id, device->cdev->id);
+ result = sysfs_create_link(&device->dev->dev.kobj,
+ &device->cdev->device.kobj,
+ "thermal_cooling");
+ if (result)
+ printk(KERN_ERR PREFIX "Failed to create 'thermal_cooling' sysfs link\n");
+ result = sysfs_create_link(&device->cdev->device.kobj,
+ &device->dev->dev.kobj,
+ "device");
+ if (result)
+ printk(KERN_ERR PREFIX "Failed to create 'device' sysfs link\n");
+ }
}
if (device->cap._DCS && device->cap._DSS){
static int count = 0;
@@ -726,11 +804,40 @@ static void acpi_video_bus_find_cap(struct acpi_video_bus *video)
static int acpi_video_bus_check(struct acpi_video_bus *video)
{
acpi_status status = -ENOENT;
-
+ long device_id;
+ struct device *dev;
+ struct acpi_device *device;
if (!video)
return -EINVAL;
+ device = video->device;
+
+ status =
+ acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id);
+
+ if (!ACPI_SUCCESS(status))
+ return -ENODEV;
+
+ /* We need to attempt to determine whether the _ADR refers to a
+ PCI device or not. There's no terribly good way to do this,
+ so the best we can hope for is to assume that there'll never
+ be a video device in the host bridge */
+ if (device_id >= 0x10000) {
+ /* It looks like a PCI device. Does it exist? */
+ dev = acpi_get_physical_device(device->handle);
+ } else {
+ /* It doesn't look like a PCI device. Does its parent
+ exist? */
+ acpi_handle phandle;
+ if (acpi_get_parent(device->handle, &phandle))
+ return -ENODEV;
+ dev = acpi_get_physical_device(phandle);
+ }
+ if (!dev)
+ return -ENODEV;
+ put_device(dev);
+
/* Since there is no HID, CID and so on for VGA driver, we have
* to check well known required nodes.
*/
@@ -1256,8 +1363,37 @@ acpi_video_bus_write_DOS(struct file *file,
static int acpi_video_bus_add_fs(struct acpi_device *device)
{
+ long device_id;
+ int status;
struct proc_dir_entry *entry = NULL;
struct acpi_video_bus *video;
+ struct device *dev;
+
+ status =
+ acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id);
+
+ if (!ACPI_SUCCESS(status))
+ return -ENODEV;
+
+ /* We need to attempt to determine whether the _ADR refers to a
+ PCI device or not. There's no terribly good way to do this,
+ so the best we can hope for is to assume that there'll never
+ be a video device in the host bridge */
+ if (device_id >= 0x10000) {
+ /* It looks like a PCI device. Does it exist? */
+ dev = acpi_get_physical_device(device->handle);
+ } else {
+ /* It doesn't look like a PCI device. Does its parent
+ exist? */
+ acpi_handle phandle;
+ if (acpi_get_parent(device->handle, &phandle))
+ return -ENODEV;
+ dev = acpi_get_physical_device(phandle);
+ }
+ if (!dev)
+ return -ENODEV;
+ put_device(dev);
+
video = acpi_driver_data(device);
@@ -1580,64 +1716,6 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
return status;
}
-/*
- * Arg:
- * video : video bus device
- * event : notify event
- *
- * Return:
- * < 0 : error
- *
- * 1. Find out the current active output device.
- * 2. Identify the next output device to switch to.
- * 3. call _DSS to do actual switch.
- */
-
-static int acpi_video_switch_output(struct acpi_video_bus *video, int event)
-{
- struct list_head *node;
- struct acpi_video_device *dev = NULL;
- struct acpi_video_device *dev_next = NULL;
- struct acpi_video_device *dev_prev = NULL;
- unsigned long state;
- int status = 0;
-
- mutex_lock(&video->device_list_lock);
-
- list_for_each(node, &video->video_device_list) {
- dev = container_of(node, struct acpi_video_device, entry);
- status = acpi_video_device_get_state(dev, &state);
- if (state & 0x2) {
- dev_next = container_of(node->next,
- struct acpi_video_device, entry);
- dev_prev = container_of(node->prev,
- struct acpi_video_device, entry);
- goto out;
- }
- }
-
- dev_next = container_of(node->next, struct acpi_video_device, entry);
- dev_prev = container_of(node->prev, struct acpi_video_device, entry);
-
- out:
- mutex_unlock(&video->device_list_lock);
-
- switch (event) {
- case ACPI_VIDEO_NOTIFY_CYCLE:
- case ACPI_VIDEO_NOTIFY_NEXT_OUTPUT:
- acpi_video_device_set_state(dev, 0);
- acpi_video_device_set_state(dev_next, 0x80000001);
- break;
- case ACPI_VIDEO_NOTIFY_PREV_OUTPUT:
- acpi_video_device_set_state(dev, 0);
- acpi_video_device_set_state(dev_prev, 0x80000001);
- default:
- break;
- }
-
- return status;
-}
-
static int
acpi_video_get_next_level(struct acpi_video_device *device,
u32 level_current, u32 event)
@@ -1729,6 +1807,14 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
ACPI_DEVICE_NOTIFY,
acpi_video_device_notify);
backlight_device_unregister(device->backlight);
+ if (device->cdev) {
+ sysfs_remove_link(&device->dev->dev.kobj,
+ "thermal_cooling");
+ sysfs_remove_link(&device->cdev->device.kobj,
+ "device");
+ thermal_cooling_device_unregister(device->cdev);
+ device->cdev = NULL;
+ }
video_output_unregister(device->output_dev);
return 0;
@@ -1797,23 +1883,19 @@ static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data)
* connector. */
acpi_video_device_enumerate(video);
acpi_video_device_rebind(video);
- acpi_video_switch_output(video, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_CYCLE: /* Cycle Display output hotkey pressed. */
- acpi_video_switch_output(video, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_NEXT_OUTPUT: /* Next Display output hotkey pressed. */
- acpi_video_switch_output(video, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_VIDEO_NEXT;
break;
case ACPI_VIDEO_NOTIFY_PREV_OUTPUT: /* previous Display output hotkey pressed. */
- acpi_video_switch_output(video, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_VIDEO_PREV;
break;
@@ -1825,6 +1907,7 @@ static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data)
break;
}
+ acpi_notifier_call_chain(device, event, 0);
input_report_key(input, keycode, 1);
input_sync(input);
input_report_key(input, keycode, 0);
@@ -1850,27 +1933,32 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
switch (event) {
case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS: /* Cycle brightness */
- acpi_video_switch_brightness(video_device, event);
+ if (brightness_switch_enabled)
+ acpi_video_switch_brightness(video_device, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESS_CYCLE;
break;
case ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS: /* Increase brightness */
- acpi_video_switch_brightness(video_device, event);
+ if (brightness_switch_enabled)
+ acpi_video_switch_brightness(video_device, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESSUP;
break;
case ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS: /* Decrease brightness */
- acpi_video_switch_brightness(video_device, event);
+ if (brightness_switch_enabled)
+ acpi_video_switch_brightness(video_device, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESSDOWN;
break;
case ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS: /* zero brightnesss */
- acpi_video_switch_brightness(video_device, event);
+ if (brightness_switch_enabled)
+ acpi_video_switch_brightness(video_device, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESS_ZERO;
break;
case ACPI_VIDEO_NOTIFY_DISPLAY_OFF: /* display device off */
- acpi_video_switch_brightness(video_device, event);
+ if (brightness_switch_enabled)
+ acpi_video_switch_brightness(video_device, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_DISPLAY_OFF;
break;
@@ -1881,6 +1969,7 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
break;
}
+ acpi_notifier_call_chain(device, event, 0);
input_report_key(input, keycode, 1);
input_sync(input);
input_report_key(input, keycode, 0);
@@ -1890,6 +1979,25 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
}
static int instance;
+static int acpi_video_resume(struct acpi_device *device)
+{
+ struct acpi_video_bus *video;
+ struct acpi_video_device *video_device;
+ int i;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ video = acpi_driver_data(device);
+
+ for (i = 0; i < video->attached_count; i++) {
+ video_device = video->attached_array[i].bind_info;
+ if (video_device && video_device->backlight)
+ acpi_video_set_brightness(video_device->backlight);
+ }
+ return AE_OK;
+}
+
static int acpi_video_bus_add(struct acpi_device *device)
{
acpi_status status;
diff --git a/drivers/acpi/wmi.c b/drivers/acpi/wmi.c
new file mode 100644
index 000000000000..36b84ab418dd
--- /dev/null
+++ b/drivers/acpi/wmi.c
@@ -0,0 +1,710 @@
+/*
+ * ACPI-WMI mapping driver
+ *
+ * Copyright (C) 2007-2008 Carlos Corbacho <carlos@strangeworlds.co.uk>
+ *
+ * GUID parsing code from ldm.c is:
+ * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
+ * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+ACPI_MODULE_NAME("wmi");
+MODULE_AUTHOR("Carlos Corbacho");
+MODULE_DESCRIPTION("ACPI-WMI Mapping Driver");
+MODULE_LICENSE("GPL");
+
+#define ACPI_WMI_CLASS "wmi"
+
+#undef PREFIX
+#define PREFIX "ACPI: WMI: "
+
+static DEFINE_MUTEX(wmi_data_lock);
+
+struct guid_block {
+ char guid[16];
+ union {
+ char object_id[2];
+ struct {
+ unsigned char notify_id;
+ unsigned char reserved;
+ };
+ };
+ u8 instance_count;
+ u8 flags;
+};
+
+struct wmi_block {
+ struct list_head list;
+ struct guid_block gblock;
+ acpi_handle handle;
+ wmi_notify_handler handler;
+ void *handler_data;
+};
+
+static struct wmi_block wmi_blocks;
+
+/*
+ * If the GUID data block is marked as expensive, we must enable and
+ * explicitily disable data collection.
+ */
+#define ACPI_WMI_EXPENSIVE 0x1
+#define ACPI_WMI_METHOD 0x2 /* GUID is a method */
+#define ACPI_WMI_STRING 0x4 /* GUID takes & returns a string */
+#define ACPI_WMI_EVENT 0x8 /* GUID is an event */
+
+static int acpi_wmi_remove(struct acpi_device *device, int type);
+static int acpi_wmi_add(struct acpi_device *device);
+
+static const struct acpi_device_id wmi_device_ids[] = {
+ {"PNP0C14", 0},
+ {"pnp0c14", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, wmi_device_ids);
+
+static struct acpi_driver acpi_wmi_driver = {
+ .name = "wmi",
+ .class = ACPI_WMI_CLASS,
+ .ids = wmi_device_ids,
+ .ops = {
+ .add = acpi_wmi_add,
+ .remove = acpi_wmi_remove,
+ },
+};
+
+/*
+ * GUID parsing functions
+ */
+
+/**
+ * wmi_parse_hexbyte - Convert an ASCII hex number to a byte
+ * @src: Pointer to at least 2 characters to convert.
+ *
+ * Convert a two character ASCII hex string to a number.
+ *
+ * Return: 0-255 Success, the byte was parsed correctly
+ * -1 Error, an invalid character was supplied
+ */
+static int wmi_parse_hexbyte(const u8 *src)
+{
+ unsigned int x; /* For correct wrapping */
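+ /* being unsigned, values below '0' wrap and fail the range checks below */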
+ int h;
+
+ /* high part */
+ x = src[0];
+ if (x - '0' <= '9' - '0') {
+ h = x - '0';
+ } else if (x - 'a' <= 'f' - 'a') {
+ h = x - 'a' + 10;
+ } else if (x - 'A' <= 'F' - 'A') {
+ h = x - 'A' + 10;
+ } else {
+ return -1;
+ }
+ h <<= 4;
+
+ /* low part */
+ x = src[1];
+ if (x - '0' <= '9' - '0')
+ return h | (x - '0');
+ if (x - 'a' <= 'f' - 'a')
+ return h | (x - 'a' + 10);
+ if (x - 'A' <= 'F' - 'A')
+ return h | (x - 'A' + 10);
+ return -1;
+}
+
+/**
+ * wmi_swap_bytes - Rearrange GUID bytes to match GUID binary
+ * @src: Memory block holding binary GUID (16 bytes)
+ * @dest: Memory block to hold byte swapped binary GUID (16 bytes)
+ *
+ * Byte swap a binary GUID to match its real GUID value
+ */
+static void wmi_swap_bytes(u8 *src, u8 *dest)
+{
+ int i;
+
+ for (i = 0; i <= 3; i++)
+ memcpy(dest + i, src + (3 - i), 1);
+
+ for (i = 0; i <= 1; i++)
+ memcpy(dest + 4 + i, src + (5 - i), 1);
+
+ for (i = 0; i <= 1; i++)
+ memcpy(dest + 6 + i, src + (7 - i), 1);
+
+ memcpy(dest + 8, src + 8, 8);
+}
+
+/**
+ * wmi_parse_guid - Convert GUID from ASCII to binary
+ * @src: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @dest: Memory block to hold binary GUID (16 bytes)
+ *
+ * N.B. The GUID need not be NULL terminated.
+ *
+ * Return: 'true' @dest contains binary GUID
+ * 'false' @dest contents are undefined
+ */
+static bool wmi_parse_guid(const u8 *src, u8 *dest)
+{
+ static const int size[] = { 4, 2, 2, 2, 6 };
+ int i, j, v;
+
+ if (src[8] != '-' || src[13] != '-' ||
+ src[18] != '-' || src[23] != '-')
+ return false;
+
+ for (j = 0; j < 5; j++, src++) {
+ for (i = 0; i < size[j]; i++, src += 2, *dest++ = v) {
+ v = wmi_parse_hexbyte(src);
+ if (v < 0)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool find_guid(const char *guid_string, struct wmi_block **out)
+{
+ char tmp[16], guid_input[16];
+ struct wmi_block *wblock;
+ struct guid_block *block;
+ struct list_head *p;
+
+ wmi_parse_guid(guid_string, tmp);
+ wmi_swap_bytes(tmp, guid_input);
+
+ list_for_each(p, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+ block = &wblock->gblock;
+
+ if (memcmp(block->guid, guid_input, 16) == 0) {
+ if (out)
+ *out = wblock;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Exported WMI functions
+ */
+/**
+ * wmi_evaluate_method - Evaluate a WMI method
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @instance: Instance index
+ * @method_id: Method ID to call
+ * @in: Buffer containing input for the method call
+ * @out: Empty buffer to return the method results
+ *
+ * Call an ACPI-WMI method
+ */
+acpi_status wmi_evaluate_method(const char *guid_string, u8 instance,
+u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
+{
+ struct guid_block *block = NULL;
+ struct wmi_block *wblock = NULL;
+ acpi_handle handle;
+ acpi_status status;
+ struct acpi_object_list input;
+ union acpi_object params[3];
+ char method[4] = "WM";
+
+ if (!find_guid(guid_string, &wblock))
+ return AE_BAD_ADDRESS;
+
+ block = &wblock->gblock;
+ handle = wblock->handle;
+
+ if (!(block->flags & ACPI_WMI_METHOD))
+ return AE_BAD_DATA;
+
+ if (block->instance_count < instance)
+ return AE_BAD_PARAMETER;
+
+ input.count = 2;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = instance;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = method_id;
+
+ if (in) {
+ input.count = 3;
+
+ if (block->flags & ACPI_WMI_STRING) {
+ params[2].type = ACPI_TYPE_STRING;
+ } else {
+ params[2].type = ACPI_TYPE_BUFFER;
+ }
+ params[2].buffer.length = in->length;
+ params[2].buffer.pointer = in->pointer;
+ }
+
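+ /* WMI method objects are named WMxx, xx being the object ID from _WDG */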
+ strncat(method, block->object_id, 2);
+
+ status = acpi_evaluate_object(handle, method, &input, out);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(wmi_evaluate_method);
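+
+/*
+ * Minimal usage sketch for a caller of wmi_evaluate_method(). The GUID,
+ * instance and method ID below are hypothetical placeholders, not values
+ * from any real _WDG block:
+ *
+ *	u32 arg = 0;
+ *	struct acpi_buffer in = { sizeof(arg), &arg };
+ *	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ *	acpi_status status;
+ *
+ *	status = wmi_evaluate_method("aabbccdd-eeff-0011-2233-445566778899",
+ *				     1, 1, &in, &out);
+ *	if (ACPI_SUCCESS(status))
+ *		kfree(out.pointer);
+ */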
+
+/**
+ * wmi_query_block - Return contents of a WMI block
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @instance: Instance index
+ * @out: Empty buffer to return the contents of the data block to
+ *
+ * Return the contents of an ACPI-WMI data block to a buffer
+ */
+acpi_status wmi_query_block(const char *guid_string, u8 instance,
+struct acpi_buffer *out)
+{
+ struct guid_block *block = NULL;
+ struct wmi_block *wblock = NULL;
+ acpi_handle handle;
+ acpi_status status, wc_status = AE_ERROR;
+ struct acpi_object_list input, wc_input;
+ union acpi_object wc_params[1], wq_params[1];
+ char method[4];
+ char wc_method[4] = "WC";
+
+ if (!guid_string || !out)
+ return AE_BAD_PARAMETER;
+
+ if (!find_guid(guid_string, &wblock))
+ return AE_BAD_ADDRESS;
+
+ block = &wblock->gblock;
+ handle = wblock->handle;
+
+ if (block->instance_count < instance)
+ return AE_BAD_PARAMETER;
+
+ /* Check GUID is a data block */
+ if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
+ return AE_BAD_ADDRESS;
+
+ input.count = 1;
+ input.pointer = wq_params;
+ wq_params[0].type = ACPI_TYPE_INTEGER;
+ wq_params[0].integer.value = instance;
+
+ /*
+ * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method first to
+ * enable collection.
+ */
+ if (block->flags & ACPI_WMI_EXPENSIVE) {
+ wc_input.count = 1;
+ wc_input.pointer = wc_params;
+ wc_params[0].type = ACPI_TYPE_INTEGER;
+ wc_params[0].integer.value = 1;
+
+ strncat(wc_method, block->object_id, 2);
+
+ /*
+ * Some GUIDs break the specification by declaring themselves
+ * expensive, but have no corresponding WCxx method. So we
+ * should not fail if this happens.
+ */
+ wc_status = acpi_evaluate_object(handle, wc_method,
+ &wc_input, NULL);
+ }
+
+ strcpy(method, "WQ");
+ strncat(method, block->object_id, 2);
+
+ status = acpi_evaluate_object(handle, method, NULL, out);
+
+ /*
+ * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method, even if
+ * the WQxx method failed - we should disable collection anyway.
+ */
+ if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) {
+ wc_params[0].integer.value = 0;
+ status = acpi_evaluate_object(handle,
+ wc_method, &wc_input, NULL);
+ }
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(wmi_query_block);
+
+/**
+ * wmi_set_block - Write to a WMI block
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @instance: Instance index
+ * @in: Buffer containing new values for the data block
+ *
+ * Write the contents of the input buffer to an ACPI-WMI data block
+ */
+acpi_status wmi_set_block(const char *guid_string, u8 instance,
+const struct acpi_buffer *in)
+{
+ struct guid_block *block = NULL;
+ struct wmi_block *wblock = NULL;
+ acpi_handle handle;
+ struct acpi_object_list input;
+ union acpi_object params[2];
+ char method[4] = "WS";
+
+ if (!guid_string || !in)
+ return AE_BAD_DATA;
+
+ if (!find_guid(guid_string, &wblock))
+ return AE_BAD_ADDRESS;
+
+ block = &wblock->gblock;
+ handle = wblock->handle;
+
+ if (block->instance_count < instance)
+ return AE_BAD_PARAMETER;
+
+ /* Check GUID is a data block */
+ if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
+ return AE_BAD_ADDRESS;
+
+ input.count = 2;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = instance;
+
+ if (block->flags & ACPI_WMI_STRING) {
+ params[1].type = ACPI_TYPE_STRING;
+ } else {
+ params[1].type = ACPI_TYPE_BUFFER;
+ }
+ params[1].buffer.length = in->length;
+ params[1].buffer.pointer = in->pointer;
+
+ strncat(method, block->object_id, 2);
+
+ return acpi_evaluate_object(handle, method, &input, NULL);
+}
+EXPORT_SYMBOL_GPL(wmi_set_block);
+
+/**
+ * wmi_install_notify_handler - Register handler for WMI events
+ * @guid: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @handler: Function to handle notifications
+ * @data: Data to be returned to handler when event is fired
+ *
+ * Register a handler for events sent to the ACPI-WMI mapper device.
+ */
+acpi_status wmi_install_notify_handler(const char *guid,
+wmi_notify_handler handler, void *data)
+{
+ struct wmi_block *block;
+
+ if (!guid || !handler)
+ return AE_BAD_PARAMETER;
+
+ if (!find_guid(guid, &block))
+ return AE_NOT_EXIST;
+
+ if (block->handler)
+ return AE_ALREADY_ACQUIRED;
+
+ block->handler = handler;
+ block->handler_data = data;
+
+ return AE_OK;
+}
+EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
+
+/**
+ * wmi_remove_notify_handler - Unregister handler for WMI events
+ * @guid: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ *
+ * Unregister handler for events sent to the ACPI-WMI mapper device.
+ */
+acpi_status wmi_remove_notify_handler(const char *guid)
+{
+ struct wmi_block *block;
+
+ if (!guid)
+ return AE_BAD_PARAMETER;
+
+ if (!find_guid(guid, &block))
+ return AE_NOT_EXIST;
+
+ if (!block->handler)
+ return AE_NULL_ENTRY;
+
+ block->handler = NULL;
+ block->handler_data = NULL;
+
+ return AE_OK;
+}
+EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
+
+/**
+ * wmi_get_event_data - Get WMI data associated with an event
+ *
+ * @event: Event to find
+ * @out: Buffer to hold event data
+ *
+ * Returns extra data associated with an event in WMI.
+ */
+acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
+{
+ struct acpi_object_list input;
+ union acpi_object params[1];
+ struct guid_block *gblock;
+ struct wmi_block *wblock;
+ struct list_head *p;
+
+ input.count = 1;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = event;
+
+ list_for_each(p, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+ gblock = &wblock->gblock;
+
+ if ((gblock->flags & ACPI_WMI_EVENT) &&
+ (gblock->notify_id == event))
+ return acpi_evaluate_object(wblock->handle, "_WED",
+ &input, out);
+ }
+
+ return AE_NOT_FOUND;
+}
+EXPORT_SYMBOL_GPL(wmi_get_event_data);
+
+/**
+ * wmi_has_guid - Check if a GUID is available
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ *
+ * Check if a given GUID is defined by _WDG
+ */
+bool wmi_has_guid(const char *guid_string)
+{
+ return find_guid(guid_string, NULL);
+}
+EXPORT_SYMBOL_GPL(wmi_has_guid);
+
+/*
+ * Parse the _WDG method for the GUID data blocks
+ */
+static __init acpi_status parse_wdg(acpi_handle handle)
+{
+ struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *obj;
+ struct guid_block *gblock;
+ struct wmi_block *wblock;
+ acpi_status status;
+ u32 i, total;
+
+ status = acpi_evaluate_object(handle, "_WDG", NULL, &out);
+
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) out.pointer;
+
+ if (obj->type != ACPI_TYPE_BUFFER)
+ return AE_ERROR;
+
+ total = obj->buffer.length / sizeof(struct guid_block);
+
+ gblock = kzalloc(obj->buffer.length, GFP_KERNEL);
+ if (!gblock)
+ return AE_NO_MEMORY;
+
+ memcpy(gblock, obj->buffer.pointer, obj->buffer.length);
+
+ for (i = 0; i < total; i++) {
+ wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
+ if (!wblock)
+ return AE_NO_MEMORY;
+
+ wblock->gblock = gblock[i];
+ wblock->handle = handle;
+ list_add_tail(&wblock->list, &wmi_blocks.list);
+ }
+
+ kfree(out.pointer);
+ kfree(gblock);
+
+ return status;
+}
+
+/*
+ * WMI can have EmbeddedControl access regions, in which case we just want
+ * to hand these off to the EC driver.
+ */
+static acpi_status
+acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
+ u32 bits, acpi_integer * value,
+ void *handler_context, void *region_context)
+{
+ int result = 0, i = 0;
+ u8 temp = 0;
+
+ if ((address > 0xFF) || !value)
+ return AE_BAD_PARAMETER;
+
+ if (function != ACPI_READ && function != ACPI_WRITE)
+ return AE_BAD_PARAMETER;
+
+ if (bits != 8)
+ return AE_BAD_PARAMETER;
+
+ if (function == ACPI_READ) {
+ result = ec_read(address, &temp);
+ (*value) |= ((acpi_integer)temp) << i;
+ } else {
+ temp = 0xff & ((*value) >> i);
+ result = ec_write(address, temp);
+ }
+
+ switch (result) {
+ case -EINVAL:
+ return AE_BAD_PARAMETER;
+ break;
+ case -ENODEV:
+ return AE_NOT_FOUND;
+ break;
+ case -ETIME:
+ return AE_TIME;
+ break;
+ default:
+ return AE_OK;
+ }
+}
+
+static void acpi_wmi_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct guid_block *block;
+ struct wmi_block *wblock;
+ struct list_head *p;
+ struct acpi_device *device = data;
+
+ list_for_each(p, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+ block = &wblock->gblock;
+
+ if ((block->flags & ACPI_WMI_EVENT) &&
+ (block->notify_id == event)) {
+ if (wblock->handler)
+ wblock->handler(event, wblock->handler_data);
+
+ acpi_bus_generate_netlink_event(
+ device->pnp.device_class, device->dev.bus_id,
+ event, 0);
+ break;
+ }
+ }
+}
+
+static int acpi_wmi_remove(struct acpi_device *device, int type)
+{
+ acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_wmi_notify);
+
+ acpi_remove_address_space_handler(device->handle,
+ ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler);
+
+ return 0;
+}
+
+static int __init acpi_wmi_add(struct acpi_device *device)
+{
+ acpi_status status;
+ int result = 0;
+
+ status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_wmi_notify, device);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Error installing notify handler\n");
+ return -ENODEV;
+ }
+
+ status = acpi_install_address_space_handler(device->handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_wmi_ec_space_handler,
+ NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ status = parse_wdg(device->handle);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Failed to parse _WDG data blocks\n");
+ return -ENODEV;
+ }
+
+ return result;
+}
+
+static int __init acpi_wmi_init(void)
+{
+ int result;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&wmi_blocks.list);
+
+ result = acpi_bus_register_driver(&acpi_wmi_driver);
+
+ if (result < 0) {
+ printk(KERN_ERR PREFIX "Error loading mapper\n");
+ } else {
+ printk(KERN_INFO PREFIX "Mapper loaded\n");
+ }
+
+ return result;
+}
+
+static void __exit acpi_wmi_exit(void)
+{
+ struct list_head *p, *tmp;
+ struct wmi_block *wblock;
+
+ acpi_bus_unregister_driver(&acpi_wmi_driver);
+
+ list_for_each_safe(p, tmp, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+
+ list_del(p);
+ kfree(wblock);
+ }
+
+ printk(KERN_INFO PREFIX "Mapper unloaded\n");
+}
+
+subsys_initcall(acpi_wmi_init);
+module_exit(acpi_wmi_exit);
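For orientation, a minimal consumer of the wmi.c exports above could look like the
sketch that follows. It is not part of the patch: the GUID string, the event handler
and the module boilerplate are placeholders, and the prototypes (wmi_has_guid,
wmi_query_block, wmi_install_notify_handler, wmi_remove_notify_handler and the
wmi_notify_handler type) are assumed to be made visible through <linux/acpi.h>
elsewhere in this series.

/* Hedged sketch of a laptop driver sitting on top of the ACPI-WMI mapper.
 * EXAMPLE_GUID is a made-up placeholder, not a real vendor GUID.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/acpi.h>

#define EXAMPLE_GUID "ABBC0F72-8EA1-11D1-A000-C90629100000"

static void example_wmi_notify(u32 value, void *context)
{
	/* value is the notify_id that the mapper matched in acpi_wmi_notify() */
	printk(KERN_INFO "example-wmi: event 0x%02x\n", value);
}

static int __init example_wmi_init(void)
{
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	if (!wmi_has_guid(EXAMPLE_GUID))
		return -ENODEV;		/* firmware does not expose this block */

	/* Read instance 0 of the data block; the result buffer is assumed to
	 * come from kmalloc (ACPI_ALLOCATE_BUFFER), so kfree() it when done. */
	status = wmi_query_block(EXAMPLE_GUID, 0, &out);
	if (ACPI_SUCCESS(status))
		kfree(out.pointer);

	/* Get called back whenever the mapper sees the matching WMI event. */
	status = wmi_install_notify_handler(EXAMPLE_GUID, example_wmi_notify, NULL);
	return ACPI_SUCCESS(status) ? 0 : -ENODEV;
}

static void __exit example_wmi_exit(void)
{
	wmi_remove_notify_handler(EXAMPLE_GUID);
}

module_init(example_wmi_init);
module_exit(example_wmi_exit);
MODULE_LICENSE("GPL");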
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 27c8d56111c2..29e71bddd6ff 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -679,24 +679,20 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
/* cross check port_map and cap.n_ports */
if (port_map) {
- u32 tmp_port_map = port_map;
- int n_ports = ahci_nr_ports(cap);
+ int map_ports = 0;
- for (i = 0; i < AHCI_MAX_PORTS && n_ports; i++) {
- if (tmp_port_map & (1 << i)) {
- n_ports--;
- tmp_port_map &= ~(1 << i);
- }
- }
+ for (i = 0; i < AHCI_MAX_PORTS; i++)
+ if (port_map & (1 << i))
+ map_ports++;
- /* If n_ports and port_map are inconsistent, whine and
- * clear port_map and let it be generated from n_ports.
+ /* If PI has more ports than n_ports, whine, clear
+ * port_map and let it be generated from n_ports.
*/
- if (n_ports || tmp_port_map) {
+ if (map_ports > ahci_nr_ports(cap)) {
dev_printk(KERN_WARNING, &pdev->dev,
- "nr_ports (%u) and implemented port map "
- "(0x%x) don't match, using nr_ports\n",
- ahci_nr_ports(cap), port_map);
+ "implemented port map (0x%x) contains more "
+ "ports than nr_ports (%u), using nr_ports\n",
+ port_map, ahci_nr_ports(cap));
port_map = 0;
}
}
@@ -2201,7 +2197,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct ata_host *host;
- int i, rc;
+ int n_ports, i, rc;
VPRINTK("ENTER\n");
@@ -2255,7 +2251,14 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
- host = ata_host_alloc_pinfo(&pdev->dev, ppi, fls(hpriv->port_map));
+ /* CAP.NP sometimes indicates the index of the last enabled
+ * port, at other times, that of the last possible port, so
+ * determining the maximum port number requires looking at
+ * both CAP.NP and port_map.
+ */
+ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host)
return -ENOMEM;
host->iomap = pcim_iomap_table(pdev);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 47892e6f5ded..9c2515f67de5 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -837,7 +837,7 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
if (is_slave) {
/* clear TIME1|IE1|PPE1|DTE1 */
master_data &= 0xff0f;
- /* Enable SITRE (seperate slave timing register) */
+ /* Enable SITRE (separate slave timing register) */
master_data |= 0x4000;
/* enable PPE1, IE1 and TIME1 as needed */
master_data |= (control << 4);
@@ -1603,7 +1603,8 @@ static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
* Zero on success, or -ERRNO value.
*/
-static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit piix_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static int printed_version;
struct device *dev = &pdev->dev;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index bdbd55af7022..3011919f3ec8 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3097,7 +3097,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
/**
* ata_do_set_mode - Program timings and issue SET FEATURES - XFER
* @link: link on which timings will be programmed
- * @r_failed_dev: out paramter for failed device
+ * @r_failed_dev: out parameter for failed device
*
* Standard implementation of the function used to tune and set
* ATA device disk transfer mode (PIO3, UDMA6, etc.). If
@@ -4154,8 +4154,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* NCQ is broken */
{ "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
{ "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
- { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
- { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
{ "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
{ "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
index 67e574de31e8..db057b183d60 100644
--- a/drivers/ata/pata_at32.c
+++ b/drivers/ata/pata_at32.c
@@ -324,7 +324,7 @@ static int __init pata_at32_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- /* Setup struct containing private infomation */
+ /* Setup struct containing private information */
info = kzalloc(sizeof(struct at32_ide_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 043dcd35106c..dc33220fe5b2 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -135,7 +135,7 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
idetm_data &= 0xCC0F;
idetm_data |= (control << 4);
- /* Slave timing in seperate register */
+ /* Slave timing in separate register */
pci_read_config_byte(dev, 0x44, &slave_data);
slave_data &= 0x0F << shift;
slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << shift;
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index 1eda821e5e39..e0c2cc29d0ca 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -128,7 +128,7 @@ static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev)
idetm_data &= 0xCC0F;
idetm_data |= (control << 4);
- /* Slave timing in seperate register */
+ /* Slave timing in separate register */
pci_read_config_byte(dev, 0x44, &slave_data);
slave_data &= 0xF0;
slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << 4;
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 938f48a807eb..408da30594c4 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
static int __devinit pata_of_platform_probe(struct of_device *ofdev,
const struct of_device_id *match)
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 224bb6c2030a..aad7adc6ea56 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -19,7 +19,7 @@
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#define DRV_NAME "pata_platform"
#define DRV_VERSION "1.2"
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 87546d9f1ca0..dc7e91562e43 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -345,7 +345,7 @@ static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
if (adev->dma_mode < XFER_UDMA_0) {
/* bits 3-0 hold recovery timing bits 8-10 active timing and
- the higer bits are dependant on the device */
+ the higher bits are dependent on the device */
timing &= ~0x870F;
timing |= mwdma_bits[speed];
} else {
@@ -385,7 +385,7 @@ static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
if (adev->dma_mode < XFER_UDMA_0) {
/* bits 3-0 hold recovery timing bits 8-10 active timing and
- the higer bits are dependant on the device, bit 15 udma */
+ the higher bits are dependent on the device, bit 15 udma */
timing &= ~0x870F;
timing |= mwdma_bits[speed];
} else {
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 922d7b2efba8..efcb66b6ccef 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -355,8 +355,8 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
ata_port_printk(qc->ap, KERN_ERR,
"s/g len unaligned : 0x%x\n", sg_len);
- if ((num_prde == (SATA_FSL_MAX_PRD_DIRECT - 1)) &&
- (qc->n_iter + 1 != qc->n_elem)) {
+ if (num_prde == (SATA_FSL_MAX_PRD_DIRECT - 1) &&
+ sg_next(sg) != NULL) {
VPRINTK("setting indirect prde\n");
prd_ptr_to_indirect_ext = prd;
prd->dba = cpu_to_le32(indirect_ext_segment_paddr);
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 96e614a1c169..59e65edc5820 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -108,17 +108,6 @@ struct inic_port_priv {
u8 cached_pirq_mask;
};
-static int inic_slave_config(struct scsi_device *sdev)
-{
- /* This controller is braindamaged. dma_boundary is 0xffff
- * like others but it will lock up the whole machine HARD if
- * 65536 byte PRD entry is fed. Reduce maximum segment size.
- */
- blk_queue_max_segment_size(sdev->request_queue, 65536 - 512);
-
- return ata_scsi_slave_config(sdev);
-}
-
static struct scsi_host_template inic_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
@@ -132,7 +121,7 @@ static struct scsi_host_template inic_sht = {
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
- .slave_configure = inic_slave_config,
+ .slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
};
@@ -730,6 +719,18 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
+ /*
+ * This controller is braindamaged. dma_boundary is 0xffff
+ * like others but it will lock up the whole machine HARD if
+ * 65536 byte PRD entry is fed. Reduce maximum segment size.
+ */
+ rc = pci_set_dma_max_seg_size(pdev, 65536 - 512);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "failed to set the maximum segment size.\n");
+ return rc;
+ }
+
rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 3c1b5c9027db..080b8362f8d6 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -69,8 +69,11 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -179,6 +182,8 @@ enum {
HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
HC_MAIN_IRQ_MASK_OFS = 0x1d64,
+ HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
+ HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
PORT0_ERR = (1 << 0), /* shift by port # */
PORT0_DONE = (1 << 1), /* shift by port # */
HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
@@ -194,11 +199,13 @@ enum {
TWSI_INT = (1 << 24),
HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
+ HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
HC_MAIN_RSVD),
HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
HC_MAIN_RSVD_5),
+ HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
/* SATAHC registers */
HC_CFG_OFS = 0,
@@ -368,6 +375,7 @@ enum chip_type {
chip_608x,
chip_6042,
chip_7042,
+ chip_soc,
};
/* Command ReQuest Block: 32B */
@@ -424,6 +432,10 @@ struct mv_host_priv {
u32 hp_flags;
struct mv_port_signal signal[8];
const struct mv_hw_ops *ops;
+ int n_ports;
+ void __iomem *base;
+ void __iomem *main_cause_reg_addr;
+ void __iomem *main_mask_reg_addr;
u32 irq_cause_ofs;
u32 irq_mask_ofs;
u32 unmask_all_irqs;
@@ -482,6 +494,15 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
+static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
+ void __iomem *mmio);
+static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
+ void __iomem *mmio);
+static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
+ void __iomem *mmio, unsigned int n_hc);
+static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
+ void __iomem *mmio);
+static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no);
@@ -661,6 +682,12 @@ static const struct ata_port_info mv_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
},
+ { /* chip_soc */
+ .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &mv_iie_ops,
+ },
};
static const struct pci_device_id mv_pci_tbl[] = {
@@ -711,6 +738,15 @@ static const struct mv_hw_ops mv6xxx_ops = {
.reset_bus = mv_reset_pci_bus,
};
+static const struct mv_hw_ops mv_soc_ops = {
+ .phy_errata = mv6_phy_errata,
+ .enable_leds = mv_soc_enable_leds,
+ .read_preamp = mv_soc_read_preamp,
+ .reset_hc = mv_soc_reset_hc,
+ .reset_flash = mv_soc_reset_flash,
+ .reset_bus = mv_soc_reset_bus,
+};
+
/*
* Functions
*/
@@ -749,9 +785,15 @@ static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
+static inline void __iomem *mv_host_base(struct ata_host *host)
+{
+ struct mv_host_priv *hpriv = host->private_data;
+ return hpriv->base;
+}
+
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
- return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
+ return mv_port_base(mv_host_base(ap->host), ap->port_no);
}
static inline int mv_get_hc_count(unsigned long port_flags)
@@ -1649,16 +1691,21 @@ static void mv_intr_edma(struct ata_port *ap)
*/
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
- void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
+ struct mv_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->base;
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
u32 hc_irq_cause;
- int port, port0;
+ int port, port0, last_port;
if (hc == 0)
port0 = 0;
else
port0 = MV_PORTS_PER_HC;
+ if (HAS_PCI(host))
+ last_port = port0 + MV_PORTS_PER_HC;
+ else
+ last_port = port0 + hpriv->n_ports;
/* we'll need the HC success int register in most cases */
hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
if (!hc_irq_cause)
@@ -1669,7 +1716,7 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
hc, relevant, hc_irq_cause);
- for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
+ for (port = port0; port < port0 + last_port; port++) {
struct ata_port *ap = host->ports[port];
struct mv_port_priv *pp = ap->private_data;
int have_err_bits, hard_port, shift;
@@ -1764,13 +1811,15 @@ static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
+ struct mv_host_priv *hpriv = host->private_data;
unsigned int hc, handled = 0, n_hcs;
- void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
+ void __iomem *mmio = hpriv->base;
u32 irq_stat, irq_mask;
spin_lock(&host->lock);
- irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
- irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
+
+ irq_stat = readl(hpriv->main_cause_reg_addr);
+ irq_mask = readl(hpriv->main_mask_reg_addr);
/* check the cases where we either have nothing pending or have read
* a bogus register value which can indicate HW removal or PCI fault
@@ -1827,7 +1876,8 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->base;
void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
@@ -1840,7 +1890,8 @@ static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->base;
void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
@@ -2178,6 +2229,93 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
writel(m2, port_mmio + PHY_MODE2);
}
+/* TODO: use the generic LED interface to configure the SATA Presence
+ * and Activity LEDs on the board */
+static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
+ void __iomem *mmio)
+{
+ return;
+}
+
+static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
+ void __iomem *mmio)
+{
+ void __iomem *port_mmio;
+ u32 tmp;
+
+ port_mmio = mv_port_base(mmio, idx);
+ tmp = readl(port_mmio + PHY_MODE2);
+
+ hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
+ hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
+}
+
+#undef ZERO
+#define ZERO(reg) writel(0, port_mmio + (reg))
+static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
+ void __iomem *mmio, unsigned int port)
+{
+ void __iomem *port_mmio = mv_port_base(mmio, port);
+
+ writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
+
+ mv_channel_reset(hpriv, mmio, port);
+
+ ZERO(0x028); /* command */
+ writel(0x101f, port_mmio + EDMA_CFG_OFS);
+ ZERO(0x004); /* timer */
+ ZERO(0x008); /* irq err cause */
+ ZERO(0x00c); /* irq err mask */
+ ZERO(0x010); /* rq bah */
+ ZERO(0x014); /* rq inp */
+ ZERO(0x018); /* rq outp */
+ ZERO(0x01c); /* respq bah */
+ ZERO(0x024); /* respq outp */
+ ZERO(0x020); /* respq inp */
+ ZERO(0x02c); /* test control */
+ writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
+}
+
+#undef ZERO
+
+#define ZERO(reg) writel(0, hc_mmio + (reg))
+static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
+ void __iomem *mmio)
+{
+ void __iomem *hc_mmio = mv_hc_base(mmio, 0);
+
+ ZERO(0x00c);
+ ZERO(0x010);
+ ZERO(0x014);
+
+}
+
+#undef ZERO
+
+static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
+ void __iomem *mmio, unsigned int n_hc)
+{
+ unsigned int port;
+
+ for (port = 0; port < hpriv->n_ports; port++)
+ mv_soc_reset_hc_port(hpriv, mmio, port);
+
+ mv_soc_reset_one_hc(hpriv, mmio);
+
+ return 0;
+}
+
+static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
+ void __iomem *mmio)
+{
+ return;
+}
+
+static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
+{
+ return;
+}
+
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no)
{
@@ -2342,7 +2480,7 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
{
struct ata_port *ap = link->ap;
struct mv_host_priv *hpriv = ap->host->private_data;
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ void __iomem *mmio = hpriv->base;
mv_stop_dma(ap);
@@ -2383,7 +2521,7 @@ static void mv_error_handler(struct ata_port *ap)
static void mv_eh_freeze(struct ata_port *ap)
{
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ struct mv_host_priv *hpriv = ap->host->private_data;
unsigned int hc = (ap->port_no > 3) ? 1 : 0;
u32 tmp, mask;
unsigned int shift;
@@ -2397,13 +2535,14 @@ static void mv_eh_freeze(struct ata_port *ap)
mask = 0x3 << shift;
/* disable assertion of portN err, done events */
- tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
- writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
+ tmp = readl(hpriv->main_mask_reg_addr);
+ writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}
static void mv_eh_thaw(struct ata_port *ap)
{
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->base;
unsigned int hc = (ap->port_no > 3) ? 1 : 0;
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
void __iomem *port_mmio = mv_ap_base(ap);
@@ -2430,8 +2569,8 @@ static void mv_eh_thaw(struct ata_port *ap)
writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
/* enable assertion of portN err, done events */
- tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
- writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
+ tmp = readl(hpriv->main_mask_reg_addr);
+ writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
/**
@@ -2598,9 +2737,13 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
break;
}
break;
+ case chip_soc:
+ hpriv->ops = &mv_soc_ops;
+ hp_flags |= MV_HP_ERRATA_60X1C0;
+ break;
default:
- dev_printk(KERN_ERR, &pdev->dev,
+ dev_printk(KERN_ERR, host->dev,
"BUG: invalid board index %u\n", board_idx);
return 1;
}
@@ -2633,15 +2776,25 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
int rc = 0, n_hc, port, hc;
- void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
struct mv_host_priv *hpriv = host->private_data;
-
- /* global interrupt mask */
- writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
+ void __iomem *mmio = hpriv->base;
rc = mv_chip_id(host, board_idx);
if (rc)
- goto done;
+ goto done;
+
+ if (HAS_PCI(host)) {
+ hpriv->main_cause_reg_addr = hpriv->base +
+ HC_MAIN_IRQ_CAUSE_OFS;
+ hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
+ } else {
+ hpriv->main_cause_reg_addr = hpriv->base +
+ HC_SOC_MAIN_IRQ_CAUSE_OFS;
+ hpriv->main_mask_reg_addr = hpriv->base +
+ HC_SOC_MAIN_IRQ_MASK_OFS;
+ }
+ /* global interrupt mask */
+ writel(0, hpriv->main_mask_reg_addr);
n_hc = mv_get_hc_count(host->ports[0]->flags);
@@ -2672,13 +2825,15 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
for (port = 0; port < host->n_ports; port++) {
struct ata_port *ap = host->ports[port];
void __iomem *port_mmio = mv_port_base(mmio, port);
- unsigned int offset = port_mmio - mmio;
mv_port_init(&ap->ioaddr, port_mmio);
#ifdef CONFIG_PCI
- ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
- ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
+ if (HAS_PCI(host)) {
+ unsigned int offset = port_mmio - mmio;
+ ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
+ ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
+ }
#endif
}
@@ -2694,35 +2849,141 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
}
- /* Clear any currently outstanding host interrupt conditions */
- writelfl(0, mmio + hpriv->irq_cause_ofs);
+ if (HAS_PCI(host)) {
+ /* Clear any currently outstanding host interrupt conditions */
+ writelfl(0, mmio + hpriv->irq_cause_ofs);
- /* and unmask interrupt generation for host regs */
- writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
+ /* and unmask interrupt generation for host regs */
+ writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
+ if (IS_GEN_I(hpriv))
+ writelfl(~HC_MAIN_MASKED_IRQS_5,
+ hpriv->main_mask_reg_addr);
+ else
+ writelfl(~HC_MAIN_MASKED_IRQS,
+ hpriv->main_mask_reg_addr);
+
+ VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
+ "PCI int cause/mask=0x%08x/0x%08x\n",
+ readl(hpriv->main_cause_reg_addr),
+ readl(hpriv->main_mask_reg_addr),
+ readl(mmio + hpriv->irq_cause_ofs),
+ readl(mmio + hpriv->irq_mask_ofs));
+ } else {
+ writelfl(~HC_MAIN_MASKED_IRQS_SOC,
+ hpriv->main_mask_reg_addr);
+ VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
+ readl(hpriv->main_cause_reg_addr),
+ readl(hpriv->main_mask_reg_addr));
+ }
+done:
+ return rc;
+}
- if (IS_GEN_I(hpriv))
- writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
- else
- writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
+/**
+ * mv_platform_probe - handle a positive probe of an SoC Marvell
+ * host
+ * @pdev: platform device found
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static int mv_platform_probe(struct platform_device *pdev)
+{
+ static int printed_version;
+ const struct mv_sata_platform_data *mv_platform_data;
+ const struct ata_port_info *ppi[] =
+ { &mv_port_info[chip_soc], NULL };
+ struct ata_host *host;
+ struct mv_host_priv *hpriv;
+ struct resource *res;
+ int n_ports, rc;
- VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
- "PCI int cause/mask=0x%08x/0x%08x\n",
- readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
- readl(mmio + HC_MAIN_IRQ_MASK_OFS),
- readl(mmio + hpriv->irq_cause_ofs),
- readl(mmio + hpriv->irq_mask_ofs));
+ if (!printed_version++)
+ dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
-done:
- return rc;
+ /*
+ * Simple resource validation ..
+ */
+ if (unlikely(pdev->num_resources != 2)) {
+ dev_err(&pdev->dev, "invalid number of resources\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Get the register base first
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -EINVAL;
+
+ /* allocate host */
+ mv_platform_data = pdev->dev.platform_data;
+ n_ports = mv_platform_data->n_ports;
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+ hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+
+ if (!host || !hpriv)
+ return -ENOMEM;
+ host->private_data = hpriv;
+ hpriv->n_ports = n_ports;
+
+ host->iomap = NULL;
+ hpriv->base = ioremap(res->start, res->end - res->start + 1);
+ hpriv->base -= MV_SATAHC0_REG_BASE;
+
+ /* initialize adapter */
+ rc = mv_init_host(host, chip_soc);
+ if (rc)
+ return rc;
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
+ host->n_ports);
+
+ return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
+ IRQF_SHARED, &mv6_sht);
+}
+
+/*
+ *
+ * mv_platform_remove - unplug a platform interface
+ * @pdev: platform device
+ *
+ * A platform bus SATA device has been unplugged. Perform the needed
+ * cleanup. Also called on module unload for any active devices.
+ */
+static int __devexit mv_platform_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct mv_host_priv *hpriv = host->private_data;
+ void __iomem *base = hpriv->base;
+
+ ata_host_detach(host);
+ iounmap(base);
+ return 0;
}
+static struct platform_driver mv_platform_driver = {
+ .probe = mv_platform_probe,
+ .remove = __devexit_p(mv_platform_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+
#ifdef CONFIG_PCI
-static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int mv_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
static struct pci_driver mv_pci_driver = {
.name = DRV_NAME,
.id_table = mv_pci_tbl,
- .probe = mv_init_one,
+ .probe = mv_pci_init_one,
.remove = ata_pci_remove_one,
};
@@ -2828,14 +3089,15 @@ static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
}
/**
- * mv_init_one - handle a positive probe of a Marvell host
+ * mv_pci_init_one - handle a positive probe of a PCI Marvell host
* @pdev: PCI device found
* @ent: PCI device ID entry for the matched host
*
* LOCKING:
* Inherited from caller.
*/
-static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int mv_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static int printed_version;
unsigned int board_idx = (unsigned int)ent->driver_data;
@@ -2855,6 +3117,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!host || !hpriv)
return -ENOMEM;
host->private_data = hpriv;
+ hpriv->n_ports = n_ports;
/* acquire resources */
rc = pcim_enable_device(pdev);
@@ -2867,6 +3130,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
+ hpriv->base = host->iomap[MV_PRIMARY_BAR];
rc = pci_go_64(pdev);
if (rc)
@@ -2895,11 +3159,22 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif
+static int mv_platform_probe(struct platform_device *pdev);
+static int __devexit mv_platform_remove(struct platform_device *pdev);
+
static int __init mv_init(void)
{
int rc = -ENODEV;
#ifdef CONFIG_PCI
rc = pci_register_driver(&mv_pci_driver);
+ if (rc < 0)
+ return rc;
+#endif
+ rc = platform_driver_register(&mv_platform_driver);
+
+#ifdef CONFIG_PCI
+ if (rc < 0)
+ pci_unregister_driver(&mv_pci_driver);
#endif
return rc;
}
@@ -2909,6 +3184,7 @@ static void __exit mv_exit(void)
#ifdef CONFIG_PCI
pci_unregister_driver(&mv_pci_driver);
#endif
+ platform_driver_unregister(&mv_platform_driver);
}
MODULE_AUTHOR("Brett Russ");
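The platform-bus path added above expects SoC board code to register a matching
platform device. A hedged sketch of what that registration might look like follows;
it is not part of the patch. The register base, window size and IRQ number are
invented, only the n_ports field of mv_sata_platform_data is taken from the driver
code above, and the device name is assumed to match the driver's DRV_NAME ("sata_mv").

/* Sketch only: board-support code instantiating the platform-bus SATA
 * controller. The driver requires exactly two resources (one MEM, one IRQ)
 * and platform_data providing n_ports.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>

#define BOARD_SATA_BASE	0xf1080000	/* placeholder register base */
#define BOARD_SATA_IRQ	21		/* placeholder interrupt line */

static struct mv_sata_platform_data board_sata_data = {
	.n_ports	= 2,
};

static struct resource board_sata_resources[] = {
	{
		.start	= BOARD_SATA_BASE,
		.end	= BOARD_SATA_BASE + 0x5000 - 1,	/* window size is a guess */
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= BOARD_SATA_IRQ,
		.end	= BOARD_SATA_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_sata_device = {
	.name		= "sata_mv",	/* must match the driver's DRV_NAME */
	.id		= 0,
	.dev		= {
		.platform_data	= &board_sata_data,
	},
	.num_resources	= ARRAY_SIZE(board_sata_resources),
	.resource	= board_sata_resources,
};

/* called from the board's init code */
static void __init board_sata_init(void)
{
	platform_device_register(&board_sata_device);
}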
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index bfe92a43cf89..ed5473bf7a0a 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -247,6 +247,7 @@ struct nv_adma_port_priv {
void __iomem *ctl_block;
void __iomem *gen_block;
void __iomem *notifier_clear_block;
+ u64 adma_dma_mask;
u8 flags;
int last_issue_ncq;
};
@@ -715,9 +716,10 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
+ struct nv_adma_port_priv *port0, *port1;
+ struct scsi_device *sdev0, *sdev1;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u64 bounce_limit;
- unsigned long segment_boundary;
+ unsigned long segment_boundary, flags;
unsigned short sg_tablesize;
int rc;
int adma_enable;
@@ -729,6 +731,8 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
/* Not a proper libata device, ignore */
return rc;
+ spin_lock_irqsave(ap->lock, flags);
+
if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
/*
* NVIDIA reports that ADMA mode does not support ATAPI commands.
@@ -737,7 +741,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
* Restrict DMA parameters as required by the legacy interface
* when an ATAPI device is connected.
*/
- bounce_limit = ATA_DMA_MASK;
segment_boundary = ATA_DMA_BOUNDARY;
/* Subtract 1 since an extra entry may be needed for padding, see
libata-scsi.c */
@@ -748,7 +751,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
adma_enable = 0;
nv_adma_register_mode(ap);
} else {
- bounce_limit = *ap->dev->dma_mask;
segment_boundary = NV_ADMA_DMA_BOUNDARY;
sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
adma_enable = 1;
@@ -774,12 +776,49 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
if (current_reg != new_reg)
pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
- blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
+ port0 = ap->host->ports[0]->private_data;
+ port1 = ap->host->ports[1]->private_data;
+ sdev0 = ap->host->ports[0]->link.device[0].sdev;
+ sdev1 = ap->host->ports[1]->link.device[0].sdev;
+ if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
+ (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
+ /* We have to set the DMA mask to 32-bit if either port is in
+ ATAPI mode, since they are on the same PCI device which is
+ used for DMA mapping. If we set the mask we also need to set
+ the bounce limit on both ports to ensure that the block
+ layer doesn't feed addresses that cause DMA mapping to
+ choke. If either SCSI device is not allocated yet, it's OK
+ since that port will discover its correct setting when it
+ does get allocated.
+ Note: Setting 32-bit mask should not fail. */
+ if (sdev0)
+ blk_queue_bounce_limit(sdev0->request_queue,
+ ATA_DMA_MASK);
+ if (sdev1)
+ blk_queue_bounce_limit(sdev1->request_queue,
+ ATA_DMA_MASK);
+
+ pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ } else {
+ /* This shouldn't fail as it was set to this value before */
+ pci_set_dma_mask(pdev, pp->adma_dma_mask);
+ if (sdev0)
+ blk_queue_bounce_limit(sdev0->request_queue,
+ pp->adma_dma_mask);
+ if (sdev1)
+ blk_queue_bounce_limit(sdev1->request_queue,
+ pp->adma_dma_mask);
+ }
+
blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
ata_port_printk(ap, KERN_INFO,
- "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
- (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
+ "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
+ (unsigned long long)*ap->host->dev->dma_mask,
+ segment_boundary, sg_tablesize);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
return rc;
}
@@ -1140,10 +1179,20 @@ static int nv_adma_port_start(struct ata_port *ap)
void *mem;
dma_addr_t mem_dma;
void __iomem *mmio;
+ struct pci_dev *pdev = to_pci_dev(dev);
u16 tmp;
VPRINTK("ENTER\n");
+ /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
+ pad buffers */
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
rc = ata_port_start(ap);
if (rc)
return rc;
@@ -1159,6 +1208,15 @@ static int nv_adma_port_start(struct ata_port *ap)
pp->notifier_clear_block = pp->gen_block +
NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
+ /* Now that the legacy PRD and padding buffer are allocated we can
+ safely raise the DMA mask to allocate the CPB/APRD table.
+ These are allowed to fail since we store the value that ends up
+ being used to set as the bounce limit in slave_config later if
+ needed. */
+ pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ pp->adma_dma_mask = *dev->dma_mask;
+
mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
&mem_dma, GFP_KERNEL);
if (!mem)
@@ -2417,12 +2475,6 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
hpriv->type = type;
host->private_data = hpriv;
- /* set 64bit dma masks, may fail */
- if (type == ADMA) {
- if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
- pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
- }
-
/* request and iomap NV_MMIO_BAR */
rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
if (rc)
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 3ef072ff319d..30caa0337190 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -30,8 +30,6 @@
* Hardware documentation available under NDA.
*
*
- * To-do list:
- * - VT6421 PATA support
*
*/
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 63e09c015ca0..c66637392bbc 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -5,7 +5,7 @@ obj-y := core.o sys.o bus.o dd.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o
obj-y += power/
-obj-$(CONFIG_HAS_DMA) += dma-mapping.o dmapool.o
+obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_ISA) += isa.o
obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b1727876182c..9c0070b5bd3e 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -423,10 +423,8 @@ struct kset *devices_kset;
int device_create_file(struct device *dev, struct device_attribute *attr)
{
int error = 0;
- if (get_device(dev)) {
+ if (dev)
error = sysfs_create_file(&dev->kobj, &attr->attr);
- put_device(dev);
- }
return error;
}
@@ -437,10 +435,8 @@ int device_create_file(struct device *dev, struct device_attribute *attr)
*/
void device_remove_file(struct device *dev, struct device_attribute *attr)
{
- if (get_device(dev)) {
+ if (dev)
sysfs_remove_file(&dev->kobj, &attr->attr);
- put_device(dev);
- }
}
/**
@@ -1144,25 +1140,11 @@ error:
}
EXPORT_SYMBOL_GPL(device_create);
-/**
- * find_device - finds a device that was created with device_create()
- * @class: pointer to the struct class that this device was registered with
- * @devt: the dev_t of the device that was previously registered
- */
-static struct device *find_device(struct class *class, dev_t devt)
+static int __match_devt(struct device *dev, void *data)
{
- struct device *dev = NULL;
- struct device *dev_tmp;
+ dev_t *devt = data;
- down(&class->sem);
- list_for_each_entry(dev_tmp, &class->devices, node) {
- if (dev_tmp->devt == devt) {
- dev = dev_tmp;
- break;
- }
- }
- up(&class->sem);
- return dev;
+ return dev->devt == *devt;
}
/**
@@ -1177,9 +1159,11 @@ void device_destroy(struct class *class, dev_t devt)
{
struct device *dev;
- dev = find_device(class, devt);
- if (dev)
+ dev = class_find_device(class, &devt, __match_devt);
+ if (dev) {
+ put_device(dev);
device_unregister(dev);
+ }
}
EXPORT_SYMBOL_GPL(device_destroy);
@@ -1203,9 +1187,11 @@ void destroy_suspended_device(struct class *class, dev_t devt)
{
struct device *dev;
- dev = find_device(class, devt);
- if (dev)
+ dev = class_find_device(class, &devt, __match_devt);
+ if (dev) {
device_pm_schedule_removal(dev);
+ put_device(dev);
+ }
}
EXPORT_SYMBOL_GPL(destroy_suspended_device);
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index c5885f5ce0ac..499b003f9278 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -110,7 +110,7 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
*
* Initialize and register the CPU device.
*/
-int __devinit register_cpu(struct cpu *cpu, int num)
+int __cpuinit register_cpu(struct cpu *cpu, int num)
{
int error;
cpu->node_id = cpu_to_node(num);
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
deleted file mode 100644
index b5034dc72a05..000000000000
--- a/drivers/base/dmapool.c
+++ /dev/null
@@ -1,481 +0,0 @@
-
-#include <linux/device.h>
-#include <linux/mm.h>
-#include <asm/io.h> /* Needed for i386 to build */
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/poison.h>
-#include <linux/sched.h>
-
-/*
- * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
- * small blocks are easily used by drivers for bus mastering controllers.
- * This should probably be sharing the guts of the slab allocator.
- */
-
-struct dma_pool { /* the pool */
- struct list_head page_list;
- spinlock_t lock;
- size_t blocks_per_page;
- size_t size;
- struct device *dev;
- size_t allocation;
- char name [32];
- wait_queue_head_t waitq;
- struct list_head pools;
-};
-
-struct dma_page { /* cacheable header for 'allocation' bytes */
- struct list_head page_list;
- void *vaddr;
- dma_addr_t dma;
- unsigned in_use;
- unsigned long bitmap [0];
-};
-
-#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
-
-static DEFINE_MUTEX (pools_lock);
-
-static ssize_t
-show_pools (struct device *dev, struct device_attribute *attr, char *buf)
-{
- unsigned temp;
- unsigned size;
- char *next;
- struct dma_page *page;
- struct dma_pool *pool;
-
- next = buf;
- size = PAGE_SIZE;
-
- temp = scnprintf(next, size, "poolinfo - 0.1\n");
- size -= temp;
- next += temp;
-
- mutex_lock(&pools_lock);
- list_for_each_entry(pool, &dev->dma_pools, pools) {
- unsigned pages = 0;
- unsigned blocks = 0;
-
- list_for_each_entry(page, &pool->page_list, page_list) {
- pages++;
- blocks += page->in_use;
- }
-
- /* per-pool info, no real statistics yet */
- temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
- pool->name,
- blocks, pages * pool->blocks_per_page,
- pool->size, pages);
- size -= temp;
- next += temp;
- }
- mutex_unlock(&pools_lock);
-
- return PAGE_SIZE - size;
-}
-static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);
-
-/**
- * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
- * @name: name of pool, for diagnostics
- * @dev: device that will be doing the DMA
- * @size: size of the blocks in this pool.
- * @align: alignment requirement for blocks; must be a power of two
- * @allocation: returned blocks won't cross this boundary (or zero)
- * Context: !in_interrupt()
- *
- * Returns a dma allocation pool with the requested characteristics, or
- * null if one can't be created. Given one of these pools, dma_pool_alloc()
- * may be used to allocate memory. Such memory will all have "consistent"
- * DMA mappings, accessible by the device and its driver without using
- * cache flushing primitives. The actual size of blocks allocated may be
- * larger than requested because of alignment.
- *
- * If allocation is nonzero, objects returned from dma_pool_alloc() won't
- * cross that size boundary. This is useful for devices which have
- * addressing restrictions on individual DMA transfers, such as not crossing
- * boundaries of 4KBytes.
- */
-struct dma_pool *
-dma_pool_create (const char *name, struct device *dev,
- size_t size, size_t align, size_t allocation)
-{
- struct dma_pool *retval;
-
- if (align == 0)
- align = 1;
- if (size == 0)
- return NULL;
- else if (size < align)
- size = align;
- else if ((size % align) != 0) {
- size += align + 1;
- size &= ~(align - 1);
- }
-
- if (allocation == 0) {
- if (PAGE_SIZE < size)
- allocation = size;
- else
- allocation = PAGE_SIZE;
- // FIXME: round up for less fragmentation
- } else if (allocation < size)
- return NULL;
-
- if (!(retval = kmalloc_node (sizeof *retval, GFP_KERNEL, dev_to_node(dev))))
- return retval;
-
- strlcpy (retval->name, name, sizeof retval->name);
-
- retval->dev = dev;
-
- INIT_LIST_HEAD (&retval->page_list);
- spin_lock_init (&retval->lock);
- retval->size = size;
- retval->allocation = allocation;
- retval->blocks_per_page = allocation / size;
- init_waitqueue_head (&retval->waitq);
-
- if (dev) {
- int ret;
-
- mutex_lock(&pools_lock);
- if (list_empty (&dev->dma_pools))
- ret = device_create_file (dev, &dev_attr_pools);
- else
- ret = 0;
- /* note: not currently insisting "name" be unique */
- if (!ret)
- list_add (&retval->pools, &dev->dma_pools);
- else {
- kfree(retval);
- retval = NULL;
- }
- mutex_unlock(&pools_lock);
- } else
- INIT_LIST_HEAD (&retval->pools);
-
- return retval;
-}
-
-
-static struct dma_page *
-pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
-{
- struct dma_page *page;
- int mapsize;
-
- mapsize = pool->blocks_per_page;
- mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
- mapsize *= sizeof (long);
-
- page = kmalloc(mapsize + sizeof *page, mem_flags);
- if (!page)
- return NULL;
- page->vaddr = dma_alloc_coherent (pool->dev,
- pool->allocation,
- &page->dma,
- mem_flags);
- if (page->vaddr) {
- memset (page->bitmap, 0xff, mapsize); // bit set == free
-#ifdef CONFIG_DEBUG_SLAB
- memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
-#endif
- list_add (&page->page_list, &pool->page_list);
- page->in_use = 0;
- } else {
- kfree (page);
- page = NULL;
- }
- return page;
-}
-
-
-static inline int
-is_page_busy (int blocks, unsigned long *bitmap)
-{
- while (blocks > 0) {
- if (*bitmap++ != ~0UL)
- return 1;
- blocks -= BITS_PER_LONG;
- }
- return 0;
-}
-
-static void
-pool_free_page (struct dma_pool *pool, struct dma_page *page)
-{
- dma_addr_t dma = page->dma;
-
-#ifdef CONFIG_DEBUG_SLAB
- memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
-#endif
- dma_free_coherent (pool->dev, pool->allocation, page->vaddr, dma);
- list_del (&page->page_list);
- kfree (page);
-}
-
-
-/**
- * dma_pool_destroy - destroys a pool of dma memory blocks.
- * @pool: dma pool that will be destroyed
- * Context: !in_interrupt()
- *
- * Caller guarantees that no more memory from the pool is in use,
- * and that nothing will try to use the pool after this call.
- */
-void
-dma_pool_destroy (struct dma_pool *pool)
-{
- mutex_lock(&pools_lock);
- list_del (&pool->pools);
- if (pool->dev && list_empty (&pool->dev->dma_pools))
- device_remove_file (pool->dev, &dev_attr_pools);
- mutex_unlock(&pools_lock);
-
- while (!list_empty (&pool->page_list)) {
- struct dma_page *page;
- page = list_entry (pool->page_list.next,
- struct dma_page, page_list);
- if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
- if (pool->dev)
- dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n",
- pool->name, page->vaddr);
- else
- printk (KERN_ERR "dma_pool_destroy %s, %p busy\n",
- pool->name, page->vaddr);
- /* leak the still-in-use consistent memory */
- list_del (&page->page_list);
- kfree (page);
- } else
- pool_free_page (pool, page);
- }
-
- kfree (pool);
-}
-
-
-/**
- * dma_pool_alloc - get a block of consistent memory
- * @pool: dma pool that will produce the block
- * @mem_flags: GFP_* bitmask
- * @handle: pointer to dma address of block
- *
- * This returns the kernel virtual address of a currently unused block,
- * and reports its dma address through the handle.
- * If such a memory block can't be allocated, null is returned.
- */
-void *
-dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
-{
- unsigned long flags;
- struct dma_page *page;
- int map, block;
- size_t offset;
- void *retval;
-
-restart:
- spin_lock_irqsave (&pool->lock, flags);
- list_for_each_entry(page, &pool->page_list, page_list) {
- int i;
- /* only cachable accesses here ... */
- for (map = 0, i = 0;
- i < pool->blocks_per_page;
- i += BITS_PER_LONG, map++) {
- if (page->bitmap [map] == 0)
- continue;
- block = ffz (~ page->bitmap [map]);
- if ((i + block) < pool->blocks_per_page) {
- clear_bit (block, &page->bitmap [map]);
- offset = (BITS_PER_LONG * map) + block;
- offset *= pool->size;
- goto ready;
- }
- }
- }
- if (!(page = pool_alloc_page (pool, GFP_ATOMIC))) {
- if (mem_flags & __GFP_WAIT) {
- DECLARE_WAITQUEUE (wait, current);
-
- __set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue (&pool->waitq, &wait);
- spin_unlock_irqrestore (&pool->lock, flags);
-
- schedule_timeout (POOL_TIMEOUT_JIFFIES);
-
- remove_wait_queue (&pool->waitq, &wait);
- goto restart;
- }
- retval = NULL;
- goto done;
- }
-
- clear_bit (0, &page->bitmap [0]);
- offset = 0;
-ready:
- page->in_use++;
- retval = offset + page->vaddr;
- *handle = offset + page->dma;
-#ifdef CONFIG_DEBUG_SLAB
- memset (retval, POOL_POISON_ALLOCATED, pool->size);
-#endif
-done:
- spin_unlock_irqrestore (&pool->lock, flags);
- return retval;
-}
-
-
-static struct dma_page *
-pool_find_page (struct dma_pool *pool, dma_addr_t dma)
-{
- unsigned long flags;
- struct dma_page *page;
-
- spin_lock_irqsave (&pool->lock, flags);
- list_for_each_entry(page, &pool->page_list, page_list) {
- if (dma < page->dma)
- continue;
- if (dma < (page->dma + pool->allocation))
- goto done;
- }
- page = NULL;
-done:
- spin_unlock_irqrestore (&pool->lock, flags);
- return page;
-}
-
-
-/**
- * dma_pool_free - put block back into dma pool
- * @pool: the dma pool holding the block
- * @vaddr: virtual address of block
- * @dma: dma address of block
- *
- * Caller promises neither device nor driver will again touch this block
- * unless it is first re-allocated.
- */
-void
-dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
-{
- struct dma_page *page;
- unsigned long flags;
- int map, block;
-
- if ((page = pool_find_page(pool, dma)) == NULL) {
- if (pool->dev)
- dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n",
- pool->name, vaddr, (unsigned long) dma);
- else
- printk (KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
- pool->name, vaddr, (unsigned long) dma);
- return;
- }
-
- block = dma - page->dma;
- block /= pool->size;
- map = block / BITS_PER_LONG;
- block %= BITS_PER_LONG;
-
-#ifdef CONFIG_DEBUG_SLAB
- if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
- if (pool->dev)
- dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
- pool->name, vaddr, (unsigned long long) dma);
- else
- printk (KERN_ERR "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
- pool->name, vaddr, (unsigned long long) dma);
- return;
- }
- if (page->bitmap [map] & (1UL << block)) {
- if (pool->dev)
- dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
- pool->name, (unsigned long long)dma);
- else
- printk (KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
- pool->name, (unsigned long long)dma);
- return;
- }
- memset (vaddr, POOL_POISON_FREED, pool->size);
-#endif
-
- spin_lock_irqsave (&pool->lock, flags);
- page->in_use--;
- set_bit (block, &page->bitmap [map]);
- if (waitqueue_active (&pool->waitq))
- wake_up (&pool->waitq);
- /*
- * Resist a temptation to do
- * if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
- * Better have a few empty pages hang around.
- */
- spin_unlock_irqrestore (&pool->lock, flags);
-}
-
-/*
- * Managed DMA pool
- */
-static void dmam_pool_release(struct device *dev, void *res)
-{
- struct dma_pool *pool = *(struct dma_pool **)res;
-
- dma_pool_destroy(pool);
-}
-
-static int dmam_pool_match(struct device *dev, void *res, void *match_data)
-{
- return *(struct dma_pool **)res == match_data;
-}
-
-/**
- * dmam_pool_create - Managed dma_pool_create()
- * @name: name of pool, for diagnostics
- * @dev: device that will be doing the DMA
- * @size: size of the blocks in this pool.
- * @align: alignment requirement for blocks; must be a power of two
- * @allocation: returned blocks won't cross this boundary (or zero)
- *
- * Managed dma_pool_create(). DMA pool created with this function is
- * automatically destroyed on driver detach.
- */
-struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
- size_t size, size_t align, size_t allocation)
-{
- struct dma_pool **ptr, *pool;
-
- ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
- if (pool)
- devres_add(dev, ptr);
- else
- devres_free(ptr);
-
- return pool;
-}
-
-/**
- * dmam_pool_destroy - Managed dma_pool_destroy()
- * @pool: dma pool that will be destroyed
- *
- * Managed dma_pool_destroy().
- */
-void dmam_pool_destroy(struct dma_pool *pool)
-{
- struct device *dev = pool->dev;
-
- dma_pool_destroy(pool);
- WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
-}
-
-EXPORT_SYMBOL (dma_pool_create);
-EXPORT_SYMBOL (dma_pool_destroy);
-EXPORT_SYMBOL (dma_pool_alloc);
-EXPORT_SYMBOL (dma_pool_free);
-EXPORT_SYMBOL (dmam_pool_create);
-EXPORT_SYMBOL (dmam_pool_destroy);
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index a35f04121a00..ba75184c653c 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -97,10 +97,9 @@ int driver_create_file(struct device_driver *drv,
struct driver_attribute *attr)
{
int error;
- if (get_driver(drv)) {
+ if (drv)
error = sysfs_create_file(&drv->p->kobj, &attr->attr);
- put_driver(drv);
- } else
+ else
error = -EINVAL;
return error;
}
@@ -114,10 +113,8 @@ EXPORT_SYMBOL_GPL(driver_create_file);
void driver_remove_file(struct device_driver *drv,
struct driver_attribute *attr)
{
- if (get_driver(drv)) {
+ if (drv)
sysfs_remove_file(&drv->p->kobj, &attr->attr);
- put_driver(drv);
- }
}
EXPORT_SYMBOL_GPL(driver_remove_file);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 200ed5fafd50..bdc03f7e8424 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -129,6 +129,7 @@ void device_pm_schedule_removal(struct device *dev)
list_move_tail(&dev->power.entry, &dpm_destroy);
mutex_unlock(&dpm_list_mtx);
}
+EXPORT_SYMBOL_GPL(device_pm_schedule_removal);
/**
* pm_sleep_lock - mutual exclusion for registration and suspend
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 6f0dfca8ebdd..e32d3bdb92c1 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -13,7 +13,6 @@ static inline struct device *to_device(struct list_head *entry)
extern void device_pm_add(struct device *);
extern void device_pm_remove(struct device *);
-extern void device_pm_schedule_removal(struct device *);
extern int pm_sleep_lock(void);
extern void pm_sleep_unlock(void);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index f2122855d4ec..64e5148d82bc 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -440,6 +440,7 @@ config VIRTIO_BLK
tristate "Virtio block driver (EXPERIMENTAL)"
depends on EXPERIMENTAL && VIRTIO
---help---
- This is the virtual block driver for lguest. Say Y or M.
+ This is the virtual block driver for virtio. It can be used with
+ lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
endif # BLK_DEV
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 94268c75d04f..424995073c6b 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -90,7 +90,7 @@ static struct atari_disk_type {
unsigned blocks; /* total number of blocks */
unsigned fdc_speed; /* fdc_speed setting */
unsigned stretch; /* track doubling ? */
-} disk_type[] = {
+} atari_disk_type[] = {
{ "d360", 9, 720, 0, 0}, /* 0: 360kB diskette */
{ "D360", 9, 720, 0, 1}, /* 1: 360kb in 720k or 1.2MB drive */
{ "D720", 9,1440, 0, 0}, /* 2: 720kb in 720k or 1.2MB drive */
@@ -658,7 +658,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
return -EINVAL;
}
type = minor2disktype[type].index;
- UDT = &disk_type[type];
+ UDT = &atari_disk_type[type];
}
if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) {
@@ -1064,7 +1064,7 @@ static void fd_rwsec_done1(int status)
searched for a non-existent sector! */
!(read_track && FDC_READ(FDCREG_SECTOR) > SUDT->spt)) {
if (Probing) {
- if (SUDT > disk_type) {
+ if (SUDT > atari_disk_type) {
if (SUDT[-1].blocks > ReqBlock) {
/* try another disk type */
SUDT--;
@@ -1082,7 +1082,7 @@ static void fd_rwsec_done1(int status)
} else {
/* record not found, but not probing. Maybe stretch wrong ? Restart probing */
if (SUD.autoprobe) {
- SUDT = disk_type + StartDiskType[DriveType];
+ SUDT = atari_disk_type + StartDiskType[DriveType];
set_capacity(unit[SelectedDrive].disk,
SUDT->blocks);
Probing = 1;
@@ -1421,7 +1421,7 @@ repeat:
if (type == 0) {
if (!UDT) {
Probing = 1;
- UDT = disk_type + StartDiskType[DriveType];
+ UDT = atari_disk_type + StartDiskType[DriveType];
set_capacity(floppy->disk, UDT->blocks);
UD.autoprobe = 1;
}
@@ -1439,7 +1439,7 @@ repeat:
goto repeat;
}
type = minor2disktype[type].index;
- UDT = &disk_type[type];
+ UDT = &atari_disk_type[type];
set_capacity(floppy->disk, UDT->blocks);
UD.autoprobe = 0;
}
@@ -1505,7 +1505,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
if (minor2disktype[type].drive_types > DriveType)
return -ENODEV;
type = minor2disktype[type].index;
- dtp = &disk_type[type];
+ dtp = &atari_disk_type[type];
if (UD.flags & FTD_MSG)
printk (KERN_ERR "floppy%d: found dtp %p name %s!\n",
drive, dtp, dtp->name);
@@ -1576,7 +1576,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
continue;
}
setidx = minor2disktype[settype].index;
- dtp = &disk_type[setidx];
+ dtp = &atari_disk_type[setidx];
/* found matching entry ?? */
if ( dtp->blocks == setprm.size
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 855ce8e5efba..9715be3f2487 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2630,12 +2630,14 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDB[8] = creq->nr_sectors & 0xff;
c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
} else {
+ u32 upper32 = upper_32_bits(start_blk);
+
c->Request.CDBLen = 16;
c->Request.CDB[1]= 0;
- c->Request.CDB[2]= (start_blk >> 56) & 0xff; //MSB
- c->Request.CDB[3]= (start_blk >> 48) & 0xff;
- c->Request.CDB[4]= (start_blk >> 40) & 0xff;
- c->Request.CDB[5]= (start_blk >> 32) & 0xff;
+ c->Request.CDB[2]= (upper32 >> 24) & 0xff; //MSB
+ c->Request.CDB[3]= (upper32 >> 16) & 0xff;
+ c->Request.CDB[4]= (upper32 >> 8) & 0xff;
+ c->Request.CDB[5]= upper32 & 0xff;
c->Request.CDB[6]= (start_blk >> 24) & 0xff;
c->Request.CDB[7]= (start_blk >> 16) & 0xff;
c->Request.CDB[8]= (start_blk >> 8) & 0xff;
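
The hunk above switches the 16-byte CDB to take the high half of the start block through upper_32_bits() rather than shifting by 32 or more, which is undefined when sector_t is only 32 bits wide. A minimal userspace sketch of the same byte packing (not the driver itself; upper32() stands in for the kernel's upper_32_bits() macro):

#include <stdint.h>
#include <stdio.h>

/* Mimics upper_32_bits(): take the high half without ever shifting a
 * possibly-32-bit sector number by 32 or more in one step. */
static uint32_t upper32(uint64_t v) { return (uint32_t)(v >> 16 >> 16); }

static void pack_lba16(uint8_t cdb[16], uint64_t start_blk)
{
	uint32_t hi = upper32(start_blk);
	cdb[2] = (hi >> 24) & 0xff;	/* MSB */
	cdb[3] = (hi >> 16) & 0xff;
	cdb[4] = (hi >> 8) & 0xff;
	cdb[5] = hi & 0xff;
	cdb[6] = (start_blk >> 24) & 0xff;
	cdb[7] = (start_blk >> 16) & 0xff;
	cdb[8] = (start_blk >> 8) & 0xff;
	cdb[9] = start_blk & 0xff;
}

int main(void)
{
	uint8_t cdb[16] = {0};
	pack_lba16(cdb, 0x123456789abcdef0ULL);
	for (int i = 2; i <= 9; i++)
		printf("CDB[%d] = 0x%02x\n", i, cdb[i]);
	return 0;
}
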
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 63ee6c076cb3..55178e9973a0 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -1453,7 +1453,7 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
rc = sendcmd(CCISS_RESET_MSG, ctlr, NULL, 0, 2, 0, 0,
(unsigned char *) &cmd_in_trouble->Header.LUN.LunAddrBytes[0],
TYPE_MSG);
- /* sendcmd turned off interrputs on the board, turn 'em back on. */
+ /* sendcmd turned off interrupts on the board, turn 'em back on. */
(*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
if (rc == 0)
return SUCCESS;
@@ -1483,7 +1483,7 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
0, 2, 0, 0,
(unsigned char *) &cmd_to_abort->Header.LUN.LunAddrBytes[0],
TYPE_MSG);
- /* sendcmd turned off interrputs on the board, turn 'em back on. */
+ /* sendcmd turned off interrupts on the board, turn 'em back on. */
(*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
if (rc == 0)
return SUCCESS;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index b8af22e610df..91ebb007416c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -973,6 +973,10 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
lo->transfer = xfer->transfer;
lo->ioctl = xfer->ioctl;
+ if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
+ (info->lo_flags & LO_FLAGS_AUTOCLEAR))
+ lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;
+
lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
lo->lo_init[0] = info->lo_init[0];
lo->lo_init[1] = info->lo_init[1];
@@ -1331,6 +1335,10 @@ static int lo_release(struct inode *inode, struct file *file)
mutex_lock(&lo->lo_ctl_mutex);
--lo->lo_refcnt;
+
+ if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) && !lo->lo_refcnt)
+ loop_clr_fd(lo, inode->i_bdev);
+
mutex_unlock(&lo->lo_ctl_mutex);
return 0;
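
The two loop.c hunks above let userspace toggle LO_FLAGS_AUTOCLEAR through loop_set_status() and then tear the device down automatically on the last release. A small sketch of the flag-sync idiom from the first hunk, with an assumed flag value used purely for illustration: the XOR fires only when the two flag words disagree on that one bit, so every other bit is preserved.

#include <stdio.h>

#define LO_FLAGS_AUTOCLEAR 4	/* assumed value, for the sketch only */

/* Copy just the AUTOCLEAR bit from the caller-supplied flags into the
 * device flags, leaving all other bits untouched. */
static unsigned int sync_autoclear(unsigned int dev_flags, unsigned int info_flags)
{
	if ((dev_flags & LO_FLAGS_AUTOCLEAR) != (info_flags & LO_FLAGS_AUTOCLEAR))
		dev_flags ^= LO_FLAGS_AUTOCLEAR;
	return dev_flags;
}

int main(void)
{
	printf("%u\n", sync_autoclear(0, LO_FLAGS_AUTOCLEAR));	/* 4: bit turned on */
	printf("%u\n", sync_autoclear(5, 0));			/* 1: bit turned off */
	return 0;
}
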
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 76096cad798f..8b9549ab4a4e 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -660,7 +660,7 @@ static int pt_open(struct inode *inode, struct file *file)
pt_identify(tape);
err = -ENODEV;
- if (!tape->flags & PT_MEDIA)
+ if (!(tape->flags & PT_MEDIA))
goto out;
err = -EROFS;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index e9de1712e5a0..674cd66dcaba 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2212,11 +2212,11 @@ static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed)
return ret;
}
- if (!buf[6] & 0x40) {
+ if (!(buf[6] & 0x40)) {
printk(DRIVER_NAME": Disc type is not CD-RW\n");
return 1;
}
- if (!buf[6] & 0x4) {
+ if (!(buf[6] & 0x4)) {
printk(DRIVER_NAME": A1 values on media are not valid, maybe not CDRW?\n");
return 1;
}
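
Both pktcdvd hunks (and the pt.c hunk before them) fix the same operator-precedence slip: '!' binds tighter than '&', so '!buf[6] & 0x40' evaluates as '(!buf[6]) & 0x40' and is false whenever buf[6] is non-zero. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned char b = 0x40;			/* CD-RW bit set */

	printf("buggy:   %d\n", !b & 0x40);	/* 0, even though the bit is set */
	printf("correct: %d\n", !(b & 0x40));	/* 0: bit set, test correctly false */

	b = 0x00;				/* bit clear */
	printf("buggy:   %d\n", !b & 0x40);	/* still 0: 1 & 0x40 == 0 */
	printf("correct: %d\n", !(b & 0x40));	/* 1: bit clear, test correctly true */
	return 0;
}
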
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 82f4eecc8699..06e23be70904 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -56,6 +56,7 @@
#include <linux/backing-dev.h>
#include <linux/blkpg.h>
#include <linux/writeback.h>
+#include <linux/log2.h>
#include <asm/uaccess.h>
@@ -450,7 +451,7 @@ static int __init rd_init(void)
err = -ENOMEM;
if (rd_blocksize > PAGE_SIZE || rd_blocksize < 512 ||
- (rd_blocksize & (rd_blocksize-1))) {
+ !is_power_of_2(rd_blocksize)) {
printk("RAMDISK: wrong blocksize %d, reverting to defaults\n",
rd_blocksize);
rd_blocksize = BLOCK_SIZE;
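
The rd_init() hunk replaces the open-coded '(n & (n - 1))' test with is_power_of_2() from <linux/log2.h>; the check itself is unchanged. A quick userspace equivalent of that test:

#include <stdio.h>
#include <stdbool.h>

/* A power of two has exactly one bit set, so n & (n - 1) clears it
 * and yields zero; zero itself is rejected explicitly. */
static bool power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned long sizes[] = { 512, 1024, 1536, 4096 };
	for (int i = 0; i < 4; i++)
		printf("%lu -> %s\n", sizes[i], power_of_2(sizes[i]) ? "ok" : "rejected");
	return 0;
}
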
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 924ddd8bccd2..3b1a68d6eddb 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -7,8 +7,10 @@
#include <linux/scatterlist.h>
#define VIRTIO_MAX_SG (3+MAX_PHYS_SEGMENTS)
+#define PART_BITS 4
+
+static int major, index;
-static unsigned char virtblk_index = 'a';
struct virtio_blk
{
spinlock_t lock;
@@ -36,7 +38,7 @@ struct virtblk_req
struct virtio_blk_inhdr in_hdr;
};
-static bool blk_done(struct virtqueue *vq)
+static void blk_done(struct virtqueue *vq)
{
struct virtio_blk *vblk = vq->vdev->priv;
struct virtblk_req *vbr;
@@ -65,7 +67,6 @@ static bool blk_done(struct virtqueue *vq)
/* In case queue is stopped waiting for more buffers. */
blk_start_queue(vblk->disk->queue);
spin_unlock_irqrestore(&vblk->lock, flags);
- return true;
}
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
@@ -153,20 +154,37 @@ static int virtblk_ioctl(struct inode *inode, struct file *filp,
(void __user *)data);
}
+/* We provide getgeo only to please some old bootloader/partitioning tools */
+static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
+{
+ /* some standard values, similar to sd */
+ geo->heads = 1 << 6;
+ geo->sectors = 1 << 5;
+ geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+ return 0;
+}
+
static struct block_device_operations virtblk_fops = {
- .ioctl = virtblk_ioctl,
- .owner = THIS_MODULE,
+ .ioctl = virtblk_ioctl,
+ .owner = THIS_MODULE,
+ .getgeo = virtblk_getgeo,
};
+static int index_to_minor(int index)
+{
+ return index << PART_BITS;
+}
+
static int virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
- int err, major;
- void *token;
- unsigned int len;
+ int err;
u64 cap;
u32 v;
+ if (index_to_minor(index) >= 1 << MINORBITS)
+ return -ENOSPC;
+
vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
if (!vblk) {
err = -ENOMEM;
@@ -178,7 +196,7 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->vdev = vdev;
/* We expect one virtqueue, for output. */
- vblk->vq = vdev->config->find_vq(vdev, blk_done);
+ vblk->vq = vdev->config->find_vq(vdev, 0, blk_done);
if (IS_ERR(vblk->vq)) {
err = PTR_ERR(vblk->vq);
goto out_free_vblk;
@@ -190,17 +208,11 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_free_vq;
}
- major = register_blkdev(0, "virtblk");
- if (major < 0) {
- err = major;
- goto out_mempool;
- }
-
/* FIXME: How many partitions? How long is a piece of string? */
- vblk->disk = alloc_disk(1 << 4);
+ vblk->disk = alloc_disk(1 << PART_BITS);
if (!vblk->disk) {
err = -ENOMEM;
- goto out_unregister_blkdev;
+ goto out_mempool;
}
vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
@@ -209,22 +221,32 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_put_disk;
}
- sprintf(vblk->disk->disk_name, "vd%c", virtblk_index++);
+ if (index < 26) {
+ sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
+ } else if (index < (26 + 1) * 26) {
+ sprintf(vblk->disk->disk_name, "vd%c%c",
+ 'a' + index / 26 - 1, 'a' + index % 26);
+ } else {
+ const unsigned int m1 = (index / 26 - 1) / 26 - 1;
+ const unsigned int m2 = (index / 26 - 1) % 26;
+ const unsigned int m3 = index % 26;
+ sprintf(vblk->disk->disk_name, "vd%c%c%c",
+ 'a' + m1, 'a' + m2, 'a' + m3);
+ }
+
vblk->disk->major = major;
- vblk->disk->first_minor = 0;
+ vblk->disk->first_minor = index_to_minor(index);
vblk->disk->private_data = vblk;
vblk->disk->fops = &virtblk_fops;
+ index++;
/* If barriers are supported, tell block layer that queue is ordered */
- token = vdev->config->find(vdev, VIRTIO_CONFIG_BLK_F, &len);
- if (virtio_use_bit(vdev, token, len, VIRTIO_BLK_F_BARRIER))
+ if (vdev->config->feature(vdev, VIRTIO_BLK_F_BARRIER))
blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);
- err = virtio_config_val(vdev, VIRTIO_CONFIG_BLK_F_CAPACITY, &cap);
- if (err) {
- dev_err(&vdev->dev, "Bad/missing capacity in config\n");
- goto out_cleanup_queue;
- }
+ /* Host must always specify the capacity. */
+ __virtio_config_val(vdev, offsetof(struct virtio_blk_config, capacity),
+ &cap);
/* If capacity is too big, truncate with warning. */
if ((sector_t)cap != cap) {
@@ -234,31 +256,25 @@ static int virtblk_probe(struct virtio_device *vdev)
}
set_capacity(vblk->disk, cap);
- err = virtio_config_val(vdev, VIRTIO_CONFIG_BLK_F_SIZE_MAX, &v);
+ /* Host can optionally specify maximum segment size and number of
+ * segments. */
+ err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
+ offsetof(struct virtio_blk_config, size_max),
+ &v);
if (!err)
blk_queue_max_segment_size(vblk->disk->queue, v);
- else if (err != -ENOENT) {
- dev_err(&vdev->dev, "Bad SIZE_MAX in config\n");
- goto out_cleanup_queue;
- }
- err = virtio_config_val(vdev, VIRTIO_CONFIG_BLK_F_SEG_MAX, &v);
+ err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
+ offsetof(struct virtio_blk_config, seg_max),
+ &v);
if (!err)
blk_queue_max_hw_segments(vblk->disk->queue, v);
- else if (err != -ENOENT) {
- dev_err(&vdev->dev, "Bad SEG_MAX in config\n");
- goto out_cleanup_queue;
- }
add_disk(vblk->disk);
return 0;
-out_cleanup_queue:
- blk_cleanup_queue(vblk->disk->queue);
out_put_disk:
put_disk(vblk->disk);
-out_unregister_blkdev:
- unregister_blkdev(major, "virtblk");
out_mempool:
mempool_destroy(vblk->pool);
out_free_vq:
@@ -274,12 +290,16 @@ static void virtblk_remove(struct virtio_device *vdev)
struct virtio_blk *vblk = vdev->priv;
int major = vblk->disk->major;
+ /* Nothing should be pending. */
BUG_ON(!list_empty(&vblk->reqs));
+
+ /* Stop all the virtqueues. */
+ vdev->config->reset(vdev);
+
blk_cleanup_queue(vblk->disk->queue);
put_disk(vblk->disk);
unregister_blkdev(major, "virtblk");
mempool_destroy(vblk->pool);
- /* There should be nothing in the queue now, so no need to shutdown */
vdev->config->del_vq(vblk->vq);
kfree(vblk);
}
@@ -299,11 +319,15 @@ static struct virtio_driver virtio_blk = {
static int __init init(void)
{
+ major = register_blkdev(0, "virtblk");
+ if (major < 0)
+ return major;
return register_virtio_driver(&virtio_blk);
}
static void __exit fini(void)
{
+ unregister_blkdev(major, "virtblk");
unregister_virtio_driver(&virtio_blk);
}
module_init(init);
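
For reference, the naming and minor-number scheme introduced above, pulled out into a standalone program. virtblk_name() copies the arithmetic from the hunk verbatim: indices 0-25 map to vda-vdz, 26-701 to vdaa-vdzz, larger indices to three letters, and each disk claims 1 << PART_BITS minors.

#include <stdio.h>

#define PART_BITS 4	/* from the patch: 16 minors (15 partitions) per disk */

/* buf must hold at least 6 bytes: "vd" + up to 3 letters + NUL. */
static void virtblk_name(int index, char *buf)
{
	if (index < 26) {
		sprintf(buf, "vd%c", 'a' + index % 26);
	} else if (index < (26 + 1) * 26) {
		sprintf(buf, "vd%c%c",
			'a' + index / 26 - 1, 'a' + index % 26);
	} else {
		const unsigned int m1 = (index / 26 - 1) / 26 - 1;
		const unsigned int m2 = (index / 26 - 1) % 26;
		const unsigned int m3 = index % 26;
		sprintf(buf, "vd%c%c%c", 'a' + m1, 'a' + m2, 'a' + m3);
	}
}

int main(void)
{
	char name[8];
	int samples[] = { 0, 25, 26, 701, 702 };
	for (int i = 0; i < 5; i++) {
		virtblk_name(samples[i], name);
		printf("index %3d -> %-6s first_minor %d\n",
		       samples[i], name, samples[i] << PART_BITS);
	}
	return 0;
}
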
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 78ebfffc77e3..4a7a059ebaf7 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1202,8 +1202,10 @@ static int __devexit ace_of_remove(struct of_device *op)
}
/* Match table for of_platform binding */
-static struct of_device_id __devinit ace_of_match[] = {
- { .compatible = "xilinx,xsysace", },
+static struct of_device_id ace_of_match[] __devinitdata = {
+ { .compatible = "xlnx,opb-sysace-1.00.b", },
+ { .compatible = "xlnx,opb-sysace-1.00.c", },
+ { .compatible = "xlnx,xps-sysace-1.00.a", },
{},
};
MODULE_DEVICE_TABLE(of, ace_of_match);
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 1375b5345a0a..3b28658f5a1f 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -423,6 +423,7 @@ static int bpa10x_send_frame(struct sk_buff *skb)
break;
default:
+ usb_free_urb(urb);
return -EILSEQ;
}
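
The one-line bpa10x change plugs a leak on the unsupported-packet path: the URB allocated earlier in bpa10x_send_frame() was never released before the -EILSEQ return. A generic userspace sketch of the pattern, with malloc/free standing in for usb_alloc_urb/usb_free_urb:

#include <stdlib.h>
#include <stdio.h>

/* Any object allocated before a switch on the packet type must also be
 * released on the default/unsupported branch, or each bad packet leaks
 * one allocation. */
static int send_frame(int pkt_type)
{
	char *buffer = malloc(64);
	if (!buffer)
		return -1;

	switch (pkt_type) {
	case 1:
	case 2:
		/* ... normally the buffer is queued and freed on completion ... */
		free(buffer);		/* stand-in for the completion path */
		return 0;
	default:
		free(buffer);		/* the added cleanup: no leak on -EILSEQ */
		return -2;
	}
}

int main(void)
{
	printf("%d %d\n", send_frame(1), send_frame(99));
	return 0;
}
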
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index a18f9b8c9e12..7703d6e06fd9 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -704,7 +704,7 @@ static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *
static int bt3c_config(struct pcmcia_device *link)
{
- static kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
+ static unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
bt3c_info_t *info = link->priv;
tuple_t tuple;
u_short buf[256];
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index b786f6187902..58630cc1eff2 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -162,10 +162,8 @@ static int btsdio_rx_packet(struct btsdio_data *data)
bt_cb(skb)->pkt_type = hdr[3];
err = hci_recv_frame(skb);
- if (err < 0) {
- kfree(skb);
+ if (err < 0)
return err;
- }
sdio_writeb(data->func, 0x00, REG_PC_RRT, NULL);
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 08f48d577aba..68d1d258e6a4 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -383,7 +383,7 @@ static void btuart_change_speed(btuart_info_t *info, unsigned int speed)
outb(lcr, iobase + UART_LCR); /* Set 8N1 */
outb(fcr, iobase + UART_FCR); /* Enable FIFO's */
- /* Turn on interrups */
+ /* Turn on interrupts */
outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
spin_unlock_irqrestore(&(info->lock), flags);
@@ -634,7 +634,7 @@ static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *
static int btuart_config(struct pcmcia_device *link)
{
- static kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
+ static unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
btuart_info_t *info = link->priv;
tuple_t tuple;
u_short buf[256];
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 98a9cdeaffb6..372c7ef633da 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -111,6 +111,7 @@ static struct usb_device_id blacklist_ids[] = {
{ USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE },
/* Broadcom BCM2035 */
+ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 },
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 47e5b40510cb..db259e60289b 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1206,25 +1206,26 @@ int check_for_audio_disc(struct cdrom_device_info * cdi,
return 0;
}
-/* Admittedly, the logic below could be performed in a nicer way. */
int cdrom_release(struct cdrom_device_info *cdi, struct file *fp)
{
struct cdrom_device_ops *cdo = cdi->ops;
int opened_for_data;
- cdinfo(CD_CLOSE, "entering cdrom_release\n");
+ cdinfo(CD_CLOSE, "entering cdrom_release\n");
if (cdi->use_count > 0)
cdi->use_count--;
- if (cdi->use_count == 0)
+
+ if (cdi->use_count == 0) {
cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name);
- if (cdi->use_count == 0)
cdrom_dvd_rw_close_write(cdi);
- if (cdi->use_count == 0 &&
- (cdo->capability & CDC_LOCK) && !keeplocked) {
- cdinfo(CD_CLOSE, "Unlocking door!\n");
- cdo->lock_door(cdi, 0);
+
+ if ((cdo->capability & CDC_LOCK) && !keeplocked) {
+ cdinfo(CD_CLOSE, "Unlocking door!\n");
+ cdo->lock_door(cdi, 0);
+ }
}
+
opened_for_data = !(cdi->options & CDO_USE_FFLAGS) ||
!(fp && fp->f_flags & O_NONBLOCK);
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 8473b9f1da96..cac06bc1754b 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -558,7 +558,7 @@ static struct cdrom_device_ops viocd_dops = {
.capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM
};
-static int __init find_capability(const char *type)
+static int find_capability(const char *type)
{
struct capability_entry *entry;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 466629594776..f01ac9a07bf5 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -194,17 +194,6 @@ config MOXA_INTELLIO
module will be called moxa.
config MOXA_SMARTIO
- tristate "Moxa SmartIO support (OBSOLETE)"
- depends on SERIAL_NONSTANDARD
- help
- Say Y here if you have a Moxa SmartIO multiport serial card.
-
- This driver can also be built as a module ( = code which can be
- inserted in and removed from the running kernel whenever you want).
- The module will be called mxser. If you want to do that, say M
- here.
-
-config MOXA_SMARTIO_NEW
tristate "Moxa SmartIO support v. 2.0"
depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA)
help
@@ -215,7 +204,7 @@ config MOXA_SMARTIO_NEW
changes finally resulting in PCI probing.
This driver can also be built as a module. The module will be called
- mxser_new. If you want to do that, say M here.
+ mxser. If you want to do that, say M here.
config ISI
tristate "Multi-Tech multiport card support (EXPERIMENTAL)"
@@ -276,7 +265,7 @@ config N_HDLC
config RISCOM8
tristate "SDL RISCom/8 card support"
- depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP
+ depends on SERIAL_NONSTANDARD
help
This is a driver for the SDL Communications RISCom/8 multiport card,
which gives you many serial ports. You would need something like
@@ -765,7 +754,7 @@ config JS_RTC
config SGI_DS1286
tristate "SGI DS1286 RTC support"
- depends on SGI_IP22
+ depends on SGI_HAS_DS1286
help
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -841,6 +830,16 @@ config DTLK
To compile this driver as a module, choose M here: the
module will be called dtlk.
+config XILINX_HWICAP
+ tristate "Xilinx HWICAP Support"
+ depends on XILINX_VIRTEX
+ help
+ This option enables support for the Xilinx Internal Configuration
+ Access Port (ICAP) driver. The ICAP is used on Xilinx Virtex
+ FPGA platforms to partially reconfigure the FPGA at runtime.
+
+ If unsure, say N.
+
config R3964
tristate "Siemens R3964 line discipline"
---help---
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 96fc01eddefe..5407b7615614 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_MOXA_INTELLIO) += moxa.o
obj-$(CONFIG_A2232) += ser_a2232.o generic_serial.o
obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_MOXA_SMARTIO) += mxser.o
-obj-$(CONFIG_MOXA_SMARTIO_NEW) += mxser_new.o
obj-$(CONFIG_COMPUTONE) += ip2/
obj-$(CONFIG_RISCOM8) += riscom8.o
obj-$(CONFIG_ISI) += isicom.o
@@ -77,6 +76,7 @@ obj-$(CONFIG_EFI_RTC) += efirtc.o
obj-$(CONFIG_SGI_DS1286) += ds1286.o
obj-$(CONFIG_SGI_IP27_RTC) += ip27-rtc.o
obj-$(CONFIG_DS1302) += ds1302.o
+obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/
ifeq ($(CONFIG_GENERIC_NVRAM),y)
obj-$(CONFIG_NVRAM) += generic_nvram.o
else
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index b83824c41329..c69f79598e47 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -117,7 +117,8 @@ struct agp_bridge_driver {
void (*free_by_type)(struct agp_memory *);
void *(*agp_alloc_page)(struct agp_bridge_data *);
void (*agp_destroy_page)(void *, int flags);
- int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
+ int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
+ void (*chipset_flush)(struct agp_bridge_data *);
};
struct agp_bridge_data {
@@ -235,6 +236,9 @@ struct agp_bridge_data {
#define I965_PGETBL_SIZE_512KB (0 << 1)
#define I965_PGETBL_SIZE_256KB (1 << 1)
#define I965_PGETBL_SIZE_128KB (2 << 1)
+#define I965_PGETBL_SIZE_1MB (3 << 1)
+#define I965_PGETBL_SIZE_2MB (4 << 1)
+#define I965_PGETBL_SIZE_1_5MB (5 << 1)
#define G33_PGETBL_SIZE_MASK (3 << 8)
#define G33_PGETBL_SIZE_1M (1 << 8)
#define G33_PGETBL_SIZE_2M (2 << 8)
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index aa8f3a39a704..e77c17838c8a 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -11,29 +11,28 @@
#include "agp.h"
-static struct page *alpha_core_agp_vm_nopage(struct vm_area_struct *vma,
- unsigned long address,
- int *type)
+static int alpha_core_agp_vm_fault(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
dma_addr_t dma_addr;
unsigned long pa;
struct page *page;
- dma_addr = address - vma->vm_start + agp->aperture.bus_base;
+ dma_addr = (unsigned long)vmf->virtual_address - vma->vm_start
+ + agp->aperture.bus_base;
pa = agp->ops->translate(agp, dma_addr);
if (pa == (unsigned long)-EINVAL)
- return NULL; /* no translation */
+ return VM_FAULT_SIGBUS; /* no translation */
/*
* Get the page, inc the use count, and return it
*/
page = virt_to_page(__va(pa));
get_page(page);
- if (type)
- *type = VM_FAULT_MINOR;
- return page;
+ vmf->page = page;
+ return 0;
}
static struct aper_size_info_fixed alpha_core_agp_sizes[] =
@@ -42,7 +41,7 @@ static struct aper_size_info_fixed alpha_core_agp_sizes[] =
};
struct vm_operations_struct alpha_core_agp_vm_ops = {
- .nopage = alpha_core_agp_vm_nopage,
+ .fault = alpha_core_agp_vm_fault,
};
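
The alpha-agp conversion follows the tree-wide move from ->nopage to ->fault: errors are now reported through the return value (VM_FAULT_SIGBUS) and the resolved page is handed back in vmf->page rather than as the return value. A userspace mock of that contract, with stub types standing in for the real ones from <linux/mm.h> and an assumed value for VM_FAULT_SIGBUS:

#include <stdio.h>
#include <stddef.h>

#define VM_FAULT_SIGBUS 2		/* assumed value, sketch only */

struct page { unsigned long pfn; };
struct vm_fault { void *virtual_address; struct page *page; };

static struct page aperture_page = { .pfn = 42 };

static int mock_fault(struct vm_fault *vmf)
{
	if (vmf->virtual_address == NULL)
		return VM_FAULT_SIGBUS;	/* no translation */
	vmf->page = &aperture_page;	/* caller consumes the page reference */
	return 0;
}

int main(void)
{
	struct vm_fault vmf = { .virtual_address = (void *)0x1000, .page = NULL };
	printf("fault -> %d, pfn %lu\n", mock_fault(&vmf), vmf.page->pfn);
	return 0;
}
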
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 1405a42585e1..87be46406daf 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -436,10 +436,6 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
return -ENODEV;
}
cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
- if (!cap_ptr) {
- pci_dev_put(gfxcard);
- continue;
- }
}
/* With so many variants of NVidia cards, it's simpler just
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 2720882e66fe..b1bdd015165c 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -43,7 +43,7 @@
* fix some real stupidity. It's only by chance we can bump
* past 0.99 at all due to some boolean logic error. */
#define AGPGART_VERSION_MAJOR 0
-#define AGPGART_VERSION_MINOR 102
+#define AGPGART_VERSION_MINOR 103
static const struct agp_version agp_current_version =
{
.major = AGPGART_VERSION_MAJOR,
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
index ecd4248861b9..39275794fe63 100644
--- a/drivers/char/agp/compat_ioctl.c
+++ b/drivers/char/agp/compat_ioctl.c
@@ -273,6 +273,10 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case AGPIOC_UNBIND32:
ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg);
break;
+
+ case AGPIOC_CHIPSET_FLUSH32:
+ ret_val = agpioc_chipset_flush_wrap(curr_priv);
+ break;
}
ioctl_out:
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
index 71939d637236..0c9678ac0371 100644
--- a/drivers/char/agp/compat_ioctl.h
+++ b/drivers/char/agp/compat_ioctl.h
@@ -39,6 +39,7 @@
#define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t)
#define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t)
#define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t)
+#define AGPIOC_CHIPSET_FLUSH32 _IO (AGPIOC_BASE, 10)
struct agp_info32 {
struct agp_version version; /* version of the driver */
@@ -101,5 +102,6 @@ void agp_free_memory_wrap(struct agp_memory *memory);
struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
struct agp_memory *agp_find_mem_by_key(int key);
struct agp_client *agp_find_client_by_pid(pid_t id);
+int agpioc_chipset_flush_wrap(struct agp_file_private *priv);
#endif /* _AGP_COMPAT_H */
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 7791e98de51c..55d7a82bd071 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -689,7 +689,7 @@ static int agp_open(struct inode *inode, struct file *file)
set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
priv->my_pid = current->pid;
- if ((current->uid == 0) || (current->suid == 0)) {
+ if (capable(CAP_SYS_RAWIO)) {
/* Root priv, can be controller */
set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
}
@@ -960,6 +960,13 @@ static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
return agp_unbind_memory(memory);
}
+int agpioc_chipset_flush_wrap(struct agp_file_private *priv)
+{
+ DBG("");
+ agp_flush_chipset(agp_bridge);
+ return 0;
+}
+
static int agp_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -1033,6 +1040,10 @@ static int agp_ioctl(struct inode *inode, struct file *file,
case AGPIOC_UNBIND:
ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
break;
+
+ case AGPIOC_CHIPSET_FLUSH:
+ ret_val = agpioc_chipset_flush_wrap(curr_priv);
+ break;
}
ioctl_out:
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 1a4674ce0c71..7484bc759c4c 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -80,6 +80,13 @@ static int agp_get_key(void)
return -1;
}
+void agp_flush_chipset(struct agp_bridge_data *bridge)
+{
+ if (bridge->driver->chipset_flush)
+ bridge->driver->chipset_flush(bridge);
+}
+EXPORT_SYMBOL(agp_flush_chipset);
+
/*
* Use kmalloc if possible for the page list. Otherwise fall back to
* vmalloc. This speeds things up and also saves memory for small AGP
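
agp_flush_chipset() added above is a plain optional-hook dispatcher: per-chipset drivers that need a flush set ->chipset_flush, everyone else leaves it NULL and the call is a no-op. A reduced userspace mock of that pattern (struct names shortened; not the real agp_bridge types):

#include <stdio.h>
#include <stddef.h>

struct bridge_driver { void (*chipset_flush)(void); };
struct bridge_data   { const struct bridge_driver *driver; };

static void i915_flush(void) { puts("i915: write flush page"); }

/* Core helper: call the hook only if the driver provides one. */
static void agp_flush_chipset(struct bridge_data *bridge)
{
	if (bridge->driver->chipset_flush)
		bridge->driver->chipset_flush();
}

int main(void)
{
	const struct bridge_driver i915 = { .chipset_flush = i915_flush };
	const struct bridge_driver old  = { .chipset_flush = NULL };
	struct bridge_data a = { &i915 }, b = { &old };

	agp_flush_chipset(&a);	/* prints */
	agp_flush_chipset(&b);	/* silently a no-op for legacy drivers */
	return 0;
}
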
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 189efb6ef970..eeea50a1d22a 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -14,8 +14,8 @@
#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
-#define PCI_DEVICE_ID_INTEL_82965G_1_HB 0x2980
-#define PCI_DEVICE_ID_INTEL_82965G_1_IG 0x2982
+#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
+#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
@@ -32,13 +32,24 @@
#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
+#define PCI_DEVICE_ID_INTEL_IGD_HB 0x2A40
+#define PCI_DEVICE_ID_INTEL_IGD_IG 0x2A42
+
+/* cover 915 and 945 variants */
+#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_1_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_HB)
#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
@@ -71,9 +82,11 @@ extern int agp_memory_reserved;
#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define I915_IFPADDR 0x60
/* Intel 965G registers */
#define I965_MSAC 0x62
+#define I965_IFPADDR 0x70
/* Intel 7505 registers */
#define INTEL_I7505_APSIZE 0x74
@@ -115,6 +128,13 @@ static struct _intel_private {
* popup and for the GTT.
*/
int gtt_entries; /* i830+ */
+ union {
+ void __iomem *i9xx_flush_page;
+ void *i8xx_flush_page;
+ };
+ struct page *i8xx_page;
+ struct resource ifp_resource;
+ int resource_valid;
} intel_private;
static int intel_i810_fetch_size(void)
@@ -204,7 +224,7 @@ static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
/* Exists to support ARGB cursors */
static void *i8xx_alloc_pages(void)
{
- struct page * page;
+ struct page *page;
page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
if (page == NULL)
@@ -433,7 +453,7 @@ static void intel_i830_init_gtt_entries(void)
static const int ddt[4] = { 0, 16, 32, 64 };
int size; /* reserved space (in kb) at the top of stolen memory */
- pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
if (IS_I965) {
u32 pgetbl_ctl;
@@ -453,6 +473,15 @@ static void intel_i830_init_gtt_entries(void)
case I965_PGETBL_SIZE_512KB:
size = 512;
break;
+ case I965_PGETBL_SIZE_1MB:
+ size = 1024;
+ break;
+ case I965_PGETBL_SIZE_2MB:
+ size = 2048;
+ break;
+ case I965_PGETBL_SIZE_1_5MB:
+ size = 1024 + 512;
+ break;
default:
printk(KERN_INFO PFX "Unknown page table size, "
"assuming 512KB\n");
@@ -523,26 +552,14 @@ static void intel_i830_init_gtt_entries(void)
break;
case I915_GMCH_GMS_STOLEN_48M:
/* Check it's really I915G */
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB ||
- IS_I965 || IS_G33)
+ if (IS_I915 || IS_I965 || IS_G33)
gtt_entries = MB(48) - KB(size);
else
gtt_entries = 0;
break;
case I915_GMCH_GMS_STOLEN_64M:
/* Check it's really I915G */
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB ||
- IS_I965 || IS_G33)
+ if (IS_I915 || IS_I965 || IS_G33)
gtt_entries = MB(64) - KB(size);
else
gtt_entries = 0;
@@ -575,6 +592,45 @@ static void intel_i830_init_gtt_entries(void)
intel_private.gtt_entries = gtt_entries;
}
+static void intel_i830_fini_flush(void)
+{
+ kunmap(intel_private.i8xx_page);
+ intel_private.i8xx_flush_page = NULL;
+ unmap_page_from_agp(intel_private.i8xx_page);
+
+ __free_page(intel_private.i8xx_page);
+ intel_private.i8xx_page = NULL;
+}
+
+static void intel_i830_setup_flush(void)
+{
+ /* return if we've already set the flush mechanism up */
+ if (intel_private.i8xx_page)
+ return;
+
+ intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+ if (!intel_private.i8xx_page)
+ return;
+
+ /* make page uncached */
+ map_page_into_agp(intel_private.i8xx_page);
+
+ intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
+ if (!intel_private.i8xx_flush_page)
+ intel_i830_fini_flush();
+}
+
+static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+{
+ unsigned int *pg = intel_private.i8xx_flush_page;
+ int i;
+
+ for (i = 0; i < 256; i += 2)
+ *(pg + i) = i;
+
+ wmb();
+}
+
/* The intel i830 automatically initializes the agp aperture during POST.
* Use the memory already set aside for in the GTT.
*/
@@ -590,10 +646,10 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
num_entries = size->num_entries;
agp_bridge->gatt_table_real = NULL;
- pci_read_config_dword(intel_private.pcidev,I810_MMADDR,&temp);
+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
temp &= 0xfff80000;
- intel_private.registers = ioremap(temp,128 * 4096);
+ intel_private.registers = ioremap(temp, 128 * 4096);
if (!intel_private.registers)
return -ENOMEM;
@@ -633,7 +689,7 @@ static int intel_i830_fetch_size(void)
return values[0].size;
}
- pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
@@ -657,12 +713,12 @@ static int intel_i830_configure(void)
current_size = A_SIZE_FIX(agp_bridge->current_size);
- pci_read_config_dword(intel_private.pcidev,I810_GMADDR,&temp);
+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
+ pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
@@ -675,6 +731,8 @@ static int intel_i830_configure(void)
}
global_cache_flush();
+
+ intel_i830_setup_flush();
return 0;
}
@@ -683,9 +741,10 @@ static void intel_i830_cleanup(void)
iounmap(intel_private.registers);
}
-static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int type)
+static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
{
- int i,j,num_entries;
+ int i, j, num_entries;
void *temp;
int ret = -EINVAL;
int mask_type;
@@ -697,10 +756,10 @@ static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int
num_entries = A_SIZE_FIX(temp)->num_entries;
if (pg_start < intel_private.gtt_entries) {
- printk (KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n",
- pg_start,intel_private.gtt_entries);
+ printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n",
+ pg_start, intel_private.gtt_entries);
- printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
+ printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n");
goto out_err;
}
@@ -738,8 +797,8 @@ out_err:
return ret;
}
-static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
- int type)
+static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
{
int i;
@@ -747,7 +806,7 @@ static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
return 0;
if (pg_start < intel_private.gtt_entries) {
- printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
+ printk(KERN_INFO PFX "Trying to disable local/stolen memory\n");
return -EINVAL;
}
@@ -760,7 +819,7 @@ static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
return 0;
}
-static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type)
+static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
{
if (type == AGP_PHYS_MEMORY)
return alloc_agpphysmem_i8xx(pg_count, type);
@@ -768,6 +827,95 @@ static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type)
return NULL;
}
+static int intel_alloc_chipset_flush_resource(void)
+{
+ int ret;
+ ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+ PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
+ pcibios_align_resource, agp_bridge->dev);
+
+ return ret;
+}
+
+static void intel_i915_setup_chipset_flush(void)
+{
+ int ret;
+ u32 temp;
+
+ pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
+ if (!(temp & 0x1)) {
+ intel_alloc_chipset_flush_resource();
+ intel_private.resource_valid = 1;
+ pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ temp &= ~1;
+
+ intel_private.resource_valid = 1;
+ intel_private.ifp_resource.start = temp;
+ intel_private.ifp_resource.end = temp + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+ /* some BIOSes reserve this area as a PnP resource, some don't */
+ if (ret)
+ intel_private.resource_valid = 0;
+ }
+}
+
+static void intel_i965_g33_setup_chipset_flush(void)
+{
+ u32 temp_hi, temp_lo;
+ int ret;
+
+ pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
+ pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
+
+ if (!(temp_lo & 0x1)) {
+
+ intel_alloc_chipset_flush_resource();
+
+ intel_private.resource_valid = 1;
+ pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
+ upper_32_bits(intel_private.ifp_resource.start));
+ pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ u64 l64;
+
+ temp_lo &= ~0x1;
+ l64 = ((u64)temp_hi << 32) | temp_lo;
+
+ intel_private.resource_valid = 1;
+ intel_private.ifp_resource.start = l64;
+ intel_private.ifp_resource.end = l64 + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+ /* some BIOSes reserve this area as a PnP resource, some don't */
+ if (ret)
+ intel_private.resource_valid = 0;
+ }
+}
+
+static void intel_i9xx_setup_flush(void)
+{
+ /* return if already configured */
+ if (intel_private.ifp_resource.start)
+ return;
+
+ /* setup a resource for this object */
+ intel_private.ifp_resource.name = "Intel Flush Page";
+ intel_private.ifp_resource.flags = IORESOURCE_MEM;
+
+ /* Setup chipset flush for 915 */
+ if (IS_I965 || IS_G33) {
+ intel_i965_g33_setup_chipset_flush();
+ } else {
+ intel_i915_setup_chipset_flush();
+ }
+
+ if (intel_private.ifp_resource.start) {
+ intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
+ if (!intel_private.i9xx_flush_page)
+ printk(KERN_INFO "unable to ioremap flush page - no chipset flushing");
+ }
+}
+
static int intel_i915_configure(void)
{
struct aper_size_info_fixed *current_size;
@@ -781,9 +929,9 @@ static int intel_i915_configure(void)
agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
+ pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
@@ -796,19 +944,34 @@ static int intel_i915_configure(void)
}
global_cache_flush();
+
+ intel_i9xx_setup_flush();
+
return 0;
}
static void intel_i915_cleanup(void)
{
+ if (intel_private.i9xx_flush_page)
+ iounmap(intel_private.i9xx_flush_page);
+ if (intel_private.resource_valid)
+ release_resource(&intel_private.ifp_resource);
+ intel_private.ifp_resource.start = 0;
+ intel_private.resource_valid = 0;
iounmap(intel_private.gtt);
iounmap(intel_private.registers);
}
-static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
- int type)
+static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
{
- int i,j,num_entries;
+ if (intel_private.i9xx_flush_page)
+ writel(1, intel_private.i9xx_flush_page);
+}
+
+static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i, j, num_entries;
void *temp;
int ret = -EINVAL;
int mask_type;
@@ -820,10 +983,10 @@ static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
num_entries = A_SIZE_FIX(temp)->num_entries;
if (pg_start < intel_private.gtt_entries) {
- printk (KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n",
- pg_start,intel_private.gtt_entries);
+ printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n",
+ pg_start, intel_private.gtt_entries);
- printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
+ printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n");
goto out_err;
}
@@ -861,8 +1024,8 @@ static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
return ret;
}
-static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
- int type)
+static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
{
int i;
@@ -870,13 +1033,13 @@ static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
return 0;
if (pg_start < intel_private.gtt_entries) {
- printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
+ printk(KERN_INFO PFX "Trying to disable local/stolen memory\n");
return -EINVAL;
}
- for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ for (i = pg_start; i < (mem->page_count + pg_start); i++)
writel(agp_bridge->scratch_page, intel_private.gtt+i);
- }
+
readl(intel_private.gtt+i-1);
agp_bridge->driver->tlb_flush(mem);
@@ -923,7 +1086,7 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
agp_bridge->gatt_table_real = NULL;
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
- pci_read_config_dword(intel_private.pcidev, I915_PTEADDR,&temp2);
+ pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
if (IS_G33)
gtt_map_size = 1024 * 1024; /* 1M on G33 */
@@ -933,7 +1096,7 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
temp &= 0xfff80000;
- intel_private.registers = ioremap(temp,128 * 4096);
+ intel_private.registers = ioremap(temp, 128 * 4096);
if (!intel_private.registers) {
iounmap(intel_private.gtt);
return -ENOMEM;
@@ -980,6 +1143,7 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
struct aper_size_info_fixed *size;
int num_entries;
u32 temp;
+ int gtt_offset, gtt_size;
size = agp_bridge->current_size;
page_order = size->page_order;
@@ -989,13 +1153,18 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
temp &= 0xfff00000;
- intel_private.gtt = ioremap((temp + (512 * 1024)) , 512 * 1024);
- if (!intel_private.gtt)
- return -ENOMEM;
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_HB)
+ gtt_offset = gtt_size = MB(2);
+ else
+ gtt_offset = gtt_size = KB(512);
+
+ intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
+ if (!intel_private.gtt)
+ return -ENOMEM;
- intel_private.registers = ioremap(temp,128 * 4096);
+ intel_private.registers = ioremap(temp, 128 * 4096);
if (!intel_private.registers) {
iounmap(intel_private.gtt);
return -ENOMEM;
@@ -1154,7 +1323,7 @@ static int intel_815_configure(void)
/* the Intel 815 chipset spec. says that bits 29-31 in the
* ATTBASE register are reserved -> try not to write them */
if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) {
- printk (KERN_EMERG PFX "gatt bus addr too high");
+ printk(KERN_EMERG PFX "gatt bus addr too high");
return -EINVAL;
}
@@ -1296,6 +1465,8 @@ static int intel_845_configure(void)
pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1));
/* clear any possible error conditions */
pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c);
+
+ intel_i830_setup_flush();
return 0;
}
@@ -1552,6 +1723,7 @@ static const struct agp_bridge_driver intel_830_driver = {
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i830_chipset_flush,
};
static const struct agp_bridge_driver intel_820_driver = {
@@ -1648,6 +1820,7 @@ static const struct agp_bridge_driver intel_845_driver = {
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
+ .chipset_flush = intel_i830_chipset_flush,
};
static const struct agp_bridge_driver intel_850_driver = {
@@ -1721,6 +1894,7 @@ static const struct agp_bridge_driver intel_915_driver = {
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
};
static const struct agp_bridge_driver intel_i965_driver = {
@@ -1746,6 +1920,7 @@ static const struct agp_bridge_driver intel_i965_driver = {
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
};
static const struct agp_bridge_driver intel_7505_driver = {
@@ -1795,6 +1970,7 @@ static const struct agp_bridge_driver intel_g33_driver = {
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
};
static int find_gmch(u16 device)
@@ -1804,7 +1980,7 @@ static int find_gmch(u16 device)
gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
- device, gmch_device);
+ device, gmch_device);
}
if (!gmch_device)
@@ -1867,7 +2043,7 @@ static const struct intel_driver_description {
NULL, &intel_915_driver },
{ PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965G_1_HB, PCI_DEVICE_ID_INTEL_82965G_1_IG, 0, "965G",
+ { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35",
NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q",
NULL, &intel_i965_driver },
@@ -1885,6 +2061,8 @@ static const struct intel_driver_description {
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
NULL, &intel_g33_driver },
+ { PCI_DEVICE_ID_INTEL_IGD_HB, PCI_DEVICE_ID_INTEL_IGD_IG, 0,
+ "Intel Integrated Graphics Device", NULL, &intel_i965_driver },
{ 0, 0, 0, NULL, NULL, NULL }
};
@@ -1924,7 +2102,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
if (intel_agp_chipsets[i].name == NULL) {
if (cap_ptr)
printk(KERN_WARNING PFX "Unsupported Intel chipset"
- "(device id: %04x)\n", pdev->device);
+ "(device id: %04x)\n", pdev->device);
agp_put_bridge(bridge);
return -ENODEV;
}
@@ -1937,7 +2115,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
intel_agp_chipsets[i].gmch_chip_id);
agp_put_bridge(bridge);
return -ENODEV;
- }
+ }
bridge->dev = pdev;
bridge->capndx = cap_ptr;
@@ -2067,7 +2245,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
- ID(PCI_DEVICE_ID_INTEL_82965G_1_HB),
+ ID(PCI_DEVICE_ID_INTEL_82G35_HB),
ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
ID(PCI_DEVICE_ID_INTEL_82965G_HB),
ID(PCI_DEVICE_ID_INTEL_82965GM_HB),
@@ -2075,6 +2253,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_G33_HB),
ID(PCI_DEVICE_ID_INTEL_Q35_HB),
ID(PCI_DEVICE_ID_INTEL_Q33_HB),
+ ID(PCI_DEVICE_ID_INTEL_IGD_HB),
{ }
};
diff --git a/drivers/char/drm/Kconfig b/drivers/char/drm/Kconfig
index ba3058dd39a7..610d6fd5bb50 100644
--- a/drivers/char/drm/Kconfig
+++ b/drivers/char/drm/Kconfig
@@ -38,7 +38,7 @@ config DRM_RADEON
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
run the Radeon in plain VGA mode.
-
+
If M is selected, the module will be called radeon.
config DRM_I810
@@ -71,9 +71,9 @@ config DRM_I915
852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
module will be called i915. AGP support is required for this driver
to work. This driver is used by the Intel driver in X.org 6.8 and
- XFree86 4.4 and above. If unsure, build this and i830 as modules and
+ XFree86 4.4 and above. If unsure, build this and i830 as modules and
the X server will load the correct one.
-
+
endchoice
config DRM_MGA
@@ -88,7 +88,7 @@ config DRM_SIS
tristate "SiS video cards"
depends on DRM && AGP
help
- Choose this option if you have a SiS 630 or compatible video
+ Choose this option if you have a SiS 630 or compatible video
chipset. If M is selected the module will be called sis. AGP
support is required for this driver to work.
@@ -105,4 +105,3 @@ config DRM_SAVAGE
help
Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
chipset. If M is selected the module will be called savage.
-
diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile
index 6915a0599dfb..1283ded88ead 100644
--- a/drivers/char/drm/Makefile
+++ b/drivers/char/drm/Makefile
@@ -38,5 +38,3 @@ obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_SIS) += sis.o
obj-$(CONFIG_DRM_SAVAGE)+= savage.o
obj-$(CONFIG_DRM_VIA) +=via.o
-
-
diff --git a/drivers/char/drm/README.drm b/drivers/char/drm/README.drm
index af74cd79a279..b5b332722581 100644
--- a/drivers/char/drm/README.drm
+++ b/drivers/char/drm/README.drm
@@ -41,4 +41,3 @@ For specific information about kernel-level support, see:
A Security Analysis of the Direct Rendering Infrastructure
http://dri.sourceforge.net/doc/security_low_level.html
-
diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/char/drm/ati_pcigart.c
index 3345641ff904..d352dbb4ccf7 100644
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/char/drm/ati_pcigart.c
@@ -41,7 +41,7 @@ static void *drm_ati_alloc_pcigart_table(int order)
struct page *page;
int i;
- DRM_DEBUG("%s: alloc %d order\n", __FUNCTION__, order);
+ DRM_DEBUG("%d order\n", order);
address = __get_free_pages(GFP_KERNEL | __GFP_COMP,
order);
@@ -54,7 +54,7 @@ static void *drm_ati_alloc_pcigart_table(int order)
for (i = 0; i < order; i++, page++)
SetPageReserved(page);
- DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address);
+ DRM_DEBUG("returning 0x%08lx\n", address);
return (void *)address;
}
@@ -63,7 +63,7 @@ static void drm_ati_free_pcigart_table(void *address, int order)
struct page *page;
int i;
int num_pages = 1 << order;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
page = virt_to_page((unsigned long)address);
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index 82fb3d0d2785..3a05c6d5ebe1 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -202,7 +202,8 @@ enum drm_map_flags {
_DRM_KERNEL = 0x08, /**< kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
- _DRM_REMOVABLE = 0x40 /**< Removable mapping */
+ _DRM_REMOVABLE = 0x40, /**< Removable mapping */
+ _DRM_DRIVER = 0x80 /**< Managed by driver */
};
struct drm_ctx_priv_map {
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index dde02a15fa59..19d3be5c4b2d 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -292,7 +292,6 @@ struct drm_magic_entry {
struct list_head head;
struct drm_hash_item hash_item;
struct drm_file *priv;
- struct drm_magic_entry *next;
};
struct drm_vma_entry {
@@ -388,8 +387,8 @@ struct drm_file {
struct drm_head *head;
int remove_auth_on_close;
unsigned long lock_count;
- void *driver_priv;
struct file *filp;
+ void *driver_priv;
};
/** Wait queue */
@@ -401,11 +400,9 @@ struct drm_queue {
wait_queue_head_t read_queue; /**< Processes waiting on block_read */
atomic_t block_write; /**< Queue blocked for writes */
wait_queue_head_t write_queue; /**< Processes waiting on block_write */
-#if 1
atomic_t total_queued; /**< Total queued statistic */
atomic_t total_flushed; /**< Total flushes statistic */
atomic_t total_locks; /**< Total locks statistics */
-#endif
enum drm_ctx_flags flags; /**< Context preserving and 2D-only */
struct drm_waitlist waitlist; /**< Pending buffers */
wait_queue_head_t flush_queue; /**< Processes waiting until flush */
@@ -416,7 +413,8 @@ struct drm_queue {
*/
struct drm_lock_data {
struct drm_hw_lock *hw_lock; /**< Hardware lock */
- struct drm_file *file_priv; /**< File descr of lock holder (0=kernel) */
+ /** Private of lock holder's file (NULL=kernel) */
+ struct drm_file *file_priv;
wait_queue_head_t lock_queue; /**< Queue of blocked processes */
unsigned long lock_time; /**< Time of last lock in jiffies */
spinlock_t spinlock;
@@ -491,6 +489,27 @@ struct drm_sigdata {
struct drm_hw_lock *lock;
};
+
+/*
+ * Generic memory manager structs
+ */
+
+struct drm_mm_node {
+ struct list_head fl_entry;
+ struct list_head ml_entry;
+ int free;
+ unsigned long start;
+ unsigned long size;
+ struct drm_mm *mm;
+ void *private;
+};
+
+struct drm_mm {
+ struct list_head fl_entry;
+ struct list_head ml_entry;
+};
+
+
/**
* Mappings list
*/
@@ -498,7 +517,7 @@ struct drm_map_list {
struct list_head head; /**< list head */
struct drm_hash_item hash;
struct drm_map *map; /**< mapping */
- unsigned int user_token;
+ uint64_t user_token;
};
typedef struct drm_map drm_local_map_t;
@@ -536,24 +555,6 @@ struct drm_ati_pcigart_info {
int table_size;
};
-/*
- * Generic memory manager structs
- */
-struct drm_mm_node {
- struct list_head fl_entry;
- struct list_head ml_entry;
- int free;
- unsigned long start;
- unsigned long size;
- struct drm_mm *mm;
- void *private;
-};
-
-struct drm_mm {
- struct list_head fl_entry;
- struct list_head ml_entry;
-};
-
/**
* DRM driver structure. This structure represent the common code for
* a family of cards. There will one drm_device for each card present
@@ -567,6 +568,8 @@ struct drm_driver {
void (*postclose) (struct drm_device *, struct drm_file *);
void (*lastclose) (struct drm_device *);
int (*unload) (struct drm_device *);
+ int (*suspend) (struct drm_device *);
+ int (*resume) (struct drm_device *);
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
void (*dma_ready) (struct drm_device *);
int (*dma_quiescent) (struct drm_device *);
@@ -642,6 +645,7 @@ struct drm_head {
* may contain multiple heads.
*/
struct drm_device {
+ struct device dev; /**< Linux device */
char *unique; /**< Unique identifier: e.g., busid */
int unique_len; /**< Length of unique field */
char *devname; /**< For /proc/interrupts */
@@ -750,7 +754,6 @@ struct drm_device {
struct pci_controller *hose;
#endif
struct drm_sg_mem *sg; /**< Scatter gather memory */
- unsigned long *ctx_bitmap; /**< context bitmap */
void *dev_private; /**< device private data */
struct drm_sigdata sigdata; /**< For block_all_signals */
sigset_t sigmask;
@@ -847,6 +850,8 @@ extern int drm_release(struct inode *inode, struct file *filp);
/* Mapping support (drm_vm.h) */
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
+extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
+extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
/* Memory management support (drm_memory.h) */
@@ -1061,11 +1066,11 @@ extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
/* sysfs support (drm_sysfs.c) */
+struct drm_sysfs_class;
extern struct class *drm_sysfs_create(struct module *owner, char *name);
-extern void drm_sysfs_destroy(struct class *cs);
-extern struct class_device *drm_sysfs_device_add(struct class *cs,
- struct drm_head *head);
-extern void drm_sysfs_device_remove(struct class_device *class_dev);
+extern void drm_sysfs_destroy(void);
+extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head);
+extern void drm_sysfs_device_remove(struct drm_device *dev);
/*
* Basic memory manager support (drm_mm.c)
@@ -1073,7 +1078,7 @@ extern void drm_sysfs_device_remove(struct class_device *class_dev);
extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
unsigned long size,
unsigned alignment);
-void drm_mm_put_block(struct drm_mm_node * cur);
+extern void drm_mm_put_block(struct drm_mm_node * cur);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
unsigned alignment, int best_match);
extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
@@ -1144,8 +1149,5 @@ extern void *drm_calloc(size_t nmemb, size_t size, int area);
/*@}*/
-extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
-extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
-
#endif /* __KERNEL__ */
#endif
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 214f4fbcba73..9468c7889ff1 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -166,7 +166,6 @@ int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
dev->agp->mode = mode.mode;
agp_enable(dev->agp->bridge, mode.mode);
- dev->agp->base = dev->agp->agp_info.aper_base;
dev->agp->enabled = 1;
return 0;
}
@@ -417,7 +416,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
INIT_LIST_HEAD(&head->memory);
head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask;
-
+ head->base = head->agp_info.aper_base;
return head;
}
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index d24a6c2c2c24..bde64b84166e 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -184,7 +184,7 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
return -ENOMEM;
}
}
-
+
break;
case _DRM_SHM:
list = drm_find_matching_map(dev, map);
@@ -229,11 +229,17 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
#ifdef __alpha__
map->offset += dev->hose->mem_space->start;
#endif
- /* Note: dev->agp->base may actually be 0 when the DRM
- * is not in control of AGP space. But if user space is
- * it should already have added the AGP base itself.
+ /* In some cases (i810 driver), user space may have already
+ * added the AGP base itself, because dev->agp->base previously
+ * only got set during AGP enable. So, only add the base
+ * address if the map's offset isn't already within the
+ * aperture.
*/
- map->offset += dev->agp->base;
+ if (map->offset < dev->agp->base ||
+ map->offset > dev->agp->base +
+ dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
+ map->offset += dev->agp->base;
+ }
map->mtrr = dev->agp->agp_mtrr; /* for getmap */
/* This assumes the DRM is in total control of AGP space.
@@ -429,6 +435,7 @@ int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
return ret;
}
+EXPORT_SYMBOL(drm_rmmap);
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
* the last close of the device, and this is necessary for cleanup when things
@@ -814,9 +821,9 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
page_count = 0;
while (entry->buf_count < count) {
-
+
dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
-
+
if (!dmah) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@@ -1592,5 +1599,3 @@ int drm_order(unsigned long size)
return order;
}
EXPORT_SYMBOL(drm_order);
-
-
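The new check in drm_addmap_core() above only rebases an AGP map whose offset is still relative, i.e. not already an absolute address inside the aperture. A small stand-alone sketch of that predicate, with made-up numbers purely for illustration (aper_base/aper_size_mb stand in for dev->agp->base and agp_info.aper_size):

	#include <stdint.h>
	#include <stdio.h>

	/* Return nonzero if offset already falls inside the AGP aperture. */
	static int in_aperture(uint64_t offset, uint64_t aper_base,
			       uint64_t aper_size_mb)
	{
		uint64_t aper_end = aper_base + aper_size_mb * 1024 * 1024 - 1;

		return offset >= aper_base && offset <= aper_end;
	}

	int main(void)
	{
		uint64_t base = 0xd0000000, size_mb = 64;

		/* Relative offset (e.g. from the i810 driver): needs rebasing. */
		printf("%d\n", in_aperture(0x01000000, base, size_mb));  /* 0 */
		/* Offset user space already rebased: leave it alone. */
		printf("%d\n", in_aperture(0xd1000000, base, size_mb));  /* 1 */
		return 0;
	}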
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index 17fe69e7bfc1..d505f695421f 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -159,7 +159,7 @@ int drm_getsareactx(struct drm_device *dev, void *data,
request->handle = NULL;
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
- request->handle =
+ request->handle =
(void *)(unsigned long)_entry->user_token;
break;
}
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 44a46268b02b..0e7af53c87de 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -200,8 +200,10 @@ int drm_lastclose(struct drm_device * dev)
}
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
- drm_rmmap_locked(dev, r_list->map);
- r_list = NULL;
+ if (!(r_list->map->flags & _DRM_DRIVER)) {
+ drm_rmmap_locked(dev, r_list->map);
+ r_list = NULL;
+ }
}
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
@@ -255,8 +257,6 @@ int drm_init(struct drm_driver *driver)
DRM_DEBUG("\n");
- drm_mem_init();
-
for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
@@ -293,10 +293,6 @@ static void drm_cleanup(struct drm_device * dev)
drm_lastclose(dev);
- drm_ht_remove(&dev->map_hash);
-
- drm_ctxbitmap_cleanup(dev);
-
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
dev->agp && dev->agp->agp_mtrr >= 0) {
int retval;
@@ -314,6 +310,9 @@ static void drm_cleanup(struct drm_device * dev)
if (dev->driver->unload)
dev->driver->unload(dev);
+ drm_ht_remove(&dev->map_hash);
+ drm_ctxbitmap_cleanup(dev);
+
drm_put_head(&dev->primary);
if (drm_put_dev(dev))
DRM_ERROR("Cannot unload module\n");
@@ -383,22 +382,24 @@ static int __init drm_core_init(void)
goto err_p3;
}
+ drm_mem_init();
+
DRM_INFO("Initialized %s %d.%d.%d %s\n",
CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
return 0;
- err_p3:
- drm_sysfs_destroy(drm_class);
- err_p2:
+err_p3:
+ drm_sysfs_destroy();
+err_p2:
unregister_chrdev(DRM_MAJOR, "drm");
drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
- err_p1:
+err_p1:
return ret;
}
static void __exit drm_core_exit(void)
{
remove_proc_entry("dri", NULL);
- drm_sysfs_destroy(drm_class);
+ drm_sysfs_destroy();
unregister_chrdev(DRM_MAJOR, "drm");
@@ -494,23 +495,25 @@ int drm_ioctl(struct inode *inode, struct file *filp,
} else {
if (cmd & (IOC_IN | IOC_OUT)) {
kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
- if (!kdata)
- return -ENOMEM;
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto err_i1;
+ }
}
if (cmd & IOC_IN) {
if (copy_from_user(kdata, (void __user *)arg,
_IOC_SIZE(cmd)) != 0) {
- retcode = -EACCES;
+ retcode = -EFAULT;
goto err_i1;
}
}
retcode = func(dev, kdata, file_priv);
- if (cmd & IOC_OUT) {
+ if ((retcode == 0) && (cmd & IOC_OUT)) {
if (copy_to_user((void __user *)arg, kdata,
_IOC_SIZE(cmd)) != 0)
- retcode = -EACCES;
+ retcode = -EFAULT;
}
}
diff --git a/drivers/char/drm/drm_hashtab.c b/drivers/char/drm/drm_hashtab.c
index 4b8e7db5a232..33160673a7b7 100644
--- a/drivers/char/drm/drm_hashtab.c
+++ b/drivers/char/drm/drm_hashtab.c
@@ -80,7 +80,7 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
}
}
-static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
+static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
unsigned long key)
{
struct drm_hash_item *entry;
@@ -129,7 +129,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
}
/*
- * Just insert an item and return any "bits" bit key that hasn't been
+ * Just insert an item and return any "bits" bit key that hasn't been
* used before.
*/
int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
@@ -200,4 +200,3 @@ void drm_ht_remove(struct drm_open_hash *ht)
ht->table = NULL;
}
}
-
diff --git a/drivers/char/drm/drm_hashtab.h b/drivers/char/drm/drm_hashtab.h
index 573e333ac457..cd2b189e1be6 100644
--- a/drivers/char/drm/drm_hashtab.h
+++ b/drivers/char/drm/drm_hashtab.h
@@ -65,4 +65,3 @@ extern void drm_ht_remove(struct drm_open_hash *ht);
#endif
-
diff --git a/drivers/char/drm/drm_ioc32.c b/drivers/char/drm/drm_ioc32.c
index 2286f3312c5c..90f5a8d9bdcb 100644
--- a/drivers/char/drm/drm_ioc32.c
+++ b/drivers/char/drm/drm_ioc32.c
@@ -1051,8 +1051,12 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
drm_ioctl_compat_t *fn;
int ret;
+ /* Assume that ioctls without an explicit compat routine will just
+ * work. This may not always be a good assumption, but it's better
+ * than always failing.
+ */
if (nr >= ARRAY_SIZE(drm_compat_ioctls))
- return -ENOTTY;
+ return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
fn = drm_compat_ioctls[nr];
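The hunk above changes the bounds check so that ioctl numbers past the end of the 32-bit compat table fall through to the native handler instead of failing with -ENOTTY. A toy user-space sketch of that dispatch-with-fallback shape (handler names and table contents are invented for illustration):

	#include <stdio.h>

	typedef int (*handler_t)(unsigned int nr);

	static int native_handler(unsigned int nr)
	{
		printf("native handler, nr=%u\n", nr);
		return 0;
	}

	static int compat_handler(unsigned int nr)
	{
		printf("compat handler, nr=%u\n", nr);
		return 0;
	}

	/* Toy stand-in for drm_compat_ioctls[]. */
	static handler_t compat_table[] = { compat_handler };

	static int dispatch(unsigned int nr)
	{
		if (nr >= sizeof(compat_table) / sizeof(compat_table[0]))
			return native_handler(nr);   /* was: return -ENOTTY */
		return compat_table[nr](nr);
	}

	int main(void)
	{
		dispatch(0);   /* covered by the compat table */
		dispatch(7);   /* past the table: falls back to the native path */
		return 0;
	}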
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
index 3cbebf868e68..16829fb3089d 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -234,26 +234,23 @@ int drm_getclient(struct drm_device *dev, void *data,
idx = client->idx;
mutex_lock(&dev->struct_mutex);
-
- if (list_empty(&dev->filelist)) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
i = 0;
list_for_each_entry(pt, &dev->filelist, lhead) {
- if (i++ >= idx)
- break;
+ if (i++ >= idx) {
+ client->auth = pt->authenticated;
+ client->pid = pt->pid;
+ client->uid = pt->uid;
+ client->magic = pt->magic;
+ client->iocs = pt->ioctl_count;
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
}
-
- client->auth = pt->authenticated;
- client->pid = pt->pid;
- client->uid = pt->uid;
- client->magic = pt->magic;
- client->iocs = pt->ioctl_count;
mutex_unlock(&dev->struct_mutex);
- return 0;
+ return -EINVAL;
}
/**
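With the rework above, DRM_IOCTL_GET_CLIENT fills in the idx-th client and returns 0, and returns -EINVAL once idx runs past the last open file, so user space can simply iterate until the ioctl fails. A minimal sketch of such a caller, assuming libdrm's uapi header is available as <drm/drm.h> and the device node is /dev/dri/card0:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	int main(void)
	{
		struct drm_client c;
		int fd, idx;

		fd = open("/dev/dri/card0", O_RDWR);
		if (fd < 0)
			return 1;

		/* Walk clients until the kernel reports EINVAL (past the last one). */
		for (idx = 0; ; idx++) {
			memset(&c, 0, sizeof(c));
			c.idx = idx;
			if (ioctl(fd, DRM_IOCTL_GET_CLIENT, &c) != 0)
				break;
			printf("client %d: pid=%lu auth=%d\n", idx, c.pid, c.auth);
		}
		close(fd);
		return 0;
	}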
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index 05eae63f85ba..089c015c01d1 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -107,7 +107,7 @@ static int drm_irq_install(struct drm_device * dev)
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
+ DRM_DEBUG("irq=%d\n", dev->irq);
if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
init_waitqueue_head(&dev->vbl_queue);
@@ -164,7 +164,7 @@ int drm_irq_uninstall(struct drm_device * dev)
if (!irq_enabled)
return -EINVAL;
- DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
+ DRM_DEBUG("irq=%d\n", dev->irq);
dev->driver->irq_uninstall(dev);
diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c
index 93019901bd30..845081b44f63 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/char/drm/drm_memory.c
@@ -179,4 +179,3 @@ void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
iounmap(map->handle);
}
EXPORT_SYMBOL(drm_core_ioremapfree);
-
diff --git a/drivers/char/drm/drm_mm.c b/drivers/char/drm/drm_mm.c
index 86f4eb61a6a4..dcff9e9b52e3 100644
--- a/drivers/char/drm/drm_mm.c
+++ b/drivers/char/drm/drm_mm.c
@@ -293,4 +293,3 @@ void drm_mm_takedown(struct drm_mm * mm)
drm_free(entry, sizeof(*entry), DRM_MEM_MM);
}
-
diff --git a/drivers/char/drm/drm_os_linux.h b/drivers/char/drm/drm_os_linux.h
index daa69c9d8977..8dbd2572b7c3 100644
--- a/drivers/char/drm/drm_os_linux.h
+++ b/drivers/char/drm/drm_os_linux.h
@@ -69,9 +69,9 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
#define DRM_COPY_TO_USER(arg1, arg2, arg3) \
copy_to_user(arg1, arg2, arg3)
/* Macros for copyfrom user, but checking readability only once */
-#define DRM_VERIFYAREA_READ( uaddr, size ) \
+#define DRM_VERIFYAREA_READ( uaddr, size ) \
(access_ok( VERIFY_READ, uaddr, size ) ? 0 : -EFAULT)
-#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
+#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
__copy_from_user(arg1, arg2, arg3)
#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
__copy_to_user(arg1, arg2, arg3)
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index 43d3c42df360..f52468843678 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -139,6 +139,101 @@
{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0, 0, 0}
@@ -311,5 +406,5 @@
{0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
-
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 12dfea89c7f3..d9b560fe9bbe 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -236,11 +236,11 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
type = "??";
else
type = types[map->type];
- DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08x ",
+ DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
i,
map->offset,
map->size, type, map->flags,
- r_list->user_token);
+ (unsigned long) r_list->user_token);
if (map->mtrr < 0) {
DRM_PROC_PRINT("none\n");
} else {
diff --git a/drivers/char/drm/drm_sarea.h b/drivers/char/drm/drm_sarea.h
index e040f47f369f..480037331e4e 100644
--- a/drivers/char/drm/drm_sarea.h
+++ b/drivers/char/drm/drm_sarea.h
@@ -45,7 +45,7 @@
#endif
/** Maximum number of drawables in the SAREA */
-#define SAREA_MAX_DRAWABLES 256
+#define SAREA_MAX_DRAWABLES 256
#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/char/drm/drm_scatter.c
index eb7fa437355e..26d8f675ed5d 100644
--- a/drivers/char/drm/drm_scatter.c
+++ b/drivers/char/drm/drm_scatter.c
@@ -67,7 +67,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
struct drm_sg_mem *entry;
unsigned long pages, i, j;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
@@ -81,7 +81,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
memset(entry, 0, sizeof(*entry));
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
- DRM_DEBUG("sg size=%ld pages=%ld\n", request->size, pages);
+ DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
entry->pages = pages;
entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
@@ -122,8 +122,8 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
entry->handle = ScatterHandle((unsigned long)entry->virtual);
- DRM_DEBUG("sg alloc handle = %08lx\n", entry->handle);
- DRM_DEBUG("sg alloc virtual = %p\n", entry->virtual);
+ DRM_DEBUG("handle = %08lx\n", entry->handle);
+ DRM_DEBUG("virtual = %p\n", entry->virtual);
for (i = (unsigned long)entry->virtual, j = 0; j < pages;
i += PAGE_SIZE, j++) {
@@ -210,7 +210,7 @@ int drm_sg_free(struct drm_device *dev, void *data,
if (!entry || entry->handle != request->handle)
return -EINVAL;
- DRM_DEBUG("sg free virtual = %p\n", entry->virtual);
+ DRM_DEBUG("virtual = %p\n", entry->virtual);
drm_sg_cleanup(entry);
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index ee83ff9efed6..d93a217f856a 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -98,10 +98,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
dev->driver = driver;
- if (dev->driver->load)
- if ((retcode = dev->driver->load(dev, ent->driver_data)))
- goto error_out_unreg;
-
if (drm_core_has_AGP(dev)) {
if (drm_device_is_agp(dev))
dev->agp = drm_agp_init(dev);
@@ -120,6 +116,10 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
}
}
+ if (dev->driver->load)
+ if ((retcode = dev->driver->load(dev, ent->driver_data)))
+ goto error_out_unreg;
+
retcode = drm_ctxbitmap_init(dev);
if (retcode) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
@@ -168,11 +168,10 @@ static int drm_get_head(struct drm_device * dev, struct drm_head * head)
goto err_g1;
}
- head->dev_class = drm_sysfs_device_add(drm_class, head);
- if (IS_ERR(head->dev_class)) {
+ ret = drm_sysfs_device_add(dev, head);
+ if (ret) {
printk(KERN_ERR
"DRM: Error sysfs_device_add.\n");
- ret = PTR_ERR(head->dev_class);
goto err_g2;
}
*heads = head;
@@ -218,13 +217,14 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
if (ret)
goto err_g1;
+ pci_set_master(pdev);
if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g2;
}
if ((ret = drm_get_head(dev, &dev->primary)))
goto err_g2;
-
+
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, dev->primary.minor);
@@ -283,7 +283,7 @@ int drm_put_head(struct drm_head * head)
DRM_DEBUG("release secondary minor %d\n", minor);
drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
- drm_sysfs_device_remove(head->dev_class);
+ drm_sysfs_device_remove(head->dev);
*head = (struct drm_head) {.dev = NULL};
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/char/drm/drm_sysfs.c
index cf4349b00b07..fa36153619e8 100644
--- a/drivers/char/drm/drm_sysfs.c
+++ b/drivers/char/drm/drm_sysfs.c
@@ -19,6 +19,45 @@
#include "drm_core.h"
#include "drmP.h"
+#define to_drm_device(d) container_of(d, struct drm_device, dev)
+
+/**
+ * drm_sysfs_suspend - DRM class suspend hook
+ * @dev: Linux device to suspend
+ * @state: power state to enter
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its suspend hook, if present.
+ */
+static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
+{
+ struct drm_device *drm_dev = to_drm_device(dev);
+
+ printk(KERN_ERR "%s\n", __FUNCTION__);
+
+ if (drm_dev->driver->suspend)
+ return drm_dev->driver->suspend(drm_dev);
+
+ return 0;
+}
+
+/**
+ * drm_sysfs_resume - DRM class resume hook
+ * @dev: Linux device to resume
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its resume hook, if present.
+ */
+static int drm_sysfs_resume(struct device *dev)
+{
+ struct drm_device *drm_dev = to_drm_device(dev);
+
+ if (drm_dev->driver->resume)
+ return drm_dev->driver->resume(drm_dev);
+
+ return 0;
+}
+
/* Display the version of drm_core. This doesn't work right in current design */
static ssize_t version_show(struct class *dev, char *buf)
{
@@ -33,7 +72,7 @@ static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
* @owner: pointer to the module that is to "own" this struct drm_sysfs_class
* @name: pointer to a string for the name of this class.
*
- * This is used to create a struct drm_sysfs_class pointer that can then be used
+ * This is used to create a DRM class pointer that can then be used
* in calls to drm_sysfs_device_add().
*
* Note, the pointer created here is to be destroyed when finished by making a
@@ -50,6 +89,9 @@ struct class *drm_sysfs_create(struct module *owner, char *name)
goto err_out;
}
+ class->suspend = drm_sysfs_suspend;
+ class->resume = drm_sysfs_resume;
+
err = class_create_file(class, &class_attr_version);
if (err)
goto err_out_class;
@@ -63,94 +105,100 @@ err_out:
}
/**
- * drm_sysfs_destroy - destroys a struct drm_sysfs_class structure
- * @cs: pointer to the struct drm_sysfs_class that is to be destroyed
+ * drm_sysfs_destroy - destroys DRM class
*
- * Note, the pointer to be destroyed must have been created with a call to
- * drm_sysfs_create().
+ * Destroy the DRM device class.
*/
-void drm_sysfs_destroy(struct class *class)
+void drm_sysfs_destroy(void)
{
- if ((class == NULL) || (IS_ERR(class)))
+ if ((drm_class == NULL) || (IS_ERR(drm_class)))
return;
-
- class_remove_file(class, &class_attr_version);
- class_destroy(class);
+ class_remove_file(drm_class, &class_attr_version);
+ class_destroy(drm_class);
}
-static ssize_t show_dri(struct class_device *class_device, char *buf)
+static ssize_t show_dri(struct device *device, struct device_attribute *attr,
+ char *buf)
{
- struct drm_device * dev = ((struct drm_head *)class_get_devdata(class_device))->dev;
+ struct drm_device *dev = to_drm_device(device);
if (dev->driver->dri_library_name)
return dev->driver->dri_library_name(dev, buf);
return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name);
}
-static struct class_device_attribute class_device_attrs[] = {
+static struct device_attribute device_attrs[] = {
__ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
};
/**
+ * drm_sysfs_device_release - do nothing
+ * @dev: Linux device
+ *
+ * Normally, this would free the DRM device associated with @dev, along
+ * with cleaning up any other stuff. But we do that in the DRM core, so
+ * this function can just return and hope that the core does its job.
+ */
+static void drm_sysfs_device_release(struct device *dev)
+{
+ return;
+}
+
+/**
* drm_sysfs_device_add - adds a class device to sysfs for a character driver
- * @cs: pointer to the struct class that this device should be registered to.
- * @dev: the dev_t for the device to be added.
- * @device: a pointer to a struct device that is assiociated with this class device.
- * @fmt: string for the class device's name
+ * @dev: DRM device to be added
+ * @head: DRM head in question
*
- * A struct class_device will be created in sysfs, registered to the specified
- * class. A "dev" file will be created, showing the dev_t for the device. The
- * pointer to the struct class_device will be returned from the call. Any further
- * sysfs files that might be required can be created using this pointer.
- * Note: the struct class passed to this function must have previously been
- * created with a call to drm_sysfs_create().
+ * Add a DRM device to the DRM's device model class. We use @dev's PCI device
+ * as the parent for the Linux device, and make sure it has a file containing
+ * the driver we're using (for userspace compatibility).
*/
-struct class_device *drm_sysfs_device_add(struct class *cs, struct drm_head *head)
+int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head)
{
- struct class_device *class_dev;
- int i, j, err;
-
- class_dev = class_device_create(cs, NULL,
- MKDEV(DRM_MAJOR, head->minor),
- &(head->dev->pdev)->dev,
- "card%d", head->minor);
- if (IS_ERR(class_dev)) {
- err = PTR_ERR(class_dev);
+ int err;
+ int i, j;
+
+ dev->dev.parent = &dev->pdev->dev;
+ dev->dev.class = drm_class;
+ dev->dev.release = drm_sysfs_device_release;
+ dev->dev.devt = head->device;
+ snprintf(dev->dev.bus_id, BUS_ID_SIZE, "card%d", head->minor);
+
+ err = device_register(&dev->dev);
+ if (err) {
+ DRM_ERROR("device add failed: %d\n", err);
goto err_out;
}
- class_set_devdata(class_dev, head);
-
- for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) {
- err = class_device_create_file(class_dev,
- &class_device_attrs[i]);
+ for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+ err = device_create_file(&dev->dev, &device_attrs[i]);
if (err)
goto err_out_files;
}
- return class_dev;
+ return 0;
err_out_files:
if (i > 0)
for (j = 0; j < i; j++)
- class_device_remove_file(class_dev,
- &class_device_attrs[i]);
- class_device_unregister(class_dev);
+ device_remove_file(&dev->dev, &device_attrs[i]);
+ device_unregister(&dev->dev);
err_out:
- return ERR_PTR(err);
+
+ return err;
}
/**
- * drm_sysfs_device_remove - removes a class device that was created with drm_sysfs_device_add()
- * @dev: the dev_t of the device that was previously registered.
+ * drm_sysfs_device_remove - remove DRM device
+ * @dev: DRM device to remove
*
* This call unregisters and cleans up a class device that was created with a
* call to drm_sysfs_device_add()
*/
-void drm_sysfs_device_remove(struct class_device *class_dev)
+void drm_sysfs_device_remove(struct drm_device *dev)
{
int i;
- for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
- class_device_remove_file(class_dev, &class_device_attrs[i]);
- class_device_unregister(class_dev);
+ for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
+ device_remove_file(&dev->dev, &device_attrs[i]);
+ device_unregister(&dev->dev);
}
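The suspend/resume hooks and the dri_library_name attribute above all recover the DRM device from the generic struct device through to_drm_device(), a container_of() wrapper that works because struct device is now embedded in struct drm_device. A self-contained illustration of that pattern, using simplified stand-in structs rather than the real kernel definitions:

	#include <stddef.h>
	#include <stdio.h>

	/* Same idea as the kernel's container_of(): recover the outer object
	 * from a pointer to one of its members. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct device { int id; };              /* stand-in for struct device */

	struct drm_device {                     /* stand-in for the real struct */
		char name[16];
		struct device dev;              /* embedded, as in the patch */
	};

	#define to_drm_device(d) container_of(d, struct drm_device, dev)

	int main(void)
	{
		struct drm_device card = { .name = "card0", .dev = { .id = 226 } };
		struct device *d = &card.dev;   /* what the class callbacks get */

		printf("%s\n", to_drm_device(d)->name);   /* prints "card0" */
		return 0;
	}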
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index e8d50af58201..cea4105374b2 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -180,7 +180,7 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
return NOPAGE_SIGBUS;
get_page(page);
- DRM_DEBUG("shm_nopage 0x%lx\n", address);
+ DRM_DEBUG("0x%lx\n", address);
return page;
}
@@ -294,7 +294,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
get_page(page);
- DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
+ DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr);
return page;
}
@@ -506,6 +506,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &drm_vm_dma_ops;
vma->vm_flags |= VM_RESERVED; /* Don't swap */
+ vma->vm_flags |= VM_DONTEXPAND;
vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open_locked(vma);
@@ -655,6 +656,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
return -EINVAL; /* This should never happen. */
}
vma->vm_flags |= VM_RESERVED; /* Don't swap */
+ vma->vm_flags |= VM_DONTEXPAND;
vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open_locked(vma);
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index eb381a7c5bee..8d7ea81c4b66 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -40,7 +40,7 @@
#define I810_BUF_FREE 2
#define I810_BUF_CLIENT 1
-#define I810_BUF_HARDWARE 0
+#define I810_BUF_HARDWARE 0
#define I810_BUF_UNMAPPED 0
#define I810_BUF_MAPPED 1
@@ -570,7 +570,7 @@ static void i810EmitState(struct drm_device * dev)
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
- DRM_DEBUG("%s %x\n", __FUNCTION__, dirty);
+ DRM_DEBUG("%x\n", dirty);
if (dirty & I810_UPLOAD_BUFFERS) {
i810EmitDestVerified(dev, sarea_priv->BufferState);
@@ -802,8 +802,7 @@ static void i810_dma_dispatch_flip(struct drm_device * dev)
int pitch = dev_priv->pitch;
RING_LOCALS;
- DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
- __FUNCTION__,
+ DRM_DEBUG("page=%d pfCurrentPage=%d\n",
dev_priv->current_page,
dev_priv->sarea_priv->pf_current_page);
@@ -848,8 +847,6 @@ static void i810_dma_quiescent(struct drm_device * dev)
drm_i810_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
-/* printk("%s\n", __FUNCTION__); */
-
i810_kernel_lost_context(dev);
BEGIN_LP_RING(4);
@@ -869,8 +866,6 @@ static int i810_flush_queue(struct drm_device * dev)
int i, ret = 0;
RING_LOCALS;
-/* printk("%s\n", __FUNCTION__); */
-
i810_kernel_lost_context(dev);
BEGIN_LP_RING(2);
@@ -949,7 +944,7 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
+ DRM_DEBUG("idx %d used %d discard %d\n",
vertex->idx, vertex->used, vertex->discard);
if (vertex->idx < 0 || vertex->idx > dma->buf_count)
@@ -987,7 +982,7 @@ static int i810_clear_bufs(struct drm_device *dev, void *data,
static int i810_swap_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- DRM_DEBUG("i810_swap_bufs\n");
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -1068,11 +1063,10 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf,
sarea_priv->dirty = 0x7f;
- DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n", address, used);
+ DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);
dev_priv->counter++;
DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
- DRM_DEBUG("i810_dma_dispatch_mc\n");
DRM_DEBUG("start : %lx\n", start);
DRM_DEBUG("used : %d\n", used);
DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
@@ -1179,7 +1173,7 @@ static void i810_do_init_pageflip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
dev_priv->page_flipping = 1;
dev_priv->current_page = 0;
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
@@ -1189,7 +1183,7 @@ static int i810_do_cleanup_pageflip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
if (dev_priv->current_page != 0)
i810_dma_dispatch_flip(dev);
@@ -1202,7 +1196,7 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
{
drm_i810_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h
index 0af45872f67e..0118849a5672 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/char/drm/i810_drv.h
@@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
*
*/
@@ -134,7 +134,7 @@ extern int i810_max_ioctl;
#define I810_ADDR(reg) (I810_BASE(reg) + reg)
#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
#define I810_READ(reg) I810_DEREF(reg)
-#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
+#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg)
#define I810_READ16(reg) I810_DEREF16(reg)
#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0)
@@ -145,7 +145,7 @@ extern int i810_max_ioctl;
#define BEGIN_LP_RING(n) do { \
if (I810_VERBOSE) \
- DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__); \
+ DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \
if (dev_priv->ring.space < n*4) \
i810_wait_ring(dev, n*4); \
dev_priv->ring.space -= n*4; \
@@ -155,19 +155,19 @@ extern int i810_max_ioctl;
} while (0)
#define ADVANCE_LP_RING() do { \
- if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
- dev_priv->ring.tail = outring; \
+ if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
+ dev_priv->ring.tail = outring; \
I810_WRITE(LP_RING + RING_TAIL, outring); \
} while(0)
-#define OUT_RING(n) do { \
+#define OUT_RING(n) do { \
if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
*(volatile unsigned int *)(virt + outring) = n; \
outring += 4; \
outring &= ringmask; \
} while (0)
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
@@ -184,28 +184,28 @@ extern int i810_max_ioctl;
#define I810REG_HWSTAM 0x02098
#define I810REG_INT_IDENTITY_R 0x020a4
-#define I810REG_INT_MASK_R 0x020a8
+#define I810REG_INT_MASK_R 0x020a8
#define I810REG_INT_ENABLE_R 0x020a0
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define RING_TAIL 0x00
+#define LP_RING 0x2030
+#define HP_RING 0x2040
+#define RING_TAIL 0x00
#define TAIL_ADDR 0x000FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0x00FFFFF8
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x000FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
+#define RING_HEAD 0x04
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_START 0x08
+#define START_ADDR 0x00FFFFF8
+#define RING_LEN 0x0C
+#define RING_NR_PAGES 0x000FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index 69a363edb0d2..379cbdad4921 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -42,7 +42,7 @@
#define I830_BUF_FREE 2
#define I830_BUF_CLIENT 1
-#define I830_BUF_HARDWARE 0
+#define I830_BUF_HARDWARE 0
#define I830_BUF_UNMAPPED 0
#define I830_BUF_MAPPED 1
diff --git a/drivers/char/drm/i830_drm.h b/drivers/char/drm/i830_drm.h
index 968a6d9f9dcb..4b00d2dd4f68 100644
--- a/drivers/char/drm/i830_drm.h
+++ b/drivers/char/drm/i830_drm.h
@@ -12,9 +12,9 @@
#define _I830_DEFINES_
#define I830_DMA_BUF_ORDER 12
-#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
-#define I830_DMA_BUF_NR 256
-#define I830_NR_SAREA_CLIPRECTS 8
+#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
+#define I830_DMA_BUF_NR 256
+#define I830_NR_SAREA_CLIPRECTS 8
/* Each region is a minimum of 64k, and there are at most 64 of them.
*/
@@ -58,7 +58,7 @@
#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
-#define I830_UPLOAD_STIPPLE 0x8000000
+#define I830_UPLOAD_STIPPLE 0x8000000
/* Indices into buf.Setup where various bits of state are mirrored per
* context and per buffer. These can be fired at the card as a unit,
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index db3a9fa83960..4caba8c54455 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
*
*/
@@ -156,8 +156,7 @@ extern int i830_driver_device_is_agp(struct drm_device * dev);
#define BEGIN_LP_RING(n) do { \
if (I830_VERBOSE) \
- printk("BEGIN_LP_RING(%d) in %s\n", \
- n, __FUNCTION__); \
+ printk("BEGIN_LP_RING(%d)\n", (n)); \
if (dev_priv->ring.space < n*4) \
i830_wait_ring(dev, n*4, __FUNCTION__); \
outcount = 0; \
@@ -183,7 +182,7 @@ extern int i830_driver_device_is_agp(struct drm_device * dev);
extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
@@ -203,30 +202,30 @@ extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
#define I830REG_HWSTAM 0x02098
#define I830REG_INT_IDENTITY_R 0x020a4
-#define I830REG_INT_MASK_R 0x020a8
+#define I830REG_INT_MASK_R 0x020a8
#define I830REG_INT_ENABLE_R 0x020a0
#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define RING_TAIL 0x00
+#define LP_RING 0x2030
+#define HP_RING 0x2040
+#define RING_TAIL 0x00
#define TAIL_ADDR 0x001FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0x0xFFFFF000
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x001FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
+#define RING_HEAD 0x04
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_START 0x08
+#define START_ADDR 0x0xFFFFF000
+#define RING_LEN 0x0C
+#define RING_NR_PAGES 0x001FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
@@ -279,9 +278,9 @@ extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
-#define MI_BATCH_BUFFER ((0x30<<23)|1)
-#define MI_BATCH_BUFFER_START (0x31<<23)
-#define MI_BATCH_BUFFER_END (0xA<<23)
+#define MI_BATCH_BUFFER ((0x30<<23)|1)
+#define MI_BATCH_BUFFER_START (0x31<<23)
+#define MI_BATCH_BUFFER_END (0xA<<23)
#define MI_BATCH_NON_SECURE (1)
#define MI_WAIT_FOR_EVENT ((0x3<<23))
diff --git a/drivers/char/drm/i830_irq.c b/drivers/char/drm/i830_irq.c
index 76403f4b6200..a33db5f0967f 100644
--- a/drivers/char/drm/i830_irq.c
+++ b/drivers/char/drm/i830_irq.c
@@ -144,7 +144,7 @@ int i830_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_irq_wait_t *irqwait = data;
+ drm_i830_irq_wait_t *irqwait = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index e61a43e5b3ac..43986d81ae34 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -31,17 +31,6 @@
#include "i915_drm.h"
#include "i915_drv.h"
-#define IS_I965G(dev) (dev->pci_device == 0x2972 || \
- dev->pci_device == 0x2982 || \
- dev->pci_device == 0x2992 || \
- dev->pci_device == 0x29A2 || \
- dev->pci_device == 0x2A02 || \
- dev->pci_device == 0x2A12)
-
-#define IS_G33(dev) (dev->pci_device == 0x29b2 || \
- dev->pci_device == 0x29c2 || \
- dev->pci_device == 0x29d2)
-
/* Really want an OS-independent resettable timer. Would like to have
* this loop run for (eg) 3 sec, but have the timer reset every time
* the head pointer changes, so that EBUSY only happens if the ring
@@ -90,6 +79,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
static int i915_dma_cleanup(struct drm_device * dev)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
@@ -97,52 +87,42 @@ static int i915_dma_cleanup(struct drm_device * dev)
if (dev->irq)
drm_irq_uninstall(dev);
- if (dev->dev_private) {
- drm_i915_private_t *dev_priv =
- (drm_i915_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start) {
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
- }
-
- if (dev_priv->status_page_dmah) {
- drm_pci_free(dev, dev_priv->status_page_dmah);
- /* Need to rewrite hardware status page */
- I915_WRITE(0x02080, 0x1ffff000);
- }
-
- if (dev_priv->status_gfx_addr) {
- dev_priv->status_gfx_addr = 0;
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
- I915_WRITE(0x2080, 0x1ffff000);
- }
+ if (dev_priv->ring.virtual_start) {
+ drm_core_ioremapfree(&dev_priv->ring.map, dev);
+ dev_priv->ring.virtual_start = 0;
+ dev_priv->ring.map.handle = 0;
+ dev_priv->ring.map.size = 0;
+ }
- drm_free(dev->dev_private, sizeof(drm_i915_private_t),
- DRM_MEM_DRIVER);
+ if (dev_priv->status_page_dmah) {
+ drm_pci_free(dev, dev_priv->status_page_dmah);
+ dev_priv->status_page_dmah = NULL;
+ /* Need to rewrite hardware status page */
+ I915_WRITE(0x02080, 0x1ffff000);
+ }
- dev->dev_private = NULL;
+ if (dev_priv->status_gfx_addr) {
+ dev_priv->status_gfx_addr = 0;
+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ I915_WRITE(0x2080, 0x1ffff000);
}
return 0;
}
-static int i915_initialize(struct drm_device * dev,
- drm_i915_private_t * dev_priv,
- drm_i915_init_t * init)
+static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
- memset(dev_priv, 0, sizeof(drm_i915_private_t));
+ drm_i915_private_t *dev_priv = dev->dev_private;
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("can not find sarea!\n");
- dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
return -EINVAL;
}
dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio_map) {
- dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("can not find mmio map!\n");
return -EINVAL;
@@ -165,7 +145,6 @@ static int i915_initialize(struct drm_device * dev,
drm_core_ioremap(&dev_priv->ring.map, dev);
if (dev_priv->ring.map.handle == NULL) {
- dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -197,7 +176,6 @@ static int i915_initialize(struct drm_device * dev,
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
if (!dev_priv->status_page_dmah) {
- dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
@@ -209,7 +187,6 @@ static int i915_initialize(struct drm_device * dev,
I915_WRITE(0x02080, dev_priv->dma_status_page);
}
DRM_DEBUG("Enabled hardware status page\n");
- dev->dev_private = (void *)dev_priv;
return 0;
}
@@ -254,17 +231,12 @@ static int i915_dma_resume(struct drm_device * dev)
static int i915_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv;
drm_i915_init_t *init = data;
int retcode = 0;
switch (init->func) {
case I915_INIT_DMA:
- dev_priv = drm_alloc(sizeof(drm_i915_private_t),
- DRM_MEM_DRIVER);
- if (dev_priv == NULL)
- return -ENOMEM;
- retcode = i915_initialize(dev, dev_priv, init);
+ retcode = i915_initialize(dev, init);
break;
case I915_CLEANUP_DMA:
retcode = i915_dma_cleanup(dev);
@@ -351,7 +323,7 @@ static int validate_cmd(int cmd)
{
int ret = do_validate_cmd(cmd);
-/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
+/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
return ret;
}
@@ -685,7 +657,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
int value;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -719,7 +691,7 @@ static int i915_setparam(struct drm_device *dev, void *data,
drm_i915_setparam_t *param = data;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -749,7 +721,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
drm_i915_hws_addr_t *hws = data;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -757,7 +729,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
- dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws->addr;
+ dev_priv->hws_map.offset = dev->agp->base + hws->addr;
dev_priv->hws_map.size = 4*1024;
dev_priv->hws_map.type = 0;
dev_priv->hws_map.flags = 0;
@@ -765,7 +737,6 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
drm_core_ioremap(&dev_priv->hws_map, dev);
if (dev_priv->hws_map.handle == NULL) {
- dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
dev_priv->status_gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
@@ -784,6 +755,10 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long base, size;
+ int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@@ -791,24 +766,51 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->types[8] = _DRM_STAT_SECONDARY;
dev->types[9] = _DRM_STAT_DMA;
+ dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+
+ memset(dev_priv, 0, sizeof(drm_i915_private_t));
+
+ dev->dev_private = (void *)dev_priv;
+
+ /* Add register map (needed for suspend/resume) */
+ base = drm_get_resource_start(dev, mmio_bar);
+ size = drm_get_resource_len(dev, mmio_bar);
+
+ ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
+ _DRM_KERNEL | _DRM_DRIVER,
+ &dev_priv->mmio_map);
+ return ret;
+}
+
+int i915_driver_unload(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->mmio_map)
+ drm_rmmap(dev, dev_priv->mmio_map);
+
+ drm_free(dev->dev_private, sizeof(drm_i915_private_t),
+ DRM_MEM_DRIVER);
+
return 0;
}
void i915_driver_lastclose(struct drm_device * dev)
{
- if (dev->dev_private) {
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->agp_heap)
i915_mem_takedown(&(dev_priv->agp_heap));
- }
+
i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
- if (dev->dev_private) {
- drm_i915_private_t *dev_priv = dev->dev_private;
- i915_mem_release(dev, file_priv, dev_priv->agp_heap);
- }
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
struct drm_ioctl_desc i915_ioctls[] = {
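Because dev_priv is now allocated once in i915_driver_load() and freed only in i915_driver_unload(), i915_dma_cleanup() above resets each field after releasing it instead of freeing the whole private structure. A toy user-space sketch of that reset-after-free pattern (the struct and function names here are invented for illustration):

	#include <stdio.h>
	#include <stdlib.h>

	/* Mimics the pattern above: clear each pointer after releasing it so
	 * stale values never survive into the next init/cleanup cycle. */
	struct state {
		void *ring;
		void *status_page;
	};

	static void cleanup(struct state *s)
	{
		if (s->ring) {
			free(s->ring);
			s->ring = NULL;         /* as the ring map fields are zeroed above */
		}
		if (s->status_page) {
			free(s->status_page);
			s->status_page = NULL;  /* as status_page_dmah is set to NULL above */
		}
	}

	int main(void)
	{
		struct state s = { malloc(16), malloc(16) };

		cleanup(&s);
		cleanup(&s);    /* second call is a no-op, not a double free */
		printf("cleanup left no stale pointers\n");
		return 0;
	}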
diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c
index 85bcc276f804..52e51033d32c 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/char/drm/i915_drv.c
@@ -38,6 +38,465 @@ static struct pci_device_id pciidlist[] = {
i915_PCI_IDS
};
+enum pipe {
+ PIPE_A = 0,
+ PIPE_B,
+};
+
+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (pipe == PIPE_A)
+ return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
+ else
+ return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
+}
+
+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->save_palette_a;
+ else
+ array = dev_priv->save_palette_b;
+
+ for(i = 0; i < 256; i++)
+ array[i] = I915_READ(reg + (i << 2));
+}
+
+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->save_palette_a;
+ else
+ array = dev_priv->save_palette_b;
+
+ for(i = 0; i < 256; i++)
+ I915_WRITE(reg + (i << 2), array[i]);
+}
+
+static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
+{
+ outb(reg, index_port);
+ return inb(data_port);
+}
+
+static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
+{
+ inb(st01);
+ outb(palette_enable | reg, VGA_AR_INDEX);
+ return inb(VGA_AR_DATA_READ);
+}
+
+static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable)
+{
+ inb(st01);
+ outb(palette_enable | reg, VGA_AR_INDEX);
+ outb(val, VGA_AR_DATA_WRITE);
+}
+
+static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
+{
+ outb(reg, index_port);
+ outb(val, data_port);
+}
+
+static void i915_save_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+ u16 cr_index, cr_data, st01;
+
+ /* VGA color palette registers */
+ dev_priv->saveDACMASK = inb(VGA_DACMASK);
+ /* DACCRX automatically increments during read */
+ outb(0, VGA_DACRX);
+ /* Read 3 bytes of color data from each index */
+ for (i = 0; i < 256 * 3; i++)
+ dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
+
+ /* MSR bits */
+ dev_priv->saveMSR = inb(VGA_MSR_READ);
+ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ cr_index = VGA_CR_INDEX_CGA;
+ cr_data = VGA_CR_DATA_CGA;
+ st01 = VGA_ST01_CGA;
+ } else {
+ cr_index = VGA_CR_INDEX_MDA;
+ cr_data = VGA_CR_DATA_MDA;
+ st01 = VGA_ST01_MDA;
+ }
+
+ /* CRT controller regs */
+ i915_write_indexed(cr_index, cr_data, 0x11,
+ i915_read_indexed(cr_index, cr_data, 0x11) &
+ (~0x80));
+ for (i = 0; i < 0x24; i++)
+ dev_priv->saveCR[i] =
+ i915_read_indexed(cr_index, cr_data, i);
+ /* Make sure we don't turn off CR group 0 writes */
+ dev_priv->saveCR[0x11] &= ~0x80;
+
+ /* Attribute controller registers */
+ inb(st01);
+ dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
+ for (i = 0; i < 20; i++)
+ dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
+ inb(st01);
+ outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
+
+ /* Graphics controller registers */
+ for (i = 0; i < 9; i++)
+ dev_priv->saveGR[i] =
+ i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
+
+ dev_priv->saveGR[0x10] =
+ i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
+ dev_priv->saveGR[0x11] =
+ i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
+ dev_priv->saveGR[0x18] =
+ i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
+
+ /* Sequencer registers */
+ for (i = 0; i < 8; i++)
+ dev_priv->saveSR[i] =
+ i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
+}
+
+static void i915_restore_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+ u16 cr_index, cr_data, st01;
+
+ /* MSR bits */
+ outb(dev_priv->saveMSR, VGA_MSR_WRITE);
+ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ cr_index = VGA_CR_INDEX_CGA;
+ cr_data = VGA_CR_DATA_CGA;
+ st01 = VGA_ST01_CGA;
+ } else {
+ cr_index = VGA_CR_INDEX_MDA;
+ cr_data = VGA_CR_DATA_MDA;
+ st01 = VGA_ST01_MDA;
+ }
+
+ /* Sequencer registers, don't write SR07 */
+ for (i = 0; i < 7; i++)
+ i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
+ dev_priv->saveSR[i]);
+
+ /* CRT controller regs */
+ /* Enable CR group 0 writes */
+ i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+ for (i = 0; i < 0x24; i++)
+ i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
+
+ /* Graphics controller regs */
+ for (i = 0; i < 9; i++)
+ i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
+ dev_priv->saveGR[i]);
+
+ i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
+ dev_priv->saveGR[0x10]);
+ i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
+ dev_priv->saveGR[0x11]);
+ i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
+ dev_priv->saveGR[0x18]);
+
+ /* Attribute controller registers */
+ for (i = 0; i < 20; i++)
+ i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
+ inb(st01); /* switch back to index mode */
+ outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
+
+ /* VGA color palette registers */
+ outb(dev_priv->saveDACMASK, VGA_DACMASK);
+ /* DACWX automatically increments during write */
+ outb(0, VGA_DACWX);
+ /* Write 3 bytes of color data to each index */
+ for (i = 0; i < 256 * 3; i++)
+ outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
+
+}
+
+static int i915_suspend(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (!dev || !dev_priv) {
+ printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
+ printk(KERN_ERR "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ pci_save_state(dev->pdev);
+ pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+
+ /* Pipe & plane A info */
+ dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
+ dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
+ dev_priv->saveFPA0 = I915_READ(FPA0);
+ dev_priv->saveFPA1 = I915_READ(FPA1);
+ dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+ if (IS_I965G(dev))
+ dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
+ dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
+ dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
+ dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
+ dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
+ dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
+ dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
+ dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+
+ dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
+ dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
+ dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
+ dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
+ dev_priv->saveDSPABASE = I915_READ(DSPABASE);
+ if (IS_I965G(dev)) {
+ dev_priv->saveDSPASURF = I915_READ(DSPASURF);
+ dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+ }
+ i915_save_palette(dev, PIPE_A);
+
+ /* Pipe & plane B info */
+ dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
+ dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
+ dev_priv->saveFPB0 = I915_READ(FPB0);
+ dev_priv->saveFPB1 = I915_READ(FPB1);
+ dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+ if (IS_I965G(dev))
+ dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
+ dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
+ dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
+ dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
+ dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
+ dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
+ dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
+ dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
+
+ dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
+ dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
+ dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
+ dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
+ dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
+ if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
+ dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
+ dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+ }
+ i915_save_palette(dev, PIPE_B);
+
+ /* CRT state */
+ dev_priv->saveADPA = I915_READ(ADPA);
+
+ /* LVDS state */
+ dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
+ dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ if (IS_I965G(dev))
+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ dev_priv->saveLVDS = I915_READ(LVDS);
+ if (!IS_I830(dev) && !IS_845G(dev))
+ dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+ dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
+ dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
+ dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
+
+ /* FIXME: save TV & SDVO state */
+
+ /* FBC state */
+ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+
+ /* VGA state */
+ dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
+ dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
+ dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
+ dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+
+ /* Scratch space */
+ for (i = 0; i < 16; i++) {
+ dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
+ dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+ }
+ for (i = 0; i < 3; i++)
+ dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+
+ i915_save_vga(dev);
+
+ /* Shut down the device */
+ pci_disable_device(dev->pdev);
+ pci_set_power_state(dev->pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ pci_set_power_state(dev->pdev, PCI_D0);
+ pci_restore_state(dev->pdev);
+ if (pci_enable_device(dev->pdev))
+ return -1;
+
+ pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+
+ /* Pipe & plane A info */
+ /* Prime the clock */
+ if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
+ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
+ ~DPLL_VCO_ENABLE);
+ udelay(150);
+ }
+ I915_WRITE(FPA0, dev_priv->saveFPA0);
+ I915_WRITE(FPA1, dev_priv->saveFPA1);
+ /* Actually enable it */
+ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
+ udelay(150);
+ if (IS_I965G(dev))
+ I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+ udelay(150);
+
+ /* Restore mode */
+ I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
+ I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
+ I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
+ I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
+ I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
+ I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
+ I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+
+ /* Restore plane info */
+ I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
+ I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
+ I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
+ I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
+ I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+ if (IS_I965G(dev)) {
+ I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
+ I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+ }
+
+ if ((dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) &&
+ (dev_priv->saveDPLL_A & DPLL_VGA_MODE_DIS))
+ I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+
+ i915_restore_palette(dev, PIPE_A);
+ /* Enable the plane */
+ I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
+ I915_WRITE(DSPABASE, I915_READ(DSPABASE));
+
+ /* Pipe & plane B info */
+ if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
+ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
+ ~DPLL_VCO_ENABLE);
+ udelay(150);
+ }
+ I915_WRITE(FPB0, dev_priv->saveFPB0);
+ I915_WRITE(FPB1, dev_priv->saveFPB1);
+ /* Actually enable it */
+ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
+ udelay(150);
+ if (IS_I965G(dev))
+ I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+ udelay(150);
+
+ /* Restore mode */
+ I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
+ I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
+ I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
+ I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
+ I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
+ I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
+ I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+
+ /* Restore plane info */
+ I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
+ I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
+ I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
+ I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
+ I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+ if (IS_I965G(dev)) {
+ I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
+ I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+ }
+
+ if ((dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) &&
+ (dev_priv->saveDPLL_B & DPLL_VGA_MODE_DIS))
+ I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+ i915_restore_palette(dev, PIPE_B);
+ /* Enable the plane */
+ I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
+ I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
+
+ /* CRT state */
+ I915_WRITE(ADPA, dev_priv->saveADPA);
+
+ /* LVDS state */
+ if (IS_I965G(dev))
+ I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ I915_WRITE(LVDS, dev_priv->saveLVDS);
+ if (!IS_I830(dev) && !IS_845G(dev))
+ I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
+ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
+ I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
+ I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
+ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+
+ /* FIXME: restore TV & SDVO state */
+
+ /* FBC info */
+ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+
+ /* VGA state */
+ I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+ I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
+ I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
+ I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
+ udelay(150);
+
+ for (i = 0; i < 16; i++) {
+ I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
+ I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
+ }
+ for (i = 0; i < 3; i++)
+ I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+
+ i915_restore_vga(dev);
+
+ return 0;
+}
+
static struct drm_driver driver = {
/* don't use mtrr's here, the Xserver or user space app should
* deal with them for intel hardware.
@@ -47,8 +506,11 @@ static struct drm_driver driver = {
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
DRIVER_IRQ_VBL2,
.load = i915_driver_load,
+ .unload = i915_driver_unload,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
+ .suspend = i915_suspend,
+ .resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
.vblank_wait = i915_driver_vblank_wait,
.vblank_wait2 = i915_driver_vblank_wait2,
@@ -77,7 +539,7 @@ static struct drm_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
},
-
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index e064292e703a..37bbf6729b4e 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -114,6 +114,85 @@ typedef struct drm_i915_private {
spinlock_t swaps_lock;
drm_i915_vbl_swap_t vbl_swaps;
unsigned int swaps_pending;
+
+ /* Register state */
+ u8 saveLBB;
+ u32 saveDSPACNTR;
+ u32 saveDSPBCNTR;
+ u32 savePIPEACONF;
+ u32 savePIPEBCONF;
+ u32 savePIPEASRC;
+ u32 savePIPEBSRC;
+ u32 saveFPA0;
+ u32 saveFPA1;
+ u32 saveDPLL_A;
+ u32 saveDPLL_A_MD;
+ u32 saveHTOTAL_A;
+ u32 saveHBLANK_A;
+ u32 saveHSYNC_A;
+ u32 saveVTOTAL_A;
+ u32 saveVBLANK_A;
+ u32 saveVSYNC_A;
+ u32 saveBCLRPAT_A;
+ u32 saveDSPASTRIDE;
+ u32 saveDSPASIZE;
+ u32 saveDSPAPOS;
+ u32 saveDSPABASE;
+ u32 saveDSPASURF;
+ u32 saveDSPATILEOFF;
+ u32 savePFIT_PGM_RATIOS;
+ u32 saveBLC_PWM_CTL;
+ u32 saveBLC_PWM_CTL2;
+ u32 saveFPB0;
+ u32 saveFPB1;
+ u32 saveDPLL_B;
+ u32 saveDPLL_B_MD;
+ u32 saveHTOTAL_B;
+ u32 saveHBLANK_B;
+ u32 saveHSYNC_B;
+ u32 saveVTOTAL_B;
+ u32 saveVBLANK_B;
+ u32 saveVSYNC_B;
+ u32 saveBCLRPAT_B;
+ u32 saveDSPBSTRIDE;
+ u32 saveDSPBSIZE;
+ u32 saveDSPBPOS;
+ u32 saveDSPBBASE;
+ u32 saveDSPBSURF;
+ u32 saveDSPBTILEOFF;
+ u32 saveVCLK_DIVISOR_VGA0;
+ u32 saveVCLK_DIVISOR_VGA1;
+ u32 saveVCLK_POST_DIV;
+ u32 saveVGACNTRL;
+ u32 saveADPA;
+ u32 saveLVDS;
+ u32 saveLVDSPP_ON;
+ u32 saveLVDSPP_OFF;
+ u32 saveDVOA;
+ u32 saveDVOB;
+ u32 saveDVOC;
+ u32 savePP_ON;
+ u32 savePP_OFF;
+ u32 savePP_CONTROL;
+ u32 savePP_CYCLE;
+ u32 savePFIT_CONTROL;
+ u32 save_palette_a[256];
+ u32 save_palette_b[256];
+ u32 saveFBC_CFB_BASE;
+ u32 saveFBC_LL_BASE;
+ u32 saveFBC_CONTROL;
+ u32 saveFBC_CONTROL2;
+ u32 saveSWF0[16];
+ u32 saveSWF1[16];
+ u32 saveSWF2[3];
+ u8 saveMSR;
+ u8 saveSR[8];
+ u8 saveGR[25];
+ u8 saveAR_INDEX;
+ u8 saveAR[20];
+ u8 saveDACMASK;
+ u8 saveDACDATA[256*3]; /* 256 3-byte colors */
+ u8 saveCR[36];
} drm_i915_private_t;
extern struct drm_ioctl_desc i915_ioctls[];
@@ -122,6 +201,7 @@ extern int i915_max_ioctl;
/* i915_dma.c */
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
+extern int i915_driver_unload(struct drm_device *);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
@@ -163,7 +243,7 @@ extern void i915_mem_release(struct drm_device * dev,
#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
-#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
+#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
#define I915_VERBOSE 0
@@ -173,9 +253,8 @@ extern void i915_mem_release(struct drm_device * dev,
#define BEGIN_LP_RING(n) do { \
if (I915_VERBOSE) \
- DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \
- (n), __FUNCTION__); \
- if (dev_priv->ring.space < (n)*4) \
+ DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \
+ if (dev_priv->ring.space < (n)*4) \
i915_wait_ring(dev, (n)*4, __FUNCTION__); \
outcount = 0; \
outring = dev_priv->ring.tail; \
@@ -200,7 +279,51 @@ extern void i915_mem_release(struct drm_device * dev,
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
+/* Extended config space */
+#define LBB 0xf4
+
+/* VGA stuff */
+
+#define VGA_ST01_MDA 0x3ba
+#define VGA_ST01_CGA 0x3da
+
+#define VGA_MSR_WRITE 0x3c2
+#define VGA_MSR_READ 0x3cc
+#define VGA_MSR_MEM_EN (1<<1)
+#define VGA_MSR_CGA_MODE (1<<0)
+
+#define VGA_SR_INDEX 0x3c4
+#define VGA_SR_DATA 0x3c5
+
+#define VGA_AR_INDEX 0x3c0
+#define VGA_AR_VID_EN (1<<5)
+#define VGA_AR_DATA_WRITE 0x3c0
+#define VGA_AR_DATA_READ 0x3c1
+
+#define VGA_GR_INDEX 0x3ce
+#define VGA_GR_DATA 0x3cf
+/* GR05 */
+#define VGA_GR_MEM_READ_MODE_SHIFT 3
+#define VGA_GR_MEM_READ_MODE_PLANE 1
+/* GR06 */
+#define VGA_GR_MEM_MODE_MASK 0xc
+#define VGA_GR_MEM_MODE_SHIFT 2
+#define VGA_GR_MEM_A0000_AFFFF 0
+#define VGA_GR_MEM_A0000_BFFFF 1
+#define VGA_GR_MEM_B0000_B7FFF 2
+#define VGA_GR_MEM_B0000_BFFFF 3
+
+#define VGA_DACMASK 0x3c6
+#define VGA_DACRX 0x3c7
+#define VGA_DACWX 0x3c8
+#define VGA_DACDATA 0x3c9
+
+#define VGA_CR_INDEX_MDA 0x3b4
+#define VGA_CR_DATA_MDA 0x3b5
+#define VGA_CR_INDEX_CGA 0x3d4
+#define VGA_CR_DATA_CGA 0x3d5
+
+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
@@ -215,9 +338,47 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define BB1_UNPROTECTED (0<<0)
#define BB2_END_ADDR_MASK (~0x7)
+/* Framebuffer compression */
+#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
+#define FBC_LL_BASE 0x03204 /* 4k page aligned */
+#define FBC_CONTROL 0x03208
+#define FBC_CTL_EN (1<<31)
+#define FBC_CTL_PERIODIC (1<<30)
+#define FBC_CTL_INTERVAL_SHIFT (16)
+#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define FBC_CTL_STRIDE_SHIFT (5)
+#define FBC_CTL_FENCENO (1<<0)
+#define FBC_COMMAND 0x0320c
+#define FBC_CMD_COMPRESS (1<<0)
+#define FBC_STATUS 0x03210
+#define FBC_STAT_COMPRESSING (1<<31)
+#define FBC_STAT_COMPRESSED (1<<30)
+#define FBC_STAT_MODIFIED (1<<29)
+#define FBC_STAT_CURRENT_LINE (1<<0)
+#define FBC_CONTROL2 0x03214
+#define FBC_CTL_FENCE_DBL (0<<4)
+#define FBC_CTL_IDLE_IMM (0<<2)
+#define FBC_CTL_IDLE_FULL (1<<2)
+#define FBC_CTL_IDLE_LINE (2<<2)
+#define FBC_CTL_IDLE_DEBUG (3<<2)
+#define FBC_CTL_CPU_FENCE (1<<1)
+#define FBC_CTL_PLANEA (0<<0)
+#define FBC_CTL_PLANEB (1<<0)
+#define FBC_FENCE_OFF 0x0321b
+
+#define FBC_LL_SIZE (1536)
+#define FBC_LL_PAD (32)
+
+/* Interrupt bits:
+ */
+#define USER_INT_FLAG (1<<1)
+#define VSYNC_PIPEB_FLAG (1<<5)
+#define VSYNC_PIPEA_FLAG (1<<7)
+#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
+
#define I915REG_HWSTAM 0x02098
#define I915REG_INT_IDENTITY_R 0x020a4
-#define I915REG_INT_MASK_R 0x020a8
+#define I915REG_INT_MASK_R 0x020a8
#define I915REG_INT_ENABLE_R 0x020a0
#define I915REG_PIPEASTAT 0x70024
@@ -229,7 +390,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
#define SR01 1
-#define SR01_SCREEN_OFF (1<<5)
+#define SR01_SCREEN_OFF (1<<5)
#define PPCR 0x61204
#define PPCR_ON (1<<0)
@@ -249,31 +410,129 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define ADPA_DPMS_OFF (3<<10)
#define NOPID 0x2094
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define RING_TAIL 0x00
+#define LP_RING 0x2030
+#define HP_RING 0x2040
+/* The binner has its own ring buffer:
+ */
+#define HWB_RING 0x2400
+
+#define RING_TAIL 0x00
#define TAIL_ADDR 0x001FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0x0xFFFFF000
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x001FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
+#define RING_HEAD 0x04
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_START 0x08
+#define START_ADDR 0xFFFFF000
+#define RING_LEN 0x0C
+#define RING_NR_PAGES 0x001FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
+
+/* Instruction parser error reg:
+ */
+#define IPEIR 0x2088
+
+/* Scratch pad debug 0 reg:
+ */
+#define SCPD0 0x209c
+
+/* Error status reg:
+ */
+#define ESR 0x20b8
+
+/* Secondary DMA fetch address debug reg:
+ */
+#define DMA_FADD_S 0x20d4
+
+/* Cache mode 0 reg.
+ * - Manipulating render cache behaviour is central
+ * to the concept of zone rendering, tuning this reg can help avoid
+ * unnecessary render cache reads and even writes (for z/stencil)
+ * at beginning and end of scene.
+ *
+ * - To change a bit, write to this reg with a mask bit set and the
+ * bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
+ */
+#define Cache_Mode_0 0x2120
+#define CM0_MASK_SHIFT 16
+#define CM0_IZ_OPT_DISABLE (1<<6)
+#define CM0_ZR_OPT_DISABLE (1<<5)
+#define CM0_DEPTH_EVICT_DISABLE (1<<4)
+#define CM0_COLOR_EVICT_DISABLE (1<<3)
+#define CM0_DEPTH_WRITE_DISABLE (1<<1)
+#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
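+/* e.g. I915_WRITE(Cache_Mode_0,
+ *	(CM0_IZ_OPT_DISABLE << CM0_MASK_SHIFT) | CM0_IZ_OPT_DISABLE) sets the
+ * bit, while writing only (CM0_IZ_OPT_DISABLE << CM0_MASK_SHIFT) clears it.
+ */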
+
+
+/* Graphics flush control. A CPU write flushes the GWB of all writes.
+ * The data is discarded.
+ */
+#define GFX_FLSH_CNTL 0x2170
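+/* e.g. any value works since the data is discarded:
+ *	I915_WRITE(GFX_FLSH_CNTL, 0);
+ */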
+
+/* Binner control. Defines the location of the bin pointer list:
+ */
+#define BINCTL 0x2420
+#define BC_MASK (1 << 9)
+
+/* Binned scene info.
+ */
+#define BINSCENE 0x2428
+#define BS_OP_LOAD (1 << 8)
+#define BS_MASK (1 << 22)
+
+/* Bin command parser debug reg:
+ */
+#define BCPD 0x2480
+
+/* Bin memory control debug reg:
+ */
+#define BMCD 0x2484
+
+/* Bin data cache debug reg:
+ */
+#define BDCD 0x2488
+
+/* Binner pointer cache debug reg:
+ */
+#define BPCD 0x248c
+
+/* Binner scratch pad debug reg:
+ */
+#define BINSKPD 0x24f0
+
+/* HWB scratch pad debug reg:
+ */
+#define HWBSKPD 0x24f4
+
+/* Binner memory pool reg:
+ */
+#define BMP_BUFFER 0x2430
+#define BMP_PAGE_SIZE_4K (0 << 10)
+#define BMP_BUFFER_SIZE_SHIFT 1
+#define BMP_ENABLE (1 << 0)
+
+/* Get/put memory from the binner memory pool:
+ */
+#define BMP_GET 0x2438
+#define BMP_PUT 0x2440
+#define BMP_OFFSET_SHIFT 5
+
+/* 3D state packets:
+ */
+#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
#define SC_ENABLE_MASK (0x1<<0)
#define SC_ENABLE (0x1<<0)
+#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+
#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
#define SCI_YMIN_MASK (0xffff<<16)
#define SCI_XMIN_MASK (0xffff<<0)
@@ -290,17 +549,19 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
-#define MI_BATCH_BUFFER ((0x30<<23)|1)
-#define MI_BATCH_BUFFER_START (0x31<<23)
-#define MI_BATCH_BUFFER_END (0xA<<23)
+#define MI_BATCH_BUFFER ((0x30<<23)|1)
+#define MI_BATCH_BUFFER_START (0x31<<23)
+#define MI_BATCH_BUFFER_END (0xA<<23)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_WAIT_FOR_EVENT ((0x3<<23))
+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
@@ -308,9 +569,538 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
#define ASYNC_FLIP (1<<22)
+#define DISPLAY_PLANE_A (0<<20)
+#define DISPLAY_PLANE_B (1<<20)
+
+/* Display regs */
+#define DSPACNTR 0x70180
+#define DSPBCNTR 0x71180
+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
+
+/* Define the region of interest for the binner:
+ */
+#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
-#define READ_BREADCRUMB(dev_priv) (((u32 *)(dev_priv->hw_status_page))[5])
+#define CMD_MI_FLUSH (0x04 << 23)
+#define MI_NO_WRITE_FLUSH (1 << 2)
+#define MI_READ_FLUSH (1 << 0)
+#define MI_EXE_FLUSH (1 << 1)
+#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
+#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
+
+#define BREADCRUMB_BITS 31
+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+
+#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
+
+#define BLC_PWM_CTL 0x61254
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+
+#define BLC_PWM_CTL2 0x61250
+/**
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE (1 << 16)
+/**
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
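+/* e.g. full brightness corresponds to a duty cycle field equal to twice the
+ * modulation frequency field, since that field stores half the cycle count.
+ */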
+
+#define I915_GCFGC 0xf0
+#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
+#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
+#define I915_DISPLAY_CLOCK_MASK (7 << 4)
+
+#define I855_HPLLCC 0xc0
+#define I855_CLOCK_CONTROL_MASK (3 << 0)
+#define I855_CLOCK_133_200 (0 << 0)
+#define I855_CLOCK_100_200 (1 << 0)
+#define I855_CLOCK_100_133 (2 << 0)
+#define I855_CLOCK_166_250 (3 << 0)
+
+/* p317, 319
+ */
+#define VCLK2_VCO_M 0x6008 /* treat as 16 bit? (includes msbs) */
+#define VCLK2_VCO_N 0x600a
+#define VCLK2_VCO_DIV_SEL 0x6012
+
+#define VCLK_DIVISOR_VGA0 0x6000
+#define VCLK_DIVISOR_VGA1 0x6004
+#define VCLK_POST_DIV 0x6010
+/** Selects a post divisor of 4 instead of 2. */
+# define VGA1_PD_P2_DIV_4 (1 << 15)
+/** Overrides the p2 post divisor field */
+# define VGA1_PD_P1_DIV_2 (1 << 13)
+# define VGA1_PD_P1_SHIFT 8
+/** P1 value is 2 greater than this field */
+# define VGA1_PD_P1_MASK (0x1f << 8)
+/** Selects a post divisor of 4 instead of 2. */
+# define VGA0_PD_P2_DIV_4 (1 << 7)
+/** Overrides the p2 post divisor field */
+# define VGA0_PD_P1_DIV_2 (1 << 5)
+# define VGA0_PD_P1_SHIFT 0
+/** P1 value is 2 greater than this field */
+# define VGA0_PD_P1_MASK (0x1f << 0)
+
+/* I830 CRTC registers */
+#define HTOTAL_A 0x60000
+#define HBLANK_A 0x60004
+#define HSYNC_A 0x60008
+#define VTOTAL_A 0x6000c
+#define VBLANK_A 0x60010
+#define VSYNC_A 0x60014
+#define PIPEASRC 0x6001c
+#define BCLRPAT_A 0x60020
+#define VSYNCSHIFT_A 0x60028
+
+#define HTOTAL_B 0x61000
+#define HBLANK_B 0x61004
+#define HSYNC_B 0x61008
+#define VTOTAL_B 0x6100c
+#define VBLANK_B 0x61010
+#define VSYNC_B 0x61014
+#define PIPEBSRC 0x6101c
+#define BCLRPAT_B 0x61020
+#define VSYNCSHIFT_B 0x61028
+
+#define PP_STATUS 0x61200
+# define PP_ON (1 << 31)
+/**
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+# define PP_READY (1 << 30)
+# define PP_SEQUENCE_NONE (0 << 28)
+# define PP_SEQUENCE_ON (1 << 28)
+# define PP_SEQUENCE_OFF (2 << 28)
+# define PP_SEQUENCE_MASK 0x30000000
+#define PP_CONTROL 0x61204
+# define POWER_TARGET_ON (1 << 0)
+
+#define LVDSPP_ON 0x61208
+#define LVDSPP_OFF 0x6120c
+#define PP_CYCLE 0x61210
+
+#define PFIT_CONTROL 0x61230
+# define PFIT_ENABLE (1 << 31)
+# define PFIT_PIPE_MASK (3 << 29)
+# define PFIT_PIPE_SHIFT 29
+# define VERT_INTERP_DISABLE (0 << 10)
+# define VERT_INTERP_BILINEAR (1 << 10)
+# define VERT_INTERP_MASK (3 << 10)
+# define VERT_AUTO_SCALE (1 << 9)
+# define HORIZ_INTERP_DISABLE (0 << 6)
+# define HORIZ_INTERP_BILINEAR (1 << 6)
+# define HORIZ_INTERP_MASK (3 << 6)
+# define HORIZ_AUTO_SCALE (1 << 5)
+# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
+
+#define PFIT_PGM_RATIOS 0x61234
+# define PFIT_VERT_SCALE_MASK 0xfff00000
+# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+
+#define PFIT_AUTO_RATIOS 0x61238
+
+
+#define DPLL_A 0x06014
+#define DPLL_B 0x06018
+# define DPLL_VCO_ENABLE (1 << 31)
+# define DPLL_DVO_HIGH_SPEED (1 << 30)
+# define DPLL_SYNCLOCK_ENABLE (1 << 29)
+# define DPLL_VGA_MODE_DIS (1 << 28)
+# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
+# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
+# define DPLL_MODE_MASK (3 << 26)
+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
+# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
+# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
+# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+/**
+ * The i830 generation, in DAC/serial mode, defines p1 as two plus this
+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+/**
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
+# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required in DVO non-gang */
+# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
+# define PLL_REF_INPUT_DREFCLK (0 << 13)
+# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
+# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
+# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+# define PLL_REF_INPUT_MASK (3 << 13)
+# define PLL_LOAD_PULSE_PHASE_SHIFT 9
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6
+ */
+# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
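+/* e.g. the default phase of 6 is programmed as
+ * (6 << PLL_LOAD_PULSE_PHASE_SHIFT) */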
+# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
+
+/**
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+# define SDVO_MULTIPLIER_MASK 0x000000ff
+# define SDVO_MULTIPLIER_SHIFT_HIRES 4
+# define SDVO_MULTIPLIER_SHIFT_VGA 0
+
+/** @defgroup DPLL_MD
+ * @{
+ */
+/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_A_MD 0x0601c
+/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_B_MD 0x06020
+/**
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
+ */
+# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
+# define DPLL_MD_UDI_DIVIDER_SHIFT 24
+/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
+# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
+/**
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are fill.
+ *
+ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
+ * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
+ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
+# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
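+/* e.g. the 2x multiplier from the 65Mhz example above is programmed as
+ * (2 - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT.
+ */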
+/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This is best set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
+# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+/** @} */
+
+#define DPLL_TEST 0x606c
+# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
+# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
+# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
+# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
+# define DPLLB_TEST_N_BYPASS (1 << 19)
+# define DPLLB_TEST_M_BYPASS (1 << 18)
+# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
+# define DPLLA_TEST_N_BYPASS (1 << 3)
+# define DPLLA_TEST_M_BYPASS (1 << 2)
+# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+
+#define ADPA 0x61100
+#define ADPA_DAC_ENABLE (1<<31)
+#define ADPA_DAC_DISABLE 0
+#define ADPA_PIPE_SELECT_MASK (1<<30)
+#define ADPA_PIPE_A_SELECT 0
+#define ADPA_PIPE_B_SELECT (1<<30)
+#define ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define ADPA_SETS_HVPOLARITY 0
+#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_VSYNC_CNTL_ENABLE 0
+#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_HSYNC_CNTL_ENABLE 0
+#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+#define ADPA_VSYNC_ACTIVE_LOW 0
+#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+#define ADPA_HSYNC_ACTIVE_LOW 0
+
+#define FPA0 0x06040
+#define FPA1 0x06044
+#define FPB0 0x06048
+#define FPB1 0x0604c
+# define FP_N_DIV_MASK 0x003f0000
+# define FP_N_DIV_SHIFT 16
+# define FP_M1_DIV_MASK 0x00003f00
+# define FP_M1_DIV_SHIFT 8
+# define FP_M2_DIV_MASK 0x0000003f
+# define FP_M2_DIV_SHIFT 0
+
+
+#define PORT_HOTPLUG_EN 0x61110
+# define SDVOB_HOTPLUG_INT_EN (1 << 26)
+# define SDVOC_HOTPLUG_INT_EN (1 << 25)
+# define TV_HOTPLUG_INT_EN (1 << 18)
+# define CRT_HOTPLUG_INT_EN (1 << 9)
+# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+
+#define PORT_HOTPLUG_STAT 0x61114
+# define CRT_HOTPLUG_INT_STATUS (1 << 11)
+# define TV_HOTPLUG_INT_STATUS (1 << 10)
+# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
+# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
+# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
+# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
+# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
+
+#define SDVOB 0x61140
+#define SDVOC 0x61160
+#define SDVO_ENABLE (1 << 31)
+#define SDVO_PIPE_B_SELECT (1 << 30)
+#define SDVO_STALL_SELECT (1 << 29)
+#define SDVO_INTERRUPT_ENABLE (1 << 26)
+/**
+ * 915G/GM SDVO pixel multiplier.
+ *
+ * Programmed value is multiplier - 1, up to 5x.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT 23
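+/* e.g. a 2x multiplier is programmed as (2 - 1) << SDVO_PORT_MULTIPLY_SHIFT */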
+#define SDVO_PHASE_SELECT_MASK (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
+#define SDVOC_GANG_MODE (1 << 16)
+#define SDVO_BORDER_ENABLE (1 << 7)
+#define SDVOB_PCIE_CONCURRENCY (1 << 3)
+#define SDVO_DETECTED (1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
+#define SDVOC_PRESERVE_MASK (1 << 17)
+
+/** @defgroup LVDS
+ * @{
+ */
+/**
+ * This register controls the LVDS output enable, pipe selection, and data
+ * format selection.
+ *
+ * All of the clock/data pairs are force powered down by power sequencing.
+ */
+#define LVDS 0x61180
+/**
+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+# define LVDS_PORT_EN (1 << 31)
+/** Selects pipe B for LVDS data. Must be set on pre-965. */
+# define LVDS_PIPEB_SELECT (1 << 30)
+
+/**
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
+# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
+# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
+/**
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+# define LVDS_A3_POWER_MASK (3 << 6)
+# define LVDS_A3_POWER_DOWN (0 << 6)
+# define LVDS_A3_POWER_UP (3 << 6)
+/**
+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+# define LVDS_CLKB_POWER_MASK (3 << 4)
+# define LVDS_CLKB_POWER_DOWN (0 << 4)
+# define LVDS_CLKB_POWER_UP (3 << 4)
+
+/**
+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode. The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+# define LVDS_B0B3_POWER_MASK (3 << 2)
+# define LVDS_B0B3_POWER_DOWN (0 << 2)
+# define LVDS_B0B3_POWER_UP (3 << 2)
+
+#define PIPEACONF 0x70008
+#define PIPEACONF_ENABLE (1<<31)
+#define PIPEACONF_DISABLE 0
+#define PIPEACONF_DOUBLE_WIDE (1<<30)
+#define I965_PIPECONF_ACTIVE (1<<30)
+#define PIPEACONF_SINGLE_WIDE 0
+#define PIPEACONF_PIPE_UNLOCKED 0
+#define PIPEACONF_PIPE_LOCKED (1<<25)
+#define PIPEACONF_PALETTE 0
+#define PIPEACONF_GAMMA (1<<24)
+#define PIPECONF_FORCE_BORDER (1<<25)
+#define PIPECONF_PROGRESSIVE (0 << 21)
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
+
+#define PIPEBCONF 0x71008
+#define PIPEBCONF_ENABLE (1<<31)
+#define PIPEBCONF_DISABLE 0
+#define PIPEBCONF_DOUBLE_WIDE (1<<30)
+#define PIPEBCONF_DISABLE 0
+#define PIPEBCONF_GAMMA (1<<24)
+#define PIPEBCONF_PALETTE 0
+
+#define PIPEBGCMAXRED 0x71010
+#define PIPEBGCMAXGREEN 0x71014
+#define PIPEBGCMAXBLUE 0x71018
+#define PIPEBSTAT 0x71024
+#define PIPEBFRAMEHIGH 0x71040
+#define PIPEBFRAMEPIXEL 0x71044
+
+#define DSPACNTR 0x70180
+#define DSPBCNTR 0x71180
+#define DISPLAY_PLANE_ENABLE (1<<31)
+#define DISPLAY_PLANE_DISABLE 0
+#define DISPPLANE_GAMMA_ENABLE (1<<30)
+#define DISPPLANE_GAMMA_DISABLE 0
+#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
+#define DISPPLANE_8BPP (0x2<<26)
+#define DISPPLANE_15_16BPP (0x4<<26)
+#define DISPPLANE_16BPP (0x5<<26)
+#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
+#define DISPPLANE_32BPP (0x7<<26)
+#define DISPPLANE_STEREO_ENABLE (1<<25)
+#define DISPPLANE_STEREO_DISABLE 0
+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
+#define DISPPLANE_SEL_PIPE_A 0
+#define DISPPLANE_SEL_PIPE_B (1<<24)
+#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
+#define DISPPLANE_SRC_KEY_DISABLE 0
+#define DISPPLANE_LINE_DOUBLE (1<<20)
+#define DISPPLANE_NO_LINE_DOUBLE 0
+#define DISPPLANE_STEREO_POLARITY_FIRST 0
+#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
+/* plane B only */
+#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
+#define DISPPLANE_ALPHA_TRANS_DISABLE 0
+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
+
+#define DSPABASE 0x70184
+#define DSPASTRIDE 0x70188
+
+#define DSPBBASE 0x71184
+#define DSPBADDR DSPBBASE
+#define DSPBSTRIDE 0x71188
+
+#define DSPAKEYVAL 0x70194
+#define DSPAKEYMASK 0x70198
+
+#define DSPAPOS 0x7018C /* reserved */
+#define DSPASIZE 0x70190
+#define DSPBPOS 0x7118C
+#define DSPBSIZE 0x71190
+
+#define DSPASURF 0x7019C
+#define DSPATILEOFF 0x701A4
+
+#define DSPBSURF 0x7119C
+#define DSPBTILEOFF 0x711A4
+
+#define VGACNTRL 0x71400
+# define VGA_DISP_DISABLE (1 << 31)
+# define VGA_2X_MODE (1 << 30)
+# define VGA_PIPE_B_SELECT (1 << 29)
+
+/*
+ * Some BIOS scratch area registers. The 845 (and 830?) store the amount
+ * of video memory available to the BIOS in SWF1.
+ */
+
+#define SWF0 0x71410
+
+/*
+ * 855 scratch registers.
+ */
+#define SWF10 0x70410
+
+#define SWF30 0x72414
+
+/*
+ * Overlay registers. These are overlay registers accessed via MMIO.
+ * Those loaded via the overlay register page are defined in i830_video.c.
+ */
+#define OVADD 0x30000
+
+#define DOVSTA 0x30008
+#define OC_BUF (0x3<<20)
+
+#define OGAMC5 0x30010
+#define OGAMC4 0x30014
+#define OGAMC3 0x30018
+#define OGAMC2 0x3001c
+#define OGAMC1 0x30020
+#define OGAMC0 0x30024
+/*
+ * Palette registers
+ */
+#define PALETTE_A 0x0a000
+#define PALETTE_B 0x0a800
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I855(dev) ((dev)->pci_device == 0x3582)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+
+#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2)
+
+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
+ (dev)->pci_device == 0x2982 || \
+ (dev)->pci_device == 0x2992 || \
+ (dev)->pci_device == 0x29A2 || \
+ (dev)->pci_device == 0x2A02 || \
+ (dev)->pci_device == 0x2A12 || \
+ (dev)->pci_device == 0x2A42)
+
+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
+
+#define IS_IGD_GM(dev) ((dev)->pci_device == 0x2A42)
+
+#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
+ (dev)->pci_device == 0x29B2 || \
+ (dev)->pci_device == 0x29D2)
+
+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
+ IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
+
+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
+ IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev))
+
+#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#endif
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index a443f4a202e3..92653b38e64c 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -276,7 +276,7 @@ static int i915_emit_irq(struct drm_device * dev)
i915_kernel_lost_context(dev);
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
@@ -291,7 +291,7 @@ static int i915_emit_irq(struct drm_device * dev)
OUT_RING(0);
OUT_RING(GFX_OP_USER_INTERRUPT);
ADVANCE_LP_RING();
-
+
return dev_priv->counter;
}
@@ -300,7 +300,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = 0;
- DRM_DEBUG("%s irq_nr=%d breadcrumb=%d\n", __FUNCTION__, irq_nr,
+ DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
if (READ_BREADCRUMB(dev_priv) >= irq_nr)
@@ -312,8 +312,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
READ_BREADCRUMB(dev_priv) >= irq_nr);
if (ret == -EBUSY) {
- DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
- __FUNCTION__,
+ DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
}
@@ -329,14 +328,14 @@ static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequ
int ret = 0;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(counter))
- *sequence) <= (1<<23)));
-
+
*sequence = cur_vblank;
return ret;
@@ -365,7 +364,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -388,7 +387,7 @@ int i915_irq_wait(struct drm_device *dev, void *data,
drm_i915_irq_wait_t *irqwait = data;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -418,13 +417,12 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
drm_i915_vblank_pipe_t *pipe = data;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
- DRM_ERROR("%s called with invalid pipe 0x%x\n",
- __FUNCTION__, pipe->pipe);
+ DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
return -EINVAL;
}
@@ -443,7 +441,7 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
u16 flag;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -555,7 +553,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
- list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head);
+ list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
dev_priv->swaps_pending++;
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
diff --git a/drivers/char/drm/i915_mem.c b/drivers/char/drm/i915_mem.c
index 56fb9b30a5d7..6126a60dc9cb 100644
--- a/drivers/char/drm/i915_mem.c
+++ b/drivers/char/drm/i915_mem.c
@@ -276,7 +276,7 @@ int i915_mem_alloc(struct drm_device *dev, void *data,
struct mem_block *block, **heap;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -314,7 +314,7 @@ int i915_mem_free(struct drm_device *dev, void *data,
struct mem_block *block, **heap;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -342,7 +342,7 @@ int i915_mem_init_heap(struct drm_device *dev, void *data,
struct mem_block **heap;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -366,7 +366,7 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,
struct mem_block **heap;
if ( !dev_priv ) {
- DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
+ DRM_ERROR( "called with no initialization\n" );
return -EINVAL;
}
@@ -375,7 +375,7 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,
DRM_ERROR("get_heap failed");
return -EFAULT;
}
-
+
if (!*heap) {
DRM_ERROR("heap not initialized?");
return -EFAULT;
@@ -384,4 +384,3 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,
i915_mem_takedown( heap );
return 0;
}
-
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
index c567c34cda78..c1d12dbfa8d8 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/char/drm/mga_dma.c
@@ -493,7 +493,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
dma_bs->agp_size);
return err;
}
-
+
dev_priv->agp_size = agp_size;
dev_priv->agp_handle = agp_req.handle;
@@ -550,7 +550,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
{
struct drm_map_list *_entry;
unsigned long agp_token = 0;
-
+
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == dev->agp_buffer_map)
agp_token = _entry->user_token;
@@ -964,7 +964,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
free_req.handle = dev_priv->agp_handle;
drm_agp_free(dev, &free_req);
-
+
dev_priv->agp_textures = NULL;
dev_priv->agp_size = 0;
dev_priv->agp_handle = 0;
@@ -998,7 +998,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
}
}
- return 0;
+ return err;
}
int mga_dma_init(struct drm_device *dev, void *data,
@@ -1050,7 +1050,7 @@ int mga_dma_flush(struct drm_device *dev, void *data,
#if MGA_DMA_DEBUG
int ret = mga_do_wait_for_idle(dev_priv);
if (ret < 0)
- DRM_INFO("%s: -EBUSY\n", __FUNCTION__);
+ DRM_INFO("-EBUSY\n");
return ret;
#else
return mga_do_wait_for_idle(dev_priv);
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index cd94c04e31c0..f6ebd24bd587 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -216,8 +216,8 @@ static inline u32 _MGA_READ(u32 * addr)
#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val))
#endif
-#define DWGREG0 0x1c00
-#define DWGREG0_END 0x1dff
+#define DWGREG0 0x1c00
+#define DWGREG0_END 0x1dff
#define DWGREG1 0x2c00
#define DWGREG1_END 0x2dff
@@ -249,7 +249,7 @@ do { \
} else if ( dev_priv->prim.space < \
dev_priv->prim.high_mark ) { \
if ( MGA_DMA_DEBUG ) \
- DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
+ DRM_INFO( "wrap...\n"); \
return -EBUSY; \
} \
} \
@@ -260,7 +260,7 @@ do { \
if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \
if ( MGA_DMA_DEBUG ) \
- DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
+ DRM_INFO( "wrap...\n"); \
return -EBUSY; \
} \
mga_do_dma_wrap_end( dev_priv ); \
@@ -280,8 +280,7 @@ do { \
#define BEGIN_DMA( n ) \
do { \
if ( MGA_VERBOSE ) { \
- DRM_INFO( "BEGIN_DMA( %d ) in %s\n", \
- (n), __FUNCTION__ ); \
+ DRM_INFO( "BEGIN_DMA( %d )\n", (n) ); \
DRM_INFO( " space=0x%x req=0x%Zx\n", \
dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \
} \
@@ -292,7 +291,7 @@ do { \
#define BEGIN_DMA_WRAP() \
do { \
if ( MGA_VERBOSE ) { \
- DRM_INFO( "BEGIN_DMA() in %s\n", __FUNCTION__ ); \
+ DRM_INFO( "BEGIN_DMA()\n" ); \
DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \
} \
prim = dev_priv->prim.start; \
@@ -311,7 +310,7 @@ do { \
#define FLUSH_DMA() \
do { \
if ( 0 ) { \
- DRM_INFO( "%s:\n", __FUNCTION__ ); \
+ DRM_INFO( "\n" ); \
DRM_INFO( " tail=0x%06x head=0x%06lx\n", \
dev_priv->prim.tail, \
MGA_READ( MGA_PRIMADDRESS ) - \
@@ -394,22 +393,22 @@ do { \
#define MGA_VINTCLR (1 << 4)
#define MGA_VINTEN (1 << 5)
-#define MGA_ALPHACTRL 0x2c7c
-#define MGA_AR0 0x1c60
-#define MGA_AR1 0x1c64
-#define MGA_AR2 0x1c68
-#define MGA_AR3 0x1c6c
-#define MGA_AR4 0x1c70
-#define MGA_AR5 0x1c74
-#define MGA_AR6 0x1c78
+#define MGA_ALPHACTRL 0x2c7c
+#define MGA_AR0 0x1c60
+#define MGA_AR1 0x1c64
+#define MGA_AR2 0x1c68
+#define MGA_AR3 0x1c6c
+#define MGA_AR4 0x1c70
+#define MGA_AR5 0x1c74
+#define MGA_AR6 0x1c78
#define MGA_CXBNDRY 0x1c80
-#define MGA_CXLEFT 0x1ca0
+#define MGA_CXLEFT 0x1ca0
#define MGA_CXRIGHT 0x1ca4
-#define MGA_DMAPAD 0x1c54
-#define MGA_DSTORG 0x2cb8
-#define MGA_DWGCTL 0x1c00
+#define MGA_DMAPAD 0x1c54
+#define MGA_DSTORG 0x2cb8
+#define MGA_DWGCTL 0x1c00
# define MGA_OPCOD_MASK (15 << 0)
# define MGA_OPCOD_TRAP (4 << 0)
# define MGA_OPCOD_TEXTURE_TRAP (6 << 0)
@@ -455,27 +454,27 @@ do { \
# define MGA_CLIPDIS (1 << 31)
#define MGA_DWGSYNC 0x2c4c
-#define MGA_FCOL 0x1c24
-#define MGA_FIFOSTATUS 0x1e10
-#define MGA_FOGCOL 0x1cf4
+#define MGA_FCOL 0x1c24
+#define MGA_FIFOSTATUS 0x1e10
+#define MGA_FOGCOL 0x1cf4
#define MGA_FXBNDRY 0x1c84
-#define MGA_FXLEFT 0x1ca8
+#define MGA_FXLEFT 0x1ca8
#define MGA_FXRIGHT 0x1cac
-#define MGA_ICLEAR 0x1e18
+#define MGA_ICLEAR 0x1e18
# define MGA_SOFTRAPICLR (1 << 0)
# define MGA_VLINEICLR (1 << 5)
-#define MGA_IEN 0x1e1c
+#define MGA_IEN 0x1e1c
# define MGA_SOFTRAPIEN (1 << 0)
# define MGA_VLINEIEN (1 << 5)
-#define MGA_LEN 0x1c5c
+#define MGA_LEN 0x1c5c
#define MGA_MACCESS 0x1c04
-#define MGA_PITCH 0x1c8c
-#define MGA_PLNWT 0x1c1c
-#define MGA_PRIMADDRESS 0x1e58
+#define MGA_PITCH 0x1c8c
+#define MGA_PLNWT 0x1c1c
+#define MGA_PRIMADDRESS 0x1e58
# define MGA_DMA_GENERAL (0 << 0)
# define MGA_DMA_BLIT (1 << 0)
# define MGA_DMA_VECTOR (2 << 0)
@@ -487,43 +486,43 @@ do { \
# define MGA_PRIMPTREN0 (1 << 0)
# define MGA_PRIMPTREN1 (1 << 1)
-#define MGA_RST 0x1e40
+#define MGA_RST 0x1e40
# define MGA_SOFTRESET (1 << 0)
# define MGA_SOFTEXTRST (1 << 1)
-#define MGA_SECADDRESS 0x2c40
-#define MGA_SECEND 0x2c44
-#define MGA_SETUPADDRESS 0x2cd0
-#define MGA_SETUPEND 0x2cd4
+#define MGA_SECADDRESS 0x2c40
+#define MGA_SECEND 0x2c44
+#define MGA_SETUPADDRESS 0x2cd0
+#define MGA_SETUPEND 0x2cd4
#define MGA_SGN 0x1c58
#define MGA_SOFTRAP 0x2c48
-#define MGA_SRCORG 0x2cb4
+#define MGA_SRCORG 0x2cb4
# define MGA_SRMMAP_MASK (1 << 0)
# define MGA_SRCMAP_FB (0 << 0)
# define MGA_SRCMAP_SYSMEM (1 << 0)
# define MGA_SRCACC_MASK (1 << 1)
# define MGA_SRCACC_PCI (0 << 1)
# define MGA_SRCACC_AGP (1 << 1)
-#define MGA_STATUS 0x1e14
+#define MGA_STATUS 0x1e14
# define MGA_SOFTRAPEN (1 << 0)
# define MGA_VSYNCPEN (1 << 4)
# define MGA_VLINEPEN (1 << 5)
# define MGA_DWGENGSTS (1 << 16)
# define MGA_ENDPRDMASTS (1 << 17)
#define MGA_STENCIL 0x2cc8
-#define MGA_STENCILCTL 0x2ccc
+#define MGA_STENCILCTL 0x2ccc
-#define MGA_TDUALSTAGE0 0x2cf8
-#define MGA_TDUALSTAGE1 0x2cfc
-#define MGA_TEXBORDERCOL 0x2c5c
-#define MGA_TEXCTL 0x2c30
+#define MGA_TDUALSTAGE0 0x2cf8
+#define MGA_TDUALSTAGE1 0x2cfc
+#define MGA_TEXBORDERCOL 0x2c5c
+#define MGA_TEXCTL 0x2c30
#define MGA_TEXCTL2 0x2c3c
# define MGA_DUALTEX (1 << 7)
# define MGA_G400_TC2_MAGIC (1 << 15)
# define MGA_MAP1_ENABLE (1 << 31)
-#define MGA_TEXFILTER 0x2c58
-#define MGA_TEXHEIGHT 0x2c2c
-#define MGA_TEXORG 0x2c24
+#define MGA_TEXFILTER 0x2c58
+#define MGA_TEXHEIGHT 0x2c2c
+#define MGA_TEXORG 0x2c24
# define MGA_TEXORGMAP_MASK (1 << 0)
# define MGA_TEXORGMAP_FB (0 << 0)
# define MGA_TEXORGMAP_SYSMEM (1 << 0)
@@ -534,45 +533,45 @@ do { \
#define MGA_TEXORG2 0x2ca8
#define MGA_TEXORG3 0x2cac
#define MGA_TEXORG4 0x2cb0
-#define MGA_TEXTRANS 0x2c34
-#define MGA_TEXTRANSHIGH 0x2c38
-#define MGA_TEXWIDTH 0x2c28
-
-#define MGA_WACCEPTSEQ 0x1dd4
-#define MGA_WCODEADDR 0x1e6c
-#define MGA_WFLAG 0x1dc4
-#define MGA_WFLAG1 0x1de0
+#define MGA_TEXTRANS 0x2c34
+#define MGA_TEXTRANSHIGH 0x2c38
+#define MGA_TEXWIDTH 0x2c28
+
+#define MGA_WACCEPTSEQ 0x1dd4
+#define MGA_WCODEADDR 0x1e6c
+#define MGA_WFLAG 0x1dc4
+#define MGA_WFLAG1 0x1de0
#define MGA_WFLAGNB 0x1e64
-#define MGA_WFLAGNB1 0x1e08
+#define MGA_WFLAGNB1 0x1e08
#define MGA_WGETMSB 0x1dc8
-#define MGA_WIADDR 0x1dc0
+#define MGA_WIADDR 0x1dc0
#define MGA_WIADDR2 0x1dd8
# define MGA_WMODE_SUSPEND (0 << 0)
# define MGA_WMODE_RESUME (1 << 0)
# define MGA_WMODE_JUMP (2 << 0)
# define MGA_WMODE_START (3 << 0)
# define MGA_WAGP_ENABLE (1 << 2)
-#define MGA_WMISC 0x1e70
+#define MGA_WMISC 0x1e70
# define MGA_WUCODECACHE_ENABLE (1 << 0)
# define MGA_WMASTER_ENABLE (1 << 1)
# define MGA_WCACHEFLUSH_ENABLE (1 << 3)
#define MGA_WVRTXSZ 0x1dcc
-#define MGA_YBOT 0x1c9c
-#define MGA_YDST 0x1c90
+#define MGA_YBOT 0x1c9c
+#define MGA_YDST 0x1c90
#define MGA_YDSTLEN 0x1c88
#define MGA_YDSTORG 0x1c94
-#define MGA_YTOP 0x1c98
+#define MGA_YTOP 0x1c98
-#define MGA_ZORG 0x1c0c
+#define MGA_ZORG 0x1c0c
/* This finishes the current batch of commands
*/
-#define MGA_EXEC 0x0100
+#define MGA_EXEC 0x0100
/* AGP PLL encoding (for G200 only).
*/
-#define MGA_AGP_PLL 0x1e4c
+#define MGA_AGP_PLL 0x1e4c
# define MGA_AGP2XPLL_DISABLE (0 << 0)
# define MGA_AGP2XPLL_ENABLE (1 << 0)
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c
index 5ec8b61c5d45..d3f8aade07b3 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/char/drm/mga_state.c
@@ -150,8 +150,8 @@ static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
DMA_LOCALS;
-/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
-/* tex->texctl, tex->texctl2); */
+/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
+/* tex->texctl, tex->texctl2); */
BEGIN_DMA(6);
@@ -190,8 +190,8 @@ static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
DMA_LOCALS;
-/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
-/* tex->texctl, tex->texctl2); */
+/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
+/* tex->texctl, tex->texctl2); */
BEGIN_DMA(5);
@@ -256,7 +256,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
unsigned int pipe = sarea_priv->warp_pipe;
DMA_LOCALS;
-/* printk("mga_g400_emit_pipe %x\n", pipe); */
+/* printk("mga_g400_emit_pipe %x\n", pipe); */
BEGIN_DMA(10);
@@ -619,7 +619,7 @@ static void mga_dma_dispatch_swap(struct drm_device * dev)
FLUSH_DMA();
- DRM_DEBUG("%s... done.\n", __FUNCTION__);
+ DRM_DEBUG("... done.\n");
}
static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
@@ -631,7 +631,7 @@ static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * bu
u32 length = (u32) buf->used;
int i = 0;
DMA_LOCALS;
- DRM_DEBUG("vertex: buf=%d used=%d\n", buf->idx, buf->used);
+ DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);
if (buf->used) {
buf_priv->dispatched = 1;
@@ -678,7 +678,7 @@ static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * b
u32 address = (u32) buf->bus_address;
int i = 0;
DMA_LOCALS;
- DRM_DEBUG("indices: buf=%d start=%d end=%d\n", buf->idx, start, end);
+ DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end);
if (start != end) {
buf_priv->dispatched = 1;
@@ -955,7 +955,7 @@ static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *fi
#if 0
if (mga_do_wait_for_idle(dev_priv) < 0) {
if (MGA_DMA_DEBUG)
- DRM_INFO("%s: -EBUSY\n", __FUNCTION__);
+ DRM_INFO("-EBUSY\n");
return -EBUSY;
}
#endif
@@ -1014,7 +1014,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil
int value;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1046,7 +1046,7 @@ static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *fi
DMA_LOCALS;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1075,7 +1075,7 @@ file_priv)
u32 *fence = data;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index 7d550aba165e..892e0a589846 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -1,4 +1,4 @@
-/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
+/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
*/
/*
@@ -651,7 +651,7 @@ int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_pri
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
- DRM_DEBUG("%s while CCE running\n", __FUNCTION__);
+ DRM_DEBUG("while CCE running\n");
return 0;
}
@@ -710,7 +710,7 @@ int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_pri
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
- DRM_DEBUG("%s called before init done\n", __FUNCTION__);
+ DRM_DEBUG("called before init done\n");
return -EINVAL;
}
diff --git a/drivers/char/drm/r128_drv.h b/drivers/char/drm/r128_drv.h
index 5041bd8dbed8..011105e51ac6 100644
--- a/drivers/char/drm/r128_drv.h
+++ b/drivers/char/drm/r128_drv.h
@@ -462,8 +462,7 @@ do { \
#define BEGIN_RING( n ) do { \
if ( R128_VERBOSE ) { \
- DRM_INFO( "BEGIN_RING( %d ) in %s\n", \
- (n), __FUNCTION__ ); \
+ DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
} \
if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
COMMIT_RING(); \
@@ -493,7 +492,7 @@ do { \
write * sizeof(u32) ); \
} \
if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \
- DRM_ERROR( \
+ DRM_ERROR( \
"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
((dev_priv->ring.tail + _nr) & tail_mask), \
write, __LINE__); \
diff --git a/drivers/char/drm/r128_state.c b/drivers/char/drm/r128_state.c
index b7f483cac6d4..51a9afce7b9b 100644
--- a/drivers/char/drm/r128_state.c
+++ b/drivers/char/drm/r128_state.c
@@ -42,7 +42,7 @@ static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
{
u32 aux_sc_cntl = 0x00000000;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING((count < 3 ? count : 3) * 5 + 2);
@@ -85,7 +85,7 @@ static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(2);
@@ -100,7 +100,7 @@ static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(13);
@@ -126,7 +126,7 @@ static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(3);
@@ -142,7 +142,7 @@ static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(5);
@@ -161,7 +161,7 @@ static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(2);
@@ -178,7 +178,7 @@ static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
int i;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);
@@ -204,7 +204,7 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
int i;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);
@@ -226,7 +226,7 @@ static void r128_emit_state(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
- DRM_DEBUG("%s: dirty=0x%08x\n", __FUNCTION__, dirty);
+ DRM_DEBUG("dirty=0x%08x\n", dirty);
if (dirty & R128_UPLOAD_CORE) {
r128_emit_core(dev_priv);
@@ -362,7 +362,7 @@ static void r128_cce_dispatch_clear(struct drm_device * dev,
unsigned int flags = clear->flags;
int i;
RING_LOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
if (dev_priv->page_flipping && dev_priv->current_page == 1) {
unsigned int tmp = flags;
@@ -466,7 +466,7 @@ static void r128_cce_dispatch_swap(struct drm_device * dev)
struct drm_clip_rect *pbox = sarea_priv->boxes;
int i;
RING_LOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
#if R128_PERFORMANCE_BOXES
/* Do some trivial performance monitoring...
@@ -528,8 +528,7 @@ static void r128_cce_dispatch_flip(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
- DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
- __FUNCTION__,
+ DRM_DEBUG("page=%d pfCurrentPage=%d\n",
dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
#if R128_PERFORMANCE_BOXES
@@ -1156,7 +1155,7 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
int count, *x, *y;
int i, xbuf_size, ybuf_size;
RING_LOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
count = depth->n;
if (count > 4096 || count <= 0)
@@ -1226,7 +1225,7 @@ static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
drm_r128_private_t *dev_priv = dev->dev_private;
int i;
RING_LOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(33);
@@ -1309,7 +1308,7 @@ static int r128_do_cleanup_pageflip(struct drm_device * dev)
static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -1328,7 +1327,7 @@ static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *fi
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -1356,7 +1355,7 @@ static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1412,7 +1411,7 @@ static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1557,11 +1556,11 @@ static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
- DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
+ DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
indirect->idx, indirect->start, indirect->end,
indirect->discard);
@@ -1622,7 +1621,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
int value;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
index 59b2944811c5..0f4afc44245c 100644
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -77,23 +77,31 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
return -EFAULT;
}
- box.x1 =
- (box.x1 +
- R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
- box.y1 =
- (box.y1 +
- R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
- box.x2 =
- (box.x2 +
- R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
- box.y2 =
- (box.y2 +
- R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+ box.x1 = (box.x1) &
+ R300_CLIPRECT_MASK;
+ box.y1 = (box.y1) &
+ R300_CLIPRECT_MASK;
+ box.x2 = (box.x2) &
+ R300_CLIPRECT_MASK;
+ box.y2 = (box.y2) &
+ R300_CLIPRECT_MASK;
+ } else {
+ box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
+ R300_CLIPRECT_MASK;
+ box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
+ R300_CLIPRECT_MASK;
+ box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
+ R300_CLIPRECT_MASK;
+ box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
+ R300_CLIPRECT_MASK;
+ }
OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
(box.y1 << R300_CLIPRECT_Y_SHIFT));
OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
(box.y2 << R300_CLIPRECT_Y_SHIFT));
+
}
OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
@@ -133,9 +141,11 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
static u8 r300_reg_flags[0x10000 >> 2];
-void r300_init_reg_flags(void)
+void r300_init_reg_flags(struct drm_device *dev)
{
int i;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
memset(r300_reg_flags, 0, 0x10000 >> 2);
#define ADD_RANGE_MARK(reg, count,mark) \
for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
@@ -230,6 +240,9 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+ ADD_RANGE(0x4074, 16);
+ }
}
static __inline__ int r300_check_range(unsigned reg, int count)
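
r300_init_reg_flags() and r300_check_range() implement a per-register whitelist: ranges of registers that user command buffers may write are marked up front, and every register a packet touches is then looked up in that table. A simplified stand-alone sketch of the same idea; names and sizes here are illustrative, not the driver's:

#include <stdio.h>
#include <string.h>

#define REG_SPACE	0x10000		/* 64 KiB of register space, 4 bytes per register */
#define MARK_SAFE	0x01

static unsigned char reg_flags[REG_SPACE >> 2];

/* Mark a range of registers as safe for user command buffers to write. */
static void allow_range(unsigned int reg, unsigned int count)
{
	unsigned int i;

	for (i = reg >> 2; i < (reg >> 2) + count; i++)
		reg_flags[i] |= MARK_SAFE;
}

/* Reject any packet that touches a register outside the whitelist. */
static int check_range(unsigned int reg, unsigned int count)
{
	unsigned int i;

	if (reg & ~0xffffu)
		return -1;
	if ((reg >> 2) + count > (REG_SPACE >> 2))
		return -1;
	for (i = reg >> 2; i < (reg >> 2) + count; i++)
		if (!(reg_flags[i] & MARK_SAFE))
			return -1;
	return 0;
}

int main(void)
{
	memset(reg_flags, 0, sizeof(reg_flags));
	allow_range(0x4074, 16);	/* e.g. the RV515-only range added above */
	printf("%d %d\n", check_range(0x4074, 16), check_range(0x2000, 1));	/* 0 -1 */
	return 0;
}
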
@@ -486,7 +499,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
if (cmd[0] & 0x8000) {
u32 offset;
- if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+ if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[2] << 10;
ret = !radeon_check_offset(dev_priv, offset);
@@ -504,7 +517,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
return -EINVAL;
}
-
+
}
}
@@ -723,54 +736,54 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
u32 *ref_age_base;
u32 i, buf_idx, h_pending;
RING_LOCALS;
-
- if (cmdbuf->bufsz <
+
+ if (cmdbuf->bufsz <
(sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
return -EINVAL;
}
-
+
if (header.scratch.reg >= 5) {
return -EINVAL;
}
-
+
dev_priv->scratch_ages[header.scratch.reg]++;
-
+
ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf);
-
+
cmdbuf->buf += sizeof(u64);
cmdbuf->bufsz -= sizeof(u64);
-
+
for (i=0; i < header.scratch.n_bufs; i++) {
buf_idx = *(u32 *)cmdbuf->buf;
buf_idx *= 2; /* 8 bytes per buf */
-
+
if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
return -EINVAL;
}
-
+
if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
return -EINVAL;
}
-
+
if (h_pending == 0) {
return -EINVAL;
}
-
+
h_pending--;
-
+
if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
return -EINVAL;
}
-
+
cmdbuf->buf += sizeof(buf_idx);
cmdbuf->bufsz -= sizeof(buf_idx);
}
-
+
BEGIN_RING(2);
OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
ADVANCE_RING();
-
+
return 0;
}
@@ -919,7 +932,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
goto cleanup;
}
break;
-
+
default:
DRM_ERROR("bad cmd_type %i at %p\n",
header.header.cmd_type,
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
index 3ae57ecc7afd..8f664af9c4a4 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/char/drm/r300_reg.h
@@ -584,7 +584,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
#define R300_RE_FOG_START 0x4298
/* Not sure why there are duplicate of factor and constant values.
- * My best guess so far is that there are seperate zbiases for test and write.
+ * My best guess so far is that there are separate zbiases for test and write.
* Ordering might be wrong.
* Some of the tests indicate that fgl has a fallback implementation of zbias
* via pixel shaders.
@@ -853,13 +853,13 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_TX_FORMAT_W8Z8Y8X8 0xC
# define R300_TX_FORMAT_W2Z10Y10X10 0xD
# define R300_TX_FORMAT_W16Z16Y16X16 0xE
-# define R300_TX_FORMAT_DXT1 0xF
-# define R300_TX_FORMAT_DXT3 0x10
-# define R300_TX_FORMAT_DXT5 0x11
+# define R300_TX_FORMAT_DXT1 0xF
+# define R300_TX_FORMAT_DXT3 0x10
+# define R300_TX_FORMAT_DXT5 0x11
# define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */
-# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
-# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
-# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
+# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
+# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
+# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
/* 0x16 - some 16 bit green format.. ?? */
# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
# define R300_TX_FORMAT_CUBIC_MAP (1 << 26)
@@ -867,19 +867,19 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
/* gap */
/* Floating point formats */
/* Note - hardware supports both 16 and 32 bit floating point */
-# define R300_TX_FORMAT_FL_I16 0x18
-# define R300_TX_FORMAT_FL_I16A16 0x19
+# define R300_TX_FORMAT_FL_I16 0x18
+# define R300_TX_FORMAT_FL_I16A16 0x19
# define R300_TX_FORMAT_FL_R16G16B16A16 0x1A
-# define R300_TX_FORMAT_FL_I32 0x1B
-# define R300_TX_FORMAT_FL_I32A32 0x1C
+# define R300_TX_FORMAT_FL_I32 0x1B
+# define R300_TX_FORMAT_FL_I32A32 0x1C
# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
/* alpha modes, convenience mostly */
/* if you have alpha, pick constant appropriate to the
number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */
-# define R300_TX_FORMAT_ALPHA_1CH 0x000
-# define R300_TX_FORMAT_ALPHA_2CH 0x200
-# define R300_TX_FORMAT_ALPHA_4CH 0x600
-# define R300_TX_FORMAT_ALPHA_NONE 0xA00
+# define R300_TX_FORMAT_ALPHA_1CH 0x000
+# define R300_TX_FORMAT_ALPHA_2CH 0x200
+# define R300_TX_FORMAT_ALPHA_4CH 0x600
+# define R300_TX_FORMAT_ALPHA_NONE 0xA00
/* Swizzling */
/* constants */
# define R300_TX_FORMAT_X 0
@@ -1360,11 +1360,11 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_RB3D_Z_DISABLED_2 0x00000014
# define R300_RB3D_Z_TEST 0x00000012
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
-# define R300_RB3D_Z_WRITE_ONLY 0x00000006
+# define R300_RB3D_Z_WRITE_ONLY 0x00000006
# define R300_RB3D_Z_TEST 0x00000012
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
-# define R300_RB3D_Z_WRITE_ONLY 0x00000006
+# define R300_RB3D_Z_WRITE_ONLY 0x00000006
# define R300_RB3D_STENCIL_ENABLE 0x00000001
#define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 24fca8ec1379..5dc799ab86b8 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -816,6 +816,46 @@ static const u32 R300_cp_microcode[][2] = {
{0000000000, 0000000000},
};
+static u32 RADEON_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+ u32 ret;
+ RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
+ ret = RADEON_READ(R520_MC_IND_DATA);
+ RADEON_WRITE(R520_MC_IND_INDEX, 0);
+ return ret;
+}
+
+u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
+{
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+ return RADEON_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+ return RADEON_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
+ else
+ return RADEON_READ(RADEON_MC_FB_LOCATION);
+}
+
+static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
+{
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+ RADEON_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+ RADEON_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
+ else
+ RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
+}
+
+static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
+{
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+ RADEON_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+ RADEON_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
+ else
+ RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
+}
+
static int RADEON_READ_PLL(struct drm_device * dev, int addr)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -824,7 +864,7 @@ static int RADEON_READ_PLL(struct drm_device * dev, int addr)
return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
}
-static int RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
+static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
{
RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
return RADEON_READ(RADEON_PCIE_DATA);
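
The new RADEON_READ_MCIND()/RADEON_WRITE_MCIND() accessors above use the classic index/data pair: write the target address into an index register, then transfer the value through the data register. A runnable toy model of that pattern follows; the register offsets and write-enable bit mirror the defines added in this series, the driver's read path also sets extra index bits that are omitted here, and the MMIO layer is a fake array rather than real hardware:

#include <stdint.h>
#include <stdio.h>

#define MC_IND_INDEX	0x70
#define MC_IND_DATA	0x74
#define MC_IND_WR_EN	(1u << 24)

/* Fake MMIO aperture standing in for the real register BAR. */
static uint32_t fake_bar[0x100];
static uint32_t mc_space[0x100];	/* the indirect MC register file */

static void mmio_write32(uint32_t reg, uint32_t val)
{
	fake_bar[reg >> 2] = val;
	if (reg == MC_IND_DATA && (fake_bar[MC_IND_INDEX >> 2] & MC_IND_WR_EN))
		mc_space[fake_bar[MC_IND_INDEX >> 2] & 0xff] = val;
}

static uint32_t mmio_read32(uint32_t reg)
{
	if (reg == MC_IND_DATA)
		return mc_space[fake_bar[MC_IND_INDEX >> 2] & 0xff];
	return fake_bar[reg >> 2];
}

/* Same shape as the driver's helpers: select, transfer, deselect. */
static void mc_ind_write(uint32_t addr, uint32_t val)
{
	mmio_write32(MC_IND_INDEX, MC_IND_WR_EN | (addr & 0xff));
	mmio_write32(MC_IND_DATA, val);
	mmio_write32(MC_IND_INDEX, 0);
}

static uint32_t mc_ind_read(uint32_t addr)
{
	uint32_t val;

	mmio_write32(MC_IND_INDEX, addr & 0xff);
	val = mmio_read32(MC_IND_DATA);
	mmio_write32(MC_IND_INDEX, 0);
	return val;
}

int main(void)
{
	mc_ind_write(0x04, 0x00ff0000);			/* e.g. an FB_LOCATION-style register */
	printf("0x%08x\n", mc_ind_read(0x04));		/* 0x00ff0000 */
	return 0;
}
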
@@ -1074,41 +1114,43 @@ static int radeon_do_engine_reset(struct drm_device * dev)
radeon_do_pixcache_flush(dev_priv);
- clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
- mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
-
- RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
- RADEON_FORCEON_MCLKA |
- RADEON_FORCEON_MCLKB |
- RADEON_FORCEON_YCLKA |
- RADEON_FORCEON_YCLKB |
- RADEON_FORCEON_MC |
- RADEON_FORCEON_AIC));
-
- rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
-
- RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
- RADEON_SOFT_RESET_CP |
- RADEON_SOFT_RESET_HI |
- RADEON_SOFT_RESET_SE |
- RADEON_SOFT_RESET_RE |
- RADEON_SOFT_RESET_PP |
- RADEON_SOFT_RESET_E2 |
- RADEON_SOFT_RESET_RB));
- RADEON_READ(RADEON_RBBM_SOFT_RESET);
- RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
- ~(RADEON_SOFT_RESET_CP |
- RADEON_SOFT_RESET_HI |
- RADEON_SOFT_RESET_SE |
- RADEON_SOFT_RESET_RE |
- RADEON_SOFT_RESET_PP |
- RADEON_SOFT_RESET_E2 |
- RADEON_SOFT_RESET_RB)));
- RADEON_READ(RADEON_RBBM_SOFT_RESET);
-
- RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
- RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
- RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
+ clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
+ mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
+
+ RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
+ RADEON_FORCEON_MCLKA |
+ RADEON_FORCEON_MCLKB |
+ RADEON_FORCEON_YCLKA |
+ RADEON_FORCEON_YCLKB |
+ RADEON_FORCEON_MC |
+ RADEON_FORCEON_AIC));
+
+ rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
+
+ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
+ RADEON_SOFT_RESET_CP |
+ RADEON_SOFT_RESET_HI |
+ RADEON_SOFT_RESET_SE |
+ RADEON_SOFT_RESET_RE |
+ RADEON_SOFT_RESET_PP |
+ RADEON_SOFT_RESET_E2 |
+ RADEON_SOFT_RESET_RB));
+ RADEON_READ(RADEON_RBBM_SOFT_RESET);
+ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
+ ~(RADEON_SOFT_RESET_CP |
+ RADEON_SOFT_RESET_HI |
+ RADEON_SOFT_RESET_SE |
+ RADEON_SOFT_RESET_RE |
+ RADEON_SOFT_RESET_PP |
+ RADEON_SOFT_RESET_E2 |
+ RADEON_SOFT_RESET_RB)));
+ RADEON_READ(RADEON_RBBM_SOFT_RESET);
+
+ RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
+ RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
+ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
+ }
/* Reset the CP ring */
radeon_do_cp_reset(dev_priv);
@@ -1127,21 +1169,21 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
{
u32 ring_start, cur_read_ptr;
u32 tmp;
-
+
/* Initialize the memory controller. With new memory map, the fb location
* is not changed, it should have been properly initialized already. Part
* of the problem is that the code below is bogus, assuming the GART is
* always appended to the fb which is not necessarily the case
*/
if (!dev_priv->new_memmap)
- RADEON_WRITE(RADEON_MC_FB_LOCATION,
+ radeon_write_fb_location(dev_priv,
((dev_priv->gart_vm_start - 1) & 0xffff0000)
| (dev_priv->fb_location >> 16));
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
- RADEON_WRITE(RADEON_MC_AGP_LOCATION,
+ radeon_write_agp_location(dev_priv,
(((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) |
(dev_priv->gart_vm_start >> 16)));
@@ -1190,9 +1232,15 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
/* Set ring buffer size */
#ifdef __BIG_ENDIAN
RADEON_WRITE(RADEON_CP_RB_CNTL,
- dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT);
+ RADEON_BUF_SWAP_32BIT |
+ (dev_priv->ring.fetch_size_l2ow << 18) |
+ (dev_priv->ring.rptr_update_l2qw << 8) |
+ dev_priv->ring.size_l2qw);
#else
- RADEON_WRITE(RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw);
+ RADEON_WRITE(RADEON_CP_RB_CNTL,
+ (dev_priv->ring.fetch_size_l2ow << 18) |
+ (dev_priv->ring.rptr_update_l2qw << 8) |
+ dev_priv->ring.size_l2qw);
#endif
/* Start with assuming that writeback doesn't work */
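
The CP_RB_CNTL write above now packs three log2-encoded fields instead of just the ring size. A quick worked example of the encoding, using the same constants the init path picks (4096-byte rptr updates, 32-byte fetches) and a stand-in for drm_order():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's drm_order(): smallest n with (1 << n) >= size. */
static unsigned int order_of(unsigned long size)
{
	unsigned int n = 0;

	while ((1UL << n) < size)
		n++;
	return n;
}

int main(void)
{
	unsigned long ring_size = 1024 * 1024;			/* example: 1 MiB ring */
	unsigned int size_l2qw = order_of(ring_size / 8);	/* ring size, log2 quad-words */
	unsigned int rptr_update_l2qw = order_of(4096 / 8);	/* 4096-byte rptr update period */
	unsigned int fetch_size_l2ow = order_of(32 / 16);	/* 32-byte fetches, log2 oct-words */

	/* Same shifts as the RADEON_CP_RB_CNTL write above. */
	uint32_t rb_cntl = (fetch_size_l2ow << 18) |
			   (rptr_update_l2qw << 8) |
			   size_l2qw;

	printf("size_l2qw=%u rptr_l2qw=%u fetch_l2ow=%u -> RB_CNTL=0x%08x\n",
	       size_l2qw, rptr_update_l2qw, fetch_size_l2ow, rb_cntl);
	return 0;
}
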
@@ -1299,7 +1347,7 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev_priv->gart_vm_start);
dev_priv->gart_size = 32*1024*1024;
- RADEON_WRITE(RADEON_MC_AGP_LOCATION,
+ radeon_write_agp_location(dev_priv,
(((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) |
(dev_priv->gart_vm_start >> 16)));
@@ -1333,7 +1381,7 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
dev_priv->gart_vm_start +
dev_priv->gart_size - 1);
- RADEON_WRITE(RADEON_MC_AGP_LOCATION, 0xffffffc0); /* ?? */
+ radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
RADEON_PCIE_TX_GART_EN);
@@ -1358,7 +1406,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
return;
}
- tmp = RADEON_READ(RADEON_AIC_CNTL);
+ tmp = RADEON_READ(RADEON_AIC_CNTL);
if (on) {
RADEON_WRITE(RADEON_AIC_CNTL,
@@ -1376,7 +1424,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
/* Turn off AGP aperture -- is this required for PCI GART?
*/
- RADEON_WRITE(RADEON_MC_AGP_LOCATION, 0xffffffc0); /* ?? */
+ radeon_write_agp_location(dev_priv, 0xffffffc0);
RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */
} else {
RADEON_WRITE(RADEON_AIC_CNTL,
@@ -1581,10 +1629,9 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
dev->agp_buffer_map->handle);
}
- dev_priv->fb_location = (RADEON_READ(RADEON_MC_FB_LOCATION)
- & 0xffff) << 16;
- dev_priv->fb_size =
- ((RADEON_READ(RADEON_MC_FB_LOCATION) & 0xffff0000u) + 0x10000)
+ dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
+ dev_priv->fb_size =
+ ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
- dev_priv->fb_location;
dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
@@ -1630,7 +1677,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
((base + dev_priv->gart_size) & 0xfffffffful) < base)
base = dev_priv->fb_location
- dev_priv->gart_size;
- }
+ }
dev_priv->gart_vm_start = base & 0xffc00000u;
if (dev_priv->gart_vm_start != base)
DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
@@ -1663,6 +1710,11 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
dev_priv->ring.size = init->ring_size;
dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+ dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
+ dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
+
+ dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
+ dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
@@ -1830,7 +1882,7 @@ int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_pri
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (init->func == RADEON_INIT_R300_CP)
- r300_init_reg_flags();
+ r300_init_reg_flags(dev);
switch (init->func) {
case RADEON_INIT_CP:
@@ -1852,12 +1904,12 @@ int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_pr
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dev_priv->cp_running) {
- DRM_DEBUG("%s while CP running\n", __FUNCTION__);
+ DRM_DEBUG("while CP running\n");
return 0;
}
if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
- DRM_DEBUG("%s called with bogus CP mode (%d)\n",
- __FUNCTION__, dev_priv->cp_mode);
+ DRM_DEBUG("called with bogus CP mode (%d)\n",
+ dev_priv->cp_mode);
return 0;
}
@@ -1962,7 +2014,7 @@ int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_pr
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
- DRM_DEBUG("%s called before init done\n", __FUNCTION__);
+ DRM_DEBUG("called before init done\n");
return -EINVAL;
}
@@ -2239,6 +2291,10 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
case CHIP_R350:
case CHIP_R420:
case CHIP_RV410:
+ case CHIP_RV515:
+ case CHIP_R520:
+ case CHIP_RV570:
+ case CHIP_R580:
dev_priv->flags |= RADEON_HAS_HIERZ;
break;
default:
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
index 5a8e23f916fc..71e5b21fad2c 100644
--- a/drivers/char/drm/radeon_drm.h
+++ b/drivers/char/drm/radeon_drm.h
@@ -223,10 +223,10 @@ typedef union {
#define R300_CMD_CP_DELAY 5
#define R300_CMD_DMA_DISCARD 6
#define R300_CMD_WAIT 7
-# define R300_WAIT_2D 0x1
-# define R300_WAIT_3D 0x2
-# define R300_WAIT_2D_CLEAN 0x3
-# define R300_WAIT_3D_CLEAN 0x4
+# define R300_WAIT_2D 0x1
+# define R300_WAIT_3D 0x2
+# define R300_WAIT_2D_CLEAN 0x3
+# define R300_WAIT_3D_CLEAN 0x4
#define R300_CMD_SCRATCH 8
typedef union {
@@ -656,6 +656,7 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_SCRATCH_OFFSET 11
#define RADEON_PARAM_CARD_TYPE 12
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
+#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
typedef struct drm_radeon_getparam {
int param;
@@ -722,7 +723,7 @@ typedef struct drm_radeon_surface_free {
unsigned int address;
} drm_radeon_surface_free_t;
-#define DRM_RADEON_VBLANK_CRTC1 1
-#define DRM_RADEON_VBLANK_CRTC2 2
+#define DRM_RADEON_VBLANK_CRTC1 1
+#define DRM_RADEON_VBLANK_CRTC2 2
#endif
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index bfbb60a9298c..4434332c79bc 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -123,6 +123,12 @@ enum radeon_family {
CHIP_R420,
CHIP_RV410,
CHIP_RS400,
+ CHIP_RV515,
+ CHIP_R520,
+ CHIP_RV530,
+ CHIP_RV560,
+ CHIP_RV570,
+ CHIP_R580,
CHIP_LAST,
};
@@ -166,6 +172,12 @@ typedef struct drm_radeon_ring_buffer {
int size;
int size_l2qw;
+ int rptr_update; /* Double Words */
+ int rptr_update_l2qw; /* log2 Quad Words */
+
+ int fetch_size; /* Double Words */
+ int fetch_size_l2ow; /* log2 Oct Words */
+
u32 tail;
u32 tail_mask;
int space;
@@ -336,6 +348,7 @@ extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file
extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
extern void radeon_freelist_reset(struct drm_device * dev);
extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
@@ -382,7 +395,7 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
/* r300_cmdbuf.c */
-extern void r300_init_reg_flags(void);
+extern void r300_init_reg_flags(struct drm_device *dev);
extern int r300_do_cp_cmdbuf(struct drm_device * dev,
struct drm_file *file_priv,
@@ -429,7 +442,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
#define RADEON_PCIE_INDEX 0x0030
#define RADEON_PCIE_DATA 0x0034
#define RADEON_PCIE_TX_GART_CNTL 0x10
-# define RADEON_PCIE_TX_GART_EN (1 << 0)
+# define RADEON_PCIE_TX_GART_EN (1 << 0)
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0<<1)
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1<<1)
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3<<1)
@@ -439,7 +452,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1<<8)
#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
-#define RADEON_PCIE_TX_GART_BASE 0x13
+#define RADEON_PCIE_TX_GART_BASE 0x13
#define RADEON_PCIE_TX_GART_START_LO 0x14
#define RADEON_PCIE_TX_GART_START_HI 0x15
#define RADEON_PCIE_TX_GART_END_LO 0x16
@@ -454,6 +467,16 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
#define RADEON_IGPGART_ENABLE 0x38
#define RADEON_IGPGART_UNK_39 0x39
+#define R520_MC_IND_INDEX 0x70
+#define R520_MC_IND_WR_EN (1<<24)
+#define R520_MC_IND_DATA 0x74
+
+#define RV515_MC_FB_LOCATION 0x01
+#define RV515_MC_AGP_LOCATION 0x02
+
+#define R520_MC_FB_LOCATION 0x04
+#define R520_MC_AGP_LOCATION 0x05
+
#define RADEON_MPP_TB_CONFIG 0x01c0
#define RADEON_MEM_CNTL 0x0140
#define RADEON_MEM_SDRAM_MODE_REG 0x0158
@@ -512,12 +535,12 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
#define RADEON_GEN_INT_STATUS 0x0044
# define RADEON_CRTC_VBLANK_STAT (1 << 0)
-# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
+# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
-# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
+# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19)
# define RADEON_SW_INT_TEST (1 << 25)
-# define RADEON_SW_INT_TEST_ACK (1 << 25)
+# define RADEON_SW_INT_TEST_ACK (1 << 25)
# define RADEON_SW_INT_FIRE (1 << 26)
#define RADEON_HOST_PATH_CNTL 0x0130
@@ -615,9 +638,51 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
# define RADEON_SOFT_RESET_E2 (1 << 5)
# define RADEON_SOFT_RESET_RB (1 << 6)
# define RADEON_SOFT_RESET_HDP (1 << 7)
+/*
+ * 6:0 Available slots in the FIFO
+ * 8 Host Interface active
+ * 9 CP request active
+ * 10 FIFO request active
+ * 11 Host Interface retry active
+ * 12 CP retry active
+ * 13 FIFO retry active
+ * 14 FIFO pipeline busy
+ * 15 Event engine busy
+ * 16 CP command stream busy
+ * 17 2D engine busy
+ * 18 2D portion of render backend busy
+ * 20 3D setup engine busy
+ * 26 GA engine busy
+ * 27 CBA 2D engine busy
+ * 31 2D engine busy or 3D engine busy or FIFO not empty or CP busy or
+ * command stream queue not empty or Ring Buffer not empty
+ */
#define RADEON_RBBM_STATUS 0x0e40
+/* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register. */
+/* #define RADEON_RBBM_STATUS 0x1740 */
+/* bits 6:0 are dword slots available in the cmd fifo */
# define RADEON_RBBM_FIFOCNT_MASK 0x007f
-# define RADEON_RBBM_ACTIVE (1 << 31)
+# define RADEON_HIRQ_ON_RBB (1 << 8)
+# define RADEON_CPRQ_ON_RBB (1 << 9)
+# define RADEON_CFRQ_ON_RBB (1 << 10)
+# define RADEON_HIRQ_IN_RTBUF (1 << 11)
+# define RADEON_CPRQ_IN_RTBUF (1 << 12)
+# define RADEON_CFRQ_IN_RTBUF (1 << 13)
+# define RADEON_PIPE_BUSY (1 << 14)
+# define RADEON_ENG_EV_BUSY (1 << 15)
+# define RADEON_CP_CMDSTRM_BUSY (1 << 16)
+# define RADEON_E2_BUSY (1 << 17)
+# define RADEON_RB2D_BUSY (1 << 18)
+# define RADEON_RB3D_BUSY (1 << 19) /* not used on r300 */
+# define RADEON_VAP_BUSY (1 << 20)
+# define RADEON_RE_BUSY (1 << 21) /* not used on r300 */
+# define RADEON_TAM_BUSY (1 << 22) /* not used on r300 */
+# define RADEON_TDM_BUSY (1 << 23) /* not used on r300 */
+# define RADEON_PB_BUSY (1 << 24) /* not used on r300 */
+# define RADEON_TIM_BUSY (1 << 25) /* not used on r300 */
+# define RADEON_GA_BUSY (1 << 26)
+# define RADEON_CBA2D_BUSY (1 << 27)
+# define RADEON_RBBM_ACTIVE (1 << 31)
#define RADEON_RE_LINE_PATTERN 0x1cd0
#define RADEON_RE_MISC 0x26c4
#define RADEON_RE_TOP_LEFT 0x26c0
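
The comment block and the new RBBM_STATUS bit definitions above describe which engines report busy; the usual consumer of this register is an idle-wait loop. A minimal sketch of such a poll; the bit value matches the define above, the MMIO read is a hypothetical stub, and real code would also bound the wait in time and trigger a reset on failure:

#include <stdint.h>

#define RBBM_STATUS	0x0e40
#define RBBM_ACTIVE	(1u << 31)

/* Hypothetical register read standing in for RADEON_READ(). */
static uint32_t mmio_read32(uint32_t reg)
{
	(void)reg;
	return 0;		/* a quiescent GPU would report no busy bits */
}

/* Poll until bit 31 (any engine busy / FIFO not empty / CP busy) clears.
 * Returns 0 once idle, -1 if it never went idle within max_tries reads. */
static int wait_for_idle(unsigned int max_tries)
{
	unsigned int i;

	for (i = 0; i < max_tries; i++)
		if (!(mmio_read32(RBBM_STATUS) & RBBM_ACTIVE))
			return 0;
	return -1;
}

int main(void)
{
	return wait_for_idle(10000) ? 1 : 0;
}
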
@@ -1004,6 +1069,13 @@ do { \
RADEON_WRITE( RADEON_PCIE_DATA, (val) ); \
} while (0)
+#define RADEON_WRITE_MCIND( addr, val ) \
+ do { \
+ RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff)); \
+ RADEON_WRITE(R520_MC_IND_DATA, (val)); \
+ RADEON_WRITE(R520_MC_IND_INDEX, 0); \
+ } while (0)
+
#define CP_PACKET0( reg, n ) \
(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
#define CP_PACKET0_TABLE( reg, n ) \
@@ -1114,8 +1186,7 @@ do { \
#define BEGIN_RING( n ) do { \
if ( RADEON_VERBOSE ) { \
- DRM_INFO( "BEGIN_RING( %d ) in %s\n", \
- n, __FUNCTION__ ); \
+ DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
} \
if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
COMMIT_RING(); \
@@ -1133,7 +1204,7 @@ do { \
write, dev_priv->ring.tail ); \
} \
if (((dev_priv->ring.tail + _nr) & mask) != write) { \
- DRM_ERROR( \
+ DRM_ERROR( \
"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
((dev_priv->ring.tail + _nr) & mask), \
write, __LINE__); \
diff --git a/drivers/char/drm/radeon_irq.c b/drivers/char/drm/radeon_irq.c
index 84f5bc36252b..009af3814b6f 100644
--- a/drivers/char/drm/radeon_irq.c
+++ b/drivers/char/drm/radeon_irq.c
@@ -154,7 +154,7 @@ static int radeon_driver_vblank_do_wait(struct drm_device * dev,
int ack = 0;
atomic_t *counter;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -205,7 +205,7 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -227,7 +227,7 @@ int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_pr
drm_radeon_irq_wait_t *irqwait = data;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
diff --git a/drivers/char/drm/radeon_mem.c b/drivers/char/drm/radeon_mem.c
index a29acfe2f973..78b34fa7c89a 100644
--- a/drivers/char/drm/radeon_mem.c
+++ b/drivers/char/drm/radeon_mem.c
@@ -224,7 +224,7 @@ int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_p
struct mem_block *block, **heap;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -259,7 +259,7 @@ int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_pr
struct mem_block *block, **heap;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -285,7 +285,7 @@ int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *fi
struct mem_block **heap;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index f824f2f5fdc2..6f75512f591e 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -898,7 +898,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
int w = pbox[i].x2 - x;
int h = pbox[i].y2 - y;
- DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
+ DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n",
x, y, w, h, flags);
if (flags & RADEON_FRONT) {
@@ -1368,7 +1368,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev)
int w = pbox[i].x2 - x;
int h = pbox[i].y2 - y;
- DRM_DEBUG("dispatch swap %d,%d-%d,%d\n", x, y, w, h);
+ DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
BEGIN_RING(9);
@@ -1422,8 +1422,7 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev)
int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
? dev_priv->front_offset : dev_priv->back_offset;
RING_LOCALS;
- DRM_DEBUG("%s: pfCurrentPage=%d\n",
- __FUNCTION__,
+ DRM_DEBUG("pfCurrentPage=%d\n",
dev_priv->sarea_priv->pfCurrentPage);
/* Do some trivial performance monitoring...
@@ -1562,7 +1561,7 @@ static void radeon_cp_dispatch_indirect(struct drm_device * dev,
{
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
- DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
+ DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
if (start != end) {
int offset = (dev_priv->gart_buffers_offset
@@ -1758,7 +1757,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
buf = radeon_freelist_get(dev);
}
if (!buf) {
- DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
+ DRM_DEBUG("EAGAIN\n");
if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
return -EFAULT;
return -EAGAIN;
@@ -2413,7 +2412,7 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil
LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
+ DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
indirect->idx, indirect->start, indirect->end,
indirect->discard);
@@ -2779,7 +2778,7 @@ static int radeon_emit_wait(struct drm_device * dev, int flags)
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
- DRM_DEBUG("%s: %x\n", __FUNCTION__, flags);
+ DRM_DEBUG("%x\n", flags);
switch (flags) {
case RADEON_WAIT_2D:
BEGIN_RING(2);
@@ -3035,6 +3034,9 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
case RADEON_PARAM_VBLANK_CRTC:
value = radeon_vblank_crtc_get(dev);
break;
+ case RADEON_PARAM_FB_LOCATION:
+ value = radeon_read_fb_location(dev_priv);
+ break;
default:
DRM_DEBUG("Invalid parameter %d\n", param->param);
return -EINVAL;
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c
index bf8e0e10fe21..5f6238fdf1fa 100644
--- a/drivers/char/drm/savage_state.c
+++ b/drivers/char/drm/savage_state.c
@@ -512,7 +512,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
DMA_DRAW_PRIMITIVE(count, prim, skip);
if (vb_stride == vtx_size) {
- DMA_COPY(&vtxbuf[vb_stride * start],
+ DMA_COPY(&vtxbuf[vb_stride * start],
vtx_size * count);
} else {
for (i = start; i < start + count; ++i) {
@@ -742,7 +742,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
while (n != 0) {
/* Can emit up to 255 vertices (85 triangles) at once. */
unsigned int count = n > 255 ? 255 : n;
-
+
/* Check indices */
for (i = 0; i < count; ++i) {
if (idx[i] > vb_size / (vb_stride * 4)) {
@@ -933,7 +933,7 @@ static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
/* j was check in savage_bci_cmdbuf */
ret = savage_dispatch_vb_idx(dev_priv,
&cmd_header, (const uint16_t *)cmdbuf,
- (const uint32_t *)vtxbuf, vb_size,
+ (const uint32_t *)vtxbuf, vb_size,
vb_stride);
cmdbuf += j;
break;
diff --git a/drivers/char/drm/sis_mm.c b/drivers/char/drm/sis_mm.c
index a6b7ccdaf73d..b3878770fce1 100644
--- a/drivers/char/drm/sis_mm.c
+++ b/drivers/char/drm/sis_mm.c
@@ -115,7 +115,7 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
dev_priv->vram_offset = fb->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
+ DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
return 0;
}
@@ -205,7 +205,7 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
dev_priv->agp_offset = agp->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
+ DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
return 0;
}
@@ -249,7 +249,7 @@ int sis_idle(struct drm_device *dev)
return 0;
}
}
-
+
/*
* Implement a device switch here if needed
*/
diff --git a/drivers/char/drm/via_dma.c b/drivers/char/drm/via_dma.c
index 75d6b748c2c0..94baec692b57 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/char/drm/via_dma.c
@@ -179,14 +179,12 @@ static int via_initialize(struct drm_device * dev,
}
if (dev_priv->ring.virtual_start != NULL) {
- DRM_ERROR("%s called again without calling cleanup\n",
- __FUNCTION__);
+ DRM_ERROR("called again without calling cleanup\n");
return -EFAULT;
}
if (!dev->agp || !dev->agp->base) {
- DRM_ERROR("%s called with no agp memory available\n",
- __FUNCTION__);
+ DRM_ERROR("called with no agp memory available\n");
return -EFAULT;
}
@@ -267,8 +265,7 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
dev_priv = (drm_via_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start == NULL) {
- DRM_ERROR("%s called without initializing AGP ring buffer.\n",
- __FUNCTION__);
+ DRM_ERROR("called without initializing AGP ring buffer.\n");
return -EFAULT;
}
@@ -337,8 +334,7 @@ static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *fi
LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf->buf,
- cmdbuf->size);
+ DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
ret = via_dispatch_cmdbuffer(dev, cmdbuf);
if (ret) {
@@ -379,8 +375,7 @@ static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file
LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf->buf,
- cmdbuf->size);
+ DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
if (ret) {
@@ -400,7 +395,7 @@ static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
}
/*
- * This function is used internally by ring buffer mangement code.
+ * This function is used internally by ring buffer management code.
*
* Returns virtual pointer to ring buffer.
*/
@@ -648,14 +643,13 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
uint32_t tmp_size, count;
drm_via_private_t *dev_priv;
- DRM_DEBUG("via cmdbuf_size\n");
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
dev_priv = (drm_via_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start == NULL) {
- DRM_ERROR("%s called without initializing AGP ring buffer.\n",
- __FUNCTION__);
+ DRM_ERROR("called without initializing AGP ring buffer.\n");
return -EFAULT;
}
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index c6fd16f3cb43..33c5197b73c4 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -1,5 +1,5 @@
/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
- *
+ *
* Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -16,22 +16,22 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors:
+ * Authors:
* Thomas Hellstrom.
* Partially based on code obtained from Digeo Inc.
*/
/*
- * Unmaps the DMA mappings.
- * FIXME: Is this a NoOp on x86? Also
- * FIXME: What happens if this one is called and a pending blit has previously done
- * the same DMA mappings?
+ * Unmaps the DMA mappings.
+ * FIXME: Is this a NoOp on x86? Also
+ * FIXME: What happens if this one is called and a pending blit has previously done
+ * the same DMA mappings?
*/
#include "drmP.h"
@@ -65,7 +65,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
int num_desc = vsg->num_desc;
unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
- drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+ drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
descriptor_this_page;
dma_addr_t next = vsg->chain_start;
@@ -73,7 +73,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
if (descriptor_this_page-- == 0) {
cur_descriptor_page--;
descriptor_this_page = vsg->descriptors_per_page - 1;
- desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+ desc_ptr = vsg->desc_pages[cur_descriptor_page] +
descriptor_this_page;
}
dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
@@ -93,7 +93,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
static void
via_map_blit_for_device(struct pci_dev *pdev,
const drm_via_dmablit_t *xfer,
- drm_via_sg_info_t *vsg,
+ drm_via_sg_info_t *vsg,
int mode)
{
unsigned cur_descriptor_page = 0;
@@ -110,7 +110,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
dma_addr_t next = 0 | VIA_DMA_DPR_EC;
drm_via_descriptor_t *desc_ptr = NULL;
- if (mode == 1)
+ if (mode == 1)
desc_ptr = vsg->desc_pages[cur_descriptor_page];
for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
@@ -118,24 +118,24 @@ via_map_blit_for_device(struct pci_dev *pdev,
line_len = xfer->line_length;
cur_fb = fb_addr;
cur_mem = mem_addr;
-
+
while (line_len > 0) {
remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
line_len -= remaining_len;
if (mode == 1) {
- desc_ptr->mem_addr =
- dma_map_page(&pdev->dev,
- vsg->pages[VIA_PFN(cur_mem) -
+ desc_ptr->mem_addr =
+ dma_map_page(&pdev->dev,
+ vsg->pages[VIA_PFN(cur_mem) -
VIA_PFN(first_addr)],
- VIA_PGOFF(cur_mem), remaining_len,
+ VIA_PGOFF(cur_mem), remaining_len,
vsg->direction);
desc_ptr->dev_addr = cur_fb;
-
+
desc_ptr->size = remaining_len;
desc_ptr->next = (uint32_t) next;
- next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
+ next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
DMA_TO_DEVICE);
desc_ptr++;
if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
@@ -143,12 +143,12 @@ via_map_blit_for_device(struct pci_dev *pdev,
desc_ptr = vsg->desc_pages[++cur_descriptor_page];
}
}
-
+
num_desc++;
cur_mem += remaining_len;
cur_fb += remaining_len;
}
-
+
mem_addr += xfer->mem_stride;
fb_addr += xfer->fb_stride;
}
@@ -161,14 +161,14 @@ via_map_blit_for_device(struct pci_dev *pdev,
}
/*
- * Function that frees up all resources for a blit. It is usable even if the
+ * Function that frees up all resources for a blit. It is usable even if the
* blit info has only been partially built as long as the status enum is consistent
* with the actual status of the used resources.
*/
static void
-via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
+via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
struct page *page;
int i;
@@ -185,7 +185,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
case dr_via_pages_locked:
for (i=0; i<vsg->num_pages; ++i) {
if ( NULL != (page = vsg->pages[i])) {
- if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
+ if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
SetPageDirty(page);
page_cache_release(page);
}
@@ -200,7 +200,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
vsg->bounce_buffer = NULL;
}
vsg->free_on_sequence = 0;
-}
+}
/*
* Fire a blit engine.
@@ -213,7 +213,7 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
VIA_DMA_CSR_DE);
VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
@@ -233,9 +233,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
int ret;
unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
- vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
+ vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
first_pfn + 1;
-
+
if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
return -ENOMEM;
memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
@@ -248,7 +248,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
up_read(&current->mm->mmap_sem);
if (ret != vsg->num_pages) {
- if (ret < 0)
+ if (ret < 0)
return ret;
vsg->state = dr_via_pages_locked;
return -EINVAL;
@@ -264,21 +264,21 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
* quite large for some blits, and pages don't need to be contiguous.
*/
-static int
+static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
int i;
-
+
vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
- vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
+ vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
vsg->descriptors_per_page;
if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
return -ENOMEM;
-
+
vsg->state = dr_via_desc_pages_alloc;
for (i=0; i<vsg->num_desc_pages; ++i) {
- if (NULL == (vsg->desc_pages[i] =
+ if (NULL == (vsg->desc_pages[i] =
(drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
return -ENOMEM;
}
@@ -286,7 +286,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
vsg->num_desc);
return 0;
}
-
+
static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
@@ -300,7 +300,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}
@@ -311,7 +311,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
* task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
* the workqueue task takes care of processing associated with the old blit.
*/
-
+
void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
@@ -331,19 +331,19 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
- done_transfer = blitq->is_active &&
+ done_transfer = blitq->is_active &&
(( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
- done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
+ done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
cur = blitq->cur;
if (done_transfer) {
blitq->blits[cur]->aborted = blitq->aborting;
blitq->done_blit_handle++;
- DRM_WAKEUP(blitq->blit_queue + cur);
+ DRM_WAKEUP(blitq->blit_queue + cur);
cur++;
- if (cur >= VIA_NUM_BLIT_SLOTS)
+ if (cur >= VIA_NUM_BLIT_SLOTS)
cur = 0;
blitq->cur = cur;
@@ -355,7 +355,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->is_active = 0;
blitq->aborting = 0;
- schedule_work(&blitq->wq);
+ schedule_work(&blitq->wq);
} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
@@ -367,7 +367,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->aborting = 1;
blitq->end = jiffies + DRM_HZ;
}
-
+
if (!blitq->is_active) {
if (blitq->num_outstanding) {
via_fire_dmablit(dev, blitq->blits[cur], engine);
@@ -383,14 +383,14 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
}
via_dmablit_engine_off(dev, engine);
}
- }
+ }
if (from_irq) {
spin_unlock(&blitq->blit_lock);
} else {
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
-}
+}
@@ -426,13 +426,13 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que
return active;
}
-
+
/*
* Sync. Wait for at least three seconds for the blit to be performed.
*/
static int
-via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
+via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -441,12 +441,12 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
int ret = 0;
if (via_dmablit_active(blitq, engine, handle, &queue)) {
- DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
!via_dmablit_active(blitq, engine, handle, NULL));
}
DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
handle, engine, ret);
-
+
return ret;
}
@@ -468,12 +468,12 @@ via_dmablit_timer(unsigned long data)
struct drm_device *dev = blitq->dev;
int engine = (int)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
-
- DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
+
+ DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
(unsigned long) jiffies);
via_dmablit_handler(dev, engine, 0);
-
+
if (!timer_pending(&blitq->poll_timer)) {
mod_timer(&blitq->poll_timer, jiffies + 1);
@@ -497,7 +497,7 @@ via_dmablit_timer(unsigned long data)
*/
-static void
+static void
via_dmablit_workqueue(struct work_struct *work)
{
drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
@@ -505,38 +505,38 @@ via_dmablit_workqueue(struct work_struct *work)
unsigned long irqsave;
drm_via_sg_info_t *cur_sg;
int cur_released;
-
-
- DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
+
+
+ DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
+
while(blitq->serviced != blitq->cur) {
cur_released = blitq->serviced++;
DRM_DEBUG("Releasing blit slot %d\n", cur_released);
- if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
+ if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
blitq->serviced = 0;
-
+
cur_sg = blitq->blits[cur_released];
blitq->num_free++;
-
+
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
+
DRM_WAKEUP(&blitq->busy_queue);
-
+
via_free_sg_info(dev->pdev, cur_sg);
kfree(cur_sg);
-
+
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
-
+
/*
* Init all blit engines. Currently we use two, but some hardware have 4.
@@ -550,8 +550,8 @@ via_init_dmablit(struct drm_device *dev)
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_blitq_t *blitq;
- pci_set_master(dev->pdev);
-
+ pci_set_master(dev->pdev);
+
for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
blitq = dev_priv->blit_queues + i;
blitq->dev = dev;
@@ -572,20 +572,20 @@ via_init_dmablit(struct drm_device *dev)
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
setup_timer(&blitq->poll_timer, via_dmablit_timer,
(unsigned long)blitq);
- }
+ }
}
/*
* Build all info and do all mappings required for a blit.
*/
-
+
static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
int draw = xfer->to_fb;
int ret = 0;
-
+
vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
vsg->bounce_buffer = NULL;
@@ -599,7 +599,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
/*
* Below check is a driver limitation, not a hardware one. We
* don't want to lock unused pages, and don't want to incorporate the
- * extra logic of avoiding them. Make sure there are no.
+ * extra logic of avoiding them. Make sure there are no.
* (Not a big limitation anyway.)
*/
@@ -625,11 +625,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
DRM_ERROR("Too large PCI DMA bitblt.\n");
return -EINVAL;
- }
+ }
- /*
+ /*
* we allow a negative fb stride to allow flipping of images in
- * transfer.
+ * transfer.
*/
if (xfer->mem_stride < xfer->line_length ||
@@ -653,11 +653,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
#else
if ((((unsigned long)xfer->mem_addr & 15) ||
((unsigned long)xfer->fb_addr & 3)) ||
- ((xfer->num_lines > 1) &&
+ ((xfer->num_lines > 1) &&
((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
DRM_ERROR("Invalid DRM bitblt alignment.\n");
return -EINVAL;
- }
+ }
#endif
if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
@@ -673,17 +673,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
return ret;
}
via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
-
+
return 0;
}
-
+
/*
* Reserve one free slot in the blit queue. Will wait for one second for one
* to become available. Otherwise -EBUSY is returned.
*/
-static int
+static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
int ret=0;
@@ -698,10 +698,10 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
if (ret) {
return (-EINTR == ret) ? -EAGAIN : ret;
}
-
+
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
-
+
blitq->num_free--;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
@@ -712,7 +712,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
* Hand back a free slot if we changed our mind.
*/
-static void
+static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
unsigned long irqsave;
@@ -728,8 +728,8 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
*/
-static int
-via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
+static int
+via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_sg_info_t *vsg;
@@ -760,15 +760,15 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
blitq->blits[blitq->head++] = vsg;
- if (blitq->head >= VIA_NUM_BLIT_SLOTS)
+ if (blitq->head >= VIA_NUM_BLIT_SLOTS)
blitq->head = 0;
blitq->num_outstanding++;
- xfer->sync.sync_handle = ++blitq->cur_blit_handle;
+ xfer->sync.sync_handle = ++blitq->cur_blit_handle;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
xfer->sync.engine = engine;
- via_dmablit_handler(dev, engine, 0);
+ via_dmablit_handler(dev, engine, 0);
return 0;
}
@@ -776,7 +776,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
/*
* Sync on a previously submitted blit. Note that the X server use signals extensively, and
* that there is a very big probability that this IOCTL will be interrupted by a signal. In that
- * case it returns with -EAGAIN for the signal to be delivered.
+ * case it returns with -EAGAIN for the signal to be delivered.
* The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
*/
@@ -786,7 +786,7 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
drm_via_blitsync_t *sync = data;
int err;
- if (sync->engine >= VIA_NUM_BLIT_ENGINES)
+ if (sync->engine >= VIA_NUM_BLIT_ENGINES)
return -EINVAL;
err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
@@ -796,15 +796,15 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
return err;
}
-
+
/*
* Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
- * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
+ * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
* be reissued. See the above IOCTL code.
*/
-int
+int
via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
{
drm_via_dmablit_t *xfer = data;
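
The two comments above describe the signal-handling contract of these IOCTLs: via_dma_blit_sync() and via_dma_blit() return -EAGAIN when interrupted by a signal and expect the caller to simply reissue the request. A minimal user-space sketch of that retry loop follows; the wrapper name and the opaque request code are illustrative only and are not part of this patch.

	#include <errno.h>
	#include <sys/ioctl.h>

	/*
	 * Reissue an ioctl while the driver reports EAGAIN (or the call is
	 * interrupted outright), as the VIA blit IOCTLs ask callers to do.
	 */
	static int retry_blit_ioctl(int drm_fd, unsigned long request, void *arg)
	{
		int ret;

		do {
			ret = ioctl(drm_fd, request, arg);
		} while (ret == -1 && (errno == EAGAIN || errno == EINTR));

		return ret;
	}
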
diff --git a/drivers/char/drm/via_dmablit.h b/drivers/char/drm/via_dmablit.h
index 6f6a513d5147..7408a547a036 100644
--- a/drivers/char/drm/via_dmablit.h
+++ b/drivers/char/drm/via_dmablit.h
@@ -1,5 +1,5 @@
/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
- *
+ *
* Copyright 2005 Thomas Hellstrom.
* All Rights Reserved.
*
@@ -17,12 +17,12 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors:
+ * Authors:
* Thomas Hellstrom.
* Register info from Digeo Inc.
*/
@@ -67,7 +67,7 @@ typedef struct _drm_via_blitq {
unsigned cur;
unsigned num_free;
unsigned num_outstanding;
- unsigned long end;
+ unsigned long end;
int aborting;
int is_active;
drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
@@ -77,46 +77,46 @@ typedef struct _drm_via_blitq {
struct work_struct wq;
struct timer_list poll_timer;
} drm_via_blitq_t;
-
-/*
+
+/*
* PCI DMA Registers
* Channels 2 & 3 don't seem to be implemented in hardware.
*/
-
-#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
-#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
-#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
-#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
-
-#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
-#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
-#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
-#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
-
-#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
-#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
-#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
-#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
-
-#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
-#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
-#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
-#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
-
-#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
-#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
-#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
-#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
-
-#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
-#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
-#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
-#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
-
-#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
-
-/* Define for DMA engine */
+
+#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
+#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
+#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
+#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
+
+#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
+#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
+#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
+#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
+
+#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
+#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
+#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
+#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
+
+#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
+#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
+#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
+#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
+
+#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
+#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
+#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
+#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
+
+#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
+#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
+#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
+#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
+
+#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
+
+/* Define for DMA engine */
/* DPR */
#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
diff --git a/drivers/char/drm/via_drm.h b/drivers/char/drm/via_drm.h
index 8f53c76062e9..a3b5c102b067 100644
--- a/drivers/char/drm/via_drm.h
+++ b/drivers/char/drm/via_drm.h
@@ -35,7 +35,7 @@
#include "via_drmclient.h"
#endif
-#define VIA_NR_SAREA_CLIPRECTS 8
+#define VIA_NR_SAREA_CLIPRECTS 8
#define VIA_NR_XVMC_PORTS 10
#define VIA_NR_XVMC_LOCKS 5
#define VIA_MAX_CACHELINE_SIZE 64
@@ -259,7 +259,7 @@ typedef struct drm_via_blitsync {
typedef struct drm_via_dmablit {
uint32_t num_lines;
uint32_t line_length;
-
+
uint32_t fb_addr;
uint32_t fb_stride;
diff --git a/drivers/char/drm/via_drv.c b/drivers/char/drm/via_drv.c
index 2d4957ab256a..80c01cdfa37d 100644
--- a/drivers/char/drm/via_drv.c
+++ b/drivers/char/drm/via_drv.c
@@ -71,7 +71,7 @@ static struct drm_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
},
-
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index 9c1d52bc92d7..c6bb978a1106 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -169,9 +169,9 @@ int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
unsigned int cur_vblank;
int ret = 0;
- DRM_DEBUG("viadrv_vblank_wait\n");
+ DRM_DEBUG("\n");
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -201,24 +201,23 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
maskarray_t *masks;
int real_irq;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
if (irq >= drm_via_irq_num) {
- DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
- irq);
+ DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
return -EINVAL;
}
real_irq = dev_priv->irq_map[irq];
if (real_irq < 0) {
- DRM_ERROR("%s Video IRQ %d not available on this hardware.\n",
- __FUNCTION__, irq);
+ DRM_ERROR("Video IRQ %d not available on this hardware.\n",
+ irq);
return -EINVAL;
}
@@ -251,7 +250,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
drm_via_irq_t *cur_irq;
int i;
- DRM_DEBUG("driver_irq_preinstall: dev_priv: %p\n", dev_priv);
+ DRM_DEBUG("dev_priv: %p\n", dev_priv);
if (dev_priv) {
cur_irq = dev_priv->via_irqs;
@@ -298,7 +297,7 @@ void via_driver_irq_postinstall(struct drm_device * dev)
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
- DRM_DEBUG("via_driver_irq_postinstall\n");
+ DRM_DEBUG("\n");
if (dev_priv) {
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
@@ -317,7 +316,7 @@ void via_driver_irq_uninstall(struct drm_device * dev)
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
- DRM_DEBUG("driver_irq_uninstall)\n");
+ DRM_DEBUG("\n");
if (dev_priv) {
/* Some more magic, oh for some data sheets ! */
@@ -344,7 +343,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
if (irqwait->request.irq >= dev_priv->num_irqs) {
- DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
+ DRM_ERROR("Trying to wait on unknown irq %d\n",
irqwait->request.irq);
return -EINVAL;
}
@@ -362,8 +361,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
if (irqwait->request.type & VIA_IRQ_SIGNAL) {
- DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n",
- __FUNCTION__);
+ DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
return -EINVAL;
}
diff --git a/drivers/char/drm/via_map.c b/drivers/char/drm/via_map.c
index 10091507a0dc..a967556be014 100644
--- a/drivers/char/drm/via_map.c
+++ b/drivers/char/drm/via_map.c
@@ -29,7 +29,7 @@ static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
{
drm_via_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
@@ -79,7 +79,7 @@ int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_init_t *init = data;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
switch (init->func) {
case VIA_INIT_MAP:
@@ -121,4 +121,3 @@ int via_driver_unload(struct drm_device *dev)
return 0;
}
-
diff --git a/drivers/char/drm/via_mm.c b/drivers/char/drm/via_mm.c
index 3ffbf8649833..e64094916e4f 100644
--- a/drivers/char/drm/via_mm.c
+++ b/drivers/char/drm/via_mm.c
@@ -53,7 +53,7 @@ int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
dev_priv->agp_offset = agp->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
+ DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
return 0;
}
@@ -77,7 +77,7 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
dev_priv->vram_offset = fb->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
+ DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
return 0;
@@ -113,7 +113,7 @@ void via_lastclose(struct drm_device *dev)
dev_priv->vram_initialized = 0;
dev_priv->agp_initialized = 0;
mutex_unlock(&dev->struct_mutex);
-}
+}
int via_mem_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
diff --git a/drivers/char/drm/via_video.c b/drivers/char/drm/via_video.c
index c15e75b54cb1..6ec04ac12459 100644
--- a/drivers/char/drm/via_video.c
+++ b/drivers/char/drm/via_video.c
@@ -33,7 +33,7 @@ void via_init_futex(drm_via_private_t * dev_priv)
{
unsigned int i;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
@@ -73,7 +73,7 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_
drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
int ret = 0;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
if (fx->lock > VIA_NR_XVMC_LOCKS)
return -EFAULT;
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index 004141d535a2..49233f589874 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -18,7 +18,7 @@
*
* NOTES:
* - Locking is required for safe execution of EFI calls with regards
- * to interrrupts and SMP.
+ * to interrupts and SMP.
*
* TODO (December 1999):
* - provide the API to set/get the WakeUp Alarm (different from the
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index ffcecde9e2a5..ffd747c5dff0 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -1797,7 +1797,7 @@ static unsigned termios2digi_c(struct channel *ch, unsigned cflag)
res |= cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB | CSTOPB | CSIZE);
/*
* This gets a little confusing. The Digi cards have their own
- * representation of c_cflags controling baud rate. For the most part
+ * representation of c_cflags controlling baud rate. For the most part
* this is identical to the Linux implementation. However; Digi
* supports one rate (76800) that Linux doesn't. This means that the
* c_cflag entry that would normally mean 76800 for Digi actually means
@@ -2068,7 +2068,7 @@ static int info_ioctl(struct tty_struct *tty, struct file *file,
{
/*
* This call is made by the apps to complete the
- * initilization of the board(s). This routine is
+ * initialization of the board(s). This routine is
* responsible for setting the card to its initial
* state and setting the drivers control fields to the
 * suitable settings for the card in question.
diff --git a/drivers/char/epca.h b/drivers/char/epca.h
index a297238cd3ba..3c77c02b5d65 100644
--- a/drivers/char/epca.h
+++ b/drivers/char/epca.h
@@ -77,7 +77,6 @@ static char *board_desc[] =
#define ON 1
#define FEPTIMEOUT 200000
-#define SERIAL_TYPE_NORMAL 1
#define SERIAL_TYPE_INFO 3
#define EPCA_EVENT_HANGUP 1
#define EPCA_MAGIC 0x5c6df104L
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
index 28607763ae64..c01e26d9ee5e 100644
--- a/drivers/char/esp.c
+++ b/drivers/char/esp.c
@@ -111,9 +111,6 @@ static char serial_version[] __initdata = "2.2";
static struct tty_driver *esp_driver;
-/* serial subtype definitions */
-#define SERIAL_TYPE_NORMAL 1
-
/*
* Serial driver configuration section. Here are the various options:
*
@@ -245,17 +242,6 @@ static void rs_start(struct tty_struct *tty)
* -----------------------------------------------------------------------
*/
-/*
- * This routine is used by the interrupt handler to schedule
- * processing in the software interrupt portion of the driver.
- */
-static inline void rs_sched_event(struct esp_struct *info,
- int event)
-{
- info->event |= 1 << event;
- schedule_work(&info->tqueue);
-}
-
static DEFINE_SPINLOCK(pio_lock);
static inline struct esp_pio_buffer *get_pio_buffer(void)
@@ -477,7 +463,8 @@ static inline void transmit_chars_pio(struct esp_struct *info,
}
if (info->xmit_cnt < WAKEUP_CHARS) {
- rs_sched_event(info, ESP_EVENT_WRITE_WAKEUP);
+ if (info->tty)
+ tty_wakeup(info->tty);
#ifdef SERIAL_DEBUG_INTR
printk("THRE...");
@@ -515,7 +502,8 @@ static inline void transmit_chars_dma(struct esp_struct *info, int num_bytes)
info->xmit_tail = (info->xmit_tail + dma_bytes) & (ESP_XMIT_SIZE - 1);
if (info->xmit_cnt < WAKEUP_CHARS) {
- rs_sched_event(info, ESP_EVENT_WRITE_WAKEUP);
+ if (info->tty)
+ tty_wakeup(info->tty);
#ifdef SERIAL_DEBUG_INTR
printk("THRE...");
@@ -607,7 +595,7 @@ static inline void check_modem_status(struct esp_struct *info)
#ifdef SERIAL_DEBUG_OPEN
printk("scheduling hangup...");
#endif
- schedule_work(&info->tqueue_hangup);
+ tty_hangup(info->tty);
}
}
}
@@ -723,41 +711,6 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
* -------------------------------------------------------------------
*/
-static void do_softint(struct work_struct *work)
-{
- struct esp_struct *info =
- container_of(work, struct esp_struct, tqueue);
- struct tty_struct *tty;
-
- tty = info->tty;
- if (!tty)
- return;
-
- if (test_and_clear_bit(ESP_EVENT_WRITE_WAKEUP, &info->event)) {
- tty_wakeup(tty);
- }
-}
-
-/*
- * This routine is called from the scheduler tqueue when the interrupt
- * routine has signalled that a hangup has occurred. The path of
- * hangup processing is:
- *
- * serial interrupt routine -> (scheduler tqueue) ->
- * do_serial_hangup() -> tty->hangup() -> esp_hangup()
- *
- */
-static void do_serial_hangup(struct work_struct *work)
-{
- struct esp_struct *info =
- container_of(work, struct esp_struct, tqueue_hangup);
- struct tty_struct *tty;
-
- tty = info->tty;
- if (tty)
- tty_hangup(tty);
-}
-
/*
* ---------------------------------------------------------------
* Low level utility subroutines for the serial driver: routines to
@@ -2041,7 +1994,6 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
tty->driver->flush_buffer(tty);
tty_ldisc_flush(tty);
tty->closing = 0;
- info->event = 0;
info->tty = NULL;
if (info->blocked_open) {
@@ -2109,7 +2061,6 @@ static void esp_hangup(struct tty_struct *tty)
rs_flush_buffer(tty);
shutdown(info);
- info->event = 0;
info->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
info->tty = NULL;
@@ -2495,8 +2446,6 @@ static int __init espserial_init(void)
info->magic = ESP_MAGIC;
info->close_delay = 5*HZ/10;
info->closing_wait = 30*HZ;
- INIT_WORK(&info->tqueue, do_softint);
- INIT_WORK(&info->tqueue_hangup, do_serial_hangup);
info->config.rx_timeout = rx_timeout;
info->config.flow_on = flow_on;
info->config.flow_off = flow_off;
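
The esp.c hunks above drop the deferred-work indirection (rs_sched_event()/do_softint() and do_serial_hangup()) and instead call tty_wakeup()/tty_hangup() directly from the interrupt path, which both helpers tolerate. A minimal sketch of the resulting wakeup pattern, using a hypothetical driver-private structure that is not part of this patch:

	#include <linux/tty.h>

	struct demo_port {
		struct tty_struct *tty;
	};

	/* Transmit path: wake writers directly instead of scheduling a
	 * work item whose only job would be to call tty_wakeup() later. */
	static void demo_tx_space_available(struct demo_port *info)
	{
		if (info->tty)
			tty_wakeup(info->tty);
	}
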
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 0e8ceea5ea78..712d9f271aa6 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -26,7 +26,7 @@
* The hangcheck-timer driver uses the TSC to catch delays that
* jiffies does not notice. A timer is set. When the timer fires, it
* checks whether it was delayed and if that delay exceeds a given
- * margin of error. The hangcheck_tick module paramter takes the timer
+ * margin of error. The hangcheck_tick module parameter takes the timer
* duration in seconds. The hangcheck_margin parameter defines the
* margin of error, in seconds. The defaults are 60 seconds for the
* timer and 180 seconds for the margin of error. IOW, a timer is set
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 480fae29c9b2..44160d5ebca0 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -93,7 +93,7 @@ struct hvc_struct {
};
/* dynamic list of hvc_struct instances */
-static struct list_head hvc_structs = LIST_HEAD_INIT(hvc_structs);
+static LIST_HEAD(hvc_structs);
/*
* Protect the list of hvc_struct instances from inserts and removals during
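
This hunk, and the matching ones in hvcs.c and ipmi_msghandler.c below, replace the open-coded list initializer with the LIST_HEAD() helper from <linux/list.h>. The two spellings define the same object; a side-by-side sketch with placeholder names:

	#include <linux/list.h>

	/* Old spelling: declare the head and initialize it explicitly. */
	static struct list_head demo_list_old = LIST_HEAD_INIT(demo_list_old);

	/* New spelling: LIST_HEAD() expands to exactly the definition above. */
	static LIST_HEAD(demo_list_new);
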
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index fd7559084b82..786d518e9477 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -306,7 +306,7 @@ struct hvcs_struct {
/* Required to back map a kref to its containing object */
#define from_kref(k) container_of(k, struct hvcs_struct, kref)
-static struct list_head hvcs_structs = LIST_HEAD_INIT(hvcs_structs);
+static LIST_HEAD(hvcs_structs);
static DEFINE_SPINLOCK(hvcs_structs_lock);
static void hvcs_unthrottle(struct tty_struct *tty);
@@ -838,7 +838,7 @@ static int __devexit hvcs_remove(struct vio_dev *dev)
if (!hvcsd)
return -ENODEV;
- /* By this time the vty-server won't be getting any more interrups */
+ /* By this time the vty-server won't be getting any more interrupts */
spin_lock_irqsave(&hvcsd->lock, flags);
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 0118b9817a95..84cdf9025737 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -234,11 +234,11 @@ static DEVICE_ATTR(rng_available, S_IRUGO,
NULL);
-static void unregister_miscdev(void)
+static void unregister_miscdev(bool suspended)
{
device_remove_file(rng_miscdev.this_device, &dev_attr_rng_available);
device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
- misc_deregister(&rng_miscdev);
+ __misc_deregister(&rng_miscdev, suspended);
}
static int register_miscdev(void)
@@ -313,7 +313,7 @@ out:
}
EXPORT_SYMBOL_GPL(hwrng_register);
-void hwrng_unregister(struct hwrng *rng)
+void __hwrng_unregister(struct hwrng *rng, bool suspended)
{
int err;
@@ -332,11 +332,11 @@ void hwrng_unregister(struct hwrng *rng)
}
}
if (list_empty(&rng_list))
- unregister_miscdev();
+ unregister_miscdev(suspended);
mutex_unlock(&rng_mutex);
}
-EXPORT_SYMBOL_GPL(hwrng_unregister);
+EXPORT_SYMBOL_GPL(__hwrng_unregister);
MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 868e39fd42e4..f7feae4ebb5e 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -42,6 +42,8 @@ enum {
VIA_STRFILT_ENABLE = (1 << 14),
VIA_RAWBITS_ENABLE = (1 << 13),
VIA_RNG_ENABLE = (1 << 6),
+ VIA_NOISESRC1 = (1 << 8),
+ VIA_NOISESRC2 = (1 << 9),
VIA_XSTORE_CNT_MASK = 0x0F,
VIA_RNG_CHUNK_8 = 0x00, /* 64 rand bits, 64 stored bits */
@@ -119,6 +121,7 @@ static int via_rng_data_read(struct hwrng *rng, u32 *data)
static int via_rng_init(struct hwrng *rng)
{
+ struct cpuinfo_x86 *c = &cpu_data(0);
u32 lo, hi, old_lo;
/* Control the RNG via MSR. Tread lightly and pay very close
@@ -134,6 +137,17 @@ static int via_rng_init(struct hwrng *rng)
lo &= ~VIA_XSTORE_CNT_MASK;
lo &= ~(VIA_STRFILT_ENABLE | VIA_STRFILT_FAIL | VIA_RAWBITS_ENABLE);
lo |= VIA_RNG_ENABLE;
+ lo |= VIA_NOISESRC1;
+
+ /* Enable secondary noise source on CPUs where it is present. */
+
+ /* Nehemiah stepping 8 and higher */
+ if ((c->x86_model == 9) && (c->x86_mask > 7))
+ lo |= VIA_NOISESRC2;
+
+ /* Esther */
+ if (c->x86_model >= 10)
+ lo |= VIA_NOISESRC2;
if (lo != old_lo)
wrmsr(MSR_VIA_RNG, lo, hi);
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 30e564516422..8609b8236c67 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -113,6 +113,33 @@ static int i8k_smm(struct smm_regs *regs)
int rc;
int eax = regs->eax;
+#if defined(CONFIG_X86_64)
+ asm("pushq %%rax\n\t"
+ "movl 0(%%rax),%%edx\n\t"
+ "pushq %%rdx\n\t"
+ "movl 4(%%rax),%%ebx\n\t"
+ "movl 8(%%rax),%%ecx\n\t"
+ "movl 12(%%rax),%%edx\n\t"
+ "movl 16(%%rax),%%esi\n\t"
+ "movl 20(%%rax),%%edi\n\t"
+ "popq %%rax\n\t"
+ "out %%al,$0xb2\n\t"
+ "out %%al,$0x84\n\t"
+ "xchgq %%rax,(%%rsp)\n\t"
+ "movl %%ebx,4(%%rax)\n\t"
+ "movl %%ecx,8(%%rax)\n\t"
+ "movl %%edx,12(%%rax)\n\t"
+ "movl %%esi,16(%%rax)\n\t"
+ "movl %%edi,20(%%rax)\n\t"
+ "popq %%rdx\n\t"
+ "movl %%edx,0(%%rax)\n\t"
+ "lahf\n\t"
+ "shrl $8,%%eax\n\t"
+ "andl $1,%%eax\n"
+ :"=a"(rc)
+ : "a"(regs)
+ : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
+#else
asm("pushl %%eax\n\t"
"movl 0(%%eax),%%edx\n\t"
"push %%edx\n\t"
@@ -137,7 +164,7 @@ static int i8k_smm(struct smm_regs *regs)
"andl $1,%%eax\n":"=a"(rc)
: "a"(regs)
: "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
-
+#endif
if (rc != 0 || (regs->eax & 0xffff) == 0xffff || regs->eax == eax)
return -EINVAL;
@@ -439,6 +466,20 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude"),
},
},
+ { /* UK Inspiron 6400 */
+ .ident = "Dell Inspiron 3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MM061"),
+ },
+ },
+ {
+ .ident = "Dell Inspiron 3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MP061"),
+ },
+ },
{ }
};
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c
index e46120d05b68..d6567b32fb5c 100644
--- a/drivers/char/ip2/i2lib.c
+++ b/drivers/char/ip2/i2lib.c
@@ -661,7 +661,7 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
if (!in_interrupt()) {
schedule_timeout_interruptible(1); // short nap
} else {
- // we cannot sched/sleep in interrrupt silly
+ // we cannot sched/sleep in interrupt silly
return 0;
}
if (signal_pending(current)) {
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index e04e66cf2c68..b1d6cad84282 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -153,9 +153,6 @@ static char *pcVersion = "1.2.14";
static char *pcDriver_name = "ip2";
static char *pcIpl = "ip2ipl";
-/* Serial subtype definitions */
-#define SERIAL_TYPE_NORMAL 1
-
// cheezy kludge or genius - you decide?
int ip2_loadmain(int *, int *, unsigned char *, int);
static unsigned char *Fip_firmware;
@@ -1251,7 +1248,7 @@ ip2_poll(unsigned long arg)
// Just polled boards, IRQ = 0 will hit all non-interrupt boards.
// It will NOT poll boards handled by hard interrupts.
- // The issue of queued BH interrups is handled in ip2_interrupt().
+ // The issue of queued BH interrupts is handled in ip2_interrupt().
ip2_polled_interrupt();
PollTimer.expires = POLL_TIMEOUT;
diff --git a/drivers/char/ip27-rtc.c b/drivers/char/ip27-rtc.c
index 932264a657d0..86e6538a77b0 100644
--- a/drivers/char/ip27-rtc.c
+++ b/drivers/char/ip27-rtc.c
@@ -46,8 +46,8 @@
#include <asm/sn/sn0/hub.h>
#include <asm/sn/sn_private.h>
-static int rtc_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
+static long rtc_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
static int rtc_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data);
@@ -75,8 +75,7 @@ static unsigned long epoch = 1970; /* year corresponding to 0x00 */
static const unsigned char days_in_mo[] =
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
+static long rtc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct rtc_time wtime;
@@ -197,7 +196,7 @@ static int rtc_release(struct inode *inode, struct file *file)
static const struct file_operations rtc_fops = {
.owner = THIS_MODULE,
- .ioctl = rtc_ioctl,
+ .unlocked_ioctl = rtc_ioctl,
.open = rtc_open,
.release = rtc_release,
};
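
The ip27-rtc.c hunks above switch from the legacy .ioctl hook to .unlocked_ioctl, dropping the struct inode argument and returning long. The shape of a converted handler, sketched with placeholder names rather than the driver's own:

	#include <linux/errno.h>
	#include <linux/fs.h>
	#include <linux/module.h>

	static long demo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
	{
		/* dispatch on cmd here; unknown commands return -ENOTTY */
		return -ENOTTY;
	}

	static const struct file_operations demo_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= demo_ioctl,
	};
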
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 5dc1265ce1d5..32b2b22996dc 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -365,12 +365,12 @@ static struct device_driver ipmidriver = {
};
static DEFINE_MUTEX(ipmidriver_mutex);
-static struct list_head ipmi_interfaces = LIST_HEAD_INIT(ipmi_interfaces);
+static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
/* List of watchers that want to know when smi's are added and
deleted. */
-static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
+static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);
@@ -441,7 +441,7 @@ struct watcher_entry {
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
ipmi_smi_t intf;
- struct list_head to_deliver = LIST_HEAD_INIT(to_deliver);
+ LIST_HEAD(to_deliver);
struct watcher_entry *e, *e2;
mutex_lock(&smi_watchers_mutex);
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 1f27be1ec3d4..c645455c3fd1 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -627,7 +627,6 @@ static int stli_initopen(struct stlibrd *brdp, struct stliport *portp);
static int stli_rawopen(struct stlibrd *brdp, struct stliport *portp, unsigned long arg, int wait);
static int stli_rawclose(struct stlibrd *brdp, struct stliport *portp, unsigned long arg, int wait);
static int stli_waitcarrier(struct stlibrd *brdp, struct stliport *portp, struct file *filp);
-static void stli_dohangup(struct work_struct *);
static int stli_setport(struct stliport *portp);
static int stli_cmdwait(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback);
static void stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback);
@@ -1824,25 +1823,6 @@ static void stli_start(struct tty_struct *tty)
/*****************************************************************************/
/*
- * Scheduler called hang up routine. This is called from the scheduler,
- * not direct from the driver "poll" routine. We can't call it there
- * since the real local hangup code will enable/disable the board and
- * other things that we can't do while handling the poll. Much easier
- * to deal with it some time later (don't really care when, hangups
- * aren't that time critical).
- */
-
-static void stli_dohangup(struct work_struct *ugly_api)
-{
- struct stliport *portp = container_of(ugly_api, struct stliport, tqhangup);
- if (portp->tty != NULL) {
- tty_hangup(portp->tty);
- }
-}
-
-/*****************************************************************************/
-
-/*
* Hangup this port. This is pretty much like closing the port, only
* a little more brutal. No waiting for data to drain. Shutdown the
* port and maybe drop signals. This is rather tricky really. We want
@@ -2405,7 +2385,7 @@ static int stli_hostcmd(struct stlibrd *brdp, struct stliport *portp)
((portp->sigs & TIOCM_CD) == 0)) {
if (portp->flags & ASYNC_CHECK_CD) {
if (tty)
- schedule_work(&portp->tqhangup);
+ tty_hangup(tty);
}
}
}
@@ -2733,7 +2713,6 @@ static int stli_initports(struct stlibrd *brdp)
portp->baud_base = STL_BAUDBASE;
portp->close_delay = STL_CLOSEDELAY;
portp->closing_wait = 30 * HZ;
- INIT_WORK(&portp->tqhangup, stli_dohangup);
init_waitqueue_head(&portp->open_wait);
init_waitqueue_head(&portp->close_wait);
init_waitqueue_head(&portp->raw_wait);
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 81674d7c56c7..60ac642752be 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -312,7 +312,7 @@ static ssize_t lp_write(struct file * file, const char __user * buf,
if (copy_size > LP_BUFFER_SIZE)
copy_size = LP_BUFFER_SIZE;
- if (down_interruptible (&lp_table[minor].port_mutex))
+ if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
return -EINTR;
if (copy_from_user (kbuf, buf, copy_size)) {
@@ -399,7 +399,7 @@ static ssize_t lp_write(struct file * file, const char __user * buf,
lp_release_parport (&lp_table[minor]);
}
out_unlock:
- up (&lp_table[minor].port_mutex);
+ mutex_unlock(&lp_table[minor].port_mutex);
return retv;
}
@@ -421,7 +421,7 @@ static ssize_t lp_read(struct file * file, char __user * buf,
if (count > LP_BUFFER_SIZE)
count = LP_BUFFER_SIZE;
- if (down_interruptible (&lp_table[minor].port_mutex))
+ if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
return -EINTR;
lp_claim_parport_or_block (&lp_table[minor]);
@@ -479,7 +479,7 @@ static ssize_t lp_read(struct file * file, char __user * buf,
if (retval > 0 && copy_to_user (buf, kbuf, retval))
retval = -EFAULT;
- up (&lp_table[minor].port_mutex);
+ mutex_unlock(&lp_table[minor].port_mutex);
return retval;
}
@@ -888,7 +888,7 @@ static int __init lp_init (void)
lp_table[i].last_error = 0;
init_waitqueue_head (&lp_table[i].waitq);
init_waitqueue_head (&lp_table[i].dataq);
- init_MUTEX (&lp_table[i].port_mutex);
+ mutex_init(&lp_table[i].port_mutex);
lp_table[i].timeout = 10 * HZ;
}
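
lp.c here, and mbcs.c just below, convert semaphores that were only ever used as mutexes to struct mutex: init_MUTEX() becomes mutex_init(), down_interruptible() becomes mutex_lock_interruptible(), and up() becomes mutex_unlock(). The pattern in isolation, with a hypothetical device structure:

	#include <linux/mutex.h>

	struct demo_dev {
		struct mutex port_lock;		/* was: struct semaphore */
	};

	/* At setup time: mutex_init(&dev->port_lock) replaces init_MUTEX(). */

	static int demo_do_io(struct demo_dev *dev)
	{
		/* Returns -EINTR if a signal arrives while sleeping, matching
		 * the old down_interruptible() behaviour used by lp_write(). */
		if (mutex_lock_interruptible(&dev->port_lock))
			return -EINTR;

		/* ... touch the hardware under the lock ... */

		mutex_unlock(&dev->port_lock);	/* was: up() */
		return 0;
	}
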
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index 3c5802ae1716..f4716ad7348a 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -23,6 +23,7 @@
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/uio.h>
+#include <linux/mutex.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -281,7 +282,7 @@ static inline int mbcs_algo_start(struct mbcs_soft *soft)
void *mmr_base = soft->mmr_base;
union cm_control cm_control;
- if (down_interruptible(&soft->algolock))
+ if (mutex_lock_interruptible(&soft->algolock))
return -ERESTARTSYS;
atomic_set(&soft->algo_done, 0);
@@ -298,7 +299,7 @@ static inline int mbcs_algo_start(struct mbcs_soft *soft)
cm_control.alg_go = 1;
MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
- up(&soft->algolock);
+ mutex_unlock(&soft->algolock);
return 0;
}
@@ -309,7 +310,7 @@ do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr,
{
int rv = 0;
- if (down_interruptible(&soft->dmawritelock))
+ if (mutex_lock_interruptible(&soft->dmawritelock))
return -ERESTARTSYS;
atomic_set(&soft->dmawrite_done, 0);
@@ -335,7 +336,7 @@ do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr,
*off += len;
dmawrite_exit:
- up(&soft->dmawritelock);
+ mutex_unlock(&soft->dmawritelock);
return rv;
}
@@ -346,7 +347,7 @@ do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr,
{
int rv = 0;
- if (down_interruptible(&soft->dmareadlock))
+ if (mutex_lock_interruptible(&soft->dmareadlock))
return -ERESTARTSYS;
atomic_set(&soft->dmawrite_done, 0);
@@ -371,7 +372,7 @@ do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr,
*off += len;
dmaread_exit:
- up(&soft->dmareadlock);
+ mutex_unlock(&soft->dmareadlock);
return rv;
}
@@ -762,9 +763,9 @@ static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id)
init_waitqueue_head(&soft->dmaread_queue);
init_waitqueue_head(&soft->algo_queue);
- init_MUTEX(&soft->dmawritelock);
- init_MUTEX(&soft->dmareadlock);
- init_MUTEX(&soft->algolock);
+ mutex_init(&soft->dmawritelock);
+ mutex_init(&soft->dmareadlock);
+ mutex_init(&soft->algolock);
mbcs_getdma_init(&soft->getdma);
mbcs_putdma_init(&soft->putdma);
diff --git a/drivers/char/mbcs.h b/drivers/char/mbcs.h
index c9905a3c3353..ba671589f4cb 100644
--- a/drivers/char/mbcs.h
+++ b/drivers/char/mbcs.h
@@ -537,9 +537,9 @@ struct mbcs_soft {
atomic_t dmawrite_done;
atomic_t dmaread_done;
atomic_t algo_done;
- struct semaphore dmawritelock;
- struct semaphore dmareadlock;
- struct semaphore algolock;
+ struct mutex dmawritelock;
+ struct mutex dmareadlock;
+ struct mutex algolock;
};
static int mbcs_open(struct inode *ip, struct file *fp);
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 71c8cd7fa15f..a39101feb2ed 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -232,8 +232,9 @@ int misc_register(struct miscdevice * misc)
}
/**
- * misc_deregister - unregister a miscellaneous device
+ * __misc_deregister - unregister a miscellaneous device
* @misc: device to unregister
+ * @suspended: to be set if the function is used during suspend/resume
*
* Unregister a miscellaneous device that was previously
* successfully registered with misc_register(). Success
@@ -241,7 +242,7 @@ int misc_register(struct miscdevice * misc)
* indicates an error.
*/
-int misc_deregister(struct miscdevice * misc)
+int __misc_deregister(struct miscdevice *misc, bool suspended)
{
int i = misc->minor;
@@ -250,7 +251,11 @@ int misc_deregister(struct miscdevice * misc)
mutex_lock(&misc_mtx);
list_del(&misc->list);
- device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor));
+ if (suspended)
+ destroy_suspended_device(misc_class,
+ MKDEV(MISC_MAJOR, misc->minor));
+ else
+ device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor));
if (i < DYNAMIC_MINORS && i>0) {
misc_minors[i>>3] &= ~(1 << (misc->minor & 7));
}
@@ -259,7 +264,7 @@ int misc_deregister(struct miscdevice * misc)
}
EXPORT_SYMBOL(misc_register);
-EXPORT_SYMBOL(misc_deregister);
+EXPORT_SYMBOL(__misc_deregister);
static int __init misc_init(void)
{
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 82f2e27dca7d..ff146c2b08fd 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -283,7 +283,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
vdata->refcnt = ATOMIC_INIT(1);
vma->vm_private_data = vdata;
- vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP);
+ vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &mspec_vm_ops;
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index fd0abef7ee08..68c2e9234691 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -1,43 +1,25 @@
/*
* mxser.c -- MOXA Smartio/Industio family multiport serial driver.
*
- * Copyright (C) 1999-2001 Moxa Technologies (support@moxa.com.tw).
+ * Copyright (C) 1999-2006 Moxa Technologies (support@moxa.com).
+ * Copyright (C) 2006-2008 Jiri Slaby <jirislaby@gmail.com>
*
- * This code is loosely based on the Linux serial driver, written by
- * Linus Torvalds, Theodore T'so and others.
+ * This code is loosely based on the 1.8 moxa driver which is based on
+ * Linux serial driver, written by Linus Torvalds, Theodore T'so and
+ * others.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Original release 10/26/00
- *
- * 02/06/01 Support MOXA Industio family boards.
- * 02/06/01 Support TIOCGICOUNT.
- * 02/06/01 Fix the problem for connecting to serial mouse.
- * 02/06/01 Fix the problem for H/W flow control.
- * 02/06/01 Fix the compling warning when CONFIG_PCI
- * don't be defined.
- *
* Fed through a cleanup, indent and remove of non 2.6 code by Alan Cox
* <alan@redhat.com>. The original 1.8 code is available on www.moxa.com.
* - Fixed x86_64 cleanness
* - Fixed sleep with spinlock held in mxser_send_break
*/
-
#include <linux/module.h>
-#include <linux/autoconf.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
@@ -65,33 +47,37 @@
#include "mxser.h"
-#define MXSER_VERSION "1.8"
+#define MXSER_VERSION "2.0.3" /* 1.11 */
#define MXSERMAJOR 174
#define MXSERCUMAJOR 175
-#define MXSER_EVENT_TXLOW 1
-#define MXSER_EVENT_HANGUP 2
-
#define MXSER_BOARDS 4 /* Max. boards */
-#define MXSER_PORTS 32 /* Max. ports */
#define MXSER_PORTS_PER_BOARD 8 /* Max. ports per board */
-#define MXSER_ISR_PASS_LIMIT 256
+#define MXSER_PORTS (MXSER_BOARDS * MXSER_PORTS_PER_BOARD)
+#define MXSER_ISR_PASS_LIMIT 100
#define MXSER_ERR_IOADDR -1
#define MXSER_ERR_IRQ -2
#define MXSER_ERR_IRQ_CONFLIT -3
#define MXSER_ERR_VECTOR -4
-#define SERIAL_TYPE_NORMAL 1
-#define SERIAL_TYPE_CALLOUT 2
+/*CheckIsMoxaMust return value*/
+#define MOXA_OTHER_UART 0x00
+#define MOXA_MUST_MU150_HWID 0x01
+#define MOXA_MUST_MU860_HWID 0x02
#define WAKEUP_CHARS 256
#define UART_MCR_AFE 0x20
#define UART_LSR_SPECIAL 0x1E
+#define PCI_DEVICE_ID_CB108 0x1080
+#define PCI_DEVICE_ID_CB114 0x1142
+#define PCI_DEVICE_ID_CP114UL 0x1143
+#define PCI_DEVICE_ID_CB134I 0x1341
+#define PCI_DEVICE_ID_CP138U 0x1380
+#define PCI_DEVICE_ID_POS104UL 0x1044
-#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? IRQF_SHARED : IRQF_DISABLED)
#define C168_ASIC_ID 1
#define C104_ASIC_ID 2
@@ -100,88 +86,11 @@
#define CI134_ASIC_ID 3
#define CI104J_ASIC_ID 5
-enum {
- MXSER_BOARD_C168_ISA = 1,
- MXSER_BOARD_C104_ISA,
- MXSER_BOARD_CI104J,
- MXSER_BOARD_C168_PCI,
- MXSER_BOARD_C104_PCI,
- MXSER_BOARD_C102_ISA,
- MXSER_BOARD_CI132,
- MXSER_BOARD_CI134,
- MXSER_BOARD_CP132,
- MXSER_BOARD_CP114,
- MXSER_BOARD_CT114,
- MXSER_BOARD_CP102,
- MXSER_BOARD_CP104U,
- MXSER_BOARD_CP168U,
- MXSER_BOARD_CP132U,
- MXSER_BOARD_CP134U,
- MXSER_BOARD_CP104JU,
- MXSER_BOARD_RC7000,
- MXSER_BOARD_CP118U,
- MXSER_BOARD_CP102UL,
- MXSER_BOARD_CP102U,
-};
-
-static char *mxser_brdname[] = {
- "C168 series",
- "C104 series",
- "CI-104J series",
- "C168H/PCI series",
- "C104H/PCI series",
- "C102 series",
- "CI-132 series",
- "CI-134 series",
- "CP-132 series",
- "CP-114 series",
- "CT-114 series",
- "CP-102 series",
- "CP-104U series",
- "CP-168U series",
- "CP-132U series",
- "CP-134U series",
- "CP-104JU series",
- "Moxa UC7000 Serial",
- "CP-118U series",
- "CP-102UL series",
- "CP-102U series",
-};
-
-static int mxser_numports[] = {
- 8, /* C168-ISA */
- 4, /* C104-ISA */
- 4, /* CI104J */
- 8, /* C168-PCI */
- 4, /* C104-PCI */
- 2, /* C102-ISA */
- 2, /* CI132 */
- 4, /* CI134 */
- 2, /* CP132 */
- 4, /* CP114 */
- 4, /* CT114 */
- 2, /* CP102 */
- 4, /* CP104U */
- 8, /* CP168U */
- 2, /* CP132U */
- 4, /* CP134U */
- 4, /* CP104JU */
- 8, /* RC7000 */
- 8, /* CP118U */
- 2, /* CP102UL */
- 2, /* CP102U */
-};
-
-#define UART_TYPE_NUM 2
-
-static const unsigned int Gmoxa_uart_id[UART_TYPE_NUM] = {
- MOXA_MUST_MU150_HWID,
- MOXA_MUST_MU860_HWID
-};
+#define MXSER_HIGHBAUD 1
+#define MXSER_HAS2 2
/* This is only for PCI */
-#define UART_INFO_NUM 3
-struct mxpciuart_info {
+static const struct {
int type;
int tx_fifo;
int rx_fifo;
@@ -190,51 +99,85 @@ struct mxpciuart_info {
int rx_trigger;
int rx_low_water;
long max_baud;
-};
-
-static const struct mxpciuart_info Gpci_uart_info[UART_INFO_NUM] = {
+} Gpci_uart_info[] = {
{MOXA_OTHER_UART, 16, 16, 16, 14, 14, 1, 921600L},
{MOXA_MUST_MU150_HWID, 64, 64, 64, 48, 48, 16, 230400L},
{MOXA_MUST_MU860_HWID, 128, 128, 128, 96, 96, 32, 921600L}
};
+#define UART_INFO_NUM ARRAY_SIZE(Gpci_uart_info)
+struct mxser_cardinfo {
+ char *name;
+ unsigned int nports;
+ unsigned int flags;
+};
-#ifdef CONFIG_PCI
+static const struct mxser_cardinfo mxser_cards[] = {
+/* 0*/ { "C168 series", 8, },
+ { "C104 series", 4, },
+ { "CI-104J series", 4, },
+ { "C168H/PCI series", 8, },
+ { "C104H/PCI series", 4, },
+/* 5*/ { "C102 series", 4, MXSER_HAS2 }, /* C102-ISA */
+ { "CI-132 series", 4, MXSER_HAS2 },
+ { "CI-134 series", 4, },
+ { "CP-132 series", 2, },
+ { "CP-114 series", 4, },
+/*10*/ { "CT-114 series", 4, },
+ { "CP-102 series", 2, MXSER_HIGHBAUD },
+ { "CP-104U series", 4, },
+ { "CP-168U series", 8, },
+ { "CP-132U series", 2, },
+/*15*/ { "CP-134U series", 4, },
+ { "CP-104JU series", 4, },
+ { "Moxa UC7000 Serial", 8, }, /* RC7000 */
+ { "CP-118U series", 8, },
+ { "CP-102UL series", 2, },
+/*20*/ { "CP-102U series", 2, },
+ { "CP-118EL series", 8, },
+ { "CP-168EL series", 8, },
+ { "CP-104EL series", 4, },
+ { "CB-108 series", 8, },
+/*25*/ { "CB-114 series", 4, },
+ { "CB-134I series", 4, },
+ { "CP-138U series", 8, },
+ { "POS-104UL series", 4, },
+ { "CP-114UL series", 4, }
+};
+/* driver_data correspond to the lines in the structure above
+ see also ISA probe function before you change something */
static struct pci_device_id mxser_pcibrds[] = {
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_C168, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_C168_PCI},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_C104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_C104_PCI},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP132},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP114},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CT114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CT114},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP102},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP104U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP104U},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP168U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP168U},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP132U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP132U},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP134U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP134U},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP104JU, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP104JU},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_RC7000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_RC7000},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP118U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP118U},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP102UL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP102UL},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP102U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP102U},
- {0}
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C168), .driver_data = 3 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C104), .driver_data = 4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132), .driver_data = 8 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP114), .driver_data = 9 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CT114), .driver_data = 10 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102), .driver_data = 11 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104U), .driver_data = 12 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168U), .driver_data = 13 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132U), .driver_data = 14 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP134U), .driver_data = 15 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104JU),.driver_data = 16 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_RC7000), .driver_data = 17 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118U), .driver_data = 18 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102UL),.driver_data = 19 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102U), .driver_data = 20 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118EL),.driver_data = 21 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168EL),.driver_data = 22 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104EL),.driver_data = 23 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB108), .driver_data = 24 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB114), .driver_data = 25 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB134I), .driver_data = 26 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP138U), .driver_data = 27 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_POS104UL), .driver_data = 28 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP114UL), .driver_data = 29 },
+ { }
};
-
MODULE_DEVICE_TABLE(pci, mxser_pcibrds);
-
-#endif
-
-typedef struct _moxa_pci_info {
- unsigned short busNum;
- unsigned short devNum;
- struct pci_dev *pdev; /* add by Victor Yu. 06-23-2003 */
-} moxa_pci_info;
-
static int ioaddr[MXSER_BOARDS] = { 0, 0, 0, 0 };
static int ttymajor = MXSERMAJOR;
-static int calloutmajor = MXSERCUMAJOR;
-static int verbose = 0;
/* Variables for insmod */
@@ -242,8 +185,6 @@ MODULE_AUTHOR("Casper Yang");
MODULE_DESCRIPTION("MOXA Smartio/Industio Family Multiport Board Device Driver");
module_param_array(ioaddr, int, NULL, 0);
module_param(ttymajor, int, 0);
-module_param(calloutmajor, int, 0);
-module_param(verbose, bool, 0);
MODULE_LICENSE("GPL");
struct mxser_log {
@@ -278,67 +219,69 @@ struct mxser_mon_ext {
int iftype[32];
};
-struct mxser_hwconf {
- int board_type;
- int ports;
- int irq;
- int vector;
- int vector_mask;
- int uart_type;
- int ioaddr[MXSER_PORTS_PER_BOARD];
- int baud_base[MXSER_PORTS_PER_BOARD];
- moxa_pci_info pciInfo;
- int IsMoxaMustChipFlag; /* add by Victor Yu. 08-30-2002 */
- int MaxCanSetBaudRate[MXSER_PORTS_PER_BOARD]; /* add by Victor Yu. 09-04-2002 */
- int opmode_ioaddr[MXSER_PORTS_PER_BOARD]; /* add by Victor Yu. 01-05-2004 */
-};
+struct mxser_board;
+
+struct mxser_port {
+ struct mxser_board *board;
+ struct tty_struct *tty;
+
+ unsigned long ioaddr;
+ unsigned long opmode_ioaddr;
+ int max_baud;
-struct mxser_struct {
- int port;
- int base; /* port base address */
- int irq; /* port using irq no. */
- int vector; /* port irq vector */
- int vectormask; /* port vector mask */
int rx_high_water;
int rx_trigger; /* Rx fifo trigger level */
int rx_low_water;
int baud_base; /* max. speed */
- int flags; /* defined in tty.h */
int type; /* UART type */
- struct tty_struct *tty;
- int read_status_mask;
- int ignore_status_mask;
- int xmit_fifo_size;
- int custom_divisor;
+ int flags; /* defined in tty.h */
+
int x_char; /* xon/xoff character */
- int close_delay;
- unsigned short closing_wait;
int IER; /* Interrupt Enable Register */
int MCR; /* Modem control register */
+
+ unsigned char stop_rx;
+ unsigned char ldisc_stop_rx;
+
+ int custom_divisor;
+ int close_delay;
+ unsigned short closing_wait;
+ unsigned char err_shadow;
unsigned long event;
+
int count; /* # of fd on device */
int blocked_open; /* # of blocked opens */
+ struct async_icount icount; /* kernel counters for 4 input interrupts */
+ int timeout;
+
+ int read_status_mask;
+ int ignore_status_mask;
+ int xmit_fifo_size;
unsigned char *xmit_buf;
int xmit_head;
int xmit_tail;
int xmit_cnt;
- struct work_struct tqueue;
+
struct ktermios normal_termios;
- struct ktermios callout_termios;
- wait_queue_head_t open_wait;
- wait_queue_head_t close_wait;
- wait_queue_head_t delta_msr_wait;
- struct async_icount icount; /* kernel counters for the 4 input interrupts */
- int timeout;
- int IsMoxaMustChipFlag; /* add by Victor Yu. 08-30-2002 */
- int MaxCanSetBaudRate; /* add by Victor Yu. 09-04-2002 */
- int opmode_ioaddr; /* add by Victor Yu. 01-05-2004 */
- unsigned char stop_rx;
- unsigned char ldisc_stop_rx;
- long realbaud;
+
struct mxser_mon mon_data;
- unsigned char err_shadow;
+
spinlock_t slock;
+ wait_queue_head_t open_wait;
+ wait_queue_head_t delta_msr_wait;
+};
+
+struct mxser_board {
+ unsigned int idx;
+ int irq;
+ const struct mxser_cardinfo *info;
+ unsigned long vector;
+ unsigned long vector_mask;
+
+ int chip_flag;
+ int uart_type;
+
+ struct mxser_port ports[MXSER_PORTS_PER_BOARD];
};
struct mxser_mstatus {
@@ -356,73 +299,16 @@ static int mxserBoardCAP[MXSER_BOARDS] = {
/* 0x180, 0x280, 0x200, 0x320 */
};
+static struct mxser_board mxser_boards[MXSER_BOARDS];
static struct tty_driver *mxvar_sdriver;
-static struct mxser_struct mxvar_table[MXSER_PORTS];
-static struct tty_struct *mxvar_tty[MXSER_PORTS + 1];
-static struct ktermios *mxvar_termios[MXSER_PORTS + 1];
-static struct ktermios *mxvar_termios_locked[MXSER_PORTS + 1];
static struct mxser_log mxvar_log;
static int mxvar_diagflag;
static unsigned char mxser_msr[MXSER_PORTS + 1];
static struct mxser_mon_ext mon_data_ext;
static int mxser_set_baud_method[MXSER_PORTS + 1];
-static spinlock_t gm_lock;
-
-/*
- * This is used to figure out the divisor speeds and the timeouts
- */
-
-static struct mxser_hwconf mxsercfg[MXSER_BOARDS];
-
-/*
- * static functions:
- */
-
-static void mxser_getcfg(int board, struct mxser_hwconf *hwconf);
-static int mxser_init(void);
-
-/* static void mxser_poll(unsigned long); */
-static int mxser_get_ISA_conf(int, struct mxser_hwconf *);
-static void mxser_do_softint(struct work_struct *);
-static int mxser_open(struct tty_struct *, struct file *);
-static void mxser_close(struct tty_struct *, struct file *);
-static int mxser_write(struct tty_struct *, const unsigned char *, int);
-static int mxser_write_room(struct tty_struct *);
-static void mxser_flush_buffer(struct tty_struct *);
-static int mxser_chars_in_buffer(struct tty_struct *);
-static void mxser_flush_chars(struct tty_struct *);
-static void mxser_put_char(struct tty_struct *, unsigned char);
-static int mxser_ioctl(struct tty_struct *, struct file *, uint, ulong);
-static int mxser_ioctl_special(unsigned int, void __user *);
-static void mxser_throttle(struct tty_struct *);
-static void mxser_unthrottle(struct tty_struct *);
-static void mxser_set_termios(struct tty_struct *, struct ktermios *);
-static void mxser_stop(struct tty_struct *);
-static void mxser_start(struct tty_struct *);
-static void mxser_hangup(struct tty_struct *);
-static void mxser_rs_break(struct tty_struct *, int);
-static irqreturn_t mxser_interrupt(int, void *);
-static void mxser_receive_chars(struct mxser_struct *, int *);
-static void mxser_transmit_chars(struct mxser_struct *);
-static void mxser_check_modem_status(struct mxser_struct *, int);
-static int mxser_block_til_ready(struct tty_struct *, struct file *, struct mxser_struct *);
-static int mxser_startup(struct mxser_struct *);
-static void mxser_shutdown(struct mxser_struct *);
-static int mxser_change_speed(struct mxser_struct *, struct ktermios *old_termios);
-static int mxser_get_serial_info(struct mxser_struct *, struct serial_struct __user *);
-static int mxser_set_serial_info(struct mxser_struct *, struct serial_struct __user *);
-static int mxser_get_lsr_info(struct mxser_struct *, unsigned int __user *);
-static void mxser_send_break(struct mxser_struct *, int);
-static int mxser_tiocmget(struct tty_struct *, struct file *);
-static int mxser_tiocmset(struct tty_struct *, struct file *, unsigned int, unsigned int);
-static int mxser_set_baud(struct mxser_struct *info, long newspd);
-static void mxser_wait_until_sent(struct tty_struct *tty, int timeout);
-
-static void mxser_startrx(struct tty_struct *tty);
-static void mxser_stoprx(struct tty_struct *tty);
#ifdef CONFIG_PCI
-static int CheckIsMoxaMust(int io)
+static int __devinit CheckIsMoxaMust(unsigned long io)
{
u8 oldmcr, hwid;
int i;
@@ -438,90 +324,15 @@ static int CheckIsMoxaMust(int io)
}
GET_MOXA_MUST_HARDWARE_ID(io, &hwid);
- for (i = 0; i < UART_TYPE_NUM; i++) {
- if (hwid == Gmoxa_uart_id[i])
+ for (i = 1; i < UART_INFO_NUM; i++) { /* 0 = OTHER_UART */
+ if (hwid == Gpci_uart_info[i].type)
return (int)hwid;
}
return MOXA_OTHER_UART;
}
#endif
-/* above is modified by Victor Yu. 08-15-2002 */
-
-static const struct tty_operations mxser_ops = {
- .open = mxser_open,
- .close = mxser_close,
- .write = mxser_write,
- .put_char = mxser_put_char,
- .flush_chars = mxser_flush_chars,
- .write_room = mxser_write_room,
- .chars_in_buffer = mxser_chars_in_buffer,
- .flush_buffer = mxser_flush_buffer,
- .ioctl = mxser_ioctl,
- .throttle = mxser_throttle,
- .unthrottle = mxser_unthrottle,
- .set_termios = mxser_set_termios,
- .stop = mxser_stop,
- .start = mxser_start,
- .hangup = mxser_hangup,
- .break_ctl = mxser_rs_break,
- .wait_until_sent = mxser_wait_until_sent,
- .tiocmget = mxser_tiocmget,
- .tiocmset = mxser_tiocmset,
-};
-
-/*
- * The MOXA Smartio/Industio serial driver boot-time initialization code!
- */
-
-static int __init mxser_module_init(void)
-{
- int ret;
-
- if (verbose)
- printk(KERN_DEBUG "Loading module mxser ...\n");
- ret = mxser_init();
- if (verbose)
- printk(KERN_DEBUG "Done.\n");
- return ret;
-}
-
-static void __exit mxser_module_exit(void)
-{
- int i, err;
-
- if (verbose)
- printk(KERN_DEBUG "Unloading module mxser ...\n");
-
- err = tty_unregister_driver(mxvar_sdriver);
- if (!err)
- put_tty_driver(mxvar_sdriver);
- else
- printk(KERN_ERR "Couldn't unregister MOXA Smartio/Industio family serial driver\n");
-
- for (i = 0; i < MXSER_BOARDS; i++) {
- struct pci_dev *pdev;
-
- if (mxsercfg[i].board_type == -1)
- continue;
- else {
- pdev = mxsercfg[i].pciInfo.pdev;
- free_irq(mxsercfg[i].irq, &mxvar_table[i * MXSER_PORTS_PER_BOARD]);
- if (pdev != NULL) { /* PCI */
- release_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
- release_region(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
- pci_dev_put(pdev);
- } else {
- release_region(mxsercfg[i].ioaddr[0], 8 * mxsercfg[i].ports);
- release_region(mxsercfg[i].vector, 1);
- }
- }
- }
- if (verbose)
- printk(KERN_DEBUG "Done.\n");
-}
-
-static void process_txrx_fifo(struct mxser_struct *info)
+static void process_txrx_fifo(struct mxser_port *info)
{
int i;
@@ -530,424 +341,548 @@ static void process_txrx_fifo(struct mxser_struct *info)
info->rx_high_water = 1;
info->rx_low_water = 1;
info->xmit_fifo_size = 1;
- } else {
- for (i = 0; i < UART_INFO_NUM; i++) {
- if (info->IsMoxaMustChipFlag == Gpci_uart_info[i].type) {
+ } else
+ for (i = 0; i < UART_INFO_NUM; i++)
+ if (info->board->chip_flag == Gpci_uart_info[i].type) {
info->rx_trigger = Gpci_uart_info[i].rx_trigger;
info->rx_low_water = Gpci_uart_info[i].rx_low_water;
info->rx_high_water = Gpci_uart_info[i].rx_high_water;
info->xmit_fifo_size = Gpci_uart_info[i].xmit_fifo_size;
break;
}
- }
- }
}
-static int mxser_initbrd(int board, struct mxser_hwconf *hwconf)
+static unsigned char mxser_get_msr(int baseaddr, int mode, int port)
{
- struct mxser_struct *info;
- int retval;
- int i, n;
+ unsigned char status = 0;
- n = board * MXSER_PORTS_PER_BOARD;
- info = &mxvar_table[n];
- /*if (verbose) */ {
- printk(KERN_DEBUG " ttyMI%d - ttyMI%d ",
- n, n + hwconf->ports - 1);
- printk(" max. baud rate = %d bps.\n",
- hwconf->MaxCanSetBaudRate[0]);
- }
-
- for (i = 0; i < hwconf->ports; i++, n++, info++) {
- info->port = n;
- info->base = hwconf->ioaddr[i];
- info->irq = hwconf->irq;
- info->vector = hwconf->vector;
- info->vectormask = hwconf->vector_mask;
- info->opmode_ioaddr = hwconf->opmode_ioaddr[i]; /* add by Victor Yu. 01-05-2004 */
- info->stop_rx = 0;
- info->ldisc_stop_rx = 0;
+ status = inb(baseaddr + UART_MSR);
- info->IsMoxaMustChipFlag = hwconf->IsMoxaMustChipFlag;
- /* Enhance mode enabled here */
- if (info->IsMoxaMustChipFlag != MOXA_OTHER_UART) {
- ENABLE_MOXA_MUST_ENCHANCE_MODE(info->base);
- }
+ mxser_msr[port] &= 0x0F;
+ mxser_msr[port] |= status;
+ status = mxser_msr[port];
+ if (mode)
+ mxser_msr[port] = 0;
- info->flags = ASYNC_SHARE_IRQ;
- info->type = hwconf->uart_type;
- info->baud_base = hwconf->baud_base[i];
+ return status;
+}
- info->MaxCanSetBaudRate = hwconf->MaxCanSetBaudRate[i];
+static int mxser_block_til_ready(struct tty_struct *tty, struct file *filp,
+ struct mxser_port *port)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int retval;
+ int do_clocal = 0;
+ unsigned long flags;
- process_txrx_fifo(info);
+ /*
+ * If non-blocking mode is set, or the port is not enabled,
+ * then make the check up front and then exit.
+ */
+ if ((filp->f_flags & O_NONBLOCK) ||
+ test_bit(TTY_IO_ERROR, &tty->flags)) {
+ port->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+ }
+ if (tty->termios->c_cflag & CLOCAL)
+ do_clocal = 1;
- info->custom_divisor = hwconf->baud_base[i] * 16;
- info->close_delay = 5 * HZ / 10;
- info->closing_wait = 30 * HZ;
- INIT_WORK(&info->tqueue, mxser_do_softint);
- info->normal_termios = mxvar_sdriver->init_termios;
- init_waitqueue_head(&info->open_wait);
- init_waitqueue_head(&info->close_wait);
- init_waitqueue_head(&info->delta_msr_wait);
- memset(&info->mon_data, 0, sizeof(struct mxser_mon));
- info->err_shadow = 0;
- spin_lock_init(&info->slock);
- }
/*
- * Allocate the IRQ if necessary
+ * Block waiting for the carrier detect and the line to become
+ * free (i.e., not in use by the callout). While we are in
+ * this loop, port->count is dropped by one, so that
+ * mxser_close() knows when to free things. We restore it upon
+ * exit, either normal or abnormal.
*/
+ retval = 0;
+ add_wait_queue(&port->open_wait, &wait);
-
- /* before set INT ISR, disable all int */
- for (i = 0; i < hwconf->ports; i++) {
- outb(inb(hwconf->ioaddr[i] + UART_IER) & 0xf0,
- hwconf->ioaddr[i] + UART_IER);
+ spin_lock_irqsave(&port->slock, flags);
+ if (!tty_hung_up_p(filp))
+ port->count--;
+ spin_unlock_irqrestore(&port->slock, flags);
+ port->blocked_open++;
+ while (1) {
+ spin_lock_irqsave(&port->slock, flags);
+ outb(inb(port->ioaddr + UART_MCR) |
+ UART_MCR_DTR | UART_MCR_RTS, port->ioaddr + UART_MCR);
+ spin_unlock_irqrestore(&port->slock, flags);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) {
+ if (port->flags & ASYNC_HUP_NOTIFY)
+ retval = -EAGAIN;
+ else
+ retval = -ERESTARTSYS;
+ break;
+ }
+ if (!(port->flags & ASYNC_CLOSING) &&
+ (do_clocal ||
+ (inb(port->ioaddr + UART_MSR) & UART_MSR_DCD)))
+ break;
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+ schedule();
}
-
- n = board * MXSER_PORTS_PER_BOARD;
- info = &mxvar_table[n];
-
- retval = request_irq(hwconf->irq, mxser_interrupt, IRQ_T(info),
- "mxser", info);
- if (retval) {
- printk(KERN_ERR "Board %d: %s",
- board, mxser_brdname[hwconf->board_type - 1]);
- printk(" Request irq failed, IRQ (%d) may conflict with"
- " another device.\n", info->irq);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&port->open_wait, &wait);
+ if (!tty_hung_up_p(filp))
+ port->count++;
+ port->blocked_open--;
+ if (retval)
return retval;
- }
+ port->flags |= ASYNC_NORMAL_ACTIVE;
return 0;
}
-static void mxser_getcfg(int board, struct mxser_hwconf *hwconf)
+static int mxser_set_baud(struct mxser_port *info, long newspd)
{
- mxsercfg[board] = *hwconf;
-}
+ int quot = 0, baud;
+ unsigned char cval;
-#ifdef CONFIG_PCI
-static int mxser_get_PCI_conf(int busnum, int devnum, int board_type, struct mxser_hwconf *hwconf)
-{
- int i, j;
- /* unsigned int val; */
- unsigned int ioaddress;
- struct pci_dev *pdev = hwconf->pciInfo.pdev;
+ if (!info->tty || !info->tty->termios)
+ return -1;
- /* io address */
- hwconf->board_type = board_type;
- hwconf->ports = mxser_numports[board_type - 1];
- ioaddress = pci_resource_start(pdev, 2);
- request_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2),
- "mxser(IO)");
+ if (!(info->ioaddr))
+ return -1;
- for (i = 0; i < hwconf->ports; i++)
- hwconf->ioaddr[i] = ioaddress + 8 * i;
+ if (newspd > info->max_baud)
+ return -1;
- /* vector */
- ioaddress = pci_resource_start(pdev, 3);
- request_region(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3),
- "mxser(vector)");
- hwconf->vector = ioaddress;
+ if (newspd == 134) {
+ quot = 2 * info->baud_base / 269;
+ tty_encode_baud_rate(info->tty, 134, 134);
+ } else if (newspd) {
+ quot = info->baud_base / newspd;
+ if (quot == 0)
+ quot = 1;
+ baud = info->baud_base/quot;
+ tty_encode_baud_rate(info->tty, baud, baud);
+ } else {
+ quot = 0;
+ }
- /* irq */
- hwconf->irq = hwconf->pciInfo.pdev->irq;
+ info->timeout = ((info->xmit_fifo_size * HZ * 10 * quot) / info->baud_base);
+ info->timeout += HZ / 50; /* Add .02 seconds of slop */
- hwconf->IsMoxaMustChipFlag = CheckIsMoxaMust(hwconf->ioaddr[0]);
- hwconf->uart_type = PORT_16550A;
- hwconf->vector_mask = 0;
+ if (quot) {
+ info->MCR |= UART_MCR_DTR;
+ outb(info->MCR, info->ioaddr + UART_MCR);
+ } else {
+ info->MCR &= ~UART_MCR_DTR;
+ outb(info->MCR, info->ioaddr + UART_MCR);
+ return 0;
+ }
+ cval = inb(info->ioaddr + UART_LCR);
- for (i = 0; i < hwconf->ports; i++) {
- for (j = 0; j < UART_INFO_NUM; j++) {
- if (Gpci_uart_info[j].type == hwconf->IsMoxaMustChipFlag) {
- hwconf->MaxCanSetBaudRate[i] = Gpci_uart_info[j].max_baud;
+ outb(cval | UART_LCR_DLAB, info->ioaddr + UART_LCR); /* set DLAB */
- /* exception....CP-102 */
- if (board_type == MXSER_BOARD_CP102)
- hwconf->MaxCanSetBaudRate[i] = 921600;
- break;
- }
- }
- }
+ outb(quot & 0xff, info->ioaddr + UART_DLL); /* LS of divisor */
+ outb(quot >> 8, info->ioaddr + UART_DLM); /* MS of divisor */
+ outb(cval, info->ioaddr + UART_LCR); /* reset DLAB */
- if (hwconf->IsMoxaMustChipFlag == MOXA_MUST_MU860_HWID) {
- for (i = 0; i < hwconf->ports; i++) {
- if (i < 4)
- hwconf->opmode_ioaddr[i] = ioaddress + 4;
- else
- hwconf->opmode_ioaddr[i] = ioaddress + 0x0c;
- }
- outb(0, ioaddress + 4); /* default set to RS232 mode */
- outb(0, ioaddress + 0x0c); /* default set to RS232 mode */
- }
+#ifdef BOTHER
+ if (C_BAUD(info->tty) == BOTHER) {
+ quot = info->baud_base % newspd;
+ quot *= 8;
+ if (quot % newspd > newspd / 2) {
+ quot /= newspd;
+ quot++;
+ } else
+ quot /= newspd;
+
+ SET_MOXA_MUST_ENUM_VALUE(info->ioaddr, quot);
+ } else
+#endif
+ SET_MOXA_MUST_ENUM_VALUE(info->ioaddr, 0);
- for (i = 0; i < hwconf->ports; i++) {
- hwconf->vector_mask |= (1 << i);
- hwconf->baud_base[i] = 921600;
- }
return 0;
}
-#endif
-static int mxser_init(void)
+/*
+ * This routine is called to set the UART divisor registers to match
+ * the specified baud rate for a serial port.
+ */
+static int mxser_change_speed(struct mxser_port *info,
+ struct ktermios *old_termios)
{
- int i, m, retval, b, n;
- struct pci_dev *pdev = NULL;
- int index;
- unsigned char busnum, devnum;
- struct mxser_hwconf hwconf;
-
- mxvar_sdriver = alloc_tty_driver(MXSER_PORTS + 1);
- if (!mxvar_sdriver)
- return -ENOMEM;
- spin_lock_init(&gm_lock);
-
- for (i = 0; i < MXSER_BOARDS; i++) {
- mxsercfg[i].board_type = -1;
- }
+ unsigned cflag, cval, fcr;
+ int ret = 0;
+ unsigned char status;
- printk(KERN_INFO "MOXA Smartio/Industio family driver version %s\n",
- MXSER_VERSION);
+ if (!info->tty || !info->tty->termios)
+ return ret;
+ cflag = info->tty->termios->c_cflag;
+ if (!(info->ioaddr))
+ return ret;
- /* Initialize the tty_driver structure */
- memset(mxvar_sdriver, 0, sizeof(struct tty_driver));
- mxvar_sdriver->owner = THIS_MODULE;
- mxvar_sdriver->magic = TTY_DRIVER_MAGIC;
- mxvar_sdriver->name = "ttyMI";
- mxvar_sdriver->major = ttymajor;
- mxvar_sdriver->minor_start = 0;
- mxvar_sdriver->num = MXSER_PORTS + 1;
- mxvar_sdriver->type = TTY_DRIVER_TYPE_SERIAL;
- mxvar_sdriver->subtype = SERIAL_TYPE_NORMAL;
- mxvar_sdriver->init_termios = tty_std_termios;
- mxvar_sdriver->init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL;
- mxvar_sdriver->init_termios.c_ispeed = 9600;
- mxvar_sdriver->init_termios.c_ospeed = 9600;
- mxvar_sdriver->flags = TTY_DRIVER_REAL_RAW;
- tty_set_operations(mxvar_sdriver, &mxser_ops);
- mxvar_sdriver->ttys = mxvar_tty;
- mxvar_sdriver->termios = mxvar_termios;
- mxvar_sdriver->termios_locked = mxvar_termios_locked;
+ if (mxser_set_baud_method[info->tty->index] == 0)
+ mxser_set_baud(info, tty_get_baud_rate(info->tty));
- mxvar_diagflag = 0;
- memset(mxvar_table, 0, MXSER_PORTS * sizeof(struct mxser_struct));
- memset(&mxvar_log, 0, sizeof(struct mxser_log));
+ /* byte size and parity */
+ switch (cflag & CSIZE) {
+ case CS5:
+ cval = 0x00;
+ break;
+ case CS6:
+ cval = 0x01;
+ break;
+ case CS7:
+ cval = 0x02;
+ break;
+ case CS8:
+ cval = 0x03;
+ break;
+ default:
+ cval = 0x00;
+ break; /* too keep GCC shut... */
+ }
+ if (cflag & CSTOPB)
+ cval |= 0x04;
+ if (cflag & PARENB)
+ cval |= UART_LCR_PARITY;
+ if (!(cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+ if (cflag & CMSPAR)
+ cval |= UART_LCR_SPAR;
- memset(&mxser_msr, 0, sizeof(unsigned char) * (MXSER_PORTS + 1));
- memset(&mon_data_ext, 0, sizeof(struct mxser_mon_ext));
- memset(&mxser_set_baud_method, 0, sizeof(int) * (MXSER_PORTS + 1));
- memset(&hwconf, 0, sizeof(struct mxser_hwconf));
+ if ((info->type == PORT_8250) || (info->type == PORT_16450)) {
+ if (info->board->chip_flag) {
+ fcr = UART_FCR_ENABLE_FIFO;
+ fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
+ SET_MOXA_MUST_FIFO_VALUE(info);
+ } else
+ fcr = 0;
+ } else {
+ fcr = UART_FCR_ENABLE_FIFO;
+ if (info->board->chip_flag) {
+ fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
+ SET_MOXA_MUST_FIFO_VALUE(info);
+ } else {
+ switch (info->rx_trigger) {
+ case 1:
+ fcr |= UART_FCR_TRIGGER_1;
+ break;
+ case 4:
+ fcr |= UART_FCR_TRIGGER_4;
+ break;
+ case 8:
+ fcr |= UART_FCR_TRIGGER_8;
+ break;
+ default:
+ fcr |= UART_FCR_TRIGGER_14;
+ break;
+ }
+ }
+ }
- m = 0;
- /* Start finding ISA boards here */
- for (b = 0; b < MXSER_BOARDS && m < MXSER_BOARDS; b++) {
- int cap;
-
- if (!(cap = mxserBoardCAP[b]))
- continue;
-
- retval = mxser_get_ISA_conf(cap, &hwconf);
-
- if (retval != 0)
- printk(KERN_INFO "Found MOXA %s board (CAP=0x%x)\n",
- mxser_brdname[hwconf.board_type - 1], ioaddr[b]);
-
- if (retval <= 0) {
- if (retval == MXSER_ERR_IRQ)
- printk(KERN_ERR "Invalid interrupt number, "
- "board not configured\n");
- else if (retval == MXSER_ERR_IRQ_CONFLIT)
- printk(KERN_ERR "Invalid interrupt number, "
- "board not configured\n");
- else if (retval == MXSER_ERR_VECTOR)
- printk(KERN_ERR "Invalid interrupt vector, "
- "board not configured\n");
- else if (retval == MXSER_ERR_IOADDR)
- printk(KERN_ERR "Invalid I/O address, "
- "board not configured\n");
-
- continue;
+ /* CTS flow control flag and modem status interrupts */
+ info->IER &= ~UART_IER_MSI;
+ info->MCR &= ~UART_MCR_AFE;
+ if (cflag & CRTSCTS) {
+ info->flags |= ASYNC_CTS_FLOW;
+ info->IER |= UART_IER_MSI;
+ if ((info->type == PORT_16550A) || (info->board->chip_flag)) {
+ info->MCR |= UART_MCR_AFE;
+ } else {
+ status = inb(info->ioaddr + UART_MSR);
+ if (info->tty->hw_stopped) {
+ if (status & UART_MSR_CTS) {
+ info->tty->hw_stopped = 0;
+ if (info->type != PORT_16550A &&
+ !info->board->chip_flag) {
+ outb(info->IER & ~UART_IER_THRI,
+ info->ioaddr +
+ UART_IER);
+ info->IER |= UART_IER_THRI;
+ outb(info->IER, info->ioaddr +
+ UART_IER);
+ }
+ tty_wakeup(info->tty);
+ }
+ } else {
+ if (!(status & UART_MSR_CTS)) {
+ info->tty->hw_stopped = 1;
+ if ((info->type != PORT_16550A) &&
+ (!info->board->chip_flag)) {
+ info->IER &= ~UART_IER_THRI;
+ outb(info->IER, info->ioaddr +
+ UART_IER);
+ }
+ }
+ }
}
+ } else {
+ info->flags &= ~ASYNC_CTS_FLOW;
+ }
+ outb(info->MCR, info->ioaddr + UART_MCR);
+ if (cflag & CLOCAL) {
+ info->flags &= ~ASYNC_CHECK_CD;
+ } else {
+ info->flags |= ASYNC_CHECK_CD;
+ info->IER |= UART_IER_MSI;
+ }
+ outb(info->IER, info->ioaddr + UART_IER);
- hwconf.pciInfo.busNum = 0;
- hwconf.pciInfo.devNum = 0;
- hwconf.pciInfo.pdev = NULL;
+ /*
+ * Set up parity check flag
+ */
+ info->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (I_INPCK(info->tty))
+ info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
+ info->read_status_mask |= UART_LSR_BI;
+
+ info->ignore_status_mask = 0;
- mxser_getcfg(m, &hwconf);
+ if (I_IGNBRK(info->tty)) {
+ info->ignore_status_mask |= UART_LSR_BI;
+ info->read_status_mask |= UART_LSR_BI;
/*
- * init mxsercfg first,
- * or mxsercfg data is not correct on ISR.
+ * If we're ignore parity and break indicators, ignore
+ * overruns too. (For real raw support).
*/
- /* mxser_initbrd will hook ISR. */
- if (mxser_initbrd(m, &hwconf) < 0)
- continue;
-
- m++;
+ if (I_IGNPAR(info->tty)) {
+ info->ignore_status_mask |=
+ UART_LSR_OE |
+ UART_LSR_PE |
+ UART_LSR_FE;
+ info->read_status_mask |=
+ UART_LSR_OE |
+ UART_LSR_PE |
+ UART_LSR_FE;
+ }
+ }
+ if (info->board->chip_flag) {
+ SET_MOXA_MUST_XON1_VALUE(info->ioaddr, START_CHAR(info->tty));
+ SET_MOXA_MUST_XOFF1_VALUE(info->ioaddr, STOP_CHAR(info->tty));
+ if (I_IXON(info->tty)) {
+ ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+ } else {
+ DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+ }
+ if (I_IXOFF(info->tty)) {
+ ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+ } else {
+ DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+ }
}
- /* Start finding ISA boards from module arg */
- for (b = 0; b < MXSER_BOARDS && m < MXSER_BOARDS; b++) {
- int cap;
- if (!(cap = ioaddr[b]))
- continue;
+ outb(fcr, info->ioaddr + UART_FCR); /* set fcr */
+ outb(cval, info->ioaddr + UART_LCR);
- retval = mxser_get_ISA_conf(cap, &hwconf);
+ return ret;
+}
- if (retval != 0)
- printk(KERN_INFO "Found MOXA %s board (CAP=0x%x)\n",
- mxser_brdname[hwconf.board_type - 1], ioaddr[b]);
+static void mxser_check_modem_status(struct mxser_port *port, int status)
+{
+ /* update input line counters */
+ if (status & UART_MSR_TERI)
+ port->icount.rng++;
+ if (status & UART_MSR_DDSR)
+ port->icount.dsr++;
+ if (status & UART_MSR_DDCD)
+ port->icount.dcd++;
+ if (status & UART_MSR_DCTS)
+ port->icount.cts++;
+ port->mon_data.modem_status = status;
+ wake_up_interruptible(&port->delta_msr_wait);
- if (retval <= 0) {
- if (retval == MXSER_ERR_IRQ)
- printk(KERN_ERR "Invalid interrupt number, "
- "board not configured\n");
- else if (retval == MXSER_ERR_IRQ_CONFLIT)
- printk(KERN_ERR "Invalid interrupt number, "
- "board not configured\n");
- else if (retval == MXSER_ERR_VECTOR)
- printk(KERN_ERR "Invalid interrupt vector, "
- "board not configured\n");
- else if (retval == MXSER_ERR_IOADDR)
- printk(KERN_ERR "Invalid I/O address, "
- "board not configured\n");
+ if ((port->flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) {
+ if (status & UART_MSR_DCD)
+ wake_up_interruptible(&port->open_wait);
+ }
- continue;
+ if (port->flags & ASYNC_CTS_FLOW) {
+ if (port->tty->hw_stopped) {
+ if (status & UART_MSR_CTS) {
+ port->tty->hw_stopped = 0;
+
+ if ((port->type != PORT_16550A) &&
+ (!port->board->chip_flag)) {
+ outb(port->IER & ~UART_IER_THRI,
+ port->ioaddr + UART_IER);
+ port->IER |= UART_IER_THRI;
+ outb(port->IER, port->ioaddr +
+ UART_IER);
+ }
+ tty_wakeup(port->tty);
+ }
+ } else {
+ if (!(status & UART_MSR_CTS)) {
+ port->tty->hw_stopped = 1;
+ if (port->type != PORT_16550A &&
+ !port->board->chip_flag) {
+ port->IER &= ~UART_IER_THRI;
+ outb(port->IER, port->ioaddr +
+ UART_IER);
+ }
+ }
}
+ }
+}
- hwconf.pciInfo.busNum = 0;
- hwconf.pciInfo.devNum = 0;
- hwconf.pciInfo.pdev = NULL;
+static int mxser_startup(struct mxser_port *info)
+{
+ unsigned long page;
+ unsigned long flags;
- mxser_getcfg(m, &hwconf);
- /*
- * init mxsercfg first,
- * or mxsercfg data is not correct on ISR.
- */
- /* mxser_initbrd will hook ISR. */
- if (mxser_initbrd(m, &hwconf) < 0)
- continue;
+ page = __get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&info->slock, flags);
- m++;
+ if (info->flags & ASYNC_INITIALIZED) {
+ free_page(page);
+ spin_unlock_irqrestore(&info->slock, flags);
+ return 0;
}
- /* start finding PCI board here */
-#ifdef CONFIG_PCI
- n = ARRAY_SIZE(mxser_pcibrds) - 1;
- index = 0;
- b = 0;
- while (b < n) {
- pdev = pci_get_device(mxser_pcibrds[b].vendor,
- mxser_pcibrds[b].device, pdev);
- if (pdev == NULL) {
- b++;
- continue;
- }
- hwconf.pciInfo.busNum = busnum = pdev->bus->number;
- hwconf.pciInfo.devNum = devnum = PCI_SLOT(pdev->devfn) << 3;
- hwconf.pciInfo.pdev = pdev;
- printk(KERN_INFO "Found MOXA %s board(BusNo=%d,DevNo=%d)\n",
- mxser_brdname[(int) (mxser_pcibrds[b].driver_data) - 1],
- busnum, devnum >> 3);
- index++;
- if (m >= MXSER_BOARDS)
- printk(KERN_ERR
- "Too many Smartio/Industio family boards find "
- "(maximum %d), board not configured\n",
- MXSER_BOARDS);
- else {
- if (pci_enable_device(pdev)) {
- printk(KERN_ERR "Moxa SmartI/O PCI enable "
- "fail !\n");
- continue;
- }
- retval = mxser_get_PCI_conf(busnum, devnum,
- (int)mxser_pcibrds[b].driver_data,
- &hwconf);
- if (retval < 0) {
- if (retval == MXSER_ERR_IRQ)
- printk(KERN_ERR
- "Invalid interrupt number, "
- "board not configured\n");
- else if (retval == MXSER_ERR_IRQ_CONFLIT)
- printk(KERN_ERR
- "Invalid interrupt number, "
- "board not configured\n");
- else if (retval == MXSER_ERR_VECTOR)
- printk(KERN_ERR
- "Invalid interrupt vector, "
- "board not configured\n");
- else if (retval == MXSER_ERR_IOADDR)
- printk(KERN_ERR
- "Invalid I/O address, "
- "board not configured\n");
- continue;
- }
- mxser_getcfg(m, &hwconf);
- /* init mxsercfg first,
- * or mxsercfg data is not correct on ISR.
- */
- /* mxser_initbrd will hook ISR. */
- if (mxser_initbrd(m, &hwconf) < 0)
- continue;
- m++;
- /* Keep an extra reference if we succeeded. It will
- be returned at unload time */
- pci_dev_get(pdev);
- }
+ if (!info->ioaddr || !info->type) {
+ if (info->tty)
+ set_bit(TTY_IO_ERROR, &info->tty->flags);
+ free_page(page);
+ spin_unlock_irqrestore(&info->slock, flags);
+ return 0;
}
-#endif
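+	/* keep an already-allocated transmit buffer, otherwise adopt the new page */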
+ if (info->xmit_buf)
+ free_page(page);
+ else
+ info->xmit_buf = (unsigned char *) page;
- retval = tty_register_driver(mxvar_sdriver);
- if (retval) {
- printk(KERN_ERR "Couldn't install MOXA Smartio/Industio family"
- " driver !\n");
- put_tty_driver(mxvar_sdriver);
+ /*
+ * Clear the FIFO buffers and disable them
+ * (they will be reenabled in mxser_change_speed())
+ */
+ if (info->board->chip_flag)
+ outb((UART_FCR_CLEAR_RCVR |
+ UART_FCR_CLEAR_XMIT |
+ MOXA_MUST_FCR_GDA_MODE_ENABLE), info->ioaddr + UART_FCR);
+ else
+ outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
+ info->ioaddr + UART_FCR);
- for (i = 0; i < MXSER_BOARDS; i++) {
- if (mxsercfg[i].board_type == -1)
- continue;
- else {
- free_irq(mxsercfg[i].irq, &mxvar_table[i * MXSER_PORTS_PER_BOARD]);
- /* todo: release io, vector */
- }
- }
- return retval;
+ /*
+ * At this point there's no way the LSR could still be 0xFF;
+ * if it is, then bail out, because there's likely no UART
+ * here.
+ */
+ if (inb(info->ioaddr + UART_LSR) == 0xff) {
+ spin_unlock_irqrestore(&info->slock, flags);
+ if (capable(CAP_SYS_ADMIN)) {
+ if (info->tty)
+ set_bit(TTY_IO_ERROR, &info->tty->flags);
+ return 0;
+ } else
+ return -ENODEV;
}
+ /*
+ * Clear the interrupt registers.
+ */
+ (void) inb(info->ioaddr + UART_LSR);
+ (void) inb(info->ioaddr + UART_RX);
+ (void) inb(info->ioaddr + UART_IIR);
+ (void) inb(info->ioaddr + UART_MSR);
+
+ /*
+ * Now, initialize the UART
+ */
+ outb(UART_LCR_WLEN8, info->ioaddr + UART_LCR); /* reset DLAB */
+ info->MCR = UART_MCR_DTR | UART_MCR_RTS;
+ outb(info->MCR, info->ioaddr + UART_MCR);
+
+ /*
+ * Finally, enable interrupts
+ */
+ info->IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI;
+
+ if (info->board->chip_flag)
+ info->IER |= MOXA_MUST_IER_EGDAI;
+ outb(info->IER, info->ioaddr + UART_IER); /* enable interrupts */
+
+ /*
+ * And clear the interrupt registers again for luck.
+ */
+ (void) inb(info->ioaddr + UART_LSR);
+ (void) inb(info->ioaddr + UART_RX);
+ (void) inb(info->ioaddr + UART_IIR);
+ (void) inb(info->ioaddr + UART_MSR);
+
+ if (info->tty)
+ clear_bit(TTY_IO_ERROR, &info->tty->flags);
+ info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+
+ /*
+ * and set the speed of the serial port
+ */
+ mxser_change_speed(info, NULL);
+ info->flags |= ASYNC_INITIALIZED;
+ spin_unlock_irqrestore(&info->slock, flags);
+
return 0;
}
-static void mxser_do_softint(struct work_struct *work)
+/*
+ * This routine will shut down a serial port; interrupts may be disabled, and
+ * DTR is dropped if the hangup on close termio flag is on.
+ */
+static void mxser_shutdown(struct mxser_port *info)
{
- struct mxser_struct *info =
- container_of(work, struct mxser_struct, tqueue);
- struct tty_struct *tty;
+ unsigned long flags;
- tty = info->tty;
+ if (!(info->flags & ASYNC_INITIALIZED))
+ return;
+
+ spin_lock_irqsave(&info->slock, flags);
+
+ /*
+ * clear delta_msr_wait queue to avoid mem leaks: we may free the irq
+	 * here so the queue might never be woken up
+ */
+ wake_up_interruptible(&info->delta_msr_wait);
- if (tty) {
- if (test_and_clear_bit(MXSER_EVENT_TXLOW, &info->event))
- tty_wakeup(tty);
- if (test_and_clear_bit(MXSER_EVENT_HANGUP, &info->event))
- tty_hangup(tty);
+ /*
+ * Free the IRQ, if necessary
+ */
+ if (info->xmit_buf) {
+ free_page((unsigned long) info->xmit_buf);
+ info->xmit_buf = NULL;
}
-}
-static unsigned char mxser_get_msr(int baseaddr, int mode, int port, struct mxser_struct *info)
-{
- unsigned char status = 0;
+ info->IER = 0;
+ outb(0x00, info->ioaddr + UART_IER);
- status = inb(baseaddr + UART_MSR);
+ if (!info->tty || (info->tty->termios->c_cflag & HUPCL))
+ info->MCR &= ~(UART_MCR_DTR | UART_MCR_RTS);
+ outb(info->MCR, info->ioaddr + UART_MCR);
- mxser_msr[port] &= 0x0F;
- mxser_msr[port] |= status;
- status = mxser_msr[port];
- if (mode)
- mxser_msr[port] = 0;
+ /* clear Rx/Tx FIFO's */
+ if (info->board->chip_flag)
+ outb(UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT |
+ MOXA_MUST_FCR_GDA_MODE_ENABLE,
+ info->ioaddr + UART_FCR);
+ else
+ outb(UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
+ info->ioaddr + UART_FCR);
- return status;
+ /* read data port to reset things */
+ (void) inb(info->ioaddr + UART_RX);
+
+ if (info->tty)
+ set_bit(TTY_IO_ERROR, &info->tty->flags);
+
+ info->flags &= ~ASYNC_INITIALIZED;
+
+ if (info->board->chip_flag)
+ SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+
+ spin_unlock_irqrestore(&info->slock, flags);
}
/*
@@ -958,19 +893,17 @@ static unsigned char mxser_get_msr(int baseaddr, int mode, int port, struct mxse
*/
static int mxser_open(struct tty_struct *tty, struct file *filp)
{
- struct mxser_struct *info;
+ struct mxser_port *info;
+ unsigned long flags;
int retval, line;
- /* initialize driver_data in case something fails */
- tty->driver_data = NULL;
-
line = tty->index;
if (line == MXSER_PORTS)
return 0;
if (line < 0 || line > MXSER_PORTS)
return -ENODEV;
- info = mxvar_table + line;
- if (!info->base)
+ info = &mxser_boards[line / MXSER_PORTS_PER_BOARD].ports[line % MXSER_PORTS_PER_BOARD];
+ if (!info->ioaddr)
return -ENODEV;
tty->driver_data = info;
@@ -978,6 +911,9 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
/*
* Start up serial port
*/
+ spin_lock_irqsave(&info->slock, flags);
+ info->count++;
+ spin_unlock_irqrestore(&info->slock, flags);
retval = mxser_startup(info);
if (retval)
return retval;
@@ -986,21 +922,6 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
if (retval)
return retval;
- info->count++;
-
- if ((info->count == 1) && (info->flags & ASYNC_SPLIT_TERMIOS)) {
- if (tty->driver->subtype == SERIAL_TYPE_NORMAL)
- *tty->termios = info->normal_termios;
- else
- *tty->termios = info->callout_termios;
- mxser_change_speed(info, NULL);
- }
-
- /*
- status = mxser_get_msr(info->base, 0, info->port);
- mxser_check_modem_status(info, status);
- */
-
/* unmark here for very high baud rate (ex. 921600 bps) used */
tty->low_latency = 1;
return 0;
@@ -1014,11 +935,10 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
*/
static void mxser_close(struct tty_struct *tty, struct file *filp)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
unsigned long timeout;
unsigned long flags;
- struct tty_ldisc *ld;
if (tty->index == MXSER_PORTS)
return;
@@ -1045,7 +965,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
}
if (--info->count < 0) {
printk(KERN_ERR "mxser_close: bad serial port count for "
- "ttys%d: %d\n", info->port, info->count);
+ "ttys%d: %d\n", tty->index, info->count);
info->count = 0;
}
if (info->count) {
@@ -1074,20 +994,18 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
* line status register.
*/
info->IER &= ~UART_IER_RLSI;
- if (info->IsMoxaMustChipFlag)
+ if (info->board->chip_flag)
info->IER &= ~MOXA_MUST_RECV_ISR;
-/* by William
- info->read_status_mask &= ~UART_LSR_DR;
-*/
+
if (info->flags & ASYNC_INITIALIZED) {
- outb(info->IER, info->base + UART_IER);
+ outb(info->IER, info->ioaddr + UART_IER);
/*
* Before we drop DTR, make sure the UART transmitter
* has completely drained; this is especially
* important if there is a transmit FIFO!
*/
timeout = jiffies + HZ;
- while (!(inb(info->base + UART_LSR) & UART_LSR_TEMT)) {
+ while (!(inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT)) {
schedule_timeout_interruptible(5);
if (time_after(jiffies, timeout))
break;
@@ -1097,14 +1015,9 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
if (tty->driver->flush_buffer)
tty->driver->flush_buffer(tty);
-
- ld = tty_ldisc_ref(tty);
- if (ld) {
- if (ld->flush_buffer)
- ld->flush_buffer(tty);
- tty_ldisc_deref(ld);
- }
-
+
+ tty_ldisc_flush(tty);
+
tty->closing = 0;
info->event = 0;
info->tty = NULL;
@@ -1115,14 +1028,12 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
}
info->flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING);
- wake_up_interruptible(&info->close_wait);
-
}
static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
int c, total = 0;
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
unsigned long flags;
if (!info->xmit_buf)
@@ -1146,13 +1057,15 @@ static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int cou
total += c;
}
- if (info->xmit_cnt && !tty->stopped && !(info->IER & UART_IER_THRI)) {
+ if (info->xmit_cnt && !tty->stopped) {
if (!tty->hw_stopped ||
(info->type == PORT_16550A) ||
- (info->IsMoxaMustChipFlag)) {
+ (info->board->chip_flag)) {
spin_lock_irqsave(&info->slock, flags);
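+			/* write IER with THRI cleared first so that re-enabling it
+			 * below retriggers the transmit-empty interrupt */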
+ outb(info->IER & ~UART_IER_THRI, info->ioaddr +
+ UART_IER);
info->IER |= UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
+ outb(info->IER, info->ioaddr + UART_IER);
spin_unlock_irqrestore(&info->slock, flags);
}
}
@@ -1161,7 +1074,7 @@ static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int cou
static void mxser_put_char(struct tty_struct *tty, unsigned char ch)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
unsigned long flags;
if (!info->xmit_buf)
@@ -1175,13 +1088,14 @@ static void mxser_put_char(struct tty_struct *tty, unsigned char ch)
info->xmit_head &= SERIAL_XMIT_SIZE - 1;
info->xmit_cnt++;
spin_unlock_irqrestore(&info->slock, flags);
- if (!tty->stopped && !(info->IER & UART_IER_THRI)) {
+ if (!tty->stopped) {
if (!tty->hw_stopped ||
(info->type == PORT_16550A) ||
- info->IsMoxaMustChipFlag) {
+ info->board->chip_flag) {
spin_lock_irqsave(&info->slock, flags);
+ outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
info->IER |= UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
+ outb(info->IER, info->ioaddr + UART_IER);
spin_unlock_irqrestore(&info->slock, flags);
}
}
@@ -1190,7 +1104,7 @@ static void mxser_put_char(struct tty_struct *tty, unsigned char ch)
static void mxser_flush_chars(struct tty_struct *tty)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
unsigned long flags;
if (info->xmit_cnt <= 0 ||
@@ -1198,21 +1112,22 @@ static void mxser_flush_chars(struct tty_struct *tty)
!info->xmit_buf ||
(tty->hw_stopped &&
(info->type != PORT_16550A) &&
- (!info->IsMoxaMustChipFlag)
+ (!info->board->chip_flag)
))
return;
spin_lock_irqsave(&info->slock, flags);
+ outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
info->IER |= UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
+ outb(info->IER, info->ioaddr + UART_IER);
spin_unlock_irqrestore(&info->slock, flags);
}
static int mxser_write_room(struct tty_struct *tty)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
int ret;
ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
@@ -1223,13 +1138,13 @@ static int mxser_write_room(struct tty_struct *tty)
static int mxser_chars_in_buffer(struct tty_struct *tty)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
return info->xmit_cnt;
}
static void mxser_flush_buffer(struct tty_struct *tty)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
char fcr;
unsigned long flags;
@@ -1237,39 +1152,497 @@ static void mxser_flush_buffer(struct tty_struct *tty)
spin_lock_irqsave(&info->slock, flags);
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
- /* below added by shinhay */
- fcr = inb(info->base + UART_FCR);
+ fcr = inb(info->ioaddr + UART_FCR);
outb((fcr | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
- info->base + UART_FCR);
- outb(fcr, info->base + UART_FCR);
+ info->ioaddr + UART_FCR);
+ outb(fcr, info->ioaddr + UART_FCR);
spin_unlock_irqrestore(&info->slock, flags);
- /* above added by shinhay */
tty_wakeup(tty);
}
-static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
+/*
+ * ------------------------------------------------------------
+ * friends of mxser_ioctl()
+ * ------------------------------------------------------------
+ */
+static int mxser_get_serial_info(struct mxser_port *info,
+ struct serial_struct __user *retinfo)
{
- struct mxser_struct *info = tty->driver_data;
- int retval;
- struct async_icount cprev, cnow; /* kernel counter temps */
+ struct serial_struct tmp = {
+ .type = info->type,
+ .line = info->tty->index,
+ .port = info->ioaddr,
+ .irq = info->board->irq,
+ .flags = info->flags,
+ .baud_base = info->baud_base,
+ .close_delay = info->close_delay,
+ .closing_wait = info->closing_wait,
+ .custom_divisor = info->custom_divisor,
+ .hub6 = 0
+ };
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+ return 0;
+}
+
+static int mxser_set_serial_info(struct mxser_port *info,
+ struct serial_struct __user *new_info)
+{
+ struct serial_struct new_serial;
+ speed_t baud;
+ unsigned long sl_flags;
+ unsigned int flags;
+ int retval = 0;
+
+ if (!new_info || !info->ioaddr)
+ return -ENODEV;
+ if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
+ return -EFAULT;
+
+ if (new_serial.irq != info->board->irq ||
+ new_serial.port != info->ioaddr)
+ return -EINVAL;
+
+ flags = info->flags & ASYNC_SPD_MASK;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ if ((new_serial.baud_base != info->baud_base) ||
+ (new_serial.close_delay != info->close_delay) ||
+ ((new_serial.flags & ~ASYNC_USR_MASK) != (info->flags & ~ASYNC_USR_MASK)))
+ return -EPERM;
+ info->flags = ((info->flags & ~ASYNC_USR_MASK) |
+ (new_serial.flags & ASYNC_USR_MASK));
+ } else {
+ /*
+ * OK, past this point, all the error checking has been done.
+ * At this point, we start making changes.....
+ */
+ info->flags = ((info->flags & ~ASYNC_FLAGS) |
+ (new_serial.flags & ASYNC_FLAGS));
+ info->close_delay = new_serial.close_delay * HZ / 100;
+ info->closing_wait = new_serial.closing_wait * HZ / 100;
+ info->tty->low_latency =
+ (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ info->tty->low_latency = 0;
+ if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
+ (new_serial.baud_base != info->baud_base ||
+ new_serial.custom_divisor !=
+ info->custom_divisor)) {
+ baud = new_serial.baud_base / new_serial.custom_divisor;
+ tty_encode_baud_rate(info->tty, baud, baud);
+ }
+ }
+
+ info->type = new_serial.type;
+
+ process_txrx_fifo(info);
+
+ if (info->flags & ASYNC_INITIALIZED) {
+ if (flags != (info->flags & ASYNC_SPD_MASK)) {
+ spin_lock_irqsave(&info->slock, sl_flags);
+ mxser_change_speed(info, NULL);
+ spin_unlock_irqrestore(&info->slock, sl_flags);
+ }
+ } else
+ retval = mxser_startup(info);
+
+ return retval;
+}
+
+/*
+ * mxser_get_lsr_info - get line status register info
+ *
+ * Purpose: Let user call ioctl() to get info when the UART physically
+ * is emptied. On bus types like RS485, the transmitter must
+ * release the bus after transmitting. This must be done when
+ *	    the transmit shift register is empty, not when the
+ * transmit holding register is empty. This functionality
+ * allows an RS485 driver to be written in user space.
+ */
+static int mxser_get_lsr_info(struct mxser_port *info,
+ unsigned int __user *value)
+{
+ unsigned char status;
+ unsigned int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->slock, flags);
+ status = inb(info->ioaddr + UART_LSR);
+ spin_unlock_irqrestore(&info->slock, flags);
+ result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
+ return put_user(result, value);
+}
+
+/*
+ * This routine sends a break character out the serial port.
+ */
+static void mxser_send_break(struct mxser_port *info, int duration)
+{
+ unsigned long flags;
+
+ if (!info->ioaddr)
+ return;
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_lock_irqsave(&info->slock, flags);
+ outb(inb(info->ioaddr + UART_LCR) | UART_LCR_SBC,
+ info->ioaddr + UART_LCR);
+ spin_unlock_irqrestore(&info->slock, flags);
+ schedule_timeout(duration);
+ spin_lock_irqsave(&info->slock, flags);
+ outb(inb(info->ioaddr + UART_LCR) & ~UART_LCR_SBC,
+ info->ioaddr + UART_LCR);
+ spin_unlock_irqrestore(&info->slock, flags);
+}
+
+static int mxser_tiocmget(struct tty_struct *tty, struct file *file)
+{
+ struct mxser_port *info = tty->driver_data;
+ unsigned char control, status;
+ unsigned long flags;
+
+
+ if (tty->index == MXSER_PORTS)
+ return -ENOIOCTLCMD;
+ if (test_bit(TTY_IO_ERROR, &tty->flags))
+ return -EIO;
+
+ control = info->MCR;
+
+ spin_lock_irqsave(&info->slock, flags);
+ status = inb(info->ioaddr + UART_MSR);
+ if (status & UART_MSR_ANY_DELTA)
+ mxser_check_modem_status(info, status);
+ spin_unlock_irqrestore(&info->slock, flags);
+ return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) |
+ ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) |
+ ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) |
+ ((status & UART_MSR_RI) ? TIOCM_RNG : 0) |
+ ((status & UART_MSR_DSR) ? TIOCM_DSR : 0) |
+ ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
+}
+
+static int mxser_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+{
+ struct mxser_port *info = tty->driver_data;
+ unsigned long flags;
+
+
+ if (tty->index == MXSER_PORTS)
+ return -ENOIOCTLCMD;
+ if (test_bit(TTY_IO_ERROR, &tty->flags))
+ return -EIO;
+
+ spin_lock_irqsave(&info->slock, flags);
+
+ if (set & TIOCM_RTS)
+ info->MCR |= UART_MCR_RTS;
+ if (set & TIOCM_DTR)
+ info->MCR |= UART_MCR_DTR;
+
+ if (clear & TIOCM_RTS)
+ info->MCR &= ~UART_MCR_RTS;
+ if (clear & TIOCM_DTR)
+ info->MCR &= ~UART_MCR_DTR;
+
+ outb(info->MCR, info->ioaddr + UART_MCR);
+ spin_unlock_irqrestore(&info->slock, flags);
+ return 0;
+}
+
+static int __init mxser_program_mode(int port)
+{
+ int id, i, j, n;
+
+ outb(0, port);
+ outb(0, port);
+ outb(0, port);
+ (void)inb(port);
+ (void)inb(port);
+ outb(0, port);
+ (void)inb(port);
+
+ id = inb(port + 1) & 0x1F;
+ if ((id != C168_ASIC_ID) &&
+ (id != C104_ASIC_ID) &&
+ (id != C102_ASIC_ID) &&
+ (id != CI132_ASIC_ID) &&
+ (id != CI134_ASIC_ID) &&
+ (id != CI104J_ASIC_ID))
+ return -1;
+ for (i = 0, j = 0; i < 4; i++) {
+ n = inb(port + 2);
+ if (n == 'M') {
+ j = 1;
+ } else if ((j == 1) && (n == 1)) {
+ j = 2;
+ break;
+ } else
+ j = 0;
+ }
+ if (j != 2)
+ id = -2;
+ return id;
+}
+
+static void __init mxser_normal_mode(int port)
+{
+ int i, n;
+
+ outb(0xA5, port + 1);
+ outb(0x80, port + 3);
+ outb(12, port + 0); /* 9600 bps */
+ outb(0, port + 1);
+ outb(0x03, port + 3); /* 8 data bits */
+ outb(0x13, port + 4); /* loop back mode */
+ for (i = 0; i < 16; i++) {
+ n = inb(port + 5);
+ if ((n & 0x61) == 0x60)
+ break;
+ if ((n & 1) == 1)
+ (void)inb(port);
+ }
+ outb(0x00, port + 4);
+}
+
+#define CHIP_SK 0x01 /* Serial Data Clock in Eprom */
+#define CHIP_DO 0x02 /* Serial Data Output in Eprom */
+#define CHIP_CS 0x04 /* Serial Chip Select in Eprom */
+#define CHIP_DI 0x08 /* Serial Data Input in Eprom */
+#define EN_CCMD 0x000 /* Chip's command register */
+#define EN0_RSARLO 0x008 /* Remote start address reg 0 */
+#define EN0_RSARHI 0x009 /* Remote start address reg 1 */
+#define EN0_RCNTLO 0x00A /* Remote byte count reg WR */
+#define EN0_RCNTHI 0x00B /* Remote byte count reg WR */
+#define EN0_DCFG 0x00E /* Data configuration reg WR */
+#define EN0_PORT 0x010 /* Rcv missed frame error counter RD */
+#define ENC_PAGE0 0x000 /* Select page 0 of chip registers */
+#define ENC_PAGE3 0x0C0 /* Select page 3 of chip registers */
+static int __init mxser_read_register(int port, unsigned short *regs)
+{
+ int i, k, value, id;
+ unsigned int j;
+
+ id = mxser_program_mode(port);
+ if (id < 0)
+ return id;
+ for (i = 0; i < 14; i++) {
+ k = (i & 0x3F) | 0x180;
+ for (j = 0x100; j > 0; j >>= 1) {
+ outb(CHIP_CS, port);
+ if (k & j) {
+ outb(CHIP_CS | CHIP_DO, port);
+ outb(CHIP_CS | CHIP_DO | CHIP_SK, port); /* A? bit of read */
+ } else {
+ outb(CHIP_CS, port);
+ outb(CHIP_CS | CHIP_SK, port); /* A? bit of read */
+ }
+ }
+ (void)inb(port);
+ value = 0;
+ for (k = 0, j = 0x8000; k < 16; k++, j >>= 1) {
+ outb(CHIP_CS, port);
+ outb(CHIP_CS | CHIP_SK, port);
+ if (inb(port) & CHIP_DI)
+ value |= j;
+ }
+ regs[i] = value;
+ outb(0, port);
+ }
+ mxser_normal_mode(port);
+ return id;
+}
+
+static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
+{
+ struct mxser_port *port;
+ int result, status;
+ unsigned int i, j;
+
+ switch (cmd) {
+ case MOXA_GET_MAJOR:
+ return put_user(ttymajor, (int __user *)argp);
+
+ case MOXA_CHKPORTENABLE:
+ result = 0;
+
+ for (i = 0; i < MXSER_BOARDS; i++)
+ for (j = 0; j < MXSER_PORTS_PER_BOARD; j++)
+ if (mxser_boards[i].ports[j].ioaddr)
+ result |= (1 << i);
+
+ return put_user(result, (unsigned long __user *)argp);
+ case MOXA_GETDATACOUNT:
+ if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log)))
+ return -EFAULT;
+ return 0;
+ case MOXA_GETMSTATUS:
+ for (i = 0; i < MXSER_BOARDS; i++)
+ for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
+ port = &mxser_boards[i].ports[j];
+
+ GMStatus[i].ri = 0;
+ if (!port->ioaddr) {
+ GMStatus[i].dcd = 0;
+ GMStatus[i].dsr = 0;
+ GMStatus[i].cts = 0;
+ continue;
+ }
+
+ if (!port->tty || !port->tty->termios)
+ GMStatus[i].cflag =
+ port->normal_termios.c_cflag;
+ else
+ GMStatus[i].cflag =
+ port->tty->termios->c_cflag;
+
+ status = inb(port->ioaddr + UART_MSR);
+ if (status & 0x80 /*UART_MSR_DCD */ )
+ GMStatus[i].dcd = 1;
+ else
+ GMStatus[i].dcd = 0;
+
+ if (status & 0x20 /*UART_MSR_DSR */ )
+ GMStatus[i].dsr = 1;
+ else
+ GMStatus[i].dsr = 0;
+
+
+ if (status & 0x10 /*UART_MSR_CTS */ )
+ GMStatus[i].cts = 1;
+ else
+ GMStatus[i].cts = 0;
+ }
+ if (copy_to_user(argp, GMStatus,
+ sizeof(struct mxser_mstatus) * MXSER_PORTS))
+ return -EFAULT;
+ return 0;
+ case MOXA_ASPP_MON_EXT: {
+ int p, shiftbit;
+ unsigned long opmode;
+ unsigned cflag, iflag;
+
+ for (i = 0; i < MXSER_BOARDS; i++)
+ for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
+ port = &mxser_boards[i].ports[j];
+ if (!port->ioaddr)
+ continue;
+
+ status = mxser_get_msr(port->ioaddr, 0, i);
+
+ if (status & UART_MSR_TERI)
+ port->icount.rng++;
+ if (status & UART_MSR_DDSR)
+ port->icount.dsr++;
+ if (status & UART_MSR_DDCD)
+ port->icount.dcd++;
+ if (status & UART_MSR_DCTS)
+ port->icount.cts++;
+
+ port->mon_data.modem_status = status;
+ mon_data_ext.rx_cnt[i] = port->mon_data.rxcnt;
+ mon_data_ext.tx_cnt[i] = port->mon_data.txcnt;
+ mon_data_ext.up_rxcnt[i] =
+ port->mon_data.up_rxcnt;
+ mon_data_ext.up_txcnt[i] =
+ port->mon_data.up_txcnt;
+ mon_data_ext.modem_status[i] =
+ port->mon_data.modem_status;
+ mon_data_ext.baudrate[i] =
+ tty_get_baud_rate(port->tty);
+
+ if (!port->tty || !port->tty->termios) {
+ cflag = port->normal_termios.c_cflag;
+ iflag = port->normal_termios.c_iflag;
+ } else {
+ cflag = port->tty->termios->c_cflag;
+ iflag = port->tty->termios->c_iflag;
+ }
+
+ mon_data_ext.databits[i] = cflag & CSIZE;
+
+ mon_data_ext.stopbits[i] = cflag & CSTOPB;
+
+ mon_data_ext.parity[i] =
+ cflag & (PARENB | PARODD | CMSPAR);
+
+ mon_data_ext.flowctrl[i] = 0x00;
+
+ if (cflag & CRTSCTS)
+ mon_data_ext.flowctrl[i] |= 0x03;
+
+ if (iflag & (IXON | IXOFF))
+ mon_data_ext.flowctrl[i] |= 0x0C;
+
+ if (port->type == PORT_16550A)
+ mon_data_ext.fifo[i] = 1;
+ else
+ mon_data_ext.fifo[i] = 0;
+
+ p = i % 4;
+ shiftbit = p * 2;
+ opmode = inb(port->opmode_ioaddr) >> shiftbit;
+ opmode &= OP_MODE_MASK;
+
+ mon_data_ext.iftype[i] = opmode;
+
+ }
+ if (copy_to_user(argp, &mon_data_ext,
+ sizeof(mon_data_ext)))
+ return -EFAULT;
+
+ return 0;
+
+ } default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static int mxser_cflags_changed(struct mxser_port *info, unsigned long arg,
+ struct async_icount *cprev)
+{
+ struct async_icount cnow;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&info->slock, flags);
+ cnow = info->icount; /* atomic copy */
+ spin_unlock_irqrestore(&info->slock, flags);
+
+ ret = ((arg & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
+ ((arg & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) ||
+ ((arg & TIOCM_CD) && (cnow.dcd != cprev->dcd)) ||
+ ((arg & TIOCM_CTS) && (cnow.cts != cprev->cts));
+
+ *cprev = cnow;
+
+ return ret;
+}
+
+static int mxser_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct mxser_port *info = tty->driver_data;
+ struct async_icount cnow;
struct serial_icounter_struct __user *p_cuser;
- unsigned long templ;
unsigned long flags;
void __user *argp = (void __user *)arg;
+ int retval;
if (tty->index == MXSER_PORTS)
return mxser_ioctl_special(cmd, argp);
- /* following add by Victor Yu. 01-05-2004 */
if (cmd == MOXA_SET_OP_MODE || cmd == MOXA_GET_OP_MODE) {
- int opmode, p;
+ int p;
+ unsigned long opmode;
static unsigned char ModeMask[] = { 0xfc, 0xf3, 0xcf, 0x3f };
int shiftbit;
unsigned char val, mask;
- p = info->port % 4;
+ p = tty->index % 4;
if (cmd == MOXA_SET_OP_MODE) {
if (get_user(opmode, (int __user *) argp))
return -EFAULT;
@@ -1288,17 +1661,16 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
shiftbit = p * 2;
opmode = inb(info->opmode_ioaddr) >> shiftbit;
opmode &= OP_MODE_MASK;
- if (copy_to_user(argp, &opmode, sizeof(int)))
+ if (put_user(opmode, (int __user *)argp))
return -EFAULT;
}
return 0;
}
- /* above add by Victor Yu. 01-05-2004 */
- if ((cmd != TIOCGSERIAL) && (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
- }
+ if (cmd != TIOCGSERIAL && cmd != TIOCMIWAIT && cmd != TIOCGICOUNT &&
+ test_bit(TTY_IO_ERROR, &tty->flags))
+ return -EIO;
+
switch (cmd) {
case TCSBRK: /* SVID version: non-zero arg --> no break */
retval = tty_check_change(tty);
@@ -1316,11 +1688,10 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
mxser_send_break(info, arg ? arg * (HZ / 10) : HZ / 4);
return 0;
case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
+ return put_user(!!C_CLOCAL(tty), (unsigned long __user *)argp);
case TIOCSSOFTCAR:
- if (get_user(templ, (unsigned long __user *) argp))
+ if (get_user(arg, (unsigned long __user *)argp))
return -EFAULT;
- arg = templ;
tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
return 0;
case TIOCGSERIAL:
@@ -1340,30 +1711,19 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
cnow = info->icount; /* note the counters on entry */
spin_unlock_irqrestore(&info->slock, flags);
- wait_event_interruptible(info->delta_msr_wait, ({
- cprev = cnow;
- spin_lock_irqsave(&info->slock, flags);
- cnow = info->icount; /* atomic copy */
- spin_unlock_irqrestore(&info->slock, flags);
-
- ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
- ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
- ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
- ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts));
- }));
- break;
- /*
- * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
- * Return: write counters to the user passed counter struct
- * NB: both 1->0 and 0->1 transitions are counted except for
- * RI where only 0->1 is counted.
- */
+ return wait_event_interruptible(info->delta_msr_wait,
+ mxser_cflags_changed(info, arg, &cnow));
+ /*
+ * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
+ * Return: write counters to the user passed counter struct
+ * NB: both 1->0 and 0->1 transitions are counted except for
+ * RI where only 0->1 is counted.
+ */
case TIOCGICOUNT:
spin_lock_irqsave(&info->slock, flags);
cnow = info->icount;
spin_unlock_irqrestore(&info->slock, flags);
p_cuser = argp;
- /* modified by casper 1/11/2000 */
if (put_user(cnow.frame, &p_cuser->frame))
return -EFAULT;
if (put_user(cnow.brk, &p_cuser->brk))
@@ -1385,240 +1745,65 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
return 0;
case MOXA_HighSpeedOn:
return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
- case MOXA_SDS_RSTICOUNTER: {
- info->mon_data.rxcnt = 0;
- info->mon_data.txcnt = 0;
- return 0;
- }
-/* (above) added by James. */
- case MOXA_ASPP_SETBAUD:{
- long baud;
- if (get_user(baud, (long __user *)argp))
- return -EFAULT;
- mxser_set_baud(info, baud);
- return 0;
- }
- case MOXA_ASPP_GETBAUD:
- if (copy_to_user(argp, &info->realbaud, sizeof(long)))
- return -EFAULT;
-
+ case MOXA_SDS_RSTICOUNTER:
+ info->mon_data.rxcnt = 0;
+ info->mon_data.txcnt = 0;
return 0;
case MOXA_ASPP_OQUEUE:{
- int len, lsr;
+ int len, lsr;
- len = mxser_chars_in_buffer(tty);
+ len = mxser_chars_in_buffer(tty);
- lsr = inb(info->base + UART_LSR) & UART_LSR_TEMT;
+ lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT;
- len += (lsr ? 0 : 1);
+ len += (lsr ? 0 : 1);
- if (copy_to_user(argp, &len, sizeof(int)))
- return -EFAULT;
-
- return 0;
- }
+ return put_user(len, (int __user *)argp);
+ }
case MOXA_ASPP_MON: {
- int mcr, status;
-
- /* info->mon_data.ser_param = tty->termios->c_cflag; */
+ int mcr, status;
- status = mxser_get_msr(info->base, 1, info->port, info);
- mxser_check_modem_status(info, status);
-
- mcr = inb(info->base + UART_MCR);
- if (mcr & MOXA_MUST_MCR_XON_FLAG)
- info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFHOLD;
- else
- info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFHOLD;
-
- if (mcr & MOXA_MUST_MCR_TX_XON)
- info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFXENT;
- else
- info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFXENT;
-
- if (info->tty->hw_stopped)
- info->mon_data.hold_reason |= NPPI_NOTIFY_CTSHOLD;
- else
- info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD;
-
- if (copy_to_user(argp, &info->mon_data,
- sizeof(struct mxser_mon)))
- return -EFAULT;
-
- return 0;
- }
-
- case MOXA_ASPP_LSTATUS: {
- if (copy_to_user(argp, &info->err_shadow,
- sizeof(unsigned char)))
- return -EFAULT;
-
- info->err_shadow = 0;
- return 0;
- }
- case MOXA_SET_BAUD_METHOD: {
- int method;
-
- if (get_user(method, (int __user *)argp))
- return -EFAULT;
- mxser_set_baud_method[info->port] = method;
- if (copy_to_user(argp, &method, sizeof(int)))
- return -EFAULT;
+ status = mxser_get_msr(info->ioaddr, 1, tty->index);
+ mxser_check_modem_status(info, status);
- return 0;
- }
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
+ mcr = inb(info->ioaddr + UART_MCR);
+ if (mcr & MOXA_MUST_MCR_XON_FLAG)
+ info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFHOLD;
+ else
+ info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFHOLD;
-#ifndef CMSPAR
-#define CMSPAR 010000000000
-#endif
+ if (mcr & MOXA_MUST_MCR_TX_XON)
+ info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFXENT;
+ else
+ info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFXENT;
-static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
-{
- int i, result, status;
+ if (info->tty->hw_stopped)
+ info->mon_data.hold_reason |= NPPI_NOTIFY_CTSHOLD;
+ else
+ info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD;
- switch (cmd) {
- case MOXA_GET_CONF:
- if (copy_to_user(argp, mxsercfg,
- sizeof(struct mxser_hwconf) * 4))
+ if (copy_to_user(argp, &info->mon_data,
+ sizeof(struct mxser_mon)))
return -EFAULT;
- return 0;
- case MOXA_GET_MAJOR:
- if (copy_to_user(argp, &ttymajor, sizeof(int)))
- return -EFAULT;
- return 0;
- case MOXA_GET_CUMAJOR:
- if (copy_to_user(argp, &calloutmajor, sizeof(int)))
- return -EFAULT;
return 0;
-
- case MOXA_CHKPORTENABLE:
- result = 0;
- for (i = 0; i < MXSER_PORTS; i++) {
- if (mxvar_table[i].base)
- result |= (1 << i);
- }
- return put_user(result, (unsigned long __user *)argp);
- case MOXA_GETDATACOUNT:
- if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log)))
+ }
+ case MOXA_ASPP_LSTATUS: {
+ if (put_user(info->err_shadow, (unsigned char __user *)argp))
return -EFAULT;
- return 0;
- case MOXA_GETMSTATUS:
- for (i = 0; i < MXSER_PORTS; i++) {
- GMStatus[i].ri = 0;
- if (!mxvar_table[i].base) {
- GMStatus[i].dcd = 0;
- GMStatus[i].dsr = 0;
- GMStatus[i].cts = 0;
- continue;
- }
-
- if (!mxvar_table[i].tty || !mxvar_table[i].tty->termios)
- GMStatus[i].cflag = mxvar_table[i].normal_termios.c_cflag;
- else
- GMStatus[i].cflag = mxvar_table[i].tty->termios->c_cflag;
-
- status = inb(mxvar_table[i].base + UART_MSR);
- if (status & 0x80 /*UART_MSR_DCD */ )
- GMStatus[i].dcd = 1;
- else
- GMStatus[i].dcd = 0;
-
- if (status & 0x20 /*UART_MSR_DSR */ )
- GMStatus[i].dsr = 1;
- else
- GMStatus[i].dsr = 0;
-
- if (status & 0x10 /*UART_MSR_CTS */ )
- GMStatus[i].cts = 1;
- else
- GMStatus[i].cts = 0;
- }
- if (copy_to_user(argp, GMStatus,
- sizeof(struct mxser_mstatus) * MXSER_PORTS))
- return -EFAULT;
+ info->err_shadow = 0;
return 0;
- case MOXA_ASPP_MON_EXT: {
- int status;
- int opmode, p;
- int shiftbit;
- unsigned cflag, iflag;
-
- for (i = 0; i < MXSER_PORTS; i++) {
- if (!mxvar_table[i].base)
- continue;
-
- status = mxser_get_msr(mxvar_table[i].base, 0,
- i, &(mxvar_table[i]));
- /*
- mxser_check_modem_status(&mxvar_table[i],
- status);
- */
- if (status & UART_MSR_TERI)
- mxvar_table[i].icount.rng++;
- if (status & UART_MSR_DDSR)
- mxvar_table[i].icount.dsr++;
- if (status & UART_MSR_DDCD)
- mxvar_table[i].icount.dcd++;
- if (status & UART_MSR_DCTS)
- mxvar_table[i].icount.cts++;
-
- mxvar_table[i].mon_data.modem_status = status;
- mon_data_ext.rx_cnt[i] = mxvar_table[i].mon_data.rxcnt;
- mon_data_ext.tx_cnt[i] = mxvar_table[i].mon_data.txcnt;
- mon_data_ext.up_rxcnt[i] = mxvar_table[i].mon_data.up_rxcnt;
- mon_data_ext.up_txcnt[i] = mxvar_table[i].mon_data.up_txcnt;
- mon_data_ext.modem_status[i] = mxvar_table[i].mon_data.modem_status;
- mon_data_ext.baudrate[i] = mxvar_table[i].realbaud;
-
- if (!mxvar_table[i].tty || !mxvar_table[i].tty->termios) {
- cflag = mxvar_table[i].normal_termios.c_cflag;
- iflag = mxvar_table[i].normal_termios.c_iflag;
- } else {
- cflag = mxvar_table[i].tty->termios->c_cflag;
- iflag = mxvar_table[i].tty->termios->c_iflag;
- }
-
- mon_data_ext.databits[i] = cflag & CSIZE;
-
- mon_data_ext.stopbits[i] = cflag & CSTOPB;
-
- mon_data_ext.parity[i] = cflag & (PARENB | PARODD | CMSPAR);
-
- mon_data_ext.flowctrl[i] = 0x00;
-
- if (cflag & CRTSCTS)
- mon_data_ext.flowctrl[i] |= 0x03;
-
- if (iflag & (IXON | IXOFF))
- mon_data_ext.flowctrl[i] |= 0x0C;
-
- if (mxvar_table[i].type == PORT_16550A)
- mon_data_ext.fifo[i] = 1;
- else
- mon_data_ext.fifo[i] = 0;
-
- p = i % 4;
- shiftbit = p * 2;
- opmode = inb(mxvar_table[i].opmode_ioaddr) >> shiftbit;
- opmode &= OP_MODE_MASK;
-
- mon_data_ext.iftype[i] = opmode;
-
- }
- if (copy_to_user(argp, &mon_data_ext, sizeof(struct mxser_mon_ext)))
- return -EFAULT;
-
- return 0;
+ }
+ case MOXA_SET_BAUD_METHOD: {
+ int method;
- }
+ if (get_user(method, (int __user *)argp))
+ return -EFAULT;
+ mxser_set_baud_method[tty->index] = method;
+ return put_user(method, (int __user *)argp);
+ }
default:
return -ENOIOCTLCMD;
}
@@ -1627,107 +1812,105 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
static void mxser_stoprx(struct tty_struct *tty)
{
- struct mxser_struct *info = tty->driver_data;
- /* unsigned long flags; */
+ struct mxser_port *info = tty->driver_data;
info->ldisc_stop_rx = 1;
if (I_IXOFF(tty)) {
- /* MX_LOCK(&info->slock); */
- /* following add by Victor Yu. 09-02-2002 */
- if (info->IsMoxaMustChipFlag) {
+ if (info->board->chip_flag) {
info->IER &= ~MOXA_MUST_RECV_ISR;
- outb(info->IER, info->base + UART_IER);
+ outb(info->IER, info->ioaddr + UART_IER);
} else {
- /* above add by Victor Yu. 09-02-2002 */
info->x_char = STOP_CHAR(tty);
- /* mask by Victor Yu. 09-02-2002 */
- /* outb(info->IER, 0); */
- outb(0, info->base + UART_IER);
+ outb(0, info->ioaddr + UART_IER);
info->IER |= UART_IER_THRI;
- /* force Tx interrupt */
- outb(info->IER, info->base + UART_IER);
- } /* add by Victor Yu. 09-02-2002 */
- /* MX_UNLOCK(&info->slock); */
+ outb(info->IER, info->ioaddr + UART_IER);
+ }
}
if (info->tty->termios->c_cflag & CRTSCTS) {
- /* MX_LOCK(&info->slock); */
info->MCR &= ~UART_MCR_RTS;
- outb(info->MCR, info->base + UART_MCR);
- /* MX_UNLOCK(&info->slock); */
+ outb(info->MCR, info->ioaddr + UART_MCR);
}
}
-static void mxser_startrx(struct tty_struct *tty)
+/*
+ * This routine is called by the upper-layer tty layer to signal that
+ * incoming characters should be throttled.
+ */
+static void mxser_throttle(struct tty_struct *tty)
+{
+ mxser_stoprx(tty);
+}
+
+static void mxser_unthrottle(struct tty_struct *tty)
{
- struct mxser_struct *info = tty->driver_data;
- /* unsigned long flags; */
+ struct mxser_port *info = tty->driver_data;
+ /* startrx */
info->ldisc_stop_rx = 0;
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
else {
- /* MX_LOCK(&info->slock); */
-
- /* following add by Victor Yu. 09-02-2002 */
- if (info->IsMoxaMustChipFlag) {
+ if (info->board->chip_flag) {
info->IER |= MOXA_MUST_RECV_ISR;
- outb(info->IER, info->base + UART_IER);
+ outb(info->IER, info->ioaddr + UART_IER);
} else {
- /* above add by Victor Yu. 09-02-2002 */
-
info->x_char = START_CHAR(tty);
- /* mask by Victor Yu. 09-02-2002 */
- /* outb(info->IER, 0); */
- /* add by Victor Yu. 09-02-2002 */
- outb(0, info->base + UART_IER);
- /* force Tx interrupt */
+ outb(0, info->ioaddr + UART_IER);
info->IER |= UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- } /* add by Victor Yu. 09-02-2002 */
- /* MX_UNLOCK(&info->slock); */
+ outb(info->IER, info->ioaddr + UART_IER);
+ }
}
}
if (info->tty->termios->c_cflag & CRTSCTS) {
- /* MX_LOCK(&info->slock); */
info->MCR |= UART_MCR_RTS;
- outb(info->MCR, info->base + UART_MCR);
- /* MX_UNLOCK(&info->slock); */
+ outb(info->MCR, info->ioaddr + UART_MCR);
}
}
/*
- * This routine is called by the upper-layer tty layer to signal that
- * incoming characters should be throttled.
+ * mxser_stop() and mxser_start()
+ *
+ * These routines are called before setting or resetting tty->stopped.
+ * They enable or disable transmitter interrupts, as necessary.
*/
-static void mxser_throttle(struct tty_struct *tty)
+static void mxser_stop(struct tty_struct *tty)
{
- /* struct mxser_struct *info = tty->driver_data; */
- /* unsigned long flags; */
+ struct mxser_port *info = tty->driver_data;
+ unsigned long flags;
- /* MX_LOCK(&info->slock); */
- mxser_stoprx(tty);
- /* MX_UNLOCK(&info->slock); */
+ spin_lock_irqsave(&info->slock, flags);
+ if (info->IER & UART_IER_THRI) {
+ info->IER &= ~UART_IER_THRI;
+ outb(info->IER, info->ioaddr + UART_IER);
+ }
+ spin_unlock_irqrestore(&info->slock, flags);
}
-static void mxser_unthrottle(struct tty_struct *tty)
+static void mxser_start(struct tty_struct *tty)
{
- /* struct mxser_struct *info = tty->driver_data; */
- /* unsigned long flags; */
+ struct mxser_port *info = tty->driver_data;
+ unsigned long flags;
- /* MX_LOCK(&info->slock); */
- mxser_startrx(tty);
- /* MX_UNLOCK(&info->slock); */
+ spin_lock_irqsave(&info->slock, flags);
+ if (info->xmit_cnt && info->xmit_buf) {
+ outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
+ info->IER |= UART_IER_THRI;
+ outb(info->IER, info->ioaddr + UART_IER);
+ }
+ spin_unlock_irqrestore(&info->slock, flags);
}
static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
unsigned long flags;
+ spin_lock_irqsave(&info->slock, flags);
mxser_change_speed(info, old_termios);
+ spin_unlock_irqrestore(&info->slock, flags);
if ((old_termios->c_cflag & CRTSCTS) &&
!(tty->termios->c_cflag & CRTSCTS)) {
@@ -1735,61 +1918,27 @@ static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termi
mxser_start(tty);
}
-/* Handle sw stopped */
+ /* Handle sw stopped */
if ((old_termios->c_iflag & IXON) &&
!(tty->termios->c_iflag & IXON)) {
tty->stopped = 0;
- /* following add by Victor Yu. 09-02-2002 */
- if (info->IsMoxaMustChipFlag) {
+ if (info->board->chip_flag) {
spin_lock_irqsave(&info->slock, flags);
- DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->base);
+ DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
spin_unlock_irqrestore(&info->slock, flags);
}
- /* above add by Victor Yu. 09-02-2002 */
mxser_start(tty);
}
}
/*
- * mxser_stop() and mxser_start()
- *
- * This routines are called before setting or resetting tty->stopped.
- * They enable or disable transmitter interrupts, as necessary.
- */
-static void mxser_stop(struct tty_struct *tty)
-{
- struct mxser_struct *info = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
- if (info->IER & UART_IER_THRI) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- }
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-static void mxser_start(struct tty_struct *tty)
-{
- struct mxser_struct *info = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
- if (info->xmit_cnt && info->xmit_buf && !(info->IER & UART_IER_THRI)) {
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- }
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-/*
* mxser_wait_until_sent() --- wait until the transmitter is empty
*/
static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
unsigned long orig_jiffies, char_time;
int lsr;
@@ -1830,7 +1979,7 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
timeout, char_time);
printk("jiff=%lu...", jiffies);
#endif
- while (!((lsr = inb(info->base + UART_LSR)) & UART_LSR_TEMT)) {
+ while (!((lsr = inb(info->ioaddr + UART_LSR)) & UART_LSR_TEMT)) {
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("lsr = %d (jiff=%lu)...", lsr, jiffies);
#endif
@@ -1847,13 +1996,12 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
#endif
}
-
/*
* This routine is called by tty_hangup() when a hangup is signaled.
*/
-void mxser_hangup(struct tty_struct *tty)
+static void mxser_hangup(struct tty_struct *tty)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
mxser_flush_buffer(tty);
mxser_shutdown(info);
@@ -1864,231 +2012,73 @@ void mxser_hangup(struct tty_struct *tty)
wake_up_interruptible(&info->open_wait);
}
-
-/* added by James 03-12-2004. */
/*
* mxser_rs_break() --- routine which turns the break handling on or off
*/
static void mxser_rs_break(struct tty_struct *tty, int break_state)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
if (break_state == -1)
- outb(inb(info->base + UART_LCR) | UART_LCR_SBC,
- info->base + UART_LCR);
+ outb(inb(info->ioaddr + UART_LCR) | UART_LCR_SBC,
+ info->ioaddr + UART_LCR);
else
- outb(inb(info->base + UART_LCR) & ~UART_LCR_SBC,
- info->base + UART_LCR);
+ outb(inb(info->ioaddr + UART_LCR) & ~UART_LCR_SBC,
+ info->ioaddr + UART_LCR);
spin_unlock_irqrestore(&info->slock, flags);
}
-/* (above) added by James. */
-
-
-/*
- * This is the serial driver's generic interrupt routine
- */
-static irqreturn_t mxser_interrupt(int irq, void *dev_id)
+static void mxser_receive_chars(struct mxser_port *port, int *status)
{
- int status, iir, i;
- struct mxser_struct *info;
- struct mxser_struct *port;
- int max, irqbits, bits, msr;
- int pass_counter = 0;
- int handled = IRQ_NONE;
-
- port = NULL;
- /* spin_lock(&gm_lock); */
-
- for (i = 0; i < MXSER_BOARDS; i++) {
- if (dev_id == &(mxvar_table[i * MXSER_PORTS_PER_BOARD])) {
- port = dev_id;
- break;
- }
- }
-
- if (i == MXSER_BOARDS)
- goto irq_stop;
- if (port == 0)
- goto irq_stop;
- max = mxser_numports[mxsercfg[i].board_type - 1];
- while (1) {
- irqbits = inb(port->vector) & port->vectormask;
- if (irqbits == port->vectormask)
- break;
-
- handled = IRQ_HANDLED;
- for (i = 0, bits = 1; i < max; i++, irqbits |= bits, bits <<= 1) {
- if (irqbits == port->vectormask)
- break;
- if (bits & irqbits)
- continue;
- info = port + i;
-
- /* following add by Victor Yu. 09-13-2002 */
- iir = inb(info->base + UART_IIR);
- if (iir & UART_IIR_NO_INT)
- continue;
- iir &= MOXA_MUST_IIR_MASK;
- if (!info->tty) {
- status = inb(info->base + UART_LSR);
- outb(0x27, info->base + UART_FCR);
- inb(info->base + UART_MSR);
- continue;
- }
-
- /* mask by Victor Yu. 09-13-2002
- if ( !info->tty ||
- (inb(info->base + UART_IIR) & UART_IIR_NO_INT) )
- continue;
- */
- /* mask by Victor Yu. 09-02-2002
- status = inb(info->base + UART_LSR) & info->read_status_mask;
- */
-
- /* following add by Victor Yu. 09-02-2002 */
- status = inb(info->base + UART_LSR);
-
- if (status & UART_LSR_PE)
- info->err_shadow |= NPPI_NOTIFY_PARITY;
- if (status & UART_LSR_FE)
- info->err_shadow |= NPPI_NOTIFY_FRAMING;
- if (status & UART_LSR_OE)
- info->err_shadow |= NPPI_NOTIFY_HW_OVERRUN;
- if (status & UART_LSR_BI)
- info->err_shadow |= NPPI_NOTIFY_BREAK;
-
- if (info->IsMoxaMustChipFlag) {
- /*
- if ( (status & 0x02) && !(status & 0x01) ) {
- outb(info->base+UART_FCR, 0x23);
- continue;
- }
- */
- if (iir == MOXA_MUST_IIR_GDA ||
- iir == MOXA_MUST_IIR_RDA ||
- iir == MOXA_MUST_IIR_RTO ||
- iir == MOXA_MUST_IIR_LSR)
- mxser_receive_chars(info, &status);
-
- } else {
- /* above add by Victor Yu. 09-02-2002 */
-
- status &= info->read_status_mask;
- if (status & UART_LSR_DR)
- mxser_receive_chars(info, &status);
- }
- msr = inb(info->base + UART_MSR);
- if (msr & UART_MSR_ANY_DELTA) {
- mxser_check_modem_status(info, msr);
- }
- /* following add by Victor Yu. 09-13-2002 */
- if (info->IsMoxaMustChipFlag) {
- if ((iir == 0x02) && (status & UART_LSR_THRE)) {
- mxser_transmit_chars(info);
- }
- } else {
- /* above add by Victor Yu. 09-13-2002 */
-
- if (status & UART_LSR_THRE) {
-/* 8-2-99 by William
- if ( info->x_char || (info->xmit_cnt > 0) )
-*/
- mxser_transmit_chars(info);
- }
- }
- }
- if (pass_counter++ > MXSER_ISR_PASS_LIMIT) {
- break; /* Prevent infinite loops */
- }
- }
-
- irq_stop:
- /* spin_unlock(&gm_lock); */
- return handled;
-}
-
-static void mxser_receive_chars(struct mxser_struct *info, int *status)
-{
- struct tty_struct *tty = info->tty;
+ struct tty_struct *tty = port->tty;
unsigned char ch, gdl;
int ignored = 0;
int cnt = 0;
int recv_room;
int max = 256;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
recv_room = tty->receive_room;
- if ((recv_room == 0) && (!info->ldisc_stop_rx)) {
- /* mxser_throttle(tty); */
+ if ((recv_room == 0) && (!port->ldisc_stop_rx))
mxser_stoprx(tty);
- /* return; */
- }
- /* following add by Victor Yu. 09-02-2002 */
- if (info->IsMoxaMustChipFlag != MOXA_OTHER_UART) {
+ if (port->board->chip_flag != MOXA_OTHER_UART) {
- if (*status & UART_LSR_SPECIAL) {
+ if (*status & UART_LSR_SPECIAL)
goto intr_old;
- }
- /* following add by Victor Yu. 02-11-2004 */
- if (info->IsMoxaMustChipFlag == MOXA_MUST_MU860_HWID &&
+ if (port->board->chip_flag == MOXA_MUST_MU860_HWID &&
(*status & MOXA_MUST_LSR_RERR))
goto intr_old;
- /* above add by Victor Yu. 02-14-2004 */
if (*status & MOXA_MUST_LSR_RERR)
goto intr_old;
- gdl = inb(info->base + MOXA_MUST_GDL_REGISTER);
+ gdl = inb(port->ioaddr + MOXA_MUST_GDL_REGISTER);
- /* add by Victor Yu. 02-11-2004 */
- if (info->IsMoxaMustChipFlag == MOXA_MUST_MU150_HWID)
+ if (port->board->chip_flag == MOXA_MUST_MU150_HWID)
gdl &= MOXA_MUST_GDL_MASK;
if (gdl >= recv_room) {
- if (!info->ldisc_stop_rx) {
- /* mxser_throttle(tty); */
+ if (!port->ldisc_stop_rx)
mxser_stoprx(tty);
- }
- /* return; */
}
while (gdl--) {
- ch = inb(info->base + UART_RX);
+ ch = inb(port->ioaddr + UART_RX);
tty_insert_flip_char(tty, ch, 0);
cnt++;
- /*
- if ((cnt >= HI_WATER) && (info->stop_rx == 0)) {
- mxser_stoprx(tty);
- info->stop_rx = 1;
- break;
- } */
}
goto end_intr;
}
- intr_old:
- /* above add by Victor Yu. 09-02-2002 */
+intr_old:
do {
if (max-- < 0)
break;
- /*
- if ((cnt >= HI_WATER) && (info->stop_rx == 0)) {
- mxser_stoprx(tty);
- info->stop_rx=1;
- break;
- }
- */
- ch = inb(info->base + UART_RX);
- /* following add by Victor Yu. 09-02-2002 */
- if (info->IsMoxaMustChipFlag && (*status & UART_LSR_OE) /*&& !(*status&UART_LSR_DR) */ )
- outb(0x23, info->base + UART_FCR);
- *status &= info->read_status_mask;
- /* above add by Victor Yu. 09-02-2002 */
- if (*status & info->ignore_status_mask) {
+ ch = inb(port->ioaddr + UART_RX);
+ if (port->board->chip_flag && (*status & UART_LSR_OE))
+ outb(0x23, port->ioaddr + UART_FCR);
+ *status &= port->read_status_mask;
+ if (*status & port->ignore_status_mask) {
if (++ignored > 100)
break;
} else {
@@ -2096,1038 +2086,652 @@ static void mxser_receive_chars(struct mxser_struct *info, int *status)
if (*status & UART_LSR_SPECIAL) {
if (*status & UART_LSR_BI) {
flag = TTY_BREAK;
-/* added by casper 1/11/2000 */
- info->icount.brk++;
-/* */
- if (info->flags & ASYNC_SAK)
+ port->icount.brk++;
+
+ if (port->flags & ASYNC_SAK)
do_SAK(tty);
} else if (*status & UART_LSR_PE) {
flag = TTY_PARITY;
-/* added by casper 1/11/2000 */
- info->icount.parity++;
-/* */
+ port->icount.parity++;
} else if (*status & UART_LSR_FE) {
flag = TTY_FRAME;
-/* added by casper 1/11/2000 */
- info->icount.frame++;
-/* */
+ port->icount.frame++;
} else if (*status & UART_LSR_OE) {
flag = TTY_OVERRUN;
-/* added by casper 1/11/2000 */
- info->icount.overrun++;
-/* */
- }
+ port->icount.overrun++;
+ } else
+ flag = TTY_BREAK;
}
tty_insert_flip_char(tty, ch, flag);
cnt++;
if (cnt >= recv_room) {
- if (!info->ldisc_stop_rx) {
- /* mxser_throttle(tty); */
+ if (!port->ldisc_stop_rx)
mxser_stoprx(tty);
- }
break;
}
}
- /* following add by Victor Yu. 09-02-2002 */
- if (info->IsMoxaMustChipFlag)
+ if (port->board->chip_flag)
break;
- /* above add by Victor Yu. 09-02-2002 */
- /* mask by Victor Yu. 09-02-2002
- *status = inb(info->base + UART_LSR) & info->read_status_mask;
- */
- /* following add by Victor Yu. 09-02-2002 */
- *status = inb(info->base + UART_LSR);
- /* above add by Victor Yu. 09-02-2002 */
+ *status = inb(port->ioaddr + UART_LSR);
} while (*status & UART_LSR_DR);
-end_intr: /* add by Victor Yu. 09-02-2002 */
- mxvar_log.rxcnt[info->port] += cnt;
- info->mon_data.rxcnt += cnt;
- info->mon_data.up_rxcnt += cnt;
- spin_unlock_irqrestore(&info->slock, flags);
+end_intr:
+ mxvar_log.rxcnt[port->tty->index] += cnt;
+ port->mon_data.rxcnt += cnt;
+ port->mon_data.up_rxcnt += cnt;
+ /*
+ * We are called from an interrupt context with &port->slock
+ * being held. Drop it temporarily in order to prevent
+ * recursive locking.
+ */
+ spin_unlock(&port->slock);
tty_flip_buffer_push(tty);
+ spin_lock(&port->slock);
}
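/*
 * Illustrative sketch (not part of the patch): the unlock/push/relock
 * pattern used at the end of mxser_receive_chars() above.  The names here
 * are generic placeholders, not mxser symbols.
 */
static void push_flip_buffer_locked(spinlock_t *lock, struct tty_struct *tty)
{
	/* caller holds *lock, taken with spin_lock() in the ISR */
	spin_unlock(lock);		/* tty_flip_buffer_push() may take  */
	tty_flip_buffer_push(tty);	/* tty-layer locks of its own       */
	spin_lock(lock);		/* restore the caller's invariant   */
}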
-static void mxser_transmit_chars(struct mxser_struct *info)
+static void mxser_transmit_chars(struct mxser_port *port)
{
int count, cnt;
- unsigned long flags;
- spin_lock_irqsave(&info->slock, flags);
-
- if (info->x_char) {
- outb(info->x_char, info->base + UART_TX);
- info->x_char = 0;
- mxvar_log.txcnt[info->port]++;
- info->mon_data.txcnt++;
- info->mon_data.up_txcnt++;
-
-/* added by casper 1/11/2000 */
- info->icount.tx++;
-/* */
- spin_unlock_irqrestore(&info->slock, flags);
+ if (port->x_char) {
+ outb(port->x_char, port->ioaddr + UART_TX);
+ port->x_char = 0;
+ mxvar_log.txcnt[port->tty->index]++;
+ port->mon_data.txcnt++;
+ port->mon_data.up_txcnt++;
+ port->icount.tx++;
return;
}
- if (info->xmit_buf == 0) {
- spin_unlock_irqrestore(&info->slock, flags);
+ if (port->xmit_buf == NULL)
return;
- }
- if ((info->xmit_cnt <= 0) || info->tty->stopped ||
- (info->tty->hw_stopped &&
- (info->type != PORT_16550A) &&
- (!info->IsMoxaMustChipFlag))) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- spin_unlock_irqrestore(&info->slock, flags);
+ if ((port->xmit_cnt <= 0) || port->tty->stopped ||
+ (port->tty->hw_stopped &&
+ (port->type != PORT_16550A) &&
+ (!port->board->chip_flag))) {
+ port->IER &= ~UART_IER_THRI;
+ outb(port->IER, port->ioaddr + UART_IER);
return;
}
- cnt = info->xmit_cnt;
- count = info->xmit_fifo_size;
+ cnt = port->xmit_cnt;
+ count = port->xmit_fifo_size;
do {
- outb(info->xmit_buf[info->xmit_tail++],
- info->base + UART_TX);
- info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE - 1);
- if (--info->xmit_cnt <= 0)
+ outb(port->xmit_buf[port->xmit_tail++],
+ port->ioaddr + UART_TX);
+ port->xmit_tail = port->xmit_tail & (SERIAL_XMIT_SIZE - 1);
+ if (--port->xmit_cnt <= 0)
break;
} while (--count > 0);
- mxvar_log.txcnt[info->port] += (cnt - info->xmit_cnt);
+ mxvar_log.txcnt[port->tty->index] += (cnt - port->xmit_cnt);
-/* added by James 03-12-2004. */
- info->mon_data.txcnt += (cnt - info->xmit_cnt);
- info->mon_data.up_txcnt += (cnt - info->xmit_cnt);
-/* (above) added by James. */
+ port->mon_data.txcnt += (cnt - port->xmit_cnt);
+ port->mon_data.up_txcnt += (cnt - port->xmit_cnt);
+ port->icount.tx += (cnt - port->xmit_cnt);
-/* added by casper 1/11/2000 */
- info->icount.tx += (cnt - info->xmit_cnt);
-/* */
+ if (port->xmit_cnt < WAKEUP_CHARS)
+ tty_wakeup(port->tty);
- if (info->xmit_cnt < WAKEUP_CHARS) {
- set_bit(MXSER_EVENT_TXLOW, &info->event);
- schedule_work(&info->tqueue);
+ if (port->xmit_cnt <= 0) {
+ port->IER &= ~UART_IER_THRI;
+ outb(port->IER, port->ioaddr + UART_IER);
}
- if (info->xmit_cnt <= 0) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- }
- spin_unlock_irqrestore(&info->slock, flags);
}
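/*
 * Side note (illustrative, not part of the patch): the transmit ring above
 * relies on SERIAL_XMIT_SIZE being a power of two, so wrapping the tail
 * index is a single mask instead of a modulo:
 *
 *	tail = (tail + 1) & (SERIAL_XMIT_SIZE - 1);
 *
 * which equals (tail + 1) % SERIAL_XMIT_SIZE whenever the size is 2^n.
 */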
-static void mxser_check_modem_status(struct mxser_struct *info, int status)
+/*
+ * This is the serial driver's generic interrupt routine
+ */
+static irqreturn_t mxser_interrupt(int irq, void *dev_id)
{
- /* update input line counters */
- if (status & UART_MSR_TERI)
- info->icount.rng++;
- if (status & UART_MSR_DDSR)
- info->icount.dsr++;
- if (status & UART_MSR_DDCD)
- info->icount.dcd++;
- if (status & UART_MSR_DCTS)
- info->icount.cts++;
- info->mon_data.modem_status = status;
- wake_up_interruptible(&info->delta_msr_wait);
-
- if ((info->flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) {
- if (status & UART_MSR_DCD)
- wake_up_interruptible(&info->open_wait);
- schedule_work(&info->tqueue);
- }
-
- if (info->flags & ASYNC_CTS_FLOW) {
- if (info->tty->hw_stopped) {
- if (status & UART_MSR_CTS) {
- info->tty->hw_stopped = 0;
+ int status, iir, i;
+ struct mxser_board *brd = NULL;
+ struct mxser_port *port;
+ int max, irqbits, bits, msr;
+ unsigned int int_cnt, pass_counter = 0;
+ int handled = IRQ_NONE;
- if ((info->type != PORT_16550A) &&
- (!info->IsMoxaMustChipFlag)) {
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- }
- set_bit(MXSER_EVENT_TXLOW, &info->event);
- schedule_work(&info->tqueue); }
- } else {
- if (!(status & UART_MSR_CTS)) {
- info->tty->hw_stopped = 1;
- if ((info->type != PORT_16550A) &&
- (!info->IsMoxaMustChipFlag)) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- }
- }
+ for (i = 0; i < MXSER_BOARDS; i++)
+ if (dev_id == &mxser_boards[i]) {
+ brd = dev_id;
+ break;
}
- }
-}
-static int mxser_block_til_ready(struct tty_struct *tty, struct file *filp, struct mxser_struct *info)
-{
- DECLARE_WAITQUEUE(wait, current);
- int retval;
- int do_clocal = 0;
- unsigned long flags;
-
- /*
- * If non-blocking mode is set, or the port is not enabled,
- * then make the check up front and then exit.
- */
- if ((filp->f_flags & O_NONBLOCK) || (tty->flags & (1 << TTY_IO_ERROR))) {
- info->flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
- }
+ if (i == MXSER_BOARDS)
+ goto irq_stop;
+ if (brd == NULL)
+ goto irq_stop;
+ max = brd->info->nports;
+ while (pass_counter++ < MXSER_ISR_PASS_LIMIT) {
+ irqbits = inb(brd->vector) & brd->vector_mask;
+ if (irqbits == brd->vector_mask)
+ break;
- if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
+ handled = IRQ_HANDLED;
+ for (i = 0, bits = 1; i < max; i++, irqbits |= bits, bits <<= 1) {
+ if (irqbits == brd->vector_mask)
+ break;
+ if (bits & irqbits)
+ continue;
+ port = &brd->ports[i];
+
+ int_cnt = 0;
+ spin_lock(&port->slock);
+ do {
+ iir = inb(port->ioaddr + UART_IIR);
+ if (iir & UART_IIR_NO_INT)
+ break;
+ iir &= MOXA_MUST_IIR_MASK;
+ if (!port->tty ||
+ (port->flags & ASYNC_CLOSING) ||
+ !(port->flags &
+ ASYNC_INITIALIZED)) {
+ status = inb(port->ioaddr + UART_LSR);
+ outb(0x27, port->ioaddr + UART_FCR);
+ inb(port->ioaddr + UART_MSR);
+ break;
+ }
- /*
- * Block waiting for the carrier detect and the line to become
- * free (i.e., not in use by the callout). While we are in
- * this loop, info->count is dropped by one, so that
- * mxser_close() knows when to free things. We restore it upon
- * exit, either normal or abnormal.
- */
- retval = 0;
- add_wait_queue(&info->open_wait, &wait);
+ status = inb(port->ioaddr + UART_LSR);
+
+ if (status & UART_LSR_PE)
+ port->err_shadow |= NPPI_NOTIFY_PARITY;
+ if (status & UART_LSR_FE)
+ port->err_shadow |= NPPI_NOTIFY_FRAMING;
+ if (status & UART_LSR_OE)
+ port->err_shadow |=
+ NPPI_NOTIFY_HW_OVERRUN;
+ if (status & UART_LSR_BI)
+ port->err_shadow |= NPPI_NOTIFY_BREAK;
+
+ if (port->board->chip_flag) {
+ if (iir == MOXA_MUST_IIR_GDA ||
+ iir == MOXA_MUST_IIR_RDA ||
+ iir == MOXA_MUST_IIR_RTO ||
+ iir == MOXA_MUST_IIR_LSR)
+ mxser_receive_chars(port,
+ &status);
- spin_lock_irqsave(&info->slock, flags);
- if (!tty_hung_up_p(filp))
- info->count--;
- spin_unlock_irqrestore(&info->slock, flags);
- info->blocked_open++;
- while (1) {
- spin_lock_irqsave(&info->slock, flags);
- outb(inb(info->base + UART_MCR) |
- UART_MCR_DTR | UART_MCR_RTS, info->base + UART_MCR);
- spin_unlock_irqrestore(&info->slock, flags);
- set_current_state(TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)) {
- if (info->flags & ASYNC_HUP_NOTIFY)
- retval = -EAGAIN;
- else
- retval = -ERESTARTSYS;
- break;
- }
- if (!(info->flags & ASYNC_CLOSING) &&
- (do_clocal ||
- (inb(info->base + UART_MSR) & UART_MSR_DCD)))
- break;
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
+ } else {
+ status &= port->read_status_mask;
+ if (status & UART_LSR_DR)
+ mxser_receive_chars(port,
+ &status);
+ }
+ msr = inb(port->ioaddr + UART_MSR);
+ if (msr & UART_MSR_ANY_DELTA)
+ mxser_check_modem_status(port, msr);
+
+ if (port->board->chip_flag) {
+ if (iir == 0x02 && (status &
+ UART_LSR_THRE))
+ mxser_transmit_chars(port);
+ } else {
+ if (status & UART_LSR_THRE)
+ mxser_transmit_chars(port);
+ }
+ } while (int_cnt++ < MXSER_ISR_PASS_LIMIT);
+ spin_unlock(&port->slock);
}
- schedule();
}
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&info->open_wait, &wait);
- if (!tty_hung_up_p(filp))
- info->count++;
- info->blocked_open--;
- if (retval)
- return retval;
- info->flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
-}
-static int mxser_startup(struct mxser_struct *info)
-{
- unsigned long page;
- unsigned long flags;
-
- page = __get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- spin_lock_irqsave(&info->slock, flags);
-
- if (info->flags & ASYNC_INITIALIZED) {
- free_page(page);
- spin_unlock_irqrestore(&info->slock, flags);
- return 0;
- }
+irq_stop:
+ return handled;
+}
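/*
 * Illustrative sketch (not part of the patch): the dispatch loop of
 * mxser_interrupt() above, in simplified form.  A cleared bit in the
 * board's vector register means the corresponding port has a pending
 * interrupt; all bits set (== vector_mask) means nothing is left to
 * service.  service_port() is a hypothetical placeholder.
 *
 *	while ((irqbits = inb(vector) & vector_mask) != vector_mask)
 *		for (i = 0; i < nports; i++)
 *			if (!(irqbits & (1 << i)))
 *				service_port(i);
 */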
- if (!info->base || !info->type) {
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
- free_page(page);
- spin_unlock_irqrestore(&info->slock, flags);
- return 0;
- }
- if (info->xmit_buf)
- free_page(page);
- else
- info->xmit_buf = (unsigned char *) page;
+static const struct tty_operations mxser_ops = {
+ .open = mxser_open,
+ .close = mxser_close,
+ .write = mxser_write,
+ .put_char = mxser_put_char,
+ .flush_chars = mxser_flush_chars,
+ .write_room = mxser_write_room,
+ .chars_in_buffer = mxser_chars_in_buffer,
+ .flush_buffer = mxser_flush_buffer,
+ .ioctl = mxser_ioctl,
+ .throttle = mxser_throttle,
+ .unthrottle = mxser_unthrottle,
+ .set_termios = mxser_set_termios,
+ .stop = mxser_stop,
+ .start = mxser_start,
+ .hangup = mxser_hangup,
+ .break_ctl = mxser_rs_break,
+ .wait_until_sent = mxser_wait_until_sent,
+ .tiocmget = mxser_tiocmget,
+ .tiocmset = mxser_tiocmset,
+};
- /*
- * Clear the FIFO buffers and disable them
- * (they will be reenabled in mxser_change_speed())
- */
- if (info->IsMoxaMustChipFlag)
- outb((UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT |
- MOXA_MUST_FCR_GDA_MODE_ENABLE), info->base + UART_FCR);
- else
- outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
- info->base + UART_FCR);
+/*
+ * The MOXA Smartio/Industio serial driver boot-time initialization code!
+ */
- /*
- * At this point there's no way the LSR could still be 0xFF;
- * if it is, then bail out, because there's likely no UART
- * here.
- */
- if (inb(info->base + UART_LSR) == 0xff) {
- spin_unlock_irqrestore(&info->slock, flags);
- if (capable(CAP_SYS_ADMIN)) {
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
- return 0;
- } else
- return -ENODEV;
+static void mxser_release_res(struct mxser_board *brd, struct pci_dev *pdev,
+ unsigned int irq)
+{
+ if (irq)
+ free_irq(brd->irq, brd);
+ if (pdev != NULL) { /* PCI */
+#ifdef CONFIG_PCI
+ pci_release_region(pdev, 2);
+ pci_release_region(pdev, 3);
+#endif
+ } else {
+ release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
+ release_region(brd->vector, 1);
}
-
- /*
- * Clear the interrupt registers.
- */
- (void) inb(info->base + UART_LSR);
- (void) inb(info->base + UART_RX);
- (void) inb(info->base + UART_IIR);
- (void) inb(info->base + UART_MSR);
-
- /*
- * Now, initialize the UART
- */
- outb(UART_LCR_WLEN8, info->base + UART_LCR); /* reset DLAB */
- info->MCR = UART_MCR_DTR | UART_MCR_RTS;
- outb(info->MCR, info->base + UART_MCR);
-
- /*
- * Finally, enable interrupts
- */
- info->IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI;
- /* info->IER = UART_IER_RLSI | UART_IER_RDI; */
-
- /* following add by Victor Yu. 08-30-2002 */
- if (info->IsMoxaMustChipFlag)
- info->IER |= MOXA_MUST_IER_EGDAI;
- /* above add by Victor Yu. 08-30-2002 */
- outb(info->IER, info->base + UART_IER); /* enable interrupts */
-
- /*
- * And clear the interrupt registers again for luck.
- */
- (void) inb(info->base + UART_LSR);
- (void) inb(info->base + UART_RX);
- (void) inb(info->base + UART_IIR);
- (void) inb(info->base + UART_MSR);
-
- if (info->tty)
- clear_bit(TTY_IO_ERROR, &info->tty->flags);
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
-
- /*
- * and set the speed of the serial port
- */
- spin_unlock_irqrestore(&info->slock, flags);
- mxser_change_speed(info, NULL);
-
- info->flags |= ASYNC_INITIALIZED;
- return 0;
}
-/*
- * This routine will shutdown a serial port; interrupts maybe disabled, and
- * DTR is dropped if the hangup on close termio flag is on.
- */
-static void mxser_shutdown(struct mxser_struct *info)
+static int __devinit mxser_initbrd(struct mxser_board *brd,
+ struct pci_dev *pdev)
{
- unsigned long flags;
-
- if (!(info->flags & ASYNC_INITIALIZED))
- return;
-
- spin_lock_irqsave(&info->slock, flags);
-
- /*
- * clear delta_msr_wait queue to avoid mem leaks: we may free the irq
- * here so the queue might never be waken up
- */
- wake_up_interruptible(&info->delta_msr_wait);
-
- /*
- * Free the IRQ, if necessary
- */
- if (info->xmit_buf) {
- free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = NULL;
- }
+ struct mxser_port *info;
+ unsigned int i;
+ int retval;
- info->IER = 0;
- outb(0x00, info->base + UART_IER);
+ printk(KERN_INFO "max. baud rate = %d bps.\n", brd->ports[0].max_baud);
- if (!info->tty || (info->tty->termios->c_cflag & HUPCL))
- info->MCR &= ~(UART_MCR_DTR | UART_MCR_RTS);
- outb(info->MCR, info->base + UART_MCR);
+ for (i = 0; i < brd->info->nports; i++) {
+ info = &brd->ports[i];
+ info->board = brd;
+ info->stop_rx = 0;
+ info->ldisc_stop_rx = 0;
- /* clear Rx/Tx FIFO's */
- /* following add by Victor Yu. 08-30-2002 */
- if (info->IsMoxaMustChipFlag)
- outb((UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT |
- MOXA_MUST_FCR_GDA_MODE_ENABLE), info->base + UART_FCR);
- else
- /* above add by Victor Yu. 08-30-2002 */
- outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
- info->base + UART_FCR);
+ /* Enhanced mode is enabled here */
+ if (brd->chip_flag != MOXA_OTHER_UART)
+ ENABLE_MOXA_MUST_ENCHANCE_MODE(info->ioaddr);
- /* read data port to reset things */
- (void) inb(info->base + UART_RX);
+ info->flags = ASYNC_SHARE_IRQ;
+ info->type = brd->uart_type;
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
+ process_txrx_fifo(info);
- info->flags &= ~ASYNC_INITIALIZED;
+ info->custom_divisor = info->baud_base * 16;
+ info->close_delay = 5 * HZ / 10;
+ info->closing_wait = 30 * HZ;
+ info->normal_termios = mxvar_sdriver->init_termios;
+ init_waitqueue_head(&info->open_wait);
+ init_waitqueue_head(&info->delta_msr_wait);
+ memset(&info->mon_data, 0, sizeof(struct mxser_mon));
+ info->err_shadow = 0;
+ spin_lock_init(&info->slock);
- /* following add by Victor Yu. 09-23-2002 */
- if (info->IsMoxaMustChipFlag)
- SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(info->base);
- /* above add by Victor Yu. 09-23-2002 */
+ /* disable all interrupts before the ISR is installed */
+ outb(inb(info->ioaddr + UART_IER) & 0xf0,
+ info->ioaddr + UART_IER);
+ }
- spin_unlock_irqrestore(&info->slock, flags);
+ retval = request_irq(brd->irq, mxser_interrupt, IRQF_SHARED, "mxser",
+ brd);
+ if (retval) {
+ printk(KERN_ERR "Board %s: Request irq failed, IRQ (%d) may "
+ "conflict with another device.\n",
+ brd->info->name, brd->irq);
+ /* We hold resources, we need to release them. */
+ mxser_release_res(brd, pdev, 0);
+ }
+ return retval;
}
-/*
- * This routine is called to set the UART divisor registers to match
- * the specified baud rate for a serial port.
- */
-static int mxser_change_speed(struct mxser_struct *info, struct ktermios *old_termios)
+static int __init mxser_get_ISA_conf(int cap, struct mxser_board *brd)
{
- unsigned cflag, cval, fcr;
- int ret = 0;
- unsigned char status;
- long baud;
- unsigned long flags;
-
- if (!info->tty || !info->tty->termios)
- return ret;
- cflag = info->tty->termios->c_cflag;
- if (!(info->base))
- return ret;
+ int id, i, bits;
+ unsigned short regs[16], irq;
+ unsigned char scratch, scratch2;
-#ifndef B921600
-#define B921600 (B460800 +1)
-#endif
- if (mxser_set_baud_method[info->port] == 0) {
- baud = tty_get_baud_rate(info->tty);
- mxser_set_baud(info, baud);
- }
+ brd->chip_flag = MOXA_OTHER_UART;
- /* byte size and parity */
- switch (cflag & CSIZE) {
- case CS5:
- cval = 0x00;
+ id = mxser_read_register(cap, regs);
+ switch (id) {
+ case C168_ASIC_ID:
+ brd->info = &mxser_cards[0];
break;
- case CS6:
- cval = 0x01;
+ case C104_ASIC_ID:
+ brd->info = &mxser_cards[1];
break;
- case CS7:
- cval = 0x02;
+ case CI104J_ASIC_ID:
+ brd->info = &mxser_cards[2];
break;
- case CS8:
- cval = 0x03;
+ case C102_ASIC_ID:
+ brd->info = &mxser_cards[5];
+ break;
+ case CI132_ASIC_ID:
+ brd->info = &mxser_cards[6];
+ break;
+ case CI134_ASIC_ID:
+ brd->info = &mxser_cards[7];
break;
default:
- cval = 0x00;
- break; /* too keep GCC shut... */
+ return 0;
}
- if (cflag & CSTOPB)
- cval |= 0x04;
- if (cflag & PARENB)
- cval |= UART_LCR_PARITY;
- if (!(cflag & PARODD))
- cval |= UART_LCR_EPAR;
- if (cflag & CMSPAR)
- cval |= UART_LCR_SPAR;
- if ((info->type == PORT_8250) || (info->type == PORT_16450)) {
- if (info->IsMoxaMustChipFlag) {
- fcr = UART_FCR_ENABLE_FIFO;
- fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
- SET_MOXA_MUST_FIFO_VALUE(info);
- } else
- fcr = 0;
- } else {
- fcr = UART_FCR_ENABLE_FIFO;
- /* following add by Victor Yu. 08-30-2002 */
- if (info->IsMoxaMustChipFlag) {
- fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
- SET_MOXA_MUST_FIFO_VALUE(info);
- } else {
- /* above add by Victor Yu. 08-30-2002 */
- switch (info->rx_trigger) {
- case 1:
- fcr |= UART_FCR_TRIGGER_1;
- break;
- case 4:
- fcr |= UART_FCR_TRIGGER_4;
- break;
- case 8:
- fcr |= UART_FCR_TRIGGER_8;
- break;
- default:
- fcr |= UART_FCR_TRIGGER_14;
- break;
- }
- }
+ irq = 0;
+ /* some ISA cards have 2 ports, but we want to see them as 4-port (why?)
+ Flag-hack checks if configuration should be read as 2-port here. */
+ if (brd->info->nports == 2 || (brd->info->flags & MXSER_HAS2)) {
+ irq = regs[9] & 0xF000;
+ irq = irq | (irq >> 4);
+ if (irq != (regs[9] & 0xFF00))
+ return MXSER_ERR_IRQ_CONFLIT;
+ } else if (brd->info->nports == 4) {
+ irq = regs[9] & 0xF000;
+ irq = irq | (irq >> 4);
+ irq = irq | (irq >> 8);
+ if (irq != regs[9])
+ return MXSER_ERR_IRQ_CONFLIT;
+ } else if (brd->info->nports == 8) {
+ irq = regs[9] & 0xF000;
+ irq = irq | (irq >> 4);
+ irq = irq | (irq >> 8);
+ if ((irq != regs[9]) || (irq != regs[10]))
+ return MXSER_ERR_IRQ_CONFLIT;
}
- /* CTS flow control flag and modem status interrupts */
- info->IER &= ~UART_IER_MSI;
- info->MCR &= ~UART_MCR_AFE;
- if (cflag & CRTSCTS) {
- info->flags |= ASYNC_CTS_FLOW;
- info->IER |= UART_IER_MSI;
- if ((info->type == PORT_16550A) || (info->IsMoxaMustChipFlag)) {
- info->MCR |= UART_MCR_AFE;
+ if (!irq)
+ return MXSER_ERR_IRQ;
+ brd->irq = ((int)(irq & 0xF000) >> 12);
+ for (i = 0; i < 8; i++)
+ brd->ports[i].ioaddr = (int) regs[i + 1] & 0xFFF8;
+ if ((regs[12] & 0x80) == 0)
+ return MXSER_ERR_VECTOR;
+ brd->vector = (int)regs[11]; /* interrupt vector */
+ if (id == 1)
+ brd->vector_mask = 0x00FF;
+ else
+ brd->vector_mask = 0x000F;
+ for (i = 7, bits = 0x0100; i >= 0; i--, bits <<= 1) {
+ if (regs[12] & bits) {
+ brd->ports[i].baud_base = 921600;
+ brd->ports[i].max_baud = 921600;
} else {
- status = inb(info->base + UART_MSR);
- if (info->tty->hw_stopped) {
- if (status & UART_MSR_CTS) {
- info->tty->hw_stopped = 0;
- if ((info->type != PORT_16550A) &&
- (!info->IsMoxaMustChipFlag)) {
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- }
- set_bit(MXSER_EVENT_TXLOW, &info->event);
- schedule_work(&info->tqueue); }
- } else {
- if (!(status & UART_MSR_CTS)) {
- info->tty->hw_stopped = 1;
- if ((info->type != PORT_16550A) &&
- (!info->IsMoxaMustChipFlag)) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- }
- }
- }
+ brd->ports[i].baud_base = 115200;
+ brd->ports[i].max_baud = 115200;
}
- } else {
- info->flags &= ~ASYNC_CTS_FLOW;
- }
- outb(info->MCR, info->base + UART_MCR);
- if (cflag & CLOCAL) {
- info->flags &= ~ASYNC_CHECK_CD;
- } else {
- info->flags |= ASYNC_CHECK_CD;
- info->IER |= UART_IER_MSI;
}
- outb(info->IER, info->base + UART_IER);
-
- /*
- * Set up parity check flag
- */
- info->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
- if (I_INPCK(info->tty))
- info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
- info->read_status_mask |= UART_LSR_BI;
-
- info->ignore_status_mask = 0;
+ scratch2 = inb(cap + UART_LCR) & (~UART_LCR_DLAB);
+ outb(scratch2 | UART_LCR_DLAB, cap + UART_LCR);
+ outb(0, cap + UART_EFR); /* EFR is the same as FCR */
+ outb(scratch2, cap + UART_LCR);
+ outb(UART_FCR_ENABLE_FIFO, cap + UART_FCR);
+ scratch = inb(cap + UART_IIR);
- if (I_IGNBRK(info->tty)) {
- info->ignore_status_mask |= UART_LSR_BI;
- info->read_status_mask |= UART_LSR_BI;
- /*
- * If we're ignore parity and break indicators, ignore
- * overruns too. (For real raw support).
- */
- if (I_IGNPAR(info->tty)) {
- info->ignore_status_mask |=
- UART_LSR_OE |
- UART_LSR_PE |
- UART_LSR_FE;
- info->read_status_mask |=
- UART_LSR_OE |
- UART_LSR_PE |
- UART_LSR_FE;
- }
- }
- /* following add by Victor Yu. 09-02-2002 */
- if (info->IsMoxaMustChipFlag) {
- spin_lock_irqsave(&info->slock, flags);
- SET_MOXA_MUST_XON1_VALUE(info->base, START_CHAR(info->tty));
- SET_MOXA_MUST_XOFF1_VALUE(info->base, STOP_CHAR(info->tty));
- if (I_IXON(info->tty)) {
- ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->base);
- } else {
- DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->base);
- }
- if (I_IXOFF(info->tty)) {
- ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->base);
- } else {
- DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->base);
- }
- /*
- if ( I_IXANY(info->tty) ) {
- info->MCR |= MOXA_MUST_MCR_XON_ANY;
- ENABLE_MOXA_MUST_XON_ANY_FLOW_CONTROL(info->base);
- } else {
- info->MCR &= ~MOXA_MUST_MCR_XON_ANY;
- DISABLE_MOXA_MUST_XON_ANY_FLOW_CONTROL(info->base);
- }
- */
- spin_unlock_irqrestore(&info->slock, flags);
+ if (scratch & 0xC0)
+ brd->uart_type = PORT_16550A;
+ else
+ brd->uart_type = PORT_16450;
+ if (!request_region(brd->ports[0].ioaddr, 8 * brd->info->nports,
+ "mxser(IO)"))
+ return MXSER_ERR_IOADDR;
+ if (!request_region(brd->vector, 1, "mxser(vector)")) {
+ release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
+ return MXSER_ERR_VECTOR;
}
- /* above add by Victor Yu. 09-02-2002 */
-
-
- outb(fcr, info->base + UART_FCR); /* set fcr */
- outb(cval, info->base + UART_LCR);
-
- return ret;
+ return brd->info->nports;
}
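/*
 * Illustrative sketch (not part of the patch): the IRQ consistency check in
 * mxser_get_ISA_conf() above replicates the top nibble of the config word
 * and compares the result against the stored value, so a mixed per-port IRQ
 * setup is rejected.  Four-port case, with a hypothetical helper name:
 */
static int mxser_isa_irq_consistent(unsigned short reg)
{
	unsigned short irq = reg & 0xF000;

	irq |= irq >> 4;	/* 0x5000 -> 0x5500 */
	irq |= irq >> 8;	/* 0x5500 -> 0x5555 */
	return irq == reg;	/* true only if every nibble matches */
}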
-
-static int mxser_set_baud(struct mxser_struct *info, long newspd)
+static int __devinit mxser_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
- int quot = 0;
- unsigned char cval;
- int ret = 0;
- unsigned long flags;
-
- if (!info->tty || !info->tty->termios)
- return ret;
-
- if (!(info->base))
- return ret;
+#ifdef CONFIG_PCI
+ struct mxser_board *brd;
+ unsigned int i, j;
+ unsigned long ioaddress;
+ int retval = -EINVAL;
- if (newspd > info->MaxCanSetBaudRate)
- return 0;
+ for (i = 0; i < MXSER_BOARDS; i++)
+ if (mxser_boards[i].info == NULL)
+ break;
- info->realbaud = newspd;
- if (newspd == 134) {
- quot = (2 * info->baud_base / 269);
- } else if (newspd) {
- quot = info->baud_base / newspd;
- if (quot == 0)
- quot = 1;
- } else {
- quot = 0;
+ if (i >= MXSER_BOARDS) {
+ printk(KERN_ERR "Too many Smartio/Industio family boards found "
+ "(maximum %d), board not configured\n", MXSER_BOARDS);
+ goto err;
}
- info->timeout = ((info->xmit_fifo_size * HZ * 10 * quot) / info->baud_base);
- info->timeout += HZ / 50; /* Add .02 seconds of slop */
+ brd = &mxser_boards[i];
+ brd->idx = i * MXSER_PORTS_PER_BOARD;
+ printk(KERN_INFO "Found MOXA %s board (BusNo=%d, DevNo=%d)\n",
+ mxser_cards[ent->driver_data].name,
+ pdev->bus->number, PCI_SLOT(pdev->devfn));
- if (quot) {
- spin_lock_irqsave(&info->slock, flags);
- info->MCR |= UART_MCR_DTR;
- outb(info->MCR, info->base + UART_MCR);
- spin_unlock_irqrestore(&info->slock, flags);
- } else {
- spin_lock_irqsave(&info->slock, flags);
- info->MCR &= ~UART_MCR_DTR;
- outb(info->MCR, info->base + UART_MCR);
- spin_unlock_irqrestore(&info->slock, flags);
- return ret;
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ printk(KERN_ERR "Moxa SmartI/O PCI enable fail !\n");
+ goto err;
}
- cval = inb(info->base + UART_LCR);
-
- outb(cval | UART_LCR_DLAB, info->base + UART_LCR); /* set DLAB */
-
- outb(quot & 0xff, info->base + UART_DLL); /* LS of divisor */
- outb(quot >> 8, info->base + UART_DLM); /* MS of divisor */
- outb(cval, info->base + UART_LCR); /* reset DLAB */
-
+ /* io address */
+ ioaddress = pci_resource_start(pdev, 2);
+ retval = pci_request_region(pdev, 2, "mxser(IO)");
+ if (retval)
+ goto err;
- return ret;
-}
+ brd->info = &mxser_cards[ent->driver_data];
+ for (i = 0; i < brd->info->nports; i++)
+ brd->ports[i].ioaddr = ioaddress + 8 * i;
-/*
- * ------------------------------------------------------------
- * friends of mxser_ioctl()
- * ------------------------------------------------------------
- */
-static int mxser_get_serial_info(struct mxser_struct *info, struct serial_struct __user *retinfo)
-{
- struct serial_struct tmp;
+ /* vector */
+ ioaddress = pci_resource_start(pdev, 3);
+ retval = pci_request_region(pdev, 3, "mxser(vector)");
+ if (retval)
+ goto err_relio;
+ brd->vector = ioaddress;
- if (!retinfo)
- return -EFAULT;
- memset(&tmp, 0, sizeof(tmp));
- tmp.type = info->type;
- tmp.line = info->port;
- tmp.port = info->base;
- tmp.irq = info->irq;
- tmp.flags = info->flags;
- tmp.baud_base = info->baud_base;
- tmp.close_delay = info->close_delay;
- tmp.closing_wait = info->closing_wait;
- tmp.custom_divisor = info->custom_divisor;
- tmp.hub6 = 0;
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
- return 0;
-}
+ /* irq */
+ brd->irq = pdev->irq;
-static int mxser_set_serial_info(struct mxser_struct *info, struct serial_struct __user *new_info)
-{
- struct serial_struct new_serial;
- unsigned int flags;
- int retval = 0;
+ brd->chip_flag = CheckIsMoxaMust(brd->ports[0].ioaddr);
+ brd->uart_type = PORT_16550A;
+ brd->vector_mask = 0;
- if (!new_info || !info->base)
- return -EFAULT;
- if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
- return -EFAULT;
+ for (i = 0; i < brd->info->nports; i++) {
+ for (j = 0; j < UART_INFO_NUM; j++) {
+ if (Gpci_uart_info[j].type == brd->chip_flag) {
+ brd->ports[i].max_baud =
+ Gpci_uart_info[j].max_baud;
- if ((new_serial.irq != info->irq) ||
- (new_serial.port != info->base) ||
- (new_serial.custom_divisor != info->custom_divisor) ||
- (new_serial.baud_base != info->baud_base))
- return -EPERM;
+ /* exception....CP-102 */
+ if (brd->info->flags & MXSER_HIGHBAUD)
+ brd->ports[i].max_baud = 921600;
+ break;
+ }
+ }
+ }
- flags = info->flags & ASYNC_SPD_MASK;
+ if (brd->chip_flag == MOXA_MUST_MU860_HWID) {
+ for (i = 0; i < brd->info->nports; i++) {
+ if (i < 4)
+ brd->ports[i].opmode_ioaddr = ioaddress + 4;
+ else
+ brd->ports[i].opmode_ioaddr = ioaddress + 0x0c;
+ }
+ outb(0, ioaddress + 4); /* default set to RS232 mode */
+ outb(0, ioaddress + 0x0c); /* default set to RS232 mode */
+ }
- if (!capable(CAP_SYS_ADMIN)) {
- if ((new_serial.baud_base != info->baud_base) ||
- (new_serial.close_delay != info->close_delay) ||
- ((new_serial.flags & ~ASYNC_USR_MASK) != (info->flags & ~ASYNC_USR_MASK)))
- return -EPERM;
- info->flags = ((info->flags & ~ASYNC_USR_MASK) |
- (new_serial.flags & ASYNC_USR_MASK));
- } else {
- /*
- * OK, past this point, all the error checking has been done.
- * At this point, we start making changes.....
- */
- info->flags = ((info->flags & ~ASYNC_FLAGS) |
- (new_serial.flags & ASYNC_FLAGS));
- info->close_delay = new_serial.close_delay * HZ / 100;
- info->closing_wait = new_serial.closing_wait * HZ / 100;
- info->tty->low_latency =
- (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
- info->tty->low_latency = 0; /* (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; */
+ for (i = 0; i < brd->info->nports; i++) {
+ brd->vector_mask |= (1 << i);
+ brd->ports[i].baud_base = 921600;
}
- /* added by casper, 3/17/2000, for mouse */
- info->type = new_serial.type;
+ /* mxser_initbrd will hook ISR. */
+ retval = mxser_initbrd(brd, pdev);
+ if (retval)
+ goto err_null;
- process_txrx_fifo(info);
+ for (i = 0; i < brd->info->nports; i++)
+ tty_register_device(mxvar_sdriver, brd->idx + i, &pdev->dev);
- if (info->flags & ASYNC_INITIALIZED) {
- if (flags != (info->flags & ASYNC_SPD_MASK)) {
- mxser_change_speed(info, NULL);
- }
- } else {
- retval = mxser_startup(info);
- }
+ pci_set_drvdata(pdev, brd);
+
+ return 0;
+err_relio:
+ pci_release_region(pdev, 2);
+err_null:
+ brd->info = NULL;
+err:
return retval;
+#else
+ return -ENODEV;
+#endif
}
-/*
- * mxser_get_lsr_info - get line status register info
- *
- * Purpose: Let user call ioctl() to get info when the UART physically
- * is emptied. On bus types like RS485, the transmitter must
- * release the bus after transmitting. This must be done when
- * the transmit shift register is empty, not be done when the
- * transmit holding register is empty. This functionality
- * allows an RS485 driver to be written in user space.
- */
-static int mxser_get_lsr_info(struct mxser_struct *info, unsigned int __user *value)
+static void __devexit mxser_remove(struct pci_dev *pdev)
{
- unsigned char status;
- unsigned int result;
- unsigned long flags;
+ struct mxser_board *brd = pci_get_drvdata(pdev);
+ unsigned int i;
- spin_lock_irqsave(&info->slock, flags);
- status = inb(info->base + UART_LSR);
- spin_unlock_irqrestore(&info->slock, flags);
- result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
- return put_user(result, value);
-}
+ for (i = 0; i < brd->info->nports; i++)
+ tty_unregister_device(mxvar_sdriver, brd->idx + i);
-/*
- * This routine sends a break character out the serial port.
- */
-static void mxser_send_break(struct mxser_struct *info, int duration)
-{
- unsigned long flags;
-
- if (!info->base)
- return;
- set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irqsave(&info->slock, flags);
- outb(inb(info->base + UART_LCR) | UART_LCR_SBC,
- info->base + UART_LCR);
- spin_unlock_irqrestore(&info->slock, flags);
- schedule_timeout(duration);
- spin_lock_irqsave(&info->slock, flags);
- outb(inb(info->base + UART_LCR) & ~UART_LCR_SBC,
- info->base + UART_LCR);
- spin_unlock_irqrestore(&info->slock, flags);
+ mxser_release_res(brd, pdev, 1);
+ brd->info = NULL;
}
-static int mxser_tiocmget(struct tty_struct *tty, struct file *file)
-{
- struct mxser_struct *info = tty->driver_data;
- unsigned char control, status;
- unsigned long flags;
+static struct pci_driver mxser_driver = {
+ .name = "mxser",
+ .id_table = mxser_pcibrds,
+ .probe = mxser_probe,
+ .remove = __devexit_p(mxser_remove)
+};
+static int __init mxser_module_init(void)
+{
+ struct mxser_board *brd;
+ unsigned long cap;
+ unsigned int i, m, isaloop;
+ int retval, b;
- if (tty->index == MXSER_PORTS)
- return -ENOIOCTLCMD;
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
+ pr_debug("Loading module mxser ...\n");
- control = info->MCR;
+ mxvar_sdriver = alloc_tty_driver(MXSER_PORTS + 1);
+ if (!mxvar_sdriver)
+ return -ENOMEM;
- spin_lock_irqsave(&info->slock, flags);
- status = inb(info->base + UART_MSR);
- if (status & UART_MSR_ANY_DELTA)
- mxser_check_modem_status(info, status);
- spin_unlock_irqrestore(&info->slock, flags);
- return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) |
- ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) |
- ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) |
- ((status & UART_MSR_RI) ? TIOCM_RNG : 0) |
- ((status & UART_MSR_DSR) ? TIOCM_DSR : 0) |
- ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
-}
+ printk(KERN_INFO "MOXA Smartio/Industio family driver version %s\n",
+ MXSER_VERSION);
-static int mxser_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear)
-{
- struct mxser_struct *info = tty->driver_data;
- unsigned long flags;
+ /* Initialize the tty_driver structure */
+ mxvar_sdriver->owner = THIS_MODULE;
+ mxvar_sdriver->magic = TTY_DRIVER_MAGIC;
+ mxvar_sdriver->name = "ttyMI";
+ mxvar_sdriver->major = ttymajor;
+ mxvar_sdriver->minor_start = 0;
+ mxvar_sdriver->num = MXSER_PORTS + 1;
+ mxvar_sdriver->type = TTY_DRIVER_TYPE_SERIAL;
+ mxvar_sdriver->subtype = SERIAL_TYPE_NORMAL;
+ mxvar_sdriver->init_termios = tty_std_termios;
+ mxvar_sdriver->init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL;
+ mxvar_sdriver->flags = TTY_DRIVER_REAL_RAW|TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(mxvar_sdriver, &mxser_ops);
+ retval = tty_register_driver(mxvar_sdriver);
+ if (retval) {
+ printk(KERN_ERR "Couldn't install MOXA Smartio/Industio family "
+ "tty driver !\n");
+ goto err_put;
+ }
- if (tty->index == MXSER_PORTS)
- return -ENOIOCTLCMD;
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
+ mxvar_diagflag = 0;
- spin_lock_irqsave(&info->slock, flags);
+ m = 0;
+ /* Start finding ISA boards here */
+ for (isaloop = 0; isaloop < 2; isaloop++)
+ for (b = 0; b < MXSER_BOARDS && m < MXSER_BOARDS; b++) {
+ if (!isaloop)
+ cap = mxserBoardCAP[b]; /* predefined */
+ else
+ cap = ioaddr[b]; /* module param */
- if (set & TIOCM_RTS)
- info->MCR |= UART_MCR_RTS;
- if (set & TIOCM_DTR)
- info->MCR |= UART_MCR_DTR;
+ if (!cap)
+ continue;
- if (clear & TIOCM_RTS)
- info->MCR &= ~UART_MCR_RTS;
- if (clear & TIOCM_DTR)
- info->MCR &= ~UART_MCR_DTR;
+ brd = &mxser_boards[m];
+ retval = mxser_get_ISA_conf(cap, brd);
- outb(info->MCR, info->base + UART_MCR);
- spin_unlock_irqrestore(&info->slock, flags);
- return 0;
-}
+ if (retval != 0)
+ printk(KERN_INFO "Found MOXA %s board "
+ "(CAP=0x%x)\n",
+ brd->info->name, ioaddr[b]);
+ if (retval <= 0) {
+ if (retval == MXSER_ERR_IRQ)
+ printk(KERN_ERR "Invalid interrupt "
+ "number, board not "
+ "configured\n");
+ else if (retval == MXSER_ERR_IRQ_CONFLIT)
+ printk(KERN_ERR "Invalid interrupt "
+ "number, board not "
+ "configured\n");
+ else if (retval == MXSER_ERR_VECTOR)
+ printk(KERN_ERR "Invalid interrupt "
+ "vector, board not "
+ "configured\n");
+ else if (retval == MXSER_ERR_IOADDR)
+ printk(KERN_ERR "Invalid I/O address, "
+ "board not configured\n");
-static int mxser_read_register(int, unsigned short *);
-static int mxser_program_mode(int);
-static void mxser_normal_mode(int);
+ brd->info = NULL;
+ continue;
+ }
-static int mxser_get_ISA_conf(int cap, struct mxser_hwconf *hwconf)
-{
- int id, i, bits;
- unsigned short regs[16], irq;
- unsigned char scratch, scratch2;
+ /* mxser_initbrd will hook ISR. */
+ if (mxser_initbrd(brd, NULL) < 0) {
+ brd->info = NULL;
+ continue;
+ }
- hwconf->IsMoxaMustChipFlag = MOXA_OTHER_UART;
+ brd->idx = m * MXSER_PORTS_PER_BOARD;
+ for (i = 0; i < brd->info->nports; i++)
+ tty_register_device(mxvar_sdriver, brd->idx + i,
+ NULL);
- id = mxser_read_register(cap, regs);
- if (id == C168_ASIC_ID) {
- hwconf->board_type = MXSER_BOARD_C168_ISA;
- hwconf->ports = 8;
- } else if (id == C104_ASIC_ID) {
- hwconf->board_type = MXSER_BOARD_C104_ISA;
- hwconf->ports = 4;
- } else if (id == C102_ASIC_ID) {
- hwconf->board_type = MXSER_BOARD_C102_ISA;
- hwconf->ports = 2;
- } else if (id == CI132_ASIC_ID) {
- hwconf->board_type = MXSER_BOARD_CI132;
- hwconf->ports = 2;
- } else if (id == CI134_ASIC_ID) {
- hwconf->board_type = MXSER_BOARD_CI134;
- hwconf->ports = 4;
- } else if (id == CI104J_ASIC_ID) {
- hwconf->board_type = MXSER_BOARD_CI104J;
- hwconf->ports = 4;
- } else
- return 0;
+ m++;
+ }
- irq = 0;
- if (hwconf->ports == 2) {
- irq = regs[9] & 0xF000;
- irq = irq | (irq >> 4);
- if (irq != (regs[9] & 0xFF00))
- return MXSER_ERR_IRQ_CONFLIT;
- } else if (hwconf->ports == 4) {
- irq = regs[9] & 0xF000;
- irq = irq | (irq >> 4);
- irq = irq | (irq >> 8);
- if (irq != regs[9])
- return MXSER_ERR_IRQ_CONFLIT;
- } else if (hwconf->ports == 8) {
- irq = regs[9] & 0xF000;
- irq = irq | (irq >> 4);
- irq = irq | (irq >> 8);
- if ((irq != regs[9]) || (irq != regs[10]))
- return MXSER_ERR_IRQ_CONFLIT;
+ retval = pci_register_driver(&mxser_driver);
+ if (retval) {
+ printk(KERN_ERR "Can't register pci driver\n");
+ if (!m) {
+ retval = -ENODEV;
+ goto err_unr;
+ } /* else: we have some ISA cards under control */
}
- if (!irq)
- return MXSER_ERR_IRQ;
- hwconf->irq = ((int)(irq & 0xF000) >> 12);
- for (i = 0; i < 8; i++)
- hwconf->ioaddr[i] = (int) regs[i + 1] & 0xFFF8;
- if ((regs[12] & 0x80) == 0)
- return MXSER_ERR_VECTOR;
- hwconf->vector = (int)regs[11]; /* interrupt vector */
- if (id == 1)
- hwconf->vector_mask = 0x00FF;
- else
- hwconf->vector_mask = 0x000F;
- for (i = 7, bits = 0x0100; i >= 0; i--, bits <<= 1) {
- if (regs[12] & bits) {
- hwconf->baud_base[i] = 921600;
- hwconf->MaxCanSetBaudRate[i] = 921600; /* add by Victor Yu. 09-04-2002 */
- } else {
- hwconf->baud_base[i] = 115200;
- hwconf->MaxCanSetBaudRate[i] = 115200; /* add by Victor Yu. 09-04-2002 */
- }
- }
- scratch2 = inb(cap + UART_LCR) & (~UART_LCR_DLAB);
- outb(scratch2 | UART_LCR_DLAB, cap + UART_LCR);
- outb(0, cap + UART_EFR); /* EFR is the same as FCR */
- outb(scratch2, cap + UART_LCR);
- outb(UART_FCR_ENABLE_FIFO, cap + UART_FCR);
- scratch = inb(cap + UART_IIR);
+ pr_debug("Done.\n");
- if (scratch & 0xC0)
- hwconf->uart_type = PORT_16550A;
- else
- hwconf->uart_type = PORT_16450;
- if (id == 1)
- hwconf->ports = 8;
- else
- hwconf->ports = 4;
- request_region(hwconf->ioaddr[0], 8 * hwconf->ports, "mxser(IO)");
- request_region(hwconf->vector, 1, "mxser(vector)");
- return hwconf->ports;
+ return 0;
+err_unr:
+ tty_unregister_driver(mxvar_sdriver);
+err_put:
+ put_tty_driver(mxvar_sdriver);
+ return retval;
}
-#define CHIP_SK 0x01 /* Serial Data Clock in Eprom */
-#define CHIP_DO 0x02 /* Serial Data Output in Eprom */
-#define CHIP_CS 0x04 /* Serial Chip Select in Eprom */
-#define CHIP_DI 0x08 /* Serial Data Input in Eprom */
-#define EN_CCMD 0x000 /* Chip's command register */
-#define EN0_RSARLO 0x008 /* Remote start address reg 0 */
-#define EN0_RSARHI 0x009 /* Remote start address reg 1 */
-#define EN0_RCNTLO 0x00A /* Remote byte count reg WR */
-#define EN0_RCNTHI 0x00B /* Remote byte count reg WR */
-#define EN0_DCFG 0x00E /* Data configuration reg WR */
-#define EN0_PORT 0x010 /* Rcv missed frame error counter RD */
-#define ENC_PAGE0 0x000 /* Select page 0 of chip registers */
-#define ENC_PAGE3 0x0C0 /* Select page 3 of chip registers */
-static int mxser_read_register(int port, unsigned short *regs)
+static void __exit mxser_module_exit(void)
{
- int i, k, value, id;
- unsigned int j;
-
- id = mxser_program_mode(port);
- if (id < 0)
- return id;
- for (i = 0; i < 14; i++) {
- k = (i & 0x3F) | 0x180;
- for (j = 0x100; j > 0; j >>= 1) {
- outb(CHIP_CS, port);
- if (k & j) {
- outb(CHIP_CS | CHIP_DO, port);
- outb(CHIP_CS | CHIP_DO | CHIP_SK, port); /* A? bit of read */
- } else {
- outb(CHIP_CS, port);
- outb(CHIP_CS | CHIP_SK, port); /* A? bit of read */
- }
- }
- (void)inb(port);
- value = 0;
- for (k = 0, j = 0x8000; k < 16; k++, j >>= 1) {
- outb(CHIP_CS, port);
- outb(CHIP_CS | CHIP_SK, port);
- if (inb(port) & CHIP_DI)
- value |= j;
- }
- regs[i] = value;
- outb(0, port);
- }
- mxser_normal_mode(port);
- return id;
-}
+ unsigned int i, j;
-static int mxser_program_mode(int port)
-{
- int id, i, j, n;
- /* unsigned long flags; */
+ pr_debug("Unloading module mxser ...\n");
- spin_lock(&gm_lock);
- outb(0, port);
- outb(0, port);
- outb(0, port);
- (void)inb(port);
- (void)inb(port);
- outb(0, port);
- (void)inb(port);
- /* restore_flags(flags); */
- spin_unlock(&gm_lock);
+ pci_unregister_driver(&mxser_driver);
- id = inb(port + 1) & 0x1F;
- if ((id != C168_ASIC_ID) &&
- (id != C104_ASIC_ID) &&
- (id != C102_ASIC_ID) &&
- (id != CI132_ASIC_ID) &&
- (id != CI134_ASIC_ID) &&
- (id != CI104J_ASIC_ID))
- return -1;
- for (i = 0, j = 0; i < 4; i++) {
- n = inb(port + 2);
- if (n == 'M') {
- j = 1;
- } else if ((j == 1) && (n == 1)) {
- j = 2;
- break;
- } else
- j = 0;
- }
- if (j != 2)
- id = -2;
- return id;
-}
+ for (i = 0; i < MXSER_BOARDS; i++) /* ISA remains */
+ if (mxser_boards[i].info != NULL)
+ for (j = 0; j < mxser_boards[i].info->nports; j++)
+ tty_unregister_device(mxvar_sdriver,
+ mxser_boards[i].idx + j);
+ tty_unregister_driver(mxvar_sdriver);
+ put_tty_driver(mxvar_sdriver);
-static void mxser_normal_mode(int port)
-{
- int i, n;
+ for (i = 0; i < MXSER_BOARDS; i++)
+ if (mxser_boards[i].info != NULL)
+ mxser_release_res(&mxser_boards[i], NULL, 1);
- outb(0xA5, port + 1);
- outb(0x80, port + 3);
- outb(12, port + 0); /* 9600 bps */
- outb(0, port + 1);
- outb(0x03, port + 3); /* 8 data bits */
- outb(0x13, port + 4); /* loop back mode */
- for (i = 0; i < 16; i++) {
- n = inb(port + 5);
- if ((n & 0x61) == 0x60)
- break;
- if ((n & 1) == 1)
- (void)inb(port);
- }
- outb(0x00, port + 4);
+ pr_debug("Done.\n");
}
module_init(mxser_module_init);
diff --git a/drivers/char/mxser.h b/drivers/char/mxser.h
index 1f4aa45ec004..844171115954 100644
--- a/drivers/char/mxser.h
+++ b/drivers/char/mxser.h
@@ -4,19 +4,17 @@
/*
* Semi-public control interfaces
*/
-
+
/*
* MOXA ioctls
*/
#define MOXA 0x400
#define MOXA_GETDATACOUNT (MOXA + 23)
-#define MOXA_GET_CONF (MOXA + 35)
#define MOXA_DIAGNOSE (MOXA + 50)
#define MOXA_CHKPORTENABLE (MOXA + 60)
#define MOXA_HighSpeedOn (MOXA + 61)
#define MOXA_GET_MAJOR (MOXA + 63)
-#define MOXA_GET_CUMAJOR (MOXA + 64)
#define MOXA_GETMSTATUS (MOXA + 65)
#define MOXA_SET_OP_MODE (MOXA + 66)
#define MOXA_GET_OP_MODE (MOXA + 67)
@@ -26,26 +24,14 @@
#define RS422_MODE 2
#define RS485_4WIRE_MODE 3
#define OP_MODE_MASK 3
-// above add by Victor Yu. 01-05-2004
-
-#define TTY_THRESHOLD_THROTTLE 128
-
-#define HI_WATER 768
-
-// added by James. 03-11-2004.
-#define MOXA_SDS_GETICOUNTER (MOXA + 68)
-#define MOXA_SDS_RSTICOUNTER (MOXA + 69)
-// (above) added by James.
+#define MOXA_SDS_RSTICOUNTER (MOXA + 69)
#define MOXA_ASPP_OQUEUE (MOXA + 70)
-#define MOXA_ASPP_SETBAUD (MOXA + 71)
-#define MOXA_ASPP_GETBAUD (MOXA + 72)
#define MOXA_ASPP_MON (MOXA + 73)
#define MOXA_ASPP_LSTATUS (MOXA + 74)
#define MOXA_ASPP_MON_EXT (MOXA + 75)
#define MOXA_SET_BAUD_METHOD (MOXA + 76)
-
/* --------------------------------------------------- */
#define NPPI_NOTIFY_PARITY 0x01
@@ -54,51 +40,46 @@
#define NPPI_NOTIFY_SW_OVERRUN 0x08
#define NPPI_NOTIFY_BREAK 0x10
-#define NPPI_NOTIFY_CTSHOLD 0x01 // Tx hold by CTS low
-#define NPPI_NOTIFY_DSRHOLD 0x02 // Tx hold by DSR low
-#define NPPI_NOTIFY_XOFFHOLD 0x08 // Tx hold by Xoff received
-#define NPPI_NOTIFY_XOFFXENT 0x10 // Xoff Sent
-
-//CheckIsMoxaMust return value
-#define MOXA_OTHER_UART 0x00
-#define MOXA_MUST_MU150_HWID 0x01
-#define MOXA_MUST_MU860_HWID 0x02
-
-// follow just for Moxa Must chip define.
-//
-// when LCR register (offset 0x03) write following value,
-// the Must chip will enter enchance mode. And write value
-// on EFR (offset 0x02) bit 6,7 to change bank.
+#define NPPI_NOTIFY_CTSHOLD 0x01 /* Tx hold by CTS low */
+#define NPPI_NOTIFY_DSRHOLD 0x02 /* Tx hold by DSR low */
+#define NPPI_NOTIFY_XOFFHOLD 0x08 /* Tx hold by Xoff received */
+#define NPPI_NOTIFY_XOFFXENT 0x10 /* Xoff Sent */
+
+/* The following definitions apply only to the Moxa MUST chip. */
+/* */
+/* When the value below is written to the LCR register (offset 0x03), */
+/* the MUST chip enters enhanced mode; writing bits 6,7 of EFR */
+/* (offset 0x02) then selects the register bank. */
#define MOXA_MUST_ENTER_ENCHANCE 0xBF
+/* general bank registers, accessible when enhanced mode is enabled */
+/* when enhance mode enable, access on general bank register */
#define MOXA_MUST_GDL_REGISTER 0x07
#define MOXA_MUST_GDL_MASK 0x7F
#define MOXA_MUST_GDL_HAS_BAD_DATA 0x80
-#define MOXA_MUST_LSR_RERR 0x80 // error in receive FIFO
-// enchance register bank select and enchance mode setting register
-// when LCR register equal to 0xBF
+#define MOXA_MUST_LSR_RERR 0x80 /* error in receive FIFO */
+/* enhanced-mode bank select and enhanced-mode setting register, */
+/* accessible when the LCR register equals 0xBF */
#define MOXA_MUST_EFR_REGISTER 0x02
-// enchance mode enable
+/* enhanced mode enable */
#define MOXA_MUST_EFR_EFRB_ENABLE 0x10
+/* enhanced register bank select 0, 1, 2 */
+/* enchance reister bank set 0, 1, 2 */
#define MOXA_MUST_EFR_BANK0 0x00
#define MOXA_MUST_EFR_BANK1 0x40
#define MOXA_MUST_EFR_BANK2 0x80
#define MOXA_MUST_EFR_BANK3 0xC0
#define MOXA_MUST_EFR_BANK_MASK 0xC0
-// set XON1 value register, when LCR=0xBF and change to bank0
+/* set XON1 value register, when LCR=0xBF and change to bank0 */
#define MOXA_MUST_XON1_REGISTER 0x04
-// set XON2 value register, when LCR=0xBF and change to bank0
+/* set XON2 value register, when LCR=0xBF and change to bank0 */
#define MOXA_MUST_XON2_REGISTER 0x05
-// set XOFF1 value register, when LCR=0xBF and change to bank0
+/* set XOFF1 value register, when LCR=0xBF and change to bank0 */
#define MOXA_MUST_XOFF1_REGISTER 0x06
-// set XOFF2 value register, when LCR=0xBF and change to bank0
+/* set XOFF2 value register, when LCR=0xBF and change to bank0 */
#define MOXA_MUST_XOFF2_REGISTER 0x07
#define MOXA_MUST_RBRTL_REGISTER 0x04
@@ -110,32 +91,32 @@
#define MOXA_MUST_ECR_REGISTER 0x06
#define MOXA_MUST_CSR_REGISTER 0x07
-// good data mode enable
+/* good data mode enable */
#define MOXA_MUST_FCR_GDA_MODE_ENABLE 0x20
-// only good data put into RxFIFO
+/* only good data put into RxFIFO */
#define MOXA_MUST_FCR_GDA_ONLY_ENABLE 0x10
-// enable CTS interrupt
+/* enable CTS interrupt */
#define MOXA_MUST_IER_ECTSI 0x80
-// enable RTS interrupt
+/* enable RTS interrupt */
#define MOXA_MUST_IER_ERTSI 0x40
-// enable Xon/Xoff interrupt
+/* enable Xon/Xoff interrupt */
#define MOXA_MUST_IER_XINT 0x20
-// enable GDA interrupt
+/* enable GDA interrupt */
#define MOXA_MUST_IER_EGDAI 0x10
#define MOXA_MUST_RECV_ISR (UART_IER_RDI | MOXA_MUST_IER_EGDAI)
-// GDA interrupt pending
+/* GDA interrupt pending */
#define MOXA_MUST_IIR_GDA 0x1C
#define MOXA_MUST_IIR_RDA 0x04
#define MOXA_MUST_IIR_RTO 0x0C
#define MOXA_MUST_IIR_LSR 0x06
-// recieved Xon/Xoff or specical interrupt pending
+/* received Xon/Xoff or special interrupt pending */
#define MOXA_MUST_IIR_XSC 0x10
-// RTS/CTS change state interrupt pending
+/* RTS/CTS change state interrupt pending */
#define MOXA_MUST_IIR_RTSCTS 0x20
#define MOXA_MUST_IIR_MASK 0x3E
@@ -143,299 +124,164 @@
#define MOXA_MUST_MCR_XON_ANY 0x80
#define MOXA_MUST_MCR_TX_XON 0x08
-
-// software flow control on chip mask value
+/* on-chip software flow control mask value */
#define MOXA_MUST_EFR_SF_MASK 0x0F
-// send Xon1/Xoff1
+/* send Xon1/Xoff1 */
#define MOXA_MUST_EFR_SF_TX1 0x08
-// send Xon2/Xoff2
+/* send Xon2/Xoff2 */
#define MOXA_MUST_EFR_SF_TX2 0x04
-// send Xon1,Xon2/Xoff1,Xoff2
+/* send Xon1,Xon2/Xoff1,Xoff2 */
#define MOXA_MUST_EFR_SF_TX12 0x0C
-// don't send Xon/Xoff
+/* don't send Xon/Xoff */
#define MOXA_MUST_EFR_SF_TX_NO 0x00
-// Tx software flow control mask
+/* Tx software flow control mask */
#define MOXA_MUST_EFR_SF_TX_MASK 0x0C
-// don't receive Xon/Xoff
+/* don't receive Xon/Xoff */
#define MOXA_MUST_EFR_SF_RX_NO 0x00
-// receive Xon1/Xoff1
+/* receive Xon1/Xoff1 */
#define MOXA_MUST_EFR_SF_RX1 0x02
-// receive Xon2/Xoff2
+/* receive Xon2/Xoff2 */
#define MOXA_MUST_EFR_SF_RX2 0x01
-// receive Xon1,Xon2/Xoff1,Xoff2
+/* receive Xon1,Xon2/Xoff1,Xoff2 */
#define MOXA_MUST_EFR_SF_RX12 0x03
-// Rx software flow control mask
+/* Rx software flow control mask */
#define MOXA_MUST_EFR_SF_RX_MASK 0x03
-//#define MOXA_MUST_MIN_XOFFLIMIT 66
-//#define MOXA_MUST_MIN_XONLIMIT 20
-//#define ID1_RX_TRIG 120
-
-
-#define CHECK_MOXA_MUST_XOFFLIMIT(info) { \
- if ( (info)->IsMoxaMustChipFlag && \
- (info)->HandFlow.XoffLimit < MOXA_MUST_MIN_XOFFLIMIT ) { \
- (info)->HandFlow.XoffLimit = MOXA_MUST_MIN_XOFFLIMIT; \
- (info)->HandFlow.XonLimit = MOXA_MUST_MIN_XONLIMIT; \
- } \
-}
-
-#define ENABLE_MOXA_MUST_ENCHANCE_MODE(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+#define ENABLE_MOXA_MUST_ENCHANCE_MODE(baseio) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr |= MOXA_MUST_EFR_EFRB_ENABLE; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define DISABLE_MOXA_MUST_ENCHANCE_MODE(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr |= MOXA_MUST_EFR_EFRB_ENABLE; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define DISABLE_MOXA_MUST_ENCHANCE_MODE(baseio) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_EFRB_ENABLE; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_XON1_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_EFRB_ENABLE; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define SET_MOXA_MUST_XON1_VALUE(baseio, Value) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK0; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
+ __efr |= MOXA_MUST_EFR_BANK0; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
outb((u8)(Value), (baseio)+MOXA_MUST_XON1_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
-#define SET_MOXA_MUST_XON2_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+#define SET_MOXA_MUST_XOFF1_VALUE(baseio, Value) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK0; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_XON2_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_XOFF1_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK0; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
+ __efr |= MOXA_MUST_EFR_BANK0; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
outb((u8)(Value), (baseio)+MOXA_MUST_XOFF1_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_XOFF2_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK0; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_XOFF2_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_RBRTL_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_RBRTL_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_RBRTH_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_RBRTH_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_RBRTI_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define SET_MOXA_MUST_FIFO_VALUE(info) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((info)->ioaddr+UART_LCR); \
+ outb(MOXA_MUST_ENTER_ENCHANCE, (info)->ioaddr+UART_LCR);\
+ __efr = inb((info)->ioaddr+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
+ __efr |= MOXA_MUST_EFR_BANK1; \
+ outb(__efr, (info)->ioaddr+MOXA_MUST_EFR_REGISTER); \
+ outb((u8)((info)->rx_high_water), (info)->ioaddr+ \
+ MOXA_MUST_RBRTH_REGISTER); \
+ outb((u8)((info)->rx_trigger), (info)->ioaddr+ \
+ MOXA_MUST_RBRTI_REGISTER); \
+ outb((u8)((info)->rx_low_water), (info)->ioaddr+ \
+ MOXA_MUST_RBRTL_REGISTER); \
+ outb(__oldlcr, (info)->ioaddr+UART_LCR); \
+} while (0)
+
+#define SET_MOXA_MUST_ENUM_VALUE(baseio, Value) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_RBRTI_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_THRTL_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_THRTL_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-//#define MOXA_MUST_RBRL_VALUE 4
-#define SET_MOXA_MUST_FIFO_VALUE(info) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((info)->base+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (info)->base+UART_LCR); \
- __efr = inb((info)->base+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK1; \
- outb(__efr, (info)->base+MOXA_MUST_EFR_REGISTER); \
- outb((u8)((info)->rx_high_water), (info)->base+MOXA_MUST_RBRTH_REGISTER); \
- outb((u8)((info)->rx_trigger), (info)->base+MOXA_MUST_RBRTI_REGISTER); \
- outb((u8)((info)->rx_low_water), (info)->base+MOXA_MUST_RBRTL_REGISTER); \
- outb(__oldlcr, (info)->base+UART_LCR); \
-}
-
-
-
-#define SET_MOXA_MUST_ENUM_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK2; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
+ __efr |= MOXA_MUST_EFR_BANK2; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
outb((u8)(Value), (baseio)+MOXA_MUST_ENUM_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
-#define GET_MOXA_MUST_HARDWARE_ID(baseio, pId) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK2; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- *pId = inb((baseio)+MOXA_MUST_HWID_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_MASK; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_JUST_TX_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_MASK; \
- __efr |= MOXA_MUST_EFR_SF_TX1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+#define GET_MOXA_MUST_HARDWARE_ID(baseio, pId) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \
- __efr |= MOXA_MUST_EFR_SF_TX1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
+ __efr |= MOXA_MUST_EFR_BANK2; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ *pId = inb((baseio)+MOXA_MUST_HWID_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(baseio) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_JUST_RX_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_SF_MASK; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_MASK; \
- __efr |= MOXA_MUST_EFR_SF_RX1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \
+ __efr |= MOXA_MUST_EFR_SF_TX1; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \
- __efr |= MOXA_MUST_EFR_SF_RX1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define ENABLE_MOXA_MUST_TX_RX_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \
+ __efr |= MOXA_MUST_EFR_SF_RX1; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_MASK; \
- __efr |= (MOXA_MUST_EFR_SF_RX1|MOXA_MUST_EFR_SF_TX1); \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define ENABLE_MOXA_MUST_XON_ANY_FLOW_CONTROL(baseio) { \
- u8 __oldmcr; \
- __oldmcr = inb((baseio)+UART_MCR); \
- __oldmcr |= MOXA_MUST_MCR_XON_ANY; \
- outb(__oldmcr, (baseio)+UART_MCR); \
-}
-
-#define DISABLE_MOXA_MUST_XON_ANY_FLOW_CONTROL(baseio) { \
- u8 __oldmcr; \
- __oldmcr = inb((baseio)+UART_MCR); \
- __oldmcr &= ~MOXA_MUST_MCR_XON_ANY; \
- outb(__oldmcr, (baseio)+UART_MCR); \
-}
-
-#define READ_MOXA_MUST_GDL(baseio) inb((baseio)+MOXA_MUST_GDL_REGISTER)
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
#endif
diff --git a/drivers/char/mxser_new.c b/drivers/char/mxser_new.c
deleted file mode 100644
index 081c84c7b548..000000000000
--- a/drivers/char/mxser_new.c
+++ /dev/null
@@ -1,2817 +0,0 @@
-/*
- * mxser.c -- MOXA Smartio/Industio family multiport serial driver.
- *
- * Copyright (C) 1999-2006 Moxa Technologies (support@moxa.com.tw).
- * Copyright (C) 2006-2007 Jiri Slaby <jirislaby@gmail.com>
- *
- * This code is loosely based on the 1.8 moxa driver which is based on
- * Linux serial driver, written by Linus Torvalds, Theodore T'so and
- * others.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Fed through a cleanup, indent and remove of non 2.6 code by Alan Cox
- * <alan@redhat.com>. The original 1.8 code is available on www.moxa.com.
- * - Fixed x86_64 cleanness
- * - Fixed sleep with spinlock held in mxser_send_break
- */
-
-#include <linux/module.h>
-#include <linux/autoconf.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial.h>
-#include <linux/serial_reg.h>
-#include <linux/major.h>
-#include <linux/string.h>
-#include <linux/fcntl.h>
-#include <linux/ptrace.h>
-#include <linux/gfp.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/bitops.h>
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-
-#include "mxser_new.h"
-
-#define MXSER_VERSION "2.0.2" /* 1.10 */
-#define MXSERMAJOR 174
-#define MXSERCUMAJOR 175
-
-#define MXSER_BOARDS 4 /* Max. boards */
-#define MXSER_PORTS_PER_BOARD 8 /* Max. ports per board */
-#define MXSER_PORTS (MXSER_BOARDS * MXSER_PORTS_PER_BOARD)
-#define MXSER_ISR_PASS_LIMIT 100
-
-#define MXSER_ERR_IOADDR -1
-#define MXSER_ERR_IRQ -2
-#define MXSER_ERR_IRQ_CONFLIT -3
-#define MXSER_ERR_VECTOR -4
-
-/*CheckIsMoxaMust return value*/
-#define MOXA_OTHER_UART 0x00
-#define MOXA_MUST_MU150_HWID 0x01
-#define MOXA_MUST_MU860_HWID 0x02
-
-#define WAKEUP_CHARS 256
-
-#define UART_MCR_AFE 0x20
-#define UART_LSR_SPECIAL 0x1E
-
-#define PCI_DEVICE_ID_CB108 0x1080
-#define PCI_DEVICE_ID_CB114 0x1142
-#define PCI_DEVICE_ID_CB134I 0x1341
-#define PCI_DEVICE_ID_CP138U 0x1380
-#define PCI_DEVICE_ID_POS104UL 0x1044
-
-
-#define C168_ASIC_ID 1
-#define C104_ASIC_ID 2
-#define C102_ASIC_ID 0xB
-#define CI132_ASIC_ID 4
-#define CI134_ASIC_ID 3
-#define CI104J_ASIC_ID 5
-
-#define MXSER_HIGHBAUD 1
-#define MXSER_HAS2 2
-
-/* This is only for PCI */
-static const struct {
- int type;
- int tx_fifo;
- int rx_fifo;
- int xmit_fifo_size;
- int rx_high_water;
- int rx_trigger;
- int rx_low_water;
- long max_baud;
-} Gpci_uart_info[] = {
- {MOXA_OTHER_UART, 16, 16, 16, 14, 14, 1, 921600L},
- {MOXA_MUST_MU150_HWID, 64, 64, 64, 48, 48, 16, 230400L},
- {MOXA_MUST_MU860_HWID, 128, 128, 128, 96, 96, 32, 921600L}
-};
-#define UART_INFO_NUM ARRAY_SIZE(Gpci_uart_info)
-
-struct mxser_cardinfo {
- unsigned int nports;
- char *name;
- unsigned int flags;
-};
-
-static const struct mxser_cardinfo mxser_cards[] = {
-/* 0*/ { 8, "C168 series", },
- { 4, "C104 series", },
- { 4, "CI-104J series", },
- { 8, "C168H/PCI series", },
- { 4, "C104H/PCI series", },
-/* 5*/ { 4, "C102 series", MXSER_HAS2 }, /* C102-ISA */
- { 4, "CI-132 series", MXSER_HAS2 },
- { 4, "CI-134 series", },
- { 2, "CP-132 series", },
- { 4, "CP-114 series", },
-/*10*/ { 4, "CT-114 series", },
- { 2, "CP-102 series", MXSER_HIGHBAUD },
- { 4, "CP-104U series", },
- { 8, "CP-168U series", },
- { 2, "CP-132U series", },
-/*15*/ { 4, "CP-134U series", },
- { 4, "CP-104JU series", },
- { 8, "Moxa UC7000 Serial", }, /* RC7000 */
- { 8, "CP-118U series", },
- { 2, "CP-102UL series", },
-/*20*/ { 2, "CP-102U series", },
- { 8, "CP-118EL series", },
- { 8, "CP-168EL series", },
- { 4, "CP-104EL series", },
- { 8, "CB-108 series", },
-/*25*/ { 4, "CB-114 series", },
- { 4, "CB-134I series", },
- { 8, "CP-138U series", },
- { 4, "POS-104UL series", }
-};
-
-/* driver_data correspond to the lines in the structure above
- see also ISA probe function before you change something */
-static struct pci_device_id mxser_pcibrds[] = {
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C168), .driver_data = 3 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C104), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132), .driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP114), .driver_data = 9 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CT114), .driver_data = 10 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102), .driver_data = 11 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104U), .driver_data = 12 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168U), .driver_data = 13 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132U), .driver_data = 14 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP134U), .driver_data = 15 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104JU),.driver_data = 16 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_RC7000), .driver_data = 17 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118U), .driver_data = 18 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102UL),.driver_data = 19 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102U), .driver_data = 20 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118EL),.driver_data = 21 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168EL),.driver_data = 22 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104EL),.driver_data = 23 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB108), .driver_data = 24 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB114), .driver_data = 25 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB134I), .driver_data = 26 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP138U), .driver_data = 27 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_POS104UL), .driver_data = 28 },
- { }
-};
-MODULE_DEVICE_TABLE(pci, mxser_pcibrds);
-
-static int mxvar_baud_table[] = {
- 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400, 57600, 115200, 230400, 460800, 921600
-};
-static unsigned int mxvar_baud_table1[] = {
- 0, B50, B75, B110, B134, B150, B200, B300, B600, B1200, B1800, B2400,
- B4800, B9600, B19200, B38400, B57600, B115200, B230400, B460800, B921600
-};
-#define BAUD_TABLE_NO ARRAY_SIZE(mxvar_baud_table)
-
-#define B_SPEC B2000000
-
-static int ioaddr[MXSER_BOARDS] = { 0, 0, 0, 0 };
-static int ttymajor = MXSERMAJOR;
-static int calloutmajor = MXSERCUMAJOR;
-
-/* Variables for insmod */
-
-MODULE_AUTHOR("Casper Yang");
-MODULE_DESCRIPTION("MOXA Smartio/Industio Family Multiport Board Device Driver");
-module_param_array(ioaddr, int, NULL, 0);
-module_param(ttymajor, int, 0);
-MODULE_LICENSE("GPL");
-
-struct mxser_log {
- int tick;
- unsigned long rxcnt[MXSER_PORTS];
- unsigned long txcnt[MXSER_PORTS];
-};
-
-
-struct mxser_mon {
- unsigned long rxcnt;
- unsigned long txcnt;
- unsigned long up_rxcnt;
- unsigned long up_txcnt;
- int modem_status;
- unsigned char hold_reason;
-};
-
-struct mxser_mon_ext {
- unsigned long rx_cnt[32];
- unsigned long tx_cnt[32];
- unsigned long up_rxcnt[32];
- unsigned long up_txcnt[32];
- int modem_status[32];
-
- long baudrate[32];
- int databits[32];
- int stopbits[32];
- int parity[32];
- int flowctrl[32];
- int fifo[32];
- int iftype[32];
-};
-
-struct mxser_board;
-
-struct mxser_port {
- struct mxser_board *board;
- struct tty_struct *tty;
-
- unsigned long ioaddr;
- unsigned long opmode_ioaddr;
- int max_baud;
-
- int rx_high_water;
- int rx_trigger; /* Rx fifo trigger level */
- int rx_low_water;
- int baud_base; /* max. speed */
- long realbaud;
- int type; /* UART type */
- int flags; /* defined in tty.h */
- int speed;
-
- int x_char; /* xon/xoff character */
- int IER; /* Interrupt Enable Register */
- int MCR; /* Modem control register */
-
- unsigned char stop_rx;
- unsigned char ldisc_stop_rx;
-
- int custom_divisor;
- int close_delay;
- unsigned short closing_wait;
- unsigned char err_shadow;
- unsigned long event;
-
- int count; /* # of fd on device */
- int blocked_open; /* # of blocked opens */
- struct async_icount icount; /* kernel counters for 4 input interrupts */
- int timeout;
-
- int read_status_mask;
- int ignore_status_mask;
- int xmit_fifo_size;
- unsigned char *xmit_buf;
- int xmit_head;
- int xmit_tail;
- int xmit_cnt;
-
- struct ktermios normal_termios;
-
- struct mxser_mon mon_data;
-
- spinlock_t slock;
- wait_queue_head_t open_wait;
- wait_queue_head_t delta_msr_wait;
-};
-
-struct mxser_board {
- unsigned int idx;
- int irq;
- const struct mxser_cardinfo *info;
- unsigned long vector;
- unsigned long vector_mask;
-
- int chip_flag;
- int uart_type;
-
- struct mxser_port ports[MXSER_PORTS_PER_BOARD];
-};
-
-struct mxser_mstatus {
- tcflag_t cflag;
- int cts;
- int dsr;
- int ri;
- int dcd;
-};
-
-static struct mxser_mstatus GMStatus[MXSER_PORTS];
-
-static int mxserBoardCAP[MXSER_BOARDS] = {
- 0, 0, 0, 0
- /* 0x180, 0x280, 0x200, 0x320 */
-};
-
-static struct mxser_board mxser_boards[MXSER_BOARDS];
-static struct tty_driver *mxvar_sdriver;
-static struct mxser_log mxvar_log;
-static int mxvar_diagflag;
-static unsigned char mxser_msr[MXSER_PORTS + 1];
-static struct mxser_mon_ext mon_data_ext;
-static int mxser_set_baud_method[MXSER_PORTS + 1];
-
-#ifdef CONFIG_PCI
-static int __devinit CheckIsMoxaMust(int io)
-{
- u8 oldmcr, hwid;
- int i;
-
- outb(0, io + UART_LCR);
- DISABLE_MOXA_MUST_ENCHANCE_MODE(io);
- oldmcr = inb(io + UART_MCR);
- outb(0, io + UART_MCR);
- SET_MOXA_MUST_XON1_VALUE(io, 0x11);
- if ((hwid = inb(io + UART_MCR)) != 0) {
- outb(oldmcr, io + UART_MCR);
- return MOXA_OTHER_UART;
- }
-
- GET_MOXA_MUST_HARDWARE_ID(io, &hwid);
- for (i = 1; i < UART_INFO_NUM; i++) { /* 0 = OTHER_UART */
- if (hwid == Gpci_uart_info[i].type)
- return (int)hwid;
- }
- return MOXA_OTHER_UART;
-}
-#endif
-
-static void process_txrx_fifo(struct mxser_port *info)
-{
- int i;
-
- if ((info->type == PORT_16450) || (info->type == PORT_8250)) {
- info->rx_trigger = 1;
- info->rx_high_water = 1;
- info->rx_low_water = 1;
- info->xmit_fifo_size = 1;
- } else
- for (i = 0; i < UART_INFO_NUM; i++)
- if (info->board->chip_flag == Gpci_uart_info[i].type) {
- info->rx_trigger = Gpci_uart_info[i].rx_trigger;
- info->rx_low_water = Gpci_uart_info[i].rx_low_water;
- info->rx_high_water = Gpci_uart_info[i].rx_high_water;
- info->xmit_fifo_size = Gpci_uart_info[i].xmit_fifo_size;
- break;
- }
-}
-
-static unsigned char mxser_get_msr(int baseaddr, int mode, int port)
-{
- unsigned char status = 0;
-
- status = inb(baseaddr + UART_MSR);
-
- mxser_msr[port] &= 0x0F;
- mxser_msr[port] |= status;
- status = mxser_msr[port];
- if (mode)
- mxser_msr[port] = 0;
-
- return status;
-}
-
-static int mxser_block_til_ready(struct tty_struct *tty, struct file *filp,
- struct mxser_port *port)
-{
- DECLARE_WAITQUEUE(wait, current);
- int retval;
- int do_clocal = 0;
- unsigned long flags;
-
- /*
- * If non-blocking mode is set, or the port is not enabled,
- * then make the check up front and then exit.
- */
- if ((filp->f_flags & O_NONBLOCK) ||
- test_bit(TTY_IO_ERROR, &tty->flags)) {
- port->flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
- }
-
- if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
-
- /*
- * Block waiting for the carrier detect and the line to become
- * free (i.e., not in use by the callout). While we are in
- * this loop, port->count is dropped by one, so that
- * mxser_close() knows when to free things. We restore it upon
- * exit, either normal or abnormal.
- */
- retval = 0;
- add_wait_queue(&port->open_wait, &wait);
-
- spin_lock_irqsave(&port->slock, flags);
- if (!tty_hung_up_p(filp))
- port->count--;
- spin_unlock_irqrestore(&port->slock, flags);
- port->blocked_open++;
- while (1) {
- spin_lock_irqsave(&port->slock, flags);
- outb(inb(port->ioaddr + UART_MCR) |
- UART_MCR_DTR | UART_MCR_RTS, port->ioaddr + UART_MCR);
- spin_unlock_irqrestore(&port->slock, flags);
- set_current_state(TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) {
- if (port->flags & ASYNC_HUP_NOTIFY)
- retval = -EAGAIN;
- else
- retval = -ERESTARTSYS;
- break;
- }
- if (!(port->flags & ASYNC_CLOSING) &&
- (do_clocal ||
- (inb(port->ioaddr + UART_MSR) & UART_MSR_DCD)))
- break;
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
- schedule();
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&port->open_wait, &wait);
- if (!tty_hung_up_p(filp))
- port->count++;
- port->blocked_open--;
- if (retval)
- return retval;
- port->flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
-}
-
-static int mxser_set_baud(struct mxser_port *info, long newspd)
-{
- unsigned int i;
- int quot = 0;
- unsigned char cval;
- int ret = 0;
-
- if (!info->tty || !info->tty->termios)
- return ret;
-
- if (!(info->ioaddr))
- return ret;
-
- if (newspd > info->max_baud)
- return 0;
-
- info->realbaud = newspd;
- for (i = 0; i < BAUD_TABLE_NO; i++)
- if (newspd == mxvar_baud_table[i])
- break;
- if (i == BAUD_TABLE_NO) {
- quot = info->baud_base / info->speed;
- if (info->speed <= 0 || info->speed > info->max_baud)
- quot = 0;
- } else {
- if (newspd == 134) {
- quot = (2 * info->baud_base / 269);
- } else if (newspd) {
- quot = info->baud_base / newspd;
- if (quot == 0)
- quot = 1;
- } else {
- quot = 0;
- }
- }
-
- info->timeout = ((info->xmit_fifo_size * HZ * 10 * quot) / info->baud_base);
- info->timeout += HZ / 50; /* Add .02 seconds of slop */
-
- if (quot) {
- info->MCR |= UART_MCR_DTR;
- outb(info->MCR, info->ioaddr + UART_MCR);
- } else {
- info->MCR &= ~UART_MCR_DTR;
- outb(info->MCR, info->ioaddr + UART_MCR);
- return ret;
- }
-
- cval = inb(info->ioaddr + UART_LCR);
-
- outb(cval | UART_LCR_DLAB, info->ioaddr + UART_LCR); /* set DLAB */
-
- outb(quot & 0xff, info->ioaddr + UART_DLL); /* LS of divisor */
- outb(quot >> 8, info->ioaddr + UART_DLM); /* MS of divisor */
- outb(cval, info->ioaddr + UART_LCR); /* reset DLAB */
-
- if (i == BAUD_TABLE_NO) {
- quot = info->baud_base % info->speed;
- quot *= 8;
- if ((quot % info->speed) > (info->speed / 2)) {
- quot /= info->speed;
- quot++;
- } else {
- quot /= info->speed;
- }
- SET_MOXA_MUST_ENUM_VALUE(info->ioaddr, quot);
- } else
- SET_MOXA_MUST_ENUM_VALUE(info->ioaddr, 0);
-
- return ret;
-}
-
-/*
- * This routine is called to set the UART divisor registers to match
- * the specified baud rate for a serial port.
- */
-static int mxser_change_speed(struct mxser_port *info,
- struct ktermios *old_termios)
-{
- unsigned cflag, cval, fcr;
- int ret = 0;
- unsigned char status;
- long baud;
-
- if (!info->tty || !info->tty->termios)
- return ret;
- cflag = info->tty->termios->c_cflag;
- if (!(info->ioaddr))
- return ret;
-
- if (mxser_set_baud_method[info->tty->index] == 0) {
- if ((cflag & CBAUD) == B_SPEC)
- baud = info->speed;
- else
- baud = tty_get_baud_rate(info->tty);
- mxser_set_baud(info, baud);
- }
-
- /* byte size and parity */
- switch (cflag & CSIZE) {
- case CS5:
- cval = 0x00;
- break;
- case CS6:
- cval = 0x01;
- break;
- case CS7:
- cval = 0x02;
- break;
- case CS8:
- cval = 0x03;
- break;
- default:
- cval = 0x00;
- break; /* too keep GCC shut... */
- }
- if (cflag & CSTOPB)
- cval |= 0x04;
- if (cflag & PARENB)
- cval |= UART_LCR_PARITY;
- if (!(cflag & PARODD))
- cval |= UART_LCR_EPAR;
- if (cflag & CMSPAR)
- cval |= UART_LCR_SPAR;
-
- if ((info->type == PORT_8250) || (info->type == PORT_16450)) {
- if (info->board->chip_flag) {
- fcr = UART_FCR_ENABLE_FIFO;
- fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
- SET_MOXA_MUST_FIFO_VALUE(info);
- } else
- fcr = 0;
- } else {
- fcr = UART_FCR_ENABLE_FIFO;
- if (info->board->chip_flag) {
- fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
- SET_MOXA_MUST_FIFO_VALUE(info);
- } else {
- switch (info->rx_trigger) {
- case 1:
- fcr |= UART_FCR_TRIGGER_1;
- break;
- case 4:
- fcr |= UART_FCR_TRIGGER_4;
- break;
- case 8:
- fcr |= UART_FCR_TRIGGER_8;
- break;
- default:
- fcr |= UART_FCR_TRIGGER_14;
- break;
- }
- }
- }
-
- /* CTS flow control flag and modem status interrupts */
- info->IER &= ~UART_IER_MSI;
- info->MCR &= ~UART_MCR_AFE;
- if (cflag & CRTSCTS) {
- info->flags |= ASYNC_CTS_FLOW;
- info->IER |= UART_IER_MSI;
- if ((info->type == PORT_16550A) || (info->board->chip_flag)) {
- info->MCR |= UART_MCR_AFE;
- } else {
- status = inb(info->ioaddr + UART_MSR);
- if (info->tty->hw_stopped) {
- if (status & UART_MSR_CTS) {
- info->tty->hw_stopped = 0;
- if (info->type != PORT_16550A &&
- !info->board->chip_flag) {
- outb(info->IER & ~UART_IER_THRI,
- info->ioaddr +
- UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr +
- UART_IER);
- }
- tty_wakeup(info->tty);
- }
- } else {
- if (!(status & UART_MSR_CTS)) {
- info->tty->hw_stopped = 1;
- if ((info->type != PORT_16550A) &&
- (!info->board->chip_flag)) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->ioaddr +
- UART_IER);
- }
- }
- }
- }
- } else {
- info->flags &= ~ASYNC_CTS_FLOW;
- }
- outb(info->MCR, info->ioaddr + UART_MCR);
- if (cflag & CLOCAL) {
- info->flags &= ~ASYNC_CHECK_CD;
- } else {
- info->flags |= ASYNC_CHECK_CD;
- info->IER |= UART_IER_MSI;
- }
- outb(info->IER, info->ioaddr + UART_IER);
-
- /*
- * Set up parity check flag
- */
- info->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
- if (I_INPCK(info->tty))
- info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
- info->read_status_mask |= UART_LSR_BI;
-
- info->ignore_status_mask = 0;
-
- if (I_IGNBRK(info->tty)) {
- info->ignore_status_mask |= UART_LSR_BI;
- info->read_status_mask |= UART_LSR_BI;
- /*
- * If we're ignore parity and break indicators, ignore
- * overruns too. (For real raw support).
- */
- if (I_IGNPAR(info->tty)) {
- info->ignore_status_mask |=
- UART_LSR_OE |
- UART_LSR_PE |
- UART_LSR_FE;
- info->read_status_mask |=
- UART_LSR_OE |
- UART_LSR_PE |
- UART_LSR_FE;
- }
- }
- if (info->board->chip_flag) {
- SET_MOXA_MUST_XON1_VALUE(info->ioaddr, START_CHAR(info->tty));
- SET_MOXA_MUST_XOFF1_VALUE(info->ioaddr, STOP_CHAR(info->tty));
- if (I_IXON(info->tty)) {
- ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
- } else {
- DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
- }
- if (I_IXOFF(info->tty)) {
- ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
- } else {
- DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
- }
- }
-
-
- outb(fcr, info->ioaddr + UART_FCR); /* set fcr */
- outb(cval, info->ioaddr + UART_LCR);
-
- return ret;
-}
-
-static void mxser_check_modem_status(struct mxser_port *port, int status)
-{
- /* update input line counters */
- if (status & UART_MSR_TERI)
- port->icount.rng++;
- if (status & UART_MSR_DDSR)
- port->icount.dsr++;
- if (status & UART_MSR_DDCD)
- port->icount.dcd++;
- if (status & UART_MSR_DCTS)
- port->icount.cts++;
- port->mon_data.modem_status = status;
- wake_up_interruptible(&port->delta_msr_wait);
-
- if ((port->flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) {
- if (status & UART_MSR_DCD)
- wake_up_interruptible(&port->open_wait);
- }
-
- if (port->flags & ASYNC_CTS_FLOW) {
- if (port->tty->hw_stopped) {
- if (status & UART_MSR_CTS) {
- port->tty->hw_stopped = 0;
-
- if ((port->type != PORT_16550A) &&
- (!port->board->chip_flag)) {
- outb(port->IER & ~UART_IER_THRI,
- port->ioaddr + UART_IER);
- port->IER |= UART_IER_THRI;
- outb(port->IER, port->ioaddr +
- UART_IER);
- }
- tty_wakeup(port->tty);
- }
- } else {
- if (!(status & UART_MSR_CTS)) {
- port->tty->hw_stopped = 1;
- if (port->type != PORT_16550A &&
- !port->board->chip_flag) {
- port->IER &= ~UART_IER_THRI;
- outb(port->IER, port->ioaddr +
- UART_IER);
- }
- }
- }
- }
-}
-
-static int mxser_startup(struct mxser_port *info)
-{
- unsigned long page;
- unsigned long flags;
-
- page = __get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- spin_lock_irqsave(&info->slock, flags);
-
- if (info->flags & ASYNC_INITIALIZED) {
- free_page(page);
- spin_unlock_irqrestore(&info->slock, flags);
- return 0;
- }
-
- if (!info->ioaddr || !info->type) {
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
- free_page(page);
- spin_unlock_irqrestore(&info->slock, flags);
- return 0;
- }
- if (info->xmit_buf)
- free_page(page);
- else
- info->xmit_buf = (unsigned char *) page;
-
- /*
- * Clear the FIFO buffers and disable them
- * (they will be reenabled in mxser_change_speed())
- */
- if (info->board->chip_flag)
- outb((UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT |
- MOXA_MUST_FCR_GDA_MODE_ENABLE), info->ioaddr + UART_FCR);
- else
- outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
- info->ioaddr + UART_FCR);
-
- /*
- * At this point there's no way the LSR could still be 0xFF;
- * if it is, then bail out, because there's likely no UART
- * here.
- */
- if (inb(info->ioaddr + UART_LSR) == 0xff) {
- spin_unlock_irqrestore(&info->slock, flags);
- if (capable(CAP_SYS_ADMIN)) {
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
- return 0;
- } else
- return -ENODEV;
- }
-
- /*
- * Clear the interrupt registers.
- */
- (void) inb(info->ioaddr + UART_LSR);
- (void) inb(info->ioaddr + UART_RX);
- (void) inb(info->ioaddr + UART_IIR);
- (void) inb(info->ioaddr + UART_MSR);
-
- /*
- * Now, initialize the UART
- */
- outb(UART_LCR_WLEN8, info->ioaddr + UART_LCR); /* reset DLAB */
- info->MCR = UART_MCR_DTR | UART_MCR_RTS;
- outb(info->MCR, info->ioaddr + UART_MCR);
-
- /*
- * Finally, enable interrupts
- */
- info->IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI;
-
- if (info->board->chip_flag)
- info->IER |= MOXA_MUST_IER_EGDAI;
- outb(info->IER, info->ioaddr + UART_IER); /* enable interrupts */
-
- /*
- * And clear the interrupt registers again for luck.
- */
- (void) inb(info->ioaddr + UART_LSR);
- (void) inb(info->ioaddr + UART_RX);
- (void) inb(info->ioaddr + UART_IIR);
- (void) inb(info->ioaddr + UART_MSR);
-
- if (info->tty)
- clear_bit(TTY_IO_ERROR, &info->tty->flags);
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
-
- /*
- * and set the speed of the serial port
- */
- mxser_change_speed(info, NULL);
- info->flags |= ASYNC_INITIALIZED;
- spin_unlock_irqrestore(&info->slock, flags);
-
- return 0;
-}
-
-/*
- * This routine will shutdown a serial port; interrupts maybe disabled, and
- * DTR is dropped if the hangup on close termio flag is on.
- */
-static void mxser_shutdown(struct mxser_port *info)
-{
- unsigned long flags;
-
- if (!(info->flags & ASYNC_INITIALIZED))
- return;
-
- spin_lock_irqsave(&info->slock, flags);
-
- /*
- * clear delta_msr_wait queue to avoid mem leaks: we may free the irq
- * here so the queue might never be waken up
- */
- wake_up_interruptible(&info->delta_msr_wait);
-
- /*
- * Free the IRQ, if necessary
- */
- if (info->xmit_buf) {
- free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = NULL;
- }
-
- info->IER = 0;
- outb(0x00, info->ioaddr + UART_IER);
-
- if (!info->tty || (info->tty->termios->c_cflag & HUPCL))
- info->MCR &= ~(UART_MCR_DTR | UART_MCR_RTS);
- outb(info->MCR, info->ioaddr + UART_MCR);
-
- /* clear Rx/Tx FIFO's */
- if (info->board->chip_flag)
- outb(UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT |
- MOXA_MUST_FCR_GDA_MODE_ENABLE,
- info->ioaddr + UART_FCR);
- else
- outb(UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
- info->ioaddr + UART_FCR);
-
- /* read data port to reset things */
- (void) inb(info->ioaddr + UART_RX);
-
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
-
- info->flags &= ~ASYNC_INITIALIZED;
-
- if (info->board->chip_flag)
- SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(info->ioaddr);
-
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-/*
- * This routine is called whenever a serial port is opened. It
- * enables interrupts for a serial port, linking in its async structure into
- * the IRQ chain. It also performs the serial-specific
- * initialization for the tty structure.
- */
-static int mxser_open(struct tty_struct *tty, struct file *filp)
-{
- struct mxser_port *info;
- unsigned long flags;
- int retval, line;
-
- line = tty->index;
- if (line == MXSER_PORTS)
- return 0;
- if (line < 0 || line > MXSER_PORTS)
- return -ENODEV;
- info = &mxser_boards[line / MXSER_PORTS_PER_BOARD].ports[line % MXSER_PORTS_PER_BOARD];
- if (!info->ioaddr)
- return -ENODEV;
-
- tty->driver_data = info;
- info->tty = tty;
- /*
- * Start up serial port
- */
- spin_lock_irqsave(&info->slock, flags);
- info->count++;
- spin_unlock_irqrestore(&info->slock, flags);
- retval = mxser_startup(info);
- if (retval)
- return retval;
-
- retval = mxser_block_til_ready(tty, filp, info);
- if (retval)
- return retval;
-
- /* unmark here for very high baud rate (ex. 921600 bps) used */
- tty->low_latency = 1;
- return 0;
-}
-
-/*
- * This routine is called when the serial port gets closed. First, we
- * wait for the last remaining data to be sent. Then, we unlink its
- * async structure from the interrupt chain if necessary, and we free
- * that IRQ if nothing is left in the chain.
- */
-static void mxser_close(struct tty_struct *tty, struct file *filp)
-{
- struct mxser_port *info = tty->driver_data;
-
- unsigned long timeout;
- unsigned long flags;
-
- if (tty->index == MXSER_PORTS)
- return;
- if (!info)
- return;
-
- spin_lock_irqsave(&info->slock, flags);
-
- if (tty_hung_up_p(filp)) {
- spin_unlock_irqrestore(&info->slock, flags);
- return;
- }
- if ((tty->count == 1) && (info->count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. Info->count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shutdown.
- */
- printk(KERN_ERR "mxser_close: bad serial port count; "
- "tty->count is 1, info->count is %d\n", info->count);
- info->count = 1;
- }
- if (--info->count < 0) {
- printk(KERN_ERR "mxser_close: bad serial port count for "
- "ttys%d: %d\n", tty->index, info->count);
- info->count = 0;
- }
- if (info->count) {
- spin_unlock_irqrestore(&info->slock, flags);
- return;
- }
- info->flags |= ASYNC_CLOSING;
- spin_unlock_irqrestore(&info->slock, flags);
- /*
- * Save the termios structure, since this port may have
- * separate termios for callout and dialin.
- */
- if (info->flags & ASYNC_NORMAL_ACTIVE)
- info->normal_termios = *tty->termios;
- /*
- * Now we wait for the transmit buffer to clear; and we notify
- * the line discipline to only process XON/XOFF characters.
- */
- tty->closing = 1;
- if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE)
- tty_wait_until_sent(tty, info->closing_wait);
- /*
- * At this point we stop accepting input. To do this, we
- * disable the receive line status interrupts, and tell the
- * interrupt driver to stop checking the data ready bit in the
- * line status register.
- */
- info->IER &= ~UART_IER_RLSI;
- if (info->board->chip_flag)
- info->IER &= ~MOXA_MUST_RECV_ISR;
-
- if (info->flags & ASYNC_INITIALIZED) {
- outb(info->IER, info->ioaddr + UART_IER);
- /*
- * Before we drop DTR, make sure the UART transmitter
- * has completely drained; this is especially
- * important if there is a transmit FIFO!
- */
- timeout = jiffies + HZ;
- while (!(inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT)) {
- schedule_timeout_interruptible(5);
- if (time_after(jiffies, timeout))
- break;
- }
- }
- mxser_shutdown(info);
-
- if (tty->driver->flush_buffer)
- tty->driver->flush_buffer(tty);
-
- tty_ldisc_flush(tty);
-
- tty->closing = 0;
- info->event = 0;
- info->tty = NULL;
- if (info->blocked_open) {
- if (info->close_delay)
- schedule_timeout_interruptible(info->close_delay);
- wake_up_interruptible(&info->open_wait);
- }
-
- info->flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING);
-}
-
-static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int count)
-{
- int c, total = 0;
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
- if (!info->xmit_buf)
- return 0;
-
- while (1) {
- c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - info->xmit_head));
- if (c <= 0)
- break;
-
- memcpy(info->xmit_buf + info->xmit_head, buf, c);
- spin_lock_irqsave(&info->slock, flags);
- info->xmit_head = (info->xmit_head + c) &
- (SERIAL_XMIT_SIZE - 1);
- info->xmit_cnt += c;
- spin_unlock_irqrestore(&info->slock, flags);
-
- buf += c;
- count -= c;
- total += c;
- }
-
- if (info->xmit_cnt && !tty->stopped) {
- if (!tty->hw_stopped ||
- (info->type == PORT_16550A) ||
- (info->board->chip_flag)) {
- spin_lock_irqsave(&info->slock, flags);
- outb(info->IER & ~UART_IER_THRI, info->ioaddr +
- UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- spin_unlock_irqrestore(&info->slock, flags);
- }
- }
- return total;
-}
-
-static void mxser_put_char(struct tty_struct *tty, unsigned char ch)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
- if (!info->xmit_buf)
- return;
-
- if (info->xmit_cnt >= SERIAL_XMIT_SIZE - 1)
- return;
-
- spin_lock_irqsave(&info->slock, flags);
- info->xmit_buf[info->xmit_head++] = ch;
- info->xmit_head &= SERIAL_XMIT_SIZE - 1;
- info->xmit_cnt++;
- spin_unlock_irqrestore(&info->slock, flags);
- if (!tty->stopped) {
- if (!tty->hw_stopped ||
- (info->type == PORT_16550A) ||
- info->board->chip_flag) {
- spin_lock_irqsave(&info->slock, flags);
- outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- spin_unlock_irqrestore(&info->slock, flags);
- }
- }
-}
-
-
-static void mxser_flush_chars(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
- if (info->xmit_cnt <= 0 ||
- tty->stopped ||
- !info->xmit_buf ||
- (tty->hw_stopped &&
- (info->type != PORT_16550A) &&
- (!info->board->chip_flag)
- ))
- return;
-
- spin_lock_irqsave(&info->slock, flags);
-
- outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
-
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-static int mxser_write_room(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
- int ret;
-
- ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
- if (ret < 0)
- ret = 0;
- return ret;
-}
-
-static int mxser_chars_in_buffer(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
- return info->xmit_cnt;
-}
-
-static void mxser_flush_buffer(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
- char fcr;
- unsigned long flags;
-
-
- spin_lock_irqsave(&info->slock, flags);
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
-
- fcr = inb(info->ioaddr + UART_FCR);
- outb((fcr | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
- info->ioaddr + UART_FCR);
- outb(fcr, info->ioaddr + UART_FCR);
-
- spin_unlock_irqrestore(&info->slock, flags);
-
- tty_wakeup(tty);
-}
-
-/*
- * ------------------------------------------------------------
- * friends of mxser_ioctl()
- * ------------------------------------------------------------
- */
-static int mxser_get_serial_info(struct mxser_port *info,
- struct serial_struct __user *retinfo)
-{
- struct serial_struct tmp;
-
- if (!retinfo)
- return -EFAULT;
- memset(&tmp, 0, sizeof(tmp));
- tmp.type = info->type;
- tmp.line = info->tty->index;
- tmp.port = info->ioaddr;
- tmp.irq = info->board->irq;
- tmp.flags = info->flags;
- tmp.baud_base = info->baud_base;
- tmp.close_delay = info->close_delay;
- tmp.closing_wait = info->closing_wait;
- tmp.custom_divisor = info->custom_divisor;
- tmp.hub6 = 0;
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
- return 0;
-}
-
-static int mxser_set_serial_info(struct mxser_port *info,
- struct serial_struct __user *new_info)
-{
- struct serial_struct new_serial;
- unsigned long sl_flags;
- unsigned int flags;
- int retval = 0;
-
- if (!new_info || !info->ioaddr)
- return -EFAULT;
- if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
- return -EFAULT;
-
- if ((new_serial.irq != info->board->irq) ||
- (new_serial.port != info->ioaddr) ||
- (new_serial.custom_divisor != info->custom_divisor) ||
- (new_serial.baud_base != info->baud_base))
- return -EPERM;
-
- flags = info->flags & ASYNC_SPD_MASK;
-
- if (!capable(CAP_SYS_ADMIN)) {
- if ((new_serial.baud_base != info->baud_base) ||
- (new_serial.close_delay != info->close_delay) ||
- ((new_serial.flags & ~ASYNC_USR_MASK) != (info->flags & ~ASYNC_USR_MASK)))
- return -EPERM;
- info->flags = ((info->flags & ~ASYNC_USR_MASK) |
- (new_serial.flags & ASYNC_USR_MASK));
- } else {
- /*
- * OK, past this point, all the error checking has been done.
- * At this point, we start making changes.....
- */
- info->flags = ((info->flags & ~ASYNC_FLAGS) |
- (new_serial.flags & ASYNC_FLAGS));
- info->close_delay = new_serial.close_delay * HZ / 100;
- info->closing_wait = new_serial.closing_wait * HZ / 100;
- info->tty->low_latency =
- (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
- info->tty->low_latency = 0;
- }
-
- info->type = new_serial.type;
-
- process_txrx_fifo(info);
-
- if (info->flags & ASYNC_INITIALIZED) {
- if (flags != (info->flags & ASYNC_SPD_MASK)) {
- spin_lock_irqsave(&info->slock, sl_flags);
- mxser_change_speed(info, NULL);
- spin_unlock_irqrestore(&info->slock, sl_flags);
- }
- } else
- retval = mxser_startup(info);
-
- return retval;
-}
-
-/*
- * mxser_get_lsr_info - get line status register info
- *
- * Purpose: Let user call ioctl() to get info when the UART physically
- * is emptied. On bus types like RS485, the transmitter must
- * release the bus after transmitting. This must be done when
- * the transmit shift register is empty, not be done when the
- * transmit holding register is empty. This functionality
- * allows an RS485 driver to be written in user space.
- */
-static int mxser_get_lsr_info(struct mxser_port *info,
- unsigned int __user *value)
-{
- unsigned char status;
- unsigned int result;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
- status = inb(info->ioaddr + UART_LSR);
- spin_unlock_irqrestore(&info->slock, flags);
- result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
- return put_user(result, value);
-}
-
-/*
- * This routine sends a break character out the serial port.
- */
-static void mxser_send_break(struct mxser_port *info, int duration)
-{
- unsigned long flags;
-
- if (!info->ioaddr)
- return;
- set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irqsave(&info->slock, flags);
- outb(inb(info->ioaddr + UART_LCR) | UART_LCR_SBC,
- info->ioaddr + UART_LCR);
- spin_unlock_irqrestore(&info->slock, flags);
- schedule_timeout(duration);
- spin_lock_irqsave(&info->slock, flags);
- outb(inb(info->ioaddr + UART_LCR) & ~UART_LCR_SBC,
- info->ioaddr + UART_LCR);
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-static int mxser_tiocmget(struct tty_struct *tty, struct file *file)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned char control, status;
- unsigned long flags;
-
-
- if (tty->index == MXSER_PORTS)
- return -ENOIOCTLCMD;
- if (test_bit(TTY_IO_ERROR, &tty->flags))
- return -EIO;
-
- control = info->MCR;
-
- spin_lock_irqsave(&info->slock, flags);
- status = inb(info->ioaddr + UART_MSR);
- if (status & UART_MSR_ANY_DELTA)
- mxser_check_modem_status(info, status);
- spin_unlock_irqrestore(&info->slock, flags);
- return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) |
- ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) |
- ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) |
- ((status & UART_MSR_RI) ? TIOCM_RNG : 0) |
- ((status & UART_MSR_DSR) ? TIOCM_DSR : 0) |
- ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
-}
-
-static int mxser_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
-
- if (tty->index == MXSER_PORTS)
- return -ENOIOCTLCMD;
- if (test_bit(TTY_IO_ERROR, &tty->flags))
- return -EIO;
-
- spin_lock_irqsave(&info->slock, flags);
-
- if (set & TIOCM_RTS)
- info->MCR |= UART_MCR_RTS;
- if (set & TIOCM_DTR)
- info->MCR |= UART_MCR_DTR;
-
- if (clear & TIOCM_RTS)
- info->MCR &= ~UART_MCR_RTS;
- if (clear & TIOCM_DTR)
- info->MCR &= ~UART_MCR_DTR;
-
- outb(info->MCR, info->ioaddr + UART_MCR);
- spin_unlock_irqrestore(&info->slock, flags);
- return 0;
-}
-
-static int __init mxser_program_mode(int port)
-{
- int id, i, j, n;
-
- outb(0, port);
- outb(0, port);
- outb(0, port);
- (void)inb(port);
- (void)inb(port);
- outb(0, port);
- (void)inb(port);
-
- id = inb(port + 1) & 0x1F;
- if ((id != C168_ASIC_ID) &&
- (id != C104_ASIC_ID) &&
- (id != C102_ASIC_ID) &&
- (id != CI132_ASIC_ID) &&
- (id != CI134_ASIC_ID) &&
- (id != CI104J_ASIC_ID))
- return -1;
- for (i = 0, j = 0; i < 4; i++) {
- n = inb(port + 2);
- if (n == 'M') {
- j = 1;
- } else if ((j == 1) && (n == 1)) {
- j = 2;
- break;
- } else
- j = 0;
- }
- if (j != 2)
- id = -2;
- return id;
-}
-
-static void __init mxser_normal_mode(int port)
-{
- int i, n;
-
- outb(0xA5, port + 1);
- outb(0x80, port + 3);
- outb(12, port + 0); /* 9600 bps */
- outb(0, port + 1);
- outb(0x03, port + 3); /* 8 data bits */
- outb(0x13, port + 4); /* loop back mode */
- for (i = 0; i < 16; i++) {
- n = inb(port + 5);
- if ((n & 0x61) == 0x60)
- break;
- if ((n & 1) == 1)
- (void)inb(port);
- }
- outb(0x00, port + 4);
-}
-
-#define CHIP_SK 0x01 /* Serial Data Clock in Eprom */
-#define CHIP_DO 0x02 /* Serial Data Output in Eprom */
-#define CHIP_CS 0x04 /* Serial Chip Select in Eprom */
-#define CHIP_DI 0x08 /* Serial Data Input in Eprom */
-#define EN_CCMD 0x000 /* Chip's command register */
-#define EN0_RSARLO 0x008 /* Remote start address reg 0 */
-#define EN0_RSARHI 0x009 /* Remote start address reg 1 */
-#define EN0_RCNTLO 0x00A /* Remote byte count reg WR */
-#define EN0_RCNTHI 0x00B /* Remote byte count reg WR */
-#define EN0_DCFG 0x00E /* Data configuration reg WR */
-#define EN0_PORT 0x010 /* Rcv missed frame error counter RD */
-#define ENC_PAGE0 0x000 /* Select page 0 of chip registers */
-#define ENC_PAGE3 0x0C0 /* Select page 3 of chip registers */
-static int __init mxser_read_register(int port, unsigned short *regs)
-{
- int i, k, value, id;
- unsigned int j;
-
- id = mxser_program_mode(port);
- if (id < 0)
- return id;
- for (i = 0; i < 14; i++) {
- k = (i & 0x3F) | 0x180;
- for (j = 0x100; j > 0; j >>= 1) {
- outb(CHIP_CS, port);
- if (k & j) {
- outb(CHIP_CS | CHIP_DO, port);
- outb(CHIP_CS | CHIP_DO | CHIP_SK, port); /* A? bit of read */
- } else {
- outb(CHIP_CS, port);
- outb(CHIP_CS | CHIP_SK, port); /* A? bit of read */
- }
- }
- (void)inb(port);
- value = 0;
- for (k = 0, j = 0x8000; k < 16; k++, j >>= 1) {
- outb(CHIP_CS, port);
- outb(CHIP_CS | CHIP_SK, port);
- if (inb(port) & CHIP_DI)
- value |= j;
- }
- regs[i] = value;
- outb(0, port);
- }
- mxser_normal_mode(port);
- return id;
-}
-
-static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
-{
- struct mxser_port *port;
- int result, status;
- unsigned int i, j;
-
- switch (cmd) {
- case MOXA_GET_CONF:
-/* if (copy_to_user(argp, mxsercfg,
- sizeof(struct mxser_hwconf) * 4))
- return -EFAULT;
- return 0;*/
- return -ENXIO;
- case MOXA_GET_MAJOR:
- if (copy_to_user(argp, &ttymajor, sizeof(int)))
- return -EFAULT;
- return 0;
-
- case MOXA_GET_CUMAJOR:
- if (copy_to_user(argp, &calloutmajor, sizeof(int)))
- return -EFAULT;
- return 0;
-
- case MOXA_CHKPORTENABLE:
- result = 0;
-
- for (i = 0; i < MXSER_BOARDS; i++)
- for (j = 0; j < MXSER_PORTS_PER_BOARD; j++)
- if (mxser_boards[i].ports[j].ioaddr)
- result |= (1 << i);
-
- return put_user(result, (unsigned long __user *)argp);
- case MOXA_GETDATACOUNT:
- if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log)))
- return -EFAULT;
- return 0;
- case MOXA_GETMSTATUS:
- for (i = 0; i < MXSER_BOARDS; i++)
- for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
- port = &mxser_boards[i].ports[j];
-
- GMStatus[i].ri = 0;
- if (!port->ioaddr) {
- GMStatus[i].dcd = 0;
- GMStatus[i].dsr = 0;
- GMStatus[i].cts = 0;
- continue;
- }
-
- if (!port->tty || !port->tty->termios)
- GMStatus[i].cflag =
- port->normal_termios.c_cflag;
- else
- GMStatus[i].cflag =
- port->tty->termios->c_cflag;
-
- status = inb(port->ioaddr + UART_MSR);
- if (status & 0x80 /*UART_MSR_DCD */ )
- GMStatus[i].dcd = 1;
- else
- GMStatus[i].dcd = 0;
-
- if (status & 0x20 /*UART_MSR_DSR */ )
- GMStatus[i].dsr = 1;
- else
- GMStatus[i].dsr = 0;
-
-
- if (status & 0x10 /*UART_MSR_CTS */ )
- GMStatus[i].cts = 1;
- else
- GMStatus[i].cts = 0;
- }
- if (copy_to_user(argp, GMStatus,
- sizeof(struct mxser_mstatus) * MXSER_PORTS))
- return -EFAULT;
- return 0;
- case MOXA_ASPP_MON_EXT: {
- int p, shiftbit;
- unsigned long opmode;
- unsigned cflag, iflag;
-
- for (i = 0; i < MXSER_BOARDS; i++)
- for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
- port = &mxser_boards[i].ports[j];
- if (!port->ioaddr)
- continue;
-
- status = mxser_get_msr(port->ioaddr, 0, i);
-
- if (status & UART_MSR_TERI)
- port->icount.rng++;
- if (status & UART_MSR_DDSR)
- port->icount.dsr++;
- if (status & UART_MSR_DDCD)
- port->icount.dcd++;
- if (status & UART_MSR_DCTS)
- port->icount.cts++;
-
- port->mon_data.modem_status = status;
- mon_data_ext.rx_cnt[i] = port->mon_data.rxcnt;
- mon_data_ext.tx_cnt[i] = port->mon_data.txcnt;
- mon_data_ext.up_rxcnt[i] =
- port->mon_data.up_rxcnt;
- mon_data_ext.up_txcnt[i] =
- port->mon_data.up_txcnt;
- mon_data_ext.modem_status[i] =
- port->mon_data.modem_status;
- mon_data_ext.baudrate[i] = port->realbaud;
-
- if (!port->tty || !port->tty->termios) {
- cflag = port->normal_termios.c_cflag;
- iflag = port->normal_termios.c_iflag;
- } else {
- cflag = port->tty->termios->c_cflag;
- iflag = port->tty->termios->c_iflag;
- }
-
- mon_data_ext.databits[i] = cflag & CSIZE;
-
- mon_data_ext.stopbits[i] = cflag & CSTOPB;
-
- mon_data_ext.parity[i] =
- cflag & (PARENB | PARODD | CMSPAR);
-
- mon_data_ext.flowctrl[i] = 0x00;
-
- if (cflag & CRTSCTS)
- mon_data_ext.flowctrl[i] |= 0x03;
-
- if (iflag & (IXON | IXOFF))
- mon_data_ext.flowctrl[i] |= 0x0C;
-
- if (port->type == PORT_16550A)
- mon_data_ext.fifo[i] = 1;
- else
- mon_data_ext.fifo[i] = 0;
-
- p = i % 4;
- shiftbit = p * 2;
- opmode = inb(port->opmode_ioaddr) >> shiftbit;
- opmode &= OP_MODE_MASK;
-
- mon_data_ext.iftype[i] = opmode;
-
- }
- if (copy_to_user(argp, &mon_data_ext,
- sizeof(mon_data_ext)))
- return -EFAULT;
-
- return 0;
-
- } default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
-static int mxser_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct mxser_port *info = tty->driver_data;
- struct async_icount cprev, cnow; /* kernel counter temps */
- struct serial_icounter_struct __user *p_cuser;
- unsigned long templ;
- unsigned long flags;
- unsigned int i;
- void __user *argp = (void __user *)arg;
- int retval;
-
- if (tty->index == MXSER_PORTS)
- return mxser_ioctl_special(cmd, argp);
-
- if (cmd == MOXA_SET_OP_MODE || cmd == MOXA_GET_OP_MODE) {
- int p;
- unsigned long opmode;
- static unsigned char ModeMask[] = { 0xfc, 0xf3, 0xcf, 0x3f };
- int shiftbit;
- unsigned char val, mask;
-
- p = tty->index % 4;
- if (cmd == MOXA_SET_OP_MODE) {
- if (get_user(opmode, (int __user *) argp))
- return -EFAULT;
- if (opmode != RS232_MODE &&
- opmode != RS485_2WIRE_MODE &&
- opmode != RS422_MODE &&
- opmode != RS485_4WIRE_MODE)
- return -EFAULT;
- mask = ModeMask[p];
- shiftbit = p * 2;
- val = inb(info->opmode_ioaddr);
- val &= mask;
- val |= (opmode << shiftbit);
- outb(val, info->opmode_ioaddr);
- } else {
- shiftbit = p * 2;
- opmode = inb(info->opmode_ioaddr) >> shiftbit;
- opmode &= OP_MODE_MASK;
- if (copy_to_user(argp, &opmode, sizeof(int)))
- return -EFAULT;
- }
- return 0;
- }
-
- if (cmd == MOXA_SET_SPECIAL_BAUD_RATE) {
- int speed;
-
- if (get_user(speed, (int __user *)argp))
- return -EFAULT;
- if (speed <= 0 || speed > info->max_baud)
- return -EFAULT;
- if (!info->tty || !info->tty->termios || !info->ioaddr)
- return 0;
- info->tty->termios->c_cflag &= ~(CBAUD | CBAUDEX);
- for (i = 0; i < BAUD_TABLE_NO; i++)
- if (speed == mxvar_baud_table[i])
- break;
- if (i == BAUD_TABLE_NO) {
- info->tty->termios->c_cflag |= B_SPEC;
- } else if (speed != 0)
- info->tty->termios->c_cflag |= mxvar_baud_table1[i];
-
- info->speed = speed;
- spin_lock_irqsave(&info->slock, flags);
- mxser_change_speed(info, NULL);
- spin_unlock_irqrestore(&info->slock, flags);
-
- return 0;
- } else if (cmd == MOXA_GET_SPECIAL_BAUD_RATE) {
- if (copy_to_user(argp, &info->speed, sizeof(int)))
- return -EFAULT;
- return 0;
- }
-
- if (cmd != TIOCGSERIAL && cmd != TIOCMIWAIT && cmd != TIOCGICOUNT &&
- test_bit(TTY_IO_ERROR, &tty->flags))
- return -EIO;
-
- switch (cmd) {
- case TCSBRK: /* SVID version: non-zero arg --> no break */
- retval = tty_check_change(tty);
- if (retval)
- return retval;
- tty_wait_until_sent(tty, 0);
- if (!arg)
- mxser_send_break(info, HZ / 4); /* 1/4 second */
- return 0;
- case TCSBRKP: /* support for POSIX tcsendbreak() */
- retval = tty_check_change(tty);
- if (retval)
- return retval;
- tty_wait_until_sent(tty, 0);
- mxser_send_break(info, arg ? arg * (HZ / 10) : HZ / 4);
- return 0;
- case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
- case TIOCSSOFTCAR:
- if (get_user(templ, (unsigned long __user *) argp))
- return -EFAULT;
- arg = templ;
- tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
- return 0;
- case TIOCGSERIAL:
- return mxser_get_serial_info(info, argp);
- case TIOCSSERIAL:
- return mxser_set_serial_info(info, argp);
- case TIOCSERGETLSR: /* Get line status register */
- return mxser_get_lsr_info(info, argp);
- /*
- * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
- * - mask passed in arg for lines of interest
- * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
- * Caller should use TIOCGICOUNT to see which one it was
- */
- case TIOCMIWAIT:
- spin_lock_irqsave(&info->slock, flags);
- cnow = info->icount; /* note the counters on entry */
- spin_unlock_irqrestore(&info->slock, flags);
-
- wait_event_interruptible(info->delta_msr_wait, ({
- cprev = cnow;
- spin_lock_irqsave(&info->slock, flags);
- cnow = info->icount; /* atomic copy */
- spin_unlock_irqrestore(&info->slock, flags);
-
- ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
- ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
- ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
- ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts));
- }));
- break;
- /*
- * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
- * Return: write counters to the user passed counter struct
- * NB: both 1->0 and 0->1 transitions are counted except for
- * RI where only 0->1 is counted.
- */
- case TIOCGICOUNT:
- spin_lock_irqsave(&info->slock, flags);
- cnow = info->icount;
- spin_unlock_irqrestore(&info->slock, flags);
- p_cuser = argp;
- if (put_user(cnow.frame, &p_cuser->frame))
- return -EFAULT;
- if (put_user(cnow.brk, &p_cuser->brk))
- return -EFAULT;
- if (put_user(cnow.overrun, &p_cuser->overrun))
- return -EFAULT;
- if (put_user(cnow.buf_overrun, &p_cuser->buf_overrun))
- return -EFAULT;
- if (put_user(cnow.parity, &p_cuser->parity))
- return -EFAULT;
- if (put_user(cnow.rx, &p_cuser->rx))
- return -EFAULT;
- if (put_user(cnow.tx, &p_cuser->tx))
- return -EFAULT;
- put_user(cnow.cts, &p_cuser->cts);
- put_user(cnow.dsr, &p_cuser->dsr);
- put_user(cnow.rng, &p_cuser->rng);
- put_user(cnow.dcd, &p_cuser->dcd);
- return 0;
- case MOXA_HighSpeedOn:
- return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
- case MOXA_SDS_RSTICOUNTER:
- info->mon_data.rxcnt = 0;
- info->mon_data.txcnt = 0;
- return 0;
- case MOXA_ASPP_SETBAUD:{
- long baud;
- if (get_user(baud, (long __user *)argp))
- return -EFAULT;
- spin_lock_irqsave(&info->slock, flags);
- mxser_set_baud(info, baud);
- spin_unlock_irqrestore(&info->slock, flags);
- return 0;
- }
- case MOXA_ASPP_GETBAUD:
- if (copy_to_user(argp, &info->realbaud, sizeof(long)))
- return -EFAULT;
-
- return 0;
-
- case MOXA_ASPP_OQUEUE:{
- int len, lsr;
-
- len = mxser_chars_in_buffer(tty);
-
- lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT;
-
- len += (lsr ? 0 : 1);
-
- if (copy_to_user(argp, &len, sizeof(int)))
- return -EFAULT;
-
- return 0;
- }
- case MOXA_ASPP_MON: {
- int mcr, status;
-
- status = mxser_get_msr(info->ioaddr, 1, tty->index);
- mxser_check_modem_status(info, status);
-
- mcr = inb(info->ioaddr + UART_MCR);
- if (mcr & MOXA_MUST_MCR_XON_FLAG)
- info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFHOLD;
- else
- info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFHOLD;
-
- if (mcr & MOXA_MUST_MCR_TX_XON)
- info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFXENT;
- else
- info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFXENT;
-
- if (info->tty->hw_stopped)
- info->mon_data.hold_reason |= NPPI_NOTIFY_CTSHOLD;
- else
- info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD;
-
- if (copy_to_user(argp, &info->mon_data,
- sizeof(struct mxser_mon)))
- return -EFAULT;
-
- return 0;
- }
- case MOXA_ASPP_LSTATUS: {
- if (copy_to_user(argp, &info->err_shadow,
- sizeof(unsigned char)))
- return -EFAULT;
-
- info->err_shadow = 0;
- return 0;
- }
- case MOXA_SET_BAUD_METHOD: {
- int method;
-
- if (get_user(method, (int __user *)argp))
- return -EFAULT;
- mxser_set_baud_method[tty->index] = method;
- if (copy_to_user(argp, &method, sizeof(int)))
- return -EFAULT;
-
- return 0;
- }
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
-static void mxser_stoprx(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
-
- info->ldisc_stop_rx = 1;
- if (I_IXOFF(tty)) {
- if (info->board->chip_flag) {
- info->IER &= ~MOXA_MUST_RECV_ISR;
- outb(info->IER, info->ioaddr + UART_IER);
- } else {
- info->x_char = STOP_CHAR(tty);
- outb(0, info->ioaddr + UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- }
- }
-
- if (info->tty->termios->c_cflag & CRTSCTS) {
- info->MCR &= ~UART_MCR_RTS;
- outb(info->MCR, info->ioaddr + UART_MCR);
- }
-}
-
-/*
- * This routine is called by the upper-layer tty layer to signal that
- * incoming characters should be throttled.
- */
-static void mxser_throttle(struct tty_struct *tty)
-{
- mxser_stoprx(tty);
-}
-
-static void mxser_unthrottle(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
-
- /* startrx */
- info->ldisc_stop_rx = 0;
- if (I_IXOFF(tty)) {
- if (info->x_char)
- info->x_char = 0;
- else {
- if (info->board->chip_flag) {
- info->IER |= MOXA_MUST_RECV_ISR;
- outb(info->IER, info->ioaddr + UART_IER);
- } else {
- info->x_char = START_CHAR(tty);
- outb(0, info->ioaddr + UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- }
- }
- }
-
- if (info->tty->termios->c_cflag & CRTSCTS) {
- info->MCR |= UART_MCR_RTS;
- outb(info->MCR, info->ioaddr + UART_MCR);
- }
-}
-
-/*
- * mxser_stop() and mxser_start()
- *
- * This routines are called before setting or resetting tty->stopped.
- * They enable or disable transmitter interrupts, as necessary.
- */
-static void mxser_stop(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
- if (info->IER & UART_IER_THRI) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- }
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-static void mxser_start(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
- if (info->xmit_cnt && info->xmit_buf) {
- outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- }
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
- mxser_change_speed(info, old_termios);
- spin_unlock_irqrestore(&info->slock, flags);
-
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios->c_cflag & CRTSCTS)) {
- tty->hw_stopped = 0;
- mxser_start(tty);
- }
-
- /* Handle sw stopped */
- if ((old_termios->c_iflag & IXON) &&
- !(tty->termios->c_iflag & IXON)) {
- tty->stopped = 0;
-
- if (info->board->chip_flag) {
- spin_lock_irqsave(&info->slock, flags);
- DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
- spin_unlock_irqrestore(&info->slock, flags);
- }
-
- mxser_start(tty);
- }
-}
-
-/*
- * mxser_wait_until_sent() --- wait until the transmitter is empty
- */
-static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long orig_jiffies, char_time;
- int lsr;
-
- if (info->type == PORT_UNKNOWN)
- return;
-
- if (info->xmit_fifo_size == 0)
- return; /* Just in case.... */
-
- orig_jiffies = jiffies;
- /*
- * Set the check interval to be 1/5 of the estimated time to
- * send a single character, and make it at least 1. The check
- * interval should also be less than the timeout.
- *
- * Note: we have to use pretty tight timings here to satisfy
- * the NIST-PCTS.
- */
- char_time = (info->timeout - HZ / 50) / info->xmit_fifo_size;
- char_time = char_time / 5;
- if (char_time == 0)
- char_time = 1;
- if (timeout && timeout < char_time)
- char_time = timeout;
- /*
- * If the transmitter hasn't cleared in twice the approximate
- * amount of time to send the entire FIFO, it probably won't
- * ever clear. This assumes the UART isn't doing flow
- * control, which is currently the case. Hence, if it ever
- * takes longer than info->timeout, this is probably due to a
- * UART bug of some kind. So, we clamp the timeout parameter at
- * 2*info->timeout.
- */
- if (!timeout || timeout > 2 * info->timeout)
- timeout = 2 * info->timeout;
-#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
- printk(KERN_DEBUG "In rs_wait_until_sent(%d) check=%lu...",
- timeout, char_time);
- printk("jiff=%lu...", jiffies);
-#endif
- while (!((lsr = inb(info->ioaddr + UART_LSR)) & UART_LSR_TEMT)) {
-#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
- printk("lsr = %d (jiff=%lu)...", lsr, jiffies);
-#endif
- schedule_timeout_interruptible(char_time);
- if (signal_pending(current))
- break;
- if (timeout && time_after(jiffies, orig_jiffies + timeout))
- break;
- }
- set_current_state(TASK_RUNNING);
-
-#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
- printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
-#endif
-}
-
-/*
- * This routine is called by tty_hangup() when a hangup is signaled.
- */
-static void mxser_hangup(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
-
- mxser_flush_buffer(tty);
- mxser_shutdown(info);
- info->event = 0;
- info->count = 0;
- info->flags &= ~ASYNC_NORMAL_ACTIVE;
- info->tty = NULL;
- wake_up_interruptible(&info->open_wait);
-}
-
-/*
- * mxser_rs_break() --- routine which turns the break handling on or off
- */
-static void mxser_rs_break(struct tty_struct *tty, int break_state)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
- if (break_state == -1)
- outb(inb(info->ioaddr + UART_LCR) | UART_LCR_SBC,
- info->ioaddr + UART_LCR);
- else
- outb(inb(info->ioaddr + UART_LCR) & ~UART_LCR_SBC,
- info->ioaddr + UART_LCR);
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-static void mxser_receive_chars(struct mxser_port *port, int *status)
-{
- struct tty_struct *tty = port->tty;
- unsigned char ch, gdl;
- int ignored = 0;
- int cnt = 0;
- int recv_room;
- int max = 256;
-
- recv_room = tty->receive_room;
- if ((recv_room == 0) && (!port->ldisc_stop_rx))
- mxser_stoprx(tty);
-
- if (port->board->chip_flag != MOXA_OTHER_UART) {
-
- if (*status & UART_LSR_SPECIAL)
- goto intr_old;
- if (port->board->chip_flag == MOXA_MUST_MU860_HWID &&
- (*status & MOXA_MUST_LSR_RERR))
- goto intr_old;
- if (*status & MOXA_MUST_LSR_RERR)
- goto intr_old;
-
- gdl = inb(port->ioaddr + MOXA_MUST_GDL_REGISTER);
-
- if (port->board->chip_flag == MOXA_MUST_MU150_HWID)
- gdl &= MOXA_MUST_GDL_MASK;
- if (gdl >= recv_room) {
- if (!port->ldisc_stop_rx)
- mxser_stoprx(tty);
- }
- while (gdl--) {
- ch = inb(port->ioaddr + UART_RX);
- tty_insert_flip_char(tty, ch, 0);
- cnt++;
- }
- goto end_intr;
- }
-intr_old:
-
- do {
- if (max-- < 0)
- break;
-
- ch = inb(port->ioaddr + UART_RX);
- if (port->board->chip_flag && (*status & UART_LSR_OE))
- outb(0x23, port->ioaddr + UART_FCR);
- *status &= port->read_status_mask;
- if (*status & port->ignore_status_mask) {
- if (++ignored > 100)
- break;
- } else {
- char flag = 0;
- if (*status & UART_LSR_SPECIAL) {
- if (*status & UART_LSR_BI) {
- flag = TTY_BREAK;
- port->icount.brk++;
-
- if (port->flags & ASYNC_SAK)
- do_SAK(tty);
- } else if (*status & UART_LSR_PE) {
- flag = TTY_PARITY;
- port->icount.parity++;
- } else if (*status & UART_LSR_FE) {
- flag = TTY_FRAME;
- port->icount.frame++;
- } else if (*status & UART_LSR_OE) {
- flag = TTY_OVERRUN;
- port->icount.overrun++;
- } else
- flag = TTY_BREAK;
- }
- tty_insert_flip_char(tty, ch, flag);
- cnt++;
- if (cnt >= recv_room) {
- if (!port->ldisc_stop_rx)
- mxser_stoprx(tty);
- break;
- }
-
- }
-
- if (port->board->chip_flag)
- break;
-
- *status = inb(port->ioaddr + UART_LSR);
- } while (*status & UART_LSR_DR);
-
-end_intr:
- mxvar_log.rxcnt[port->tty->index] += cnt;
- port->mon_data.rxcnt += cnt;
- port->mon_data.up_rxcnt += cnt;
-
- /*
- * We are called from an interrupt context with &port->slock
- * being held. Drop it temporarily in order to prevent
- * recursive locking.
- */
- spin_unlock(&port->slock);
- tty_flip_buffer_push(tty);
- spin_lock(&port->slock);
-}
-
-static void mxser_transmit_chars(struct mxser_port *port)
-{
- int count, cnt;
-
- if (port->x_char) {
- outb(port->x_char, port->ioaddr + UART_TX);
- port->x_char = 0;
- mxvar_log.txcnt[port->tty->index]++;
- port->mon_data.txcnt++;
- port->mon_data.up_txcnt++;
- port->icount.tx++;
- return;
- }
-
- if (port->xmit_buf == 0)
- return;
-
- if ((port->xmit_cnt <= 0) || port->tty->stopped ||
- (port->tty->hw_stopped &&
- (port->type != PORT_16550A) &&
- (!port->board->chip_flag))) {
- port->IER &= ~UART_IER_THRI;
- outb(port->IER, port->ioaddr + UART_IER);
- return;
- }
-
- cnt = port->xmit_cnt;
- count = port->xmit_fifo_size;
- do {
- outb(port->xmit_buf[port->xmit_tail++],
- port->ioaddr + UART_TX);
- port->xmit_tail = port->xmit_tail & (SERIAL_XMIT_SIZE - 1);
- if (--port->xmit_cnt <= 0)
- break;
- } while (--count > 0);
- mxvar_log.txcnt[port->tty->index] += (cnt - port->xmit_cnt);
-
- port->mon_data.txcnt += (cnt - port->xmit_cnt);
- port->mon_data.up_txcnt += (cnt - port->xmit_cnt);
- port->icount.tx += (cnt - port->xmit_cnt);
-
- if (port->xmit_cnt < WAKEUP_CHARS)
- tty_wakeup(port->tty);
-
- if (port->xmit_cnt <= 0) {
- port->IER &= ~UART_IER_THRI;
- outb(port->IER, port->ioaddr + UART_IER);
- }
-}
-
-/*
- * This is the serial driver's generic interrupt routine
- */
-static irqreturn_t mxser_interrupt(int irq, void *dev_id)
-{
- int status, iir, i;
- struct mxser_board *brd = NULL;
- struct mxser_port *port;
- int max, irqbits, bits, msr;
- unsigned int int_cnt, pass_counter = 0;
- int handled = IRQ_NONE;
-
- for (i = 0; i < MXSER_BOARDS; i++)
- if (dev_id == &mxser_boards[i]) {
- brd = dev_id;
- break;
- }
-
- if (i == MXSER_BOARDS)
- goto irq_stop;
- if (brd == NULL)
- goto irq_stop;
- max = brd->info->nports;
- while (pass_counter++ < MXSER_ISR_PASS_LIMIT) {
- irqbits = inb(brd->vector) & brd->vector_mask;
- if (irqbits == brd->vector_mask)
- break;
-
- handled = IRQ_HANDLED;
- for (i = 0, bits = 1; i < max; i++, irqbits |= bits, bits <<= 1) {
- if (irqbits == brd->vector_mask)
- break;
- if (bits & irqbits)
- continue;
- port = &brd->ports[i];
-
- int_cnt = 0;
- spin_lock(&port->slock);
- do {
- iir = inb(port->ioaddr + UART_IIR);
- if (iir & UART_IIR_NO_INT)
- break;
- iir &= MOXA_MUST_IIR_MASK;
- if (!port->tty ||
- (port->flags & ASYNC_CLOSING) ||
- !(port->flags &
- ASYNC_INITIALIZED)) {
- status = inb(port->ioaddr + UART_LSR);
- outb(0x27, port->ioaddr + UART_FCR);
- inb(port->ioaddr + UART_MSR);
- break;
- }
-
- status = inb(port->ioaddr + UART_LSR);
-
- if (status & UART_LSR_PE)
- port->err_shadow |= NPPI_NOTIFY_PARITY;
- if (status & UART_LSR_FE)
- port->err_shadow |= NPPI_NOTIFY_FRAMING;
- if (status & UART_LSR_OE)
- port->err_shadow |=
- NPPI_NOTIFY_HW_OVERRUN;
- if (status & UART_LSR_BI)
- port->err_shadow |= NPPI_NOTIFY_BREAK;
-
- if (port->board->chip_flag) {
- if (iir == MOXA_MUST_IIR_GDA ||
- iir == MOXA_MUST_IIR_RDA ||
- iir == MOXA_MUST_IIR_RTO ||
- iir == MOXA_MUST_IIR_LSR)
- mxser_receive_chars(port,
- &status);
-
- } else {
- status &= port->read_status_mask;
- if (status & UART_LSR_DR)
- mxser_receive_chars(port,
- &status);
- }
- msr = inb(port->ioaddr + UART_MSR);
- if (msr & UART_MSR_ANY_DELTA)
- mxser_check_modem_status(port, msr);
-
- if (port->board->chip_flag) {
- if (iir == 0x02 && (status &
- UART_LSR_THRE))
- mxser_transmit_chars(port);
- } else {
- if (status & UART_LSR_THRE)
- mxser_transmit_chars(port);
- }
- } while (int_cnt++ < MXSER_ISR_PASS_LIMIT);
- spin_unlock(&port->slock);
- }
- }
-
-irq_stop:
- return handled;
-}
-
-static const struct tty_operations mxser_ops = {
- .open = mxser_open,
- .close = mxser_close,
- .write = mxser_write,
- .put_char = mxser_put_char,
- .flush_chars = mxser_flush_chars,
- .write_room = mxser_write_room,
- .chars_in_buffer = mxser_chars_in_buffer,
- .flush_buffer = mxser_flush_buffer,
- .ioctl = mxser_ioctl,
- .throttle = mxser_throttle,
- .unthrottle = mxser_unthrottle,
- .set_termios = mxser_set_termios,
- .stop = mxser_stop,
- .start = mxser_start,
- .hangup = mxser_hangup,
- .break_ctl = mxser_rs_break,
- .wait_until_sent = mxser_wait_until_sent,
- .tiocmget = mxser_tiocmget,
- .tiocmset = mxser_tiocmset,
-};
-
-/*
- * The MOXA Smartio/Industio serial driver boot-time initialization code!
- */
-
-static void mxser_release_res(struct mxser_board *brd, struct pci_dev *pdev,
- unsigned int irq)
-{
- if (irq)
- free_irq(brd->irq, brd);
- if (pdev != NULL) { /* PCI */
-#ifdef CONFIG_PCI
- pci_release_region(pdev, 2);
- pci_release_region(pdev, 3);
-#endif
- } else {
- release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
- release_region(brd->vector, 1);
- }
-}
-
-static int __devinit mxser_initbrd(struct mxser_board *brd,
- struct pci_dev *pdev)
-{
- struct mxser_port *info;
- unsigned int i;
- int retval;
-
- printk(KERN_INFO "max. baud rate = %d bps.\n", brd->ports[0].max_baud);
-
- for (i = 0; i < brd->info->nports; i++) {
- info = &brd->ports[i];
- info->board = brd;
- info->stop_rx = 0;
- info->ldisc_stop_rx = 0;
-
- /* Enhance mode enabled here */
- if (brd->chip_flag != MOXA_OTHER_UART)
- ENABLE_MOXA_MUST_ENCHANCE_MODE(info->ioaddr);
-
- info->flags = ASYNC_SHARE_IRQ;
- info->type = brd->uart_type;
-
- process_txrx_fifo(info);
-
- info->custom_divisor = info->baud_base * 16;
- info->close_delay = 5 * HZ / 10;
- info->closing_wait = 30 * HZ;
- info->normal_termios = mxvar_sdriver->init_termios;
- init_waitqueue_head(&info->open_wait);
- init_waitqueue_head(&info->delta_msr_wait);
- info->speed = 9600;
- memset(&info->mon_data, 0, sizeof(struct mxser_mon));
- info->err_shadow = 0;
- spin_lock_init(&info->slock);
-
- /* before set INT ISR, disable all int */
- outb(inb(info->ioaddr + UART_IER) & 0xf0,
- info->ioaddr + UART_IER);
- }
-
- retval = request_irq(brd->irq, mxser_interrupt, IRQF_SHARED, "mxser",
- brd);
- if (retval) {
- printk(KERN_ERR "Board %s: Request irq failed, IRQ (%d) may "
- "conflict with another device.\n",
- brd->info->name, brd->irq);
- /* We hold resources, we need to release them. */
- mxser_release_res(brd, pdev, 0);
- }
- return retval;
-}
-
-static int __init mxser_get_ISA_conf(int cap, struct mxser_board *brd)
-{
- int id, i, bits;
- unsigned short regs[16], irq;
- unsigned char scratch, scratch2;
-
- brd->chip_flag = MOXA_OTHER_UART;
-
- id = mxser_read_register(cap, regs);
- switch (id) {
- case C168_ASIC_ID:
- brd->info = &mxser_cards[0];
- break;
- case C104_ASIC_ID:
- brd->info = &mxser_cards[1];
- break;
- case CI104J_ASIC_ID:
- brd->info = &mxser_cards[2];
- break;
- case C102_ASIC_ID:
- brd->info = &mxser_cards[5];
- break;
- case CI132_ASIC_ID:
- brd->info = &mxser_cards[6];
- break;
- case CI134_ASIC_ID:
- brd->info = &mxser_cards[7];
- break;
- default:
- return 0;
- }
-
- irq = 0;
- /* some ISA cards have 2 ports, but we want to see them as 4-port (why?)
- Flag-hack checks if configuration should be read as 2-port here. */
- if (brd->info->nports == 2 || (brd->info->flags & MXSER_HAS2)) {
- irq = regs[9] & 0xF000;
- irq = irq | (irq >> 4);
- if (irq != (regs[9] & 0xFF00))
- return MXSER_ERR_IRQ_CONFLIT;
- } else if (brd->info->nports == 4) {
- irq = regs[9] & 0xF000;
- irq = irq | (irq >> 4);
- irq = irq | (irq >> 8);
- if (irq != regs[9])
- return MXSER_ERR_IRQ_CONFLIT;
- } else if (brd->info->nports == 8) {
- irq = regs[9] & 0xF000;
- irq = irq | (irq >> 4);
- irq = irq | (irq >> 8);
- if ((irq != regs[9]) || (irq != regs[10]))
- return MXSER_ERR_IRQ_CONFLIT;
- }
-
- if (!irq)
- return MXSER_ERR_IRQ;
- brd->irq = ((int)(irq & 0xF000) >> 12);
- for (i = 0; i < 8; i++)
- brd->ports[i].ioaddr = (int) regs[i + 1] & 0xFFF8;
- if ((regs[12] & 0x80) == 0)
- return MXSER_ERR_VECTOR;
- brd->vector = (int)regs[11]; /* interrupt vector */
- if (id == 1)
- brd->vector_mask = 0x00FF;
- else
- brd->vector_mask = 0x000F;
- for (i = 7, bits = 0x0100; i >= 0; i--, bits <<= 1) {
- if (regs[12] & bits) {
- brd->ports[i].baud_base = 921600;
- brd->ports[i].max_baud = 921600;
- } else {
- brd->ports[i].baud_base = 115200;
- brd->ports[i].max_baud = 115200;
- }
- }
- scratch2 = inb(cap + UART_LCR) & (~UART_LCR_DLAB);
- outb(scratch2 | UART_LCR_DLAB, cap + UART_LCR);
- outb(0, cap + UART_EFR); /* EFR is the same as FCR */
- outb(scratch2, cap + UART_LCR);
- outb(UART_FCR_ENABLE_FIFO, cap + UART_FCR);
- scratch = inb(cap + UART_IIR);
-
- if (scratch & 0xC0)
- brd->uart_type = PORT_16550A;
- else
- brd->uart_type = PORT_16450;
- if (!request_region(brd->ports[0].ioaddr, 8 * brd->info->nports,
- "mxser(IO)"))
- return MXSER_ERR_IOADDR;
- if (!request_region(brd->vector, 1, "mxser(vector)")) {
- release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
- return MXSER_ERR_VECTOR;
- }
- return brd->info->nports;
-}
-
-static int __devinit mxser_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
-#ifdef CONFIG_PCI
- struct mxser_board *brd;
- unsigned int i, j;
- unsigned long ioaddress;
- int retval = -EINVAL;
-
- for (i = 0; i < MXSER_BOARDS; i++)
- if (mxser_boards[i].info == NULL)
- break;
-
- if (i >= MXSER_BOARDS) {
- printk(KERN_ERR "Too many Smartio/Industio family boards found "
- "(maximum %d), board not configured\n", MXSER_BOARDS);
- goto err;
- }
-
- brd = &mxser_boards[i];
- brd->idx = i * MXSER_PORTS_PER_BOARD;
- printk(KERN_INFO "Found MOXA %s board (BusNo=%d, DevNo=%d)\n",
- mxser_cards[ent->driver_data].name,
- pdev->bus->number, PCI_SLOT(pdev->devfn));
-
- retval = pci_enable_device(pdev);
- if (retval) {
- printk(KERN_ERR "Moxa SmartI/O PCI enable fail !\n");
- goto err;
- }
-
- /* io address */
- ioaddress = pci_resource_start(pdev, 2);
- retval = pci_request_region(pdev, 2, "mxser(IO)");
- if (retval)
- goto err;
-
- brd->info = &mxser_cards[ent->driver_data];
- for (i = 0; i < brd->info->nports; i++)
- brd->ports[i].ioaddr = ioaddress + 8 * i;
-
- /* vector */
- ioaddress = pci_resource_start(pdev, 3);
- retval = pci_request_region(pdev, 3, "mxser(vector)");
- if (retval)
- goto err_relio;
- brd->vector = ioaddress;
-
- /* irq */
- brd->irq = pdev->irq;
-
- brd->chip_flag = CheckIsMoxaMust(brd->ports[0].ioaddr);
- brd->uart_type = PORT_16550A;
- brd->vector_mask = 0;
-
- for (i = 0; i < brd->info->nports; i++) {
- for (j = 0; j < UART_INFO_NUM; j++) {
- if (Gpci_uart_info[j].type == brd->chip_flag) {
- brd->ports[i].max_baud =
- Gpci_uart_info[j].max_baud;
-
- /* exception....CP-102 */
- if (brd->info->flags & MXSER_HIGHBAUD)
- brd->ports[i].max_baud = 921600;
- break;
- }
- }
- }
-
- if (brd->chip_flag == MOXA_MUST_MU860_HWID) {
- for (i = 0; i < brd->info->nports; i++) {
- if (i < 4)
- brd->ports[i].opmode_ioaddr = ioaddress + 4;
- else
- brd->ports[i].opmode_ioaddr = ioaddress + 0x0c;
- }
- outb(0, ioaddress + 4); /* default set to RS232 mode */
- outb(0, ioaddress + 0x0c); /* default set to RS232 mode */
- }
-
- for (i = 0; i < brd->info->nports; i++) {
- brd->vector_mask |= (1 << i);
- brd->ports[i].baud_base = 921600;
- }
-
- /* mxser_initbrd will hook ISR. */
- retval = mxser_initbrd(brd, pdev);
- if (retval)
- goto err_null;
-
- for (i = 0; i < brd->info->nports; i++)
- tty_register_device(mxvar_sdriver, brd->idx + i, &pdev->dev);
-
- pci_set_drvdata(pdev, brd);
-
- return 0;
-err_relio:
- pci_release_region(pdev, 2);
-err_null:
- brd->info = NULL;
-err:
- return retval;
-#else
- return -ENODEV;
-#endif
-}
-
-static void __devexit mxser_remove(struct pci_dev *pdev)
-{
- struct mxser_board *brd = pci_get_drvdata(pdev);
- unsigned int i;
-
- for (i = 0; i < brd->info->nports; i++)
- tty_unregister_device(mxvar_sdriver, brd->idx + i);
-
- mxser_release_res(brd, pdev, 1);
- brd->info = NULL;
-}
-
-static struct pci_driver mxser_driver = {
- .name = "mxser",
- .id_table = mxser_pcibrds,
- .probe = mxser_probe,
- .remove = __devexit_p(mxser_remove)
-};
-
-static int __init mxser_module_init(void)
-{
- struct mxser_board *brd;
- unsigned long cap;
- unsigned int i, m, isaloop;
- int retval, b;
-
- pr_debug("Loading module mxser ...\n");
-
- mxvar_sdriver = alloc_tty_driver(MXSER_PORTS + 1);
- if (!mxvar_sdriver)
- return -ENOMEM;
-
- printk(KERN_INFO "MOXA Smartio/Industio family driver version %s\n",
- MXSER_VERSION);
-
- /* Initialize the tty_driver structure */
- mxvar_sdriver->owner = THIS_MODULE;
- mxvar_sdriver->magic = TTY_DRIVER_MAGIC;
- mxvar_sdriver->name = "ttyMI";
- mxvar_sdriver->major = ttymajor;
- mxvar_sdriver->minor_start = 0;
- mxvar_sdriver->num = MXSER_PORTS + 1;
- mxvar_sdriver->type = TTY_DRIVER_TYPE_SERIAL;
- mxvar_sdriver->subtype = SERIAL_TYPE_NORMAL;
- mxvar_sdriver->init_termios = tty_std_termios;
- mxvar_sdriver->init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL;
- mxvar_sdriver->flags = TTY_DRIVER_REAL_RAW|TTY_DRIVER_DYNAMIC_DEV;
- tty_set_operations(mxvar_sdriver, &mxser_ops);
-
- retval = tty_register_driver(mxvar_sdriver);
- if (retval) {
- printk(KERN_ERR "Couldn't install MOXA Smartio/Industio family "
- "tty driver !\n");
- goto err_put;
- }
-
- mxvar_diagflag = 0;
-
- m = 0;
- /* Start finding ISA boards here */
- for (isaloop = 0; isaloop < 2; isaloop++)
- for (b = 0; b < MXSER_BOARDS && m < MXSER_BOARDS; b++) {
- if (!isaloop)
- cap = mxserBoardCAP[b]; /* predefined */
- else
- cap = ioaddr[b]; /* module param */
-
- if (!cap)
- continue;
-
- brd = &mxser_boards[m];
- retval = mxser_get_ISA_conf(cap, brd);
-
- if (retval != 0)
- printk(KERN_INFO "Found MOXA %s board "
- "(CAP=0x%x)\n",
- brd->info->name, ioaddr[b]);
-
- if (retval <= 0) {
- if (retval == MXSER_ERR_IRQ)
- printk(KERN_ERR "Invalid interrupt "
- "number, board not "
- "configured\n");
- else if (retval == MXSER_ERR_IRQ_CONFLIT)
- printk(KERN_ERR "Invalid interrupt "
- "number, board not "
- "configured\n");
- else if (retval == MXSER_ERR_VECTOR)
- printk(KERN_ERR "Invalid interrupt "
- "vector, board not "
- "configured\n");
- else if (retval == MXSER_ERR_IOADDR)
- printk(KERN_ERR "Invalid I/O address, "
- "board not configured\n");
-
- brd->info = NULL;
- continue;
- }
-
- /* mxser_initbrd will hook ISR. */
- if (mxser_initbrd(brd, NULL) < 0) {
- brd->info = NULL;
- continue;
- }
-
- brd->idx = m * MXSER_PORTS_PER_BOARD;
- for (i = 0; i < brd->info->nports; i++)
- tty_register_device(mxvar_sdriver, brd->idx + i,
- NULL);
-
- m++;
- }
-
- retval = pci_register_driver(&mxser_driver);
- if (retval) {
- printk(KERN_ERR "Can't register pci driver\n");
- if (!m) {
- retval = -ENODEV;
- goto err_unr;
- } /* else: we have some ISA cards under control */
- }
-
- pr_debug("Done.\n");
-
- return 0;
-err_unr:
- tty_unregister_driver(mxvar_sdriver);
-err_put:
- put_tty_driver(mxvar_sdriver);
- return retval;
-}
-
-static void __exit mxser_module_exit(void)
-{
- unsigned int i, j;
-
- pr_debug("Unloading module mxser ...\n");
-
- pci_unregister_driver(&mxser_driver);
-
- for (i = 0; i < MXSER_BOARDS; i++) /* ISA remains */
- if (mxser_boards[i].info != NULL)
- for (j = 0; j < mxser_boards[i].info->nports; j++)
- tty_unregister_device(mxvar_sdriver,
- mxser_boards[i].idx + j);
- tty_unregister_driver(mxvar_sdriver);
- put_tty_driver(mxvar_sdriver);
-
- for (i = 0; i < MXSER_BOARDS; i++)
- if (mxser_boards[i].info != NULL)
- mxser_release_res(&mxser_boards[i], NULL, 1);
-
- pr_debug("Done.\n");
-}
-
-module_init(mxser_module_init);
-module_exit(mxser_module_exit);
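
The ioctl handler removed above implements the stock TIOCMIWAIT/TIOCGICOUNT pair on top of info->icount and delta_msr_wait. A minimal userspace sketch of how those ioctls are consumed (the /dev/ttyMI0 path is only an example, not taken from this patch):

	/*
	 * Sketch: block until DCD or CTS changes state (TIOCMIWAIT), then read
	 * the per-line interrupt counters (TIOCGICOUNT).
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/serial.h>

	int main(void)
	{
		struct serial_icounter_struct ic;
		int fd = open("/dev/ttyMI0", O_RDONLY | O_NOCTTY);	/* example path */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Sleeps until one of the masked modem lines toggles. */
		if (ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_CTS) < 0) {
			perror("TIOCMIWAIT");
			return 1;
		}
		if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
			printf("dcd=%d cts=%d dsr=%d rng=%d\n",
			       ic.dcd, ic.cts, ic.dsr, ic.rng);
		return 0;
	}
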
diff --git a/drivers/char/mxser_new.h b/drivers/char/mxser_new.h
deleted file mode 100644
index d42f7766c652..000000000000
--- a/drivers/char/mxser_new.h
+++ /dev/null
@@ -1,293 +0,0 @@
-#ifndef _MXSER_H
-#define _MXSER_H
-
-/*
- * Semi-public control interfaces
- */
-
-/*
- * MOXA ioctls
- */
-
-#define MOXA 0x400
-#define MOXA_GETDATACOUNT (MOXA + 23)
-#define MOXA_GET_CONF (MOXA + 35)
-#define MOXA_DIAGNOSE (MOXA + 50)
-#define MOXA_CHKPORTENABLE (MOXA + 60)
-#define MOXA_HighSpeedOn (MOXA + 61)
-#define MOXA_GET_MAJOR (MOXA + 63)
-#define MOXA_GET_CUMAJOR (MOXA + 64)
-#define MOXA_GETMSTATUS (MOXA + 65)
-#define MOXA_SET_OP_MODE (MOXA + 66)
-#define MOXA_GET_OP_MODE (MOXA + 67)
-
-#define RS232_MODE 0
-#define RS485_2WIRE_MODE 1
-#define RS422_MODE 2
-#define RS485_4WIRE_MODE 3
-#define OP_MODE_MASK 3
-
-#define MOXA_SDS_RSTICOUNTER (MOXA + 69)
-#define MOXA_ASPP_OQUEUE (MOXA + 70)
-#define MOXA_ASPP_SETBAUD (MOXA + 71)
-#define MOXA_ASPP_GETBAUD (MOXA + 72)
-#define MOXA_ASPP_MON (MOXA + 73)
-#define MOXA_ASPP_LSTATUS (MOXA + 74)
-#define MOXA_ASPP_MON_EXT (MOXA + 75)
-#define MOXA_SET_BAUD_METHOD (MOXA + 76)
-#define MOXA_SET_SPECIAL_BAUD_RATE (MOXA + 77)
-#define MOXA_GET_SPECIAL_BAUD_RATE (MOXA + 78)
-
-/* --------------------------------------------------- */
-
-#define NPPI_NOTIFY_PARITY 0x01
-#define NPPI_NOTIFY_FRAMING 0x02
-#define NPPI_NOTIFY_HW_OVERRUN 0x04
-#define NPPI_NOTIFY_SW_OVERRUN 0x08
-#define NPPI_NOTIFY_BREAK 0x10
-
-#define NPPI_NOTIFY_CTSHOLD 0x01 /* Tx hold by CTS low */
-#define NPPI_NOTIFY_DSRHOLD 0x02 /* Tx hold by DSR low */
-#define NPPI_NOTIFY_XOFFHOLD 0x08 /* Tx hold by Xoff received */
-#define NPPI_NOTIFY_XOFFXENT 0x10 /* Xoff Sent */
-
-/* follow just for Moxa Must chip define. */
-/* */
-/* when LCR register (offset 0x03) write following value, */
-/* the Must chip will enter enchance mode. And write value */
-/* on EFR (offset 0x02) bit 6,7 to change bank. */
-#define MOXA_MUST_ENTER_ENCHANCE 0xBF
-
-/* when enhance mode enable, access on general bank register */
-#define MOXA_MUST_GDL_REGISTER 0x07
-#define MOXA_MUST_GDL_MASK 0x7F
-#define MOXA_MUST_GDL_HAS_BAD_DATA 0x80
-
-#define MOXA_MUST_LSR_RERR 0x80 /* error in receive FIFO */
-/* enchance register bank select and enchance mode setting register */
-/* when LCR register equal to 0xBF */
-#define MOXA_MUST_EFR_REGISTER 0x02
-/* enchance mode enable */
-#define MOXA_MUST_EFR_EFRB_ENABLE 0x10
-/* enchance reister bank set 0, 1, 2 */
-#define MOXA_MUST_EFR_BANK0 0x00
-#define MOXA_MUST_EFR_BANK1 0x40
-#define MOXA_MUST_EFR_BANK2 0x80
-#define MOXA_MUST_EFR_BANK3 0xC0
-#define MOXA_MUST_EFR_BANK_MASK 0xC0
-
-/* set XON1 value register, when LCR=0xBF and change to bank0 */
-#define MOXA_MUST_XON1_REGISTER 0x04
-
-/* set XON2 value register, when LCR=0xBF and change to bank0 */
-#define MOXA_MUST_XON2_REGISTER 0x05
-
-/* set XOFF1 value register, when LCR=0xBF and change to bank0 */
-#define MOXA_MUST_XOFF1_REGISTER 0x06
-
-/* set XOFF2 value register, when LCR=0xBF and change to bank0 */
-#define MOXA_MUST_XOFF2_REGISTER 0x07
-
-#define MOXA_MUST_RBRTL_REGISTER 0x04
-#define MOXA_MUST_RBRTH_REGISTER 0x05
-#define MOXA_MUST_RBRTI_REGISTER 0x06
-#define MOXA_MUST_THRTL_REGISTER 0x07
-#define MOXA_MUST_ENUM_REGISTER 0x04
-#define MOXA_MUST_HWID_REGISTER 0x05
-#define MOXA_MUST_ECR_REGISTER 0x06
-#define MOXA_MUST_CSR_REGISTER 0x07
-
-/* good data mode enable */
-#define MOXA_MUST_FCR_GDA_MODE_ENABLE 0x20
-/* only good data put into RxFIFO */
-#define MOXA_MUST_FCR_GDA_ONLY_ENABLE 0x10
-
-/* enable CTS interrupt */
-#define MOXA_MUST_IER_ECTSI 0x80
-/* enable RTS interrupt */
-#define MOXA_MUST_IER_ERTSI 0x40
-/* enable Xon/Xoff interrupt */
-#define MOXA_MUST_IER_XINT 0x20
-/* enable GDA interrupt */
-#define MOXA_MUST_IER_EGDAI 0x10
-
-#define MOXA_MUST_RECV_ISR (UART_IER_RDI | MOXA_MUST_IER_EGDAI)
-
-/* GDA interrupt pending */
-#define MOXA_MUST_IIR_GDA 0x1C
-#define MOXA_MUST_IIR_RDA 0x04
-#define MOXA_MUST_IIR_RTO 0x0C
-#define MOXA_MUST_IIR_LSR 0x06
-
-/* recieved Xon/Xoff or specical interrupt pending */
-#define MOXA_MUST_IIR_XSC 0x10
-
-/* RTS/CTS change state interrupt pending */
-#define MOXA_MUST_IIR_RTSCTS 0x20
-#define MOXA_MUST_IIR_MASK 0x3E
-
-#define MOXA_MUST_MCR_XON_FLAG 0x40
-#define MOXA_MUST_MCR_XON_ANY 0x80
-#define MOXA_MUST_MCR_TX_XON 0x08
-
-/* software flow control on chip mask value */
-#define MOXA_MUST_EFR_SF_MASK 0x0F
-/* send Xon1/Xoff1 */
-#define MOXA_MUST_EFR_SF_TX1 0x08
-/* send Xon2/Xoff2 */
-#define MOXA_MUST_EFR_SF_TX2 0x04
-/* send Xon1,Xon2/Xoff1,Xoff2 */
-#define MOXA_MUST_EFR_SF_TX12 0x0C
-/* don't send Xon/Xoff */
-#define MOXA_MUST_EFR_SF_TX_NO 0x00
-/* Tx software flow control mask */
-#define MOXA_MUST_EFR_SF_TX_MASK 0x0C
-/* don't receive Xon/Xoff */
-#define MOXA_MUST_EFR_SF_RX_NO 0x00
-/* receive Xon1/Xoff1 */
-#define MOXA_MUST_EFR_SF_RX1 0x02
-/* receive Xon2/Xoff2 */
-#define MOXA_MUST_EFR_SF_RX2 0x01
-/* receive Xon1,Xon2/Xoff1,Xoff2 */
-#define MOXA_MUST_EFR_SF_RX12 0x03
-/* Rx software flow control mask */
-#define MOXA_MUST_EFR_SF_RX_MASK 0x03
-
-#define ENABLE_MOXA_MUST_ENCHANCE_MODE(baseio) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr |= MOXA_MUST_EFR_EFRB_ENABLE; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define DISABLE_MOXA_MUST_ENCHANCE_MODE(baseio) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_EFRB_ENABLE; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define SET_MOXA_MUST_XON1_VALUE(baseio, Value) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK0; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_XON1_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define SET_MOXA_MUST_XOFF1_VALUE(baseio, Value) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK0; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_XOFF1_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define SET_MOXA_MUST_FIFO_VALUE(info) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((info)->ioaddr+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (info)->ioaddr+UART_LCR);\
- __efr = inb((info)->ioaddr+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK1; \
- outb(__efr, (info)->ioaddr+MOXA_MUST_EFR_REGISTER); \
- outb((u8)((info)->rx_high_water), (info)->ioaddr+ \
- MOXA_MUST_RBRTH_REGISTER); \
- outb((u8)((info)->rx_trigger), (info)->ioaddr+ \
- MOXA_MUST_RBRTI_REGISTER); \
- outb((u8)((info)->rx_low_water), (info)->ioaddr+ \
- MOXA_MUST_RBRTL_REGISTER); \
- outb(__oldlcr, (info)->ioaddr+UART_LCR); \
-} while (0)
-
-#define SET_MOXA_MUST_ENUM_VALUE(baseio, Value) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK2; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_ENUM_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define GET_MOXA_MUST_HARDWARE_ID(baseio, pId) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK2; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- *pId = inb((baseio)+MOXA_MUST_HWID_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(baseio) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_MASK; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \
- __efr |= MOXA_MUST_EFR_SF_TX1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \
- __efr |= MOXA_MUST_EFR_SF_RX1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#endif
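
Every SET_MOXA_MUST_* / GET_MOXA_MUST_* macro above follows the bank-switch sequence the header comment describes: raise LCR to 0xBF to enter enhanced mode, select a register bank through EFR bits 6-7, access the banked register, then restore LCR. A sketch of that shared pattern written as one helper — must_write_banked() is an illustrative name, not a function from the driver:

	/* Sketch only: the common banked-register write sequence. */
	static void must_write_banked(unsigned long baseio, u8 bank, u8 reg, u8 val)
	{
		u8 oldlcr = inb(baseio + UART_LCR);
		u8 efr;

		outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);	/* LCR = 0xBF */
		efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
		efr &= ~MOXA_MUST_EFR_BANK_MASK;	/* EFR bits 6-7 select the bank */
		efr |= bank;
		outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
		outb(val, baseio + reg);		/* banked register access */
		outb(oldlcr, baseio + UART_LCR);	/* leave enhanced mode */
	}
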
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index 596c7173997b..90c3969012a3 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -695,17 +695,16 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
return;
}
- if (tty->stopped && !tty->flow_stopped &&
- I_IXON(tty) && I_IXANY(tty)) {
- start_tty(tty);
- return;
- }
-
if (I_ISTRIP(tty))
c &= 0x7f;
if (I_IUCLC(tty) && L_IEXTEN(tty))
c=tolower(c);
+ if (tty->stopped && !tty->flow_stopped && I_IXON(tty) &&
+ ((I_IXANY(tty) && c != START_CHAR(tty) && c != STOP_CHAR(tty)) ||
+ c == INTR_CHAR(tty) || c == QUIT_CHAR(tty)))
+ start_tty(tty);
+
if (tty->closing) {
if (I_IXON(tty)) {
if (c == START_CHAR(tty))
@@ -769,7 +768,21 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
signal = SIGTSTP;
if (c == SUSP_CHAR(tty)) {
send_signal:
- isig(signal, tty, 0);
+ /*
+ * Echo character, and then send the signal.
+ * Note that we do not use isig() here because we want
+ * the order to be:
+ * 1) flush, 2) echo, 3) signal
+ */
+ if (!L_NOFLSH(tty)) {
+ n_tty_flush_buffer(tty);
+ if (tty->driver->flush_buffer)
+ tty->driver->flush_buffer(tty);
+ }
+ if (L_ECHO(tty))
+ echo_char(c, tty);
+ if (tty->pgrp)
+ kill_pgrp(tty->pgrp, signal, 1);
return;
}
}
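
The first n_tty.c hunk narrows the IXANY restart so that the start/stop characters themselves no longer count, while INTR and QUIT always restart stopped output. A small userspace sketch, assuming an ordinary interactive tty, of how the IXON/IXANY bits it tests are switched on:

	/* Sketch: enable ^S/^Q flow control plus restart-on-any-character. */
	#include <stdio.h>
	#include <termios.h>
	#include <unistd.h>

	int main(void)
	{
		struct termios tio;

		if (tcgetattr(STDIN_FILENO, &tio) < 0) {
			perror("tcgetattr");
			return 1;
		}
		tio.c_iflag |= IXON | IXANY;	/* ^S stops output, most other keys restart it */
		if (tcsetattr(STDIN_FILENO, TCSANOW, &tio) < 0) {
			perror("tcsetattr");
			return 1;
		}
		return 0;
	}
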
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index 6076e662886a..dfaab2322de3 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -2,7 +2,7 @@
* nozomi.c -- HSDPA driver Broadband Wireless Data Card - Globe Trotter
*
* Written by: Ulf Jakobsson,
- * Jan �erfeldt,
+ * Jan Åkerfeldt,
* Stefan Thomasson,
*
* Maintained by: Paul Hardwick (p.hardwick@option.com)
@@ -38,60 +38,6 @@
* --------------------------------------------------------------------------
*/
-/*
- * CHANGELOG
- * Version 2.1d
- * 11-November-2007 Jiri Slaby, Frank Seidel
- * - Big rework of multicard support by Jiri
- * - Major cleanups (semaphore to mutex, endianess, no major reservation)
- * - Optimizations
- *
- * Version 2.1c
- * 30-October-2007 Frank Seidel
- * - Completed multicard support
- * - Minor cleanups
- *
- * Version 2.1b
- * 07-August-2007 Frank Seidel
- * - Minor cleanups
- * - theoretical multicard support
- *
- * Version 2.1
- * 03-July-2006 Paul Hardwick
- *
- * - Stability Improvements. Incorporated spinlock wraps patch.
- * - Updated for newer 2.6.14+ kernels (tty_buffer_request_room)
- * - using __devexit macro for tty
- *
- *
- * Version 2.0
- * 08-feb-2006 15:34:10:Ulf
- *
- * -Fixed issue when not waking up line disipine layer, could probably result
- * in better uplink performance for 2.4.
- *
- * -Fixed issue with big endian during initalization, now proper toggle flags
- * are handled between preloader and maincode.
- *
- * -Fixed flow control issue.
- *
- * -Added support for setting DTR.
- *
- * -For 2.4 kernels, removing temporary buffer that's not needed.
- *
- * -Reading CTS only for modem port (only port that supports it).
- *
- * -Return 0 in write_room instead of netative value, it's not handled in
- * upper layer.
- *
- * --------------------------------------------------------------------------
- * Version 1.0
- *
- * First version of driver, only tested with card of type F32_2.
- * Works fine with 2.4 and 2.6 kernels.
- * Driver also support big endian architecture.
- */
-
/* Enable this to have a lot of debug printouts */
#define DEBUG
@@ -143,8 +89,9 @@ do { \
/* Do we need this settable at runtime? */
static int debug = NOZOMI_DEBUG_LEVEL;
-#define D(lvl, args...) do {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \
- while (0)
+#define D(lvl, args...) do \
+ {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \
+ while (0)
#define D_(lvl, args...) D(lvl, ##args)
/* These printouts are always printed */
@@ -273,13 +220,13 @@ enum port_type {
/* Big endian */
struct toggles {
- unsigned enabled:5; /*
+ unsigned int enabled:5; /*
* Toggle fields are valid if enabled is 0,
* else A-channels must always be used.
*/
- unsigned diag_dl:1;
- unsigned mdm_dl:1;
- unsigned mdm_ul:1;
+ unsigned int diag_dl:1;
+ unsigned int mdm_dl:1;
+ unsigned int mdm_ul:1;
} __attribute__ ((packed));
/* Configuration table to read at startup of card */
@@ -320,19 +267,19 @@ struct config_table {
/* This stores all control downlink flags */
struct ctrl_dl {
u8 port;
- unsigned reserved:4;
- unsigned CTS:1;
- unsigned RI:1;
- unsigned DCD:1;
- unsigned DSR:1;
+ unsigned int reserved:4;
+ unsigned int CTS:1;
+ unsigned int RI:1;
+ unsigned int DCD:1;
+ unsigned int DSR:1;
} __attribute__ ((packed));
/* This stores all control uplink flags */
struct ctrl_ul {
u8 port;
- unsigned reserved:6;
- unsigned RTS:1;
- unsigned DTR:1;
+ unsigned int reserved:6;
+ unsigned int RTS:1;
+ unsigned int DTR:1;
} __attribute__ ((packed));
#else
@@ -340,10 +287,10 @@ struct ctrl_ul {
/* This represents the toggle information */
struct toggles {
- unsigned mdm_ul:1;
- unsigned mdm_dl:1;
- unsigned diag_dl:1;
- unsigned enabled:5; /*
+ unsigned int mdm_ul:1;
+ unsigned int mdm_dl:1;
+ unsigned int diag_dl:1;
+ unsigned int enabled:5; /*
* Toggle fields are valid if enabled is 0,
* else A-channels must always be used.
*/
@@ -379,19 +326,19 @@ struct config_table {
/* This stores all control downlink flags */
struct ctrl_dl {
- unsigned DSR:1;
- unsigned DCD:1;
- unsigned RI:1;
- unsigned CTS:1;
- unsigned reserverd:4;
+ unsigned int DSR:1;
+ unsigned int DCD:1;
+ unsigned int RI:1;
+ unsigned int CTS:1;
+ unsigned int reserverd:4;
u8 port;
} __attribute__ ((packed));
/* This stores all control uplink flags */
struct ctrl_ul {
- unsigned DTR:1;
- unsigned RTS:1;
- unsigned reserved:6;
+ unsigned int DTR:1;
+ unsigned int RTS:1;
+ unsigned int reserved:6;
u8 port;
} __attribute__ ((packed));
#endif
@@ -448,7 +395,7 @@ struct buffer {
} __attribute__ ((packed));
/* Global variables */
-static struct pci_device_id nozomi_pci_tbl[] = {
+static const struct pci_device_id nozomi_pci_tbl[] __devinitconst = {
{PCI_DEVICE(VENDOR1, DEVICE1)},
{},
};
@@ -524,12 +471,12 @@ out:
* -Optimize
* -Rewrite cleaner
*/
-static u32 write_mem32(void __iomem *mem_addr_start, u32 *buf,
+static u32 write_mem32(void __iomem *mem_addr_start, const u32 *buf,
u32 size_bytes)
{
u32 i = 0;
u32 *ptr = (__force u32 *) mem_addr_start;
- u16 *buf16;
+ const u16 *buf16;
if (unlikely(!ptr || !buf))
return 0;
@@ -537,7 +484,7 @@ static u32 write_mem32(void __iomem *mem_addr_start, u32 *buf,
/* shortcut for extremely often used cases */
switch (size_bytes) {
case 2: /* 2 bytes */
- buf16 = (u16 *) buf;
+ buf16 = (const u16 *)buf;
writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
return 2;
break;
@@ -554,7 +501,7 @@ static u32 write_mem32(void __iomem *mem_addr_start, u32 *buf,
while (i < size_bytes) {
if (size_bytes - i == 2) {
/* 2 bytes */
- buf16 = (u16 *) buf;
+ buf16 = (const u16 *)buf;
writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
i += 2;
} else {
@@ -694,7 +641,7 @@ static void dump_table(const struct nozomi *dc)
dc->config_table.ul_ctrl_len);
}
#else
-static __inline__ void dump_table(const struct nozomi *dc) { }
+static inline void dump_table(const struct nozomi *dc) { }
#endif
/*
@@ -776,8 +723,7 @@ static int nozomi_read_config_table(struct nozomi *dc)
/* Enable uplink interrupts */
static void enable_transmit_ul(enum port_type port, struct nozomi *dc)
{
- u16 mask[NOZOMI_MAX_PORTS] = \
- {MDM_UL, DIAG_UL, APP1_UL, APP2_UL, CTRL_UL};
+ static const u16 mask[] = {MDM_UL, DIAG_UL, APP1_UL, APP2_UL, CTRL_UL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier |= mask[port];
@@ -790,8 +736,8 @@ static void enable_transmit_ul(enum port_type port, struct nozomi *dc)
/* Disable uplink interrupts */
static void disable_transmit_ul(enum port_type port, struct nozomi *dc)
{
- u16 mask[NOZOMI_MAX_PORTS] = \
- {~MDM_UL, ~DIAG_UL, ~APP1_UL, ~APP2_UL, ~CTRL_UL};
+ static const u16 mask[] =
+ {~MDM_UL, ~DIAG_UL, ~APP1_UL, ~APP2_UL, ~CTRL_UL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier &= mask[port];
@@ -804,8 +750,7 @@ static void disable_transmit_ul(enum port_type port, struct nozomi *dc)
/* Enable downlink interrupts */
static void enable_transmit_dl(enum port_type port, struct nozomi *dc)
{
- u16 mask[NOZOMI_MAX_PORTS] = \
- {MDM_DL, DIAG_DL, APP1_DL, APP2_DL, CTRL_DL};
+ static const u16 mask[] = {MDM_DL, DIAG_DL, APP1_DL, APP2_DL, CTRL_DL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier |= mask[port];
@@ -818,8 +763,8 @@ static void enable_transmit_dl(enum port_type port, struct nozomi *dc)
/* Disable downlink interrupts */
static void disable_transmit_dl(enum port_type port, struct nozomi *dc)
{
- u16 mask[NOZOMI_MAX_PORTS] = \
- {~MDM_DL, ~DIAG_DL, ~APP1_DL, ~APP2_DL, ~CTRL_DL};
+ static const u16 mask[] =
+ {~MDM_DL, ~DIAG_DL, ~APP1_DL, ~APP2_DL, ~CTRL_DL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier &= mask[port];
@@ -833,13 +778,13 @@ static void disable_transmit_dl(enum port_type port, struct nozomi *dc)
* Return 1 - send buffer to card and ack.
* Return 0 - don't ack, don't send buffer to card.
*/
-static int send_data(enum port_type index, struct nozomi *dc)
+static int send_data(enum port_type index, const struct nozomi *dc)
{
u32 size = 0;
- struct port *port = &dc->port[index];
- u8 toggle = port->toggle_ul;
+ const struct port *port = &dc->port[index];
+ const u8 toggle = port->toggle_ul;
void __iomem *addr = port->ul_addr[toggle];
- u32 ul_size = port->ul_size[toggle];
+ const u32 ul_size = port->ul_size[toggle];
struct tty_struct *tty = port->tty;
/* Get data from tty and place in buf for now */
@@ -1102,7 +1047,7 @@ static int send_flow_control(struct nozomi *dc)
}
/*
- * Handle donlink data, ports that are handled are modem and diagnostics
+ * Handle downlink data, ports that are handled are modem and diagnostics
* Return 1 - ok
* Return 0 - toggle fields are out of sync
*/
@@ -1359,20 +1304,20 @@ static void nozomi_setup_private_data(struct nozomi *dc)
static ssize_t card_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
+ const struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
return sprintf(buf, "%d\n", dc->card_type);
}
-static DEVICE_ATTR(card_type, 0444, card_type_show, NULL);
+static DEVICE_ATTR(card_type, S_IRUGO, card_type_show, NULL);
static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
+ const struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
return sprintf(buf, "%u\n", dc->open_ttys);
}
-static DEVICE_ATTR(open_ttys, 0444, open_ttys_show, NULL);
+static DEVICE_ATTR(open_ttys, S_IRUGO, open_ttys_show, NULL);
static void make_sysfs_files(struct nozomi *dc)
{
@@ -1735,7 +1680,7 @@ static int ntty_write_room(struct tty_struct *tty)
{
struct port *port = tty->driver_data;
int room = 0;
- struct nozomi *dc = get_dc_by_tty(tty);
+ const struct nozomi *dc = get_dc_by_tty(tty);
if (!dc || !port)
return 0;
@@ -1755,9 +1700,9 @@ exit:
/* Gets io control parameters */
static int ntty_tiocmget(struct tty_struct *tty, struct file *file)
{
- struct port *port = tty->driver_data;
- struct ctrl_dl *ctrl_dl = &port->ctrl_dl;
- struct ctrl_ul *ctrl_ul = &port->ctrl_ul;
+ const struct port *port = tty->driver_data;
+ const struct ctrl_dl *ctrl_dl = &port->ctrl_dl;
+ const struct ctrl_ul *ctrl_ul = &port->ctrl_ul;
return (ctrl_ul->RTS ? TIOCM_RTS : 0) |
(ctrl_ul->DTR ? TIOCM_DTR : 0) |
@@ -1787,7 +1732,7 @@ static int ntty_tiocmset(struct tty_struct *tty, struct file *file,
static int ntty_cflags_changed(struct port *port, unsigned long flags,
struct async_icount *cprev)
{
- struct async_icount cnow = port->tty_icount;
+ const struct async_icount cnow = port->tty_icount;
int ret;
ret = ((flags & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
@@ -1802,7 +1747,7 @@ static int ntty_cflags_changed(struct port *port, unsigned long flags,
static int ntty_ioctl_tiocgicount(struct port *port, void __user *argp)
{
- struct async_icount cnow = port->tty_icount;
+ const struct async_icount cnow = port->tty_icount;
struct serial_icounter_struct icount;
icount.cts = cnow.cts;
@@ -1882,7 +1827,10 @@ static void ntty_throttle(struct tty_struct *tty)
/* just to discard single character writes */
static void ntty_put_char(struct tty_struct *tty, unsigned char c)
{
- /* FIXME !!! */
+ /*
+ * card does not react correct when we write single chars
+ * to the card, so we discard them
+ */
DBG2("PUT CHAR Function: %c", c);
}
@@ -1910,7 +1858,7 @@ exit_in_buffer:
return rval;
}
-static struct tty_operations tty_ops = {
+static const struct tty_operations tty_ops = {
.ioctl = ntty_ioctl,
.open = ntty_open,
.close = ntty_close,
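
Several nozomi.c hunks turn per-call interrupt-mask arrays into static const tables. A sketch of the difference (the values are illustrative, not the driver's): the automatic array is rebuilt on the stack on every call, while the static const one lives in read-only data and is initialized once.

	#include <stdint.h>

	uint16_t lookup_stack(int i)
	{
		uint16_t mask[5] = { 0x01, 0x02, 0x04, 0x08, 0x10 };	/* re-initialized on each call */
		return mask[i];
	}

	uint16_t lookup_rodata(int i)
	{
		static const uint16_t mask[] = { 0x01, 0x02, 0x04, 0x08, 0x10 };	/* built once, read-only */
		return mask[i];
	}
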
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 02518da6a386..454d7324ba40 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -308,7 +308,8 @@ static unsigned int calc_baudv(unsigned char fidi)
return (wcrcf / wbrcf);
}
-static unsigned short io_read_num_rec_bytes(ioaddr_t iobase, unsigned short *s)
+static unsigned short io_read_num_rec_bytes(unsigned int iobase,
+ unsigned short *s)
{
unsigned short tmp;
@@ -426,7 +427,7 @@ static struct card_fixup card_fixups[] = {
static void set_cardparameter(struct cm4000_dev *dev)
{
int i;
- ioaddr_t iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->io.BasePort1;
u_int8_t stopbits = 0x02; /* ISO default */
DEBUGP(3, dev, "-> set_cardparameter\n");
@@ -459,7 +460,7 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
unsigned short num_bytes_read;
unsigned char pts_reply[4];
ssize_t rc;
- ioaddr_t iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->io.BasePort1;
rc = 0;
@@ -610,7 +611,7 @@ exit_setprotocol:
return rc;
}
-static int io_detect_cm4000(ioaddr_t iobase, struct cm4000_dev *dev)
+static int io_detect_cm4000(unsigned int iobase, struct cm4000_dev *dev)
{
/* note: statemachine is assumed to be reset */
@@ -671,7 +672,7 @@ static void terminate_monitor(struct cm4000_dev *dev)
static void monitor_card(unsigned long p)
{
struct cm4000_dev *dev = (struct cm4000_dev *) p;
- ioaddr_t iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->io.BasePort1;
unsigned short s;
struct ptsreq ptsreq;
int i, atrc;
@@ -933,7 +934,7 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
loff_t *ppos)
{
struct cm4000_dev *dev = filp->private_data;
- ioaddr_t iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->io.BasePort1;
ssize_t rc;
int i, j, k;
@@ -1054,7 +1055,7 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
struct cm4000_dev *dev = (struct cm4000_dev *) filp->private_data;
- ioaddr_t iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->io.BasePort1;
unsigned short s;
unsigned char tmp;
unsigned char infolen;
@@ -1408,7 +1409,7 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct cm4000_dev *dev = filp->private_data;
- ioaddr_t iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->io.BasePort1;
struct pcmcia_device *link;
int size;
int rc;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 8caff0ca80ff..279ff5005cec 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -57,6 +57,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ioctl.h>
+#include <linux/synclink.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -87,8 +88,6 @@
#include <asm/uaccess.h>
-#include "linux/synclink.h"
-
static MGSL_PARAMS default_params = {
MGSL_MODE_HDLC, /* unsigned long mode */
0, /* unsigned char loopback; */
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c511a831f0c0..f43c89f7c449 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1039,6 +1039,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
p += bytes;
add_entropy_words(r, buf, (bytes + 3) / 4);
+ cond_resched();
}
return 0;
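
The random.c hunk above drops a cond_resched() into the pool-write loop so that a very large write from user space cannot monopolize the CPU between natural preemption points. The same idea applies to any driver loop that copies user data in chunks; a minimal sketch of the pattern, with a made-up helper and buffer names that are not from random.c:

	#include <linux/kernel.h>	/* min_t() */
	#include <linux/sched.h>	/* cond_resched() */
	#include <linux/uaccess.h>	/* copy_from_user() */

	/* Illustrative helper: consume a user buffer in fixed-size chunks. */
	static int consume_user_buffer(const char __user *ubuf, size_t count)
	{
		unsigned char chunk[64];

		while (count) {
			size_t bytes = min_t(size_t, count, sizeof(chunk));

			if (copy_from_user(chunk, ubuf, bytes))
				return -EFAULT;

			/* ... hand 'chunk' to the consumer here ... */

			ubuf += bytes;
			count -= bytes;

			/* Let other tasks run during long writes. */
			cond_resched();
		}
		return 0;
	}
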
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 102ece4c4e0e..8fc4fe4e38f1 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -47,6 +47,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/tty_flip.h>
+#include <linux/spinlock.h>
#include <asm/uaccess.h>
@@ -77,10 +78,10 @@
ASYNC_SPD_HI | ASYNC_SPEED_VHI | ASYNC_SESSION_LOCKOUT | \
ASYNC_PGRP_LOCKOUT | ASYNC_CALLOUT_NOHUP)
-#define RS_EVENT_WRITE_WAKEUP 0
-
static struct tty_driver *riscom_driver;
+static DEFINE_SPINLOCK(riscom_lock);
+
static struct riscom_board rc_board[RC_NBOARD] = {
{
.base = RC_IOBASE1,
@@ -217,13 +218,14 @@ static void __init rc_init_CD180(struct riscom_board const * bp)
{
unsigned long flags;
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
rc_out(bp, RC_CTOUT, 0); /* Clear timeout */
rc_wait_CCR(bp); /* Wait for CCR ready */
rc_out(bp, CD180_CCR, CCR_HARDRESET); /* Reset CD180 chip */
- sti();
+ spin_unlock_irqrestore(&riscom_lock, flags);
msleep(50); /* Delay 0.05 sec */
- cli();
+ spin_lock_irqsave(&riscom_lock, flags);
rc_out(bp, CD180_GIVR, RC_ID); /* Set ID for this chip */
rc_out(bp, CD180_GICR, 0); /* Clear all bits */
rc_out(bp, CD180_PILR1, RC_ACK_MINT); /* Prio for modem intr */
@@ -234,7 +236,7 @@ static void __init rc_init_CD180(struct riscom_board const * bp)
rc_out(bp, CD180_PPRH, (RC_OSCFREQ/(1000000/RISCOM_TPS)) >> 8);
rc_out(bp, CD180_PPRL, (RC_OSCFREQ/(1000000/RISCOM_TPS)) & 0xff);
- restore_flags(flags);
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
/* Main probing routine, also sets irq. */
@@ -310,12 +312,6 @@ out_release:
*
*/
-static inline void rc_mark_event(struct riscom_port * port, int event)
-{
- set_bit(event, &port->event);
- schedule_work(&port->tqueue);
-}
-
static inline struct riscom_port * rc_get_port(struct riscom_board const * bp,
unsigned char const * what)
{
@@ -482,7 +478,7 @@ static inline void rc_transmit(struct riscom_board const * bp)
rc_out(bp, CD180_IER, port->IER);
}
if (port->xmit_cnt <= port->wakeup_chars)
- rc_mark_event(port, RS_EVENT_WRITE_WAKEUP);
+ tty_wakeup(tty);
}
static inline void rc_check_modem(struct riscom_board const * bp)
@@ -501,7 +497,7 @@ static inline void rc_check_modem(struct riscom_board const * bp)
if (rc_in(bp, CD180_MSVR) & MSVR_CD)
wake_up_interruptible(&port->open_wait);
else
- schedule_work(&port->tqueue_hangup);
+ tty_hangup(tty);
}
#ifdef RISCOM_BRAIN_DAMAGED_CTS
@@ -510,7 +506,7 @@ static inline void rc_check_modem(struct riscom_board const * bp)
tty->hw_stopped = 0;
port->IER |= IER_TXRDY;
if (port->xmit_cnt <= port->wakeup_chars)
- rc_mark_event(port, RS_EVENT_WRITE_WAKEUP);
+ tty_wakeup(tty);
} else {
tty->hw_stopped = 1;
port->IER &= ~IER_TXRDY;
@@ -522,7 +518,7 @@ static inline void rc_check_modem(struct riscom_board const * bp)
tty->hw_stopped = 0;
port->IER |= IER_TXRDY;
if (port->xmit_cnt <= port->wakeup_chars)
- rc_mark_event(port, RS_EVENT_WRITE_WAKEUP);
+ tty_wakeup(tty);
} else {
tty->hw_stopped = 1;
port->IER &= ~IER_TXRDY;
@@ -812,9 +808,9 @@ static int rc_setup_port(struct riscom_board *bp, struct riscom_port *port)
}
port->xmit_buf = (unsigned char *) tmp;
}
-
- save_flags(flags); cli();
-
+
+ spin_lock_irqsave(&riscom_lock, flags);
+
if (port->tty)
clear_bit(TTY_IO_ERROR, &port->tty->flags);
@@ -825,7 +821,7 @@ static int rc_setup_port(struct riscom_board *bp, struct riscom_port *port)
rc_change_speed(bp, port);
port->flags |= ASYNC_INITIALIZED;
- restore_flags(flags);
+ spin_unlock_irqrestore(&riscom_lock, flags);
return 0;
}
@@ -901,6 +897,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
int retval;
int do_clocal = 0;
int CD;
+ unsigned long flags;
/*
* If the device is in the middle of being closed, then block
@@ -936,19 +933,26 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
*/
retval = 0;
add_wait_queue(&port->open_wait, &wait);
- cli();
+
+ spin_lock_irqsave(&riscom_lock, flags);
+
if (!tty_hung_up_p(filp))
port->count--;
- sti();
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
+
port->blocked_open++;
while (1) {
- cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
rc_out(bp, CD180_CAR, port_No(port));
CD = rc_in(bp, CD180_MSVR) & MSVR_CD;
rc_out(bp, CD180_MSVR, MSVR_RTS);
bp->DTR &= ~(1u << port_No(port));
rc_out(bp, RC_DTR, bp->DTR);
- sti();
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
+
set_current_state(TASK_INTERRUPTIBLE);
if (tty_hung_up_p(filp) ||
!(port->flags & ASYNC_INITIALIZED)) {
@@ -1020,8 +1024,9 @@ static void rc_close(struct tty_struct * tty, struct file * filp)
if (!port || rc_paranoia_check(port, tty->name, "close"))
return;
-
- save_flags(flags); cli();
+
+ spin_lock_irqsave(&riscom_lock, flags);
+
if (tty_hung_up_p(filp))
goto out;
@@ -1078,7 +1083,6 @@ static void rc_close(struct tty_struct * tty, struct file * filp)
tty_ldisc_flush(tty);
tty->closing = 0;
- port->event = 0;
port->tty = NULL;
if (port->blocked_open) {
if (port->close_delay) {
@@ -1088,7 +1092,9 @@ static void rc_close(struct tty_struct * tty, struct file * filp)
}
port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
wake_up_interruptible(&port->close_wait);
-out: restore_flags(flags);
+
+out:
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static int rc_write(struct tty_struct * tty,
@@ -1107,34 +1113,33 @@ static int rc_write(struct tty_struct * tty,
if (!tty || !port->xmit_buf)
return 0;
- save_flags(flags);
while (1) {
- cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
c = min_t(int, count, min(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
SERIAL_XMIT_SIZE - port->xmit_head));
- if (c <= 0) {
- restore_flags(flags);
- break;
- }
+ if (c <= 0)
+ break; /* lock continues to be held */
memcpy(port->xmit_buf + port->xmit_head, buf, c);
port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
port->xmit_cnt += c;
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
buf += c;
count -= c;
total += c;
}
- cli();
if (port->xmit_cnt && !tty->stopped && !tty->hw_stopped &&
!(port->IER & IER_TXRDY)) {
port->IER |= IER_TXRDY;
rc_out(bp, CD180_CAR, port_No(port));
rc_out(bp, CD180_IER, port->IER);
}
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
return total;
}
@@ -1150,7 +1155,7 @@ static void rc_put_char(struct tty_struct * tty, unsigned char ch)
if (!tty || !port->xmit_buf)
return;
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1)
goto out;
@@ -1158,7 +1163,9 @@ static void rc_put_char(struct tty_struct * tty, unsigned char ch)
port->xmit_buf[port->xmit_head++] = ch;
port->xmit_head &= SERIAL_XMIT_SIZE - 1;
port->xmit_cnt++;
-out: restore_flags(flags);
+
+out:
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static void rc_flush_chars(struct tty_struct * tty)
@@ -1173,11 +1180,13 @@ static void rc_flush_chars(struct tty_struct * tty)
!port->xmit_buf)
return;
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
port->IER |= IER_TXRDY;
rc_out(port_Board(port), CD180_CAR, port_No(port));
rc_out(port_Board(port), CD180_IER, port->IER);
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static int rc_write_room(struct tty_struct * tty)
@@ -1212,9 +1221,11 @@ static void rc_flush_buffer(struct tty_struct *tty)
if (rc_paranoia_check(port, tty->name, "rc_flush_buffer"))
return;
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
tty_wakeup(tty);
}
@@ -1231,11 +1242,15 @@ static int rc_tiocmget(struct tty_struct *tty, struct file *file)
return -ENODEV;
bp = port_Board(port);
- save_flags(flags); cli();
+
+ spin_lock_irqsave(&riscom_lock, flags);
+
rc_out(bp, CD180_CAR, port_No(port));
status = rc_in(bp, CD180_MSVR);
result = rc_in(bp, RC_RI) & (1u << port_No(port)) ? 0 : TIOCM_RNG;
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
+
result |= ((status & MSVR_RTS) ? TIOCM_RTS : 0)
| ((status & MSVR_DTR) ? TIOCM_DTR : 0)
| ((status & MSVR_CD) ? TIOCM_CAR : 0)
@@ -1256,7 +1271,8 @@ static int rc_tiocmset(struct tty_struct *tty, struct file *file,
bp = port_Board(port);
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
if (set & TIOCM_RTS)
port->MSVR |= MSVR_RTS;
if (set & TIOCM_DTR)
@@ -1270,7 +1286,9 @@ static int rc_tiocmset(struct tty_struct *tty, struct file *file,
rc_out(bp, CD180_CAR, port_No(port));
rc_out(bp, CD180_MSVR, port->MSVR);
rc_out(bp, RC_DTR, bp->DTR);
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
+
return 0;
}
@@ -1279,7 +1297,8 @@ static inline void rc_send_break(struct riscom_port * port, unsigned long length
struct riscom_board *bp = port_Board(port);
unsigned long flags;
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
port->break_length = RISCOM_TPS / HZ * length;
port->COR2 |= COR2_ETC;
port->IER |= IER_TXRDY;
@@ -1289,7 +1308,8 @@ static inline void rc_send_break(struct riscom_port * port, unsigned long length
rc_wait_CCR(bp);
rc_out(bp, CD180_CCR, CCR_CORCHG2);
rc_wait_CCR(bp);
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static inline int rc_set_serial_info(struct riscom_port * port,
@@ -1298,7 +1318,6 @@ static inline int rc_set_serial_info(struct riscom_port * port,
struct serial_struct tmp;
struct riscom_board *bp = port_Board(port);
int change_speed;
- unsigned long flags;
if (copy_from_user(&tmp, newinfo, sizeof(tmp)))
return -EFAULT;
@@ -1332,9 +1351,11 @@ static inline int rc_set_serial_info(struct riscom_port * port,
port->closing_wait = tmp.closing_wait;
}
if (change_speed) {
- save_flags(flags); cli();
+ unsigned long flags;
+
+ spin_lock_irqsave(&riscom_lock, flags);
rc_change_speed(bp, port);
- restore_flags(flags);
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
return 0;
}
@@ -1414,17 +1435,19 @@ static void rc_throttle(struct tty_struct * tty)
return;
bp = port_Board(port);
-
- save_flags(flags); cli();
+
+ spin_lock_irqsave(&riscom_lock, flags);
+
port->MSVR &= ~MSVR_RTS;
rc_out(bp, CD180_CAR, port_No(port));
- if (I_IXOFF(tty)) {
+ if (I_IXOFF(tty)) {
rc_wait_CCR(bp);
rc_out(bp, CD180_CCR, CCR_SSCH2);
rc_wait_CCR(bp);
}
rc_out(bp, CD180_MSVR, port->MSVR);
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static void rc_unthrottle(struct tty_struct * tty)
@@ -1438,7 +1461,8 @@ static void rc_unthrottle(struct tty_struct * tty)
bp = port_Board(port);
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
port->MSVR |= MSVR_RTS;
rc_out(bp, CD180_CAR, port_No(port));
if (I_IXOFF(tty)) {
@@ -1447,7 +1471,8 @@ static void rc_unthrottle(struct tty_struct * tty)
rc_wait_CCR(bp);
}
rc_out(bp, CD180_MSVR, port->MSVR);
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static void rc_stop(struct tty_struct * tty)
@@ -1461,11 +1486,13 @@ static void rc_stop(struct tty_struct * tty)
bp = port_Board(port);
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
port->IER &= ~IER_TXRDY;
rc_out(bp, CD180_CAR, port_No(port));
rc_out(bp, CD180_IER, port->IER);
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static void rc_start(struct tty_struct * tty)
@@ -1479,32 +1506,15 @@ static void rc_start(struct tty_struct * tty)
bp = port_Board(port);
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) {
port->IER |= IER_TXRDY;
rc_out(bp, CD180_CAR, port_No(port));
rc_out(bp, CD180_IER, port->IER);
}
- restore_flags(flags);
-}
-/*
- * This routine is called from the work queue when the interrupt
- * routine has signalled that a hangup has occurred. The path of
- * hangup processing is:
- *
- * serial interrupt routine -> (workqueue) ->
- * do_rc_hangup() -> tty->hangup() -> rc_hangup()
- *
- */
-static void do_rc_hangup(struct work_struct *ugly_api)
-{
- struct riscom_port *port = container_of(ugly_api, struct riscom_port, tqueue_hangup);
- struct tty_struct *tty;
-
- tty = port->tty;
- if (tty)
- tty_hangup(tty); /* FIXME: module removal race still here */
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static void rc_hangup(struct tty_struct * tty)
@@ -1518,7 +1528,6 @@ static void rc_hangup(struct tty_struct * tty)
bp = port_Board(port);
rc_shutdown_port(bp, port);
- port->event = 0;
port->count = 0;
port->flags &= ~ASYNC_NORMAL_ACTIVE;
port->tty = NULL;
@@ -1537,9 +1546,9 @@ static void rc_set_termios(struct tty_struct * tty, struct ktermios * old_termio
tty->termios->c_iflag == old_termios->c_iflag)
return;
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
rc_change_speed(port_Board(port), port);
- restore_flags(flags);
+ spin_unlock_irqrestore(&riscom_lock, flags);
if ((old_termios->c_cflag & CRTSCTS) &&
!(tty->termios->c_cflag & CRTSCTS)) {
@@ -1548,18 +1557,6 @@ static void rc_set_termios(struct tty_struct * tty, struct ktermios * old_termio
}
}
-static void do_softint(struct work_struct *ugly_api)
-{
- struct riscom_port *port = container_of(ugly_api, struct riscom_port, tqueue);
- struct tty_struct *tty;
-
- if(!(tty = port->tty))
- return;
-
- if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &port->event))
- tty_wakeup(tty);
-}
-
static const struct tty_operations riscom_ops = {
.open = rc_open,
.close = rc_close,
@@ -1580,7 +1577,7 @@ static const struct tty_operations riscom_ops = {
.tiocmset = rc_tiocmset,
};
-static inline int rc_init_drivers(void)
+static int __init rc_init_drivers(void)
{
int error;
int i;
@@ -1612,8 +1609,6 @@ static inline int rc_init_drivers(void)
memset(rc_port, 0, sizeof(rc_port));
for (i = 0; i < RC_NPORT * RC_NBOARD; i++) {
rc_port[i].magic = RISCOM8_MAGIC;
- INIT_WORK(&rc_port[i].tqueue, do_softint);
- INIT_WORK(&rc_port[i].tqueue_hangup, do_rc_hangup);
rc_port[i].close_delay = 50 * HZ/100;
rc_port[i].closing_wait = 3000 * HZ/100;
init_waitqueue_head(&rc_port[i].open_wait);
@@ -1627,11 +1622,12 @@ static void rc_release_drivers(void)
{
unsigned long flags;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
tty_unregister_driver(riscom_driver);
put_tty_driver(riscom_driver);
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
#ifndef MODULE
diff --git a/drivers/char/riscom8.h b/drivers/char/riscom8.h
index 9cc1313d5e67..cdfdf4394477 100644
--- a/drivers/char/riscom8.h
+++ b/drivers/char/riscom8.h
@@ -71,7 +71,6 @@ struct riscom_port {
struct tty_struct * tty;
int count;
int blocked_open;
- unsigned long event; /* long req'd for set_bit --RR */
int timeout;
int close_delay;
unsigned char * xmit_buf;
@@ -81,8 +80,6 @@ struct riscom_port {
int xmit_cnt;
wait_queue_head_t open_wait;
wait_queue_head_t close_wait;
- struct work_struct tqueue;
- struct work_struct tqueue_hangup;
short wakeup_chars;
short break_length;
unsigned short closing_wait;
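
The riscom8 conversion above follows a common pattern for drivers that predate SMP-safe locking: the global save_flags()/cli()/restore_flags() critical sections become a driver-local spinlock, and the write-wakeup work item (RS_EVENT_WRITE_WAKEUP plus do_softint) is dropped in favour of calling tty_wakeup() directly, which is safe from interrupt context. A minimal sketch of that shape, using illustrative names rather than the driver's own:

	#include <linux/spinlock.h>
	#include <linux/tty.h>

	struct example_port {
		int xmit_cnt;
		int wakeup_chars;
	};

	static DEFINE_SPINLOCK(example_lock);	/* replaces cli()/sti() */

	static void example_tx_done(struct example_port *port, struct tty_struct *tty)
	{
		unsigned long flags;
		int remaining;

		spin_lock_irqsave(&example_lock, flags);
		remaining = --port->xmit_cnt;	/* hardware consumed one character */
		spin_unlock_irqrestore(&example_lock, flags);

		/* No deferred work item needed: tty_wakeup() is IRQ-safe. */
		if (remaining <= port->wakeup_chars)
			tty_wakeup(tty);
	}
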
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index d83419c3857e..68c289fe2dc2 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -40,12 +40,6 @@
*/
/****** Defines ******/
-#ifdef PCI_NUM_RESOURCES
-#define PCI_BASE_ADDRESS(dev, r) ((dev)->resource[r].start)
-#else
-#define PCI_BASE_ADDRESS(dev, r) ((dev)->base_address[r])
-#endif
-
#define ROCKET_PARANOIA_CHECK
#define ROCKET_DISABLE_SIMUSAGE
@@ -305,8 +299,8 @@ static inline int rocket_paranoia_check(struct r_port *info,
if (!info)
return 1;
if (info->magic != RPORT_MAGIC) {
- printk(KERN_INFO "Warning: bad magic number for rocketport struct in %s\n",
- routine);
+ printk(KERN_WARNING "Warning: bad magic number for rocketport "
+ "struct in %s\n", routine);
return 1;
}
#endif
@@ -328,7 +322,7 @@ static void rp_do_receive(struct r_port *info,
ToRecv = sGetRxCnt(cp);
#ifdef ROCKET_DEBUG_INTR
- printk(KERN_INFO "rp_do_receive(%d)...", ToRecv);
+ printk(KERN_INFO "rp_do_receive(%d)...\n", ToRecv);
#endif
if (ToRecv == 0)
return;
@@ -341,7 +335,7 @@ static void rp_do_receive(struct r_port *info,
if (ChanStatus & (RXFOVERFL | RXBREAK | RXFRAME | RXPARITY)) {
if (!(ChanStatus & STATMODE)) {
#ifdef ROCKET_DEBUG_RECEIVE
- printk(KERN_INFO "Entering STATMODE...");
+ printk(KERN_INFO "Entering STATMODE...\n");
#endif
ChanStatus |= STATMODE;
sEnRxStatusMode(cp);
@@ -355,15 +349,15 @@ static void rp_do_receive(struct r_port *info,
*/
if (ChanStatus & STATMODE) {
#ifdef ROCKET_DEBUG_RECEIVE
- printk(KERN_INFO "Ignore %x, read %x...", info->ignore_status_mask,
- info->read_status_mask);
+ printk(KERN_INFO "Ignore %x, read %x...\n",
+ info->ignore_status_mask, info->read_status_mask);
#endif
while (ToRecv) {
char flag;
CharNStat = sInW(sGetTxRxDataIO(cp));
#ifdef ROCKET_DEBUG_RECEIVE
- printk(KERN_INFO "%x...", CharNStat);
+ printk(KERN_INFO "%x...\n", CharNStat);
#endif
if (CharNStat & STMBREAKH)
CharNStat &= ~(STMFRAMEH | STMPARITYH);
@@ -435,12 +429,13 @@ static void rp_do_transmit(struct r_port *info)
unsigned long flags;
#ifdef ROCKET_DEBUG_INTR
- printk(KERN_INFO "rp_do_transmit ");
+ printk(KERN_DEBUG "%s\n", __func__);
#endif
if (!info)
return;
if (!info->tty) {
- printk(KERN_INFO "rp: WARNING rp_do_transmit called with info->tty==NULL\n");
+ printk(KERN_WARNING "rp: WARNING %s called with "
+ "info->tty==NULL\n", __func__);
clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
return;
}
@@ -464,7 +459,7 @@ static void rp_do_transmit(struct r_port *info)
info->xmit_cnt -= c;
info->xmit_fifo_room -= c;
#ifdef ROCKET_DEBUG_INTR
- printk(KERN_INFO "tx %d chars...", c);
+ printk(KERN_INFO "tx %d chars...\n", c);
#endif
}
@@ -481,7 +476,7 @@ static void rp_do_transmit(struct r_port *info)
spin_unlock_irqrestore(&info->slock, flags);
#ifdef ROCKET_DEBUG_INTR
- printk(KERN_INFO "(%d,%d,%d,%d)...", info->xmit_cnt, info->xmit_head,
+ printk(KERN_DEBUG "(%d,%d,%d,%d)...\n", info->xmit_cnt, info->xmit_head,
info->xmit_tail, info->xmit_fifo_room);
#endif
}
@@ -501,11 +496,13 @@ static void rp_handle_port(struct r_port *info)
return;
if ((info->flags & ROCKET_INITIALIZED) == 0) {
- printk(KERN_INFO "rp: WARNING: rp_handle_port called with info->flags & NOT_INIT\n");
+ printk(KERN_WARNING "rp: WARNING: rp_handle_port called with "
+ "info->flags & NOT_INIT\n");
return;
}
if (!info->tty) {
- printk(KERN_INFO "rp: WARNING: rp_handle_port called with info->tty==NULL\n");
+ printk(KERN_WARNING "rp: WARNING: rp_handle_port called with "
+ "info->tty==NULL\n");
return;
}
cp = &info->channel;
@@ -513,7 +510,7 @@ static void rp_handle_port(struct r_port *info)
IntMask = sGetChanIntID(cp) & info->intmask;
#ifdef ROCKET_DEBUG_INTR
- printk(KERN_INFO "rp_interrupt %02x...", IntMask);
+ printk(KERN_INFO "rp_interrupt %02x...\n", IntMask);
#endif
ChanStatus = sGetChanStatus(cp);
if (IntMask & RXF_TRIG) { /* Rx FIFO trigger level */
@@ -521,7 +518,7 @@ static void rp_handle_port(struct r_port *info)
}
if (IntMask & DELTA_CD) { /* CD change */
#if (defined(ROCKET_DEBUG_OPEN) || defined(ROCKET_DEBUG_INTR) || defined(ROCKET_DEBUG_HANGUP))
- printk(KERN_INFO "ttyR%d CD now %s...", info->line,
+ printk(KERN_INFO "ttyR%d CD now %s...\n", info->line,
(ChanStatus & CD_ACT) ? "on" : "off");
#endif
if (!(ChanStatus & CD_ACT) && info->cd_status) {
@@ -638,7 +635,8 @@ static void init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
/* Get a r_port struct for the port, fill it in and save it globally, indexed by line number */
info = kzalloc(sizeof (struct r_port), GFP_KERNEL);
if (!info) {
- printk(KERN_INFO "Couldn't allocate info struct for line #%d\n", line);
+ printk(KERN_ERR "Couldn't allocate info struct for line #%d\n",
+ line);
return;
}
@@ -668,7 +666,8 @@ static void init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR;
if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) {
- printk(KERN_INFO "RocketPort sInitChan(%d, %d, %d) failed!\n", board, aiop, chan);
+ printk(KERN_ERR "RocketPort sInitChan(%d, %d, %d) failed!\n",
+ board, aiop, chan);
kfree(info);
return;
}
@@ -976,7 +975,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
CHANNEL_t *cp;
unsigned long page;
- line = TTY_GET_LINE(tty);
+ line = tty->index;
if ((line < 0) || (line >= MAX_RP_PORTS) || ((info = rp_table[line]) == NULL))
return -ENXIO;
@@ -1007,7 +1006,8 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
atomic_inc(&rp_num_ports_open);
#ifdef ROCKET_DEBUG_OPEN
- printk(KERN_INFO "rocket mod++ = %d...", atomic_read(&rp_num_ports_open));
+ printk(KERN_INFO "rocket mod++ = %d...\n",
+ atomic_read(&rp_num_ports_open));
#endif
}
#ifdef ROCKET_DEBUG_OPEN
@@ -1103,13 +1103,13 @@ static void rp_close(struct tty_struct *tty, struct file *filp)
* one, we've got real problems, since it means the
* serial port won't be shutdown.
*/
- printk(KERN_INFO "rp_close: bad serial port count; tty->count is 1, "
- "info->count is %d\n", info->count);
+ printk(KERN_WARNING "rp_close: bad serial port count; "
+ "tty->count is 1, info->count is %d\n", info->count);
info->count = 1;
}
if (--info->count < 0) {
- printk(KERN_INFO "rp_close: bad serial port count for ttyR%d: %d\n",
- info->line, info->count);
+ printk(KERN_WARNING "rp_close: bad serial port count for "
+ "ttyR%d: %d\n", info->line, info->count);
info->count = 0;
}
if (info->count) {
@@ -1160,8 +1160,7 @@ static void rp_close(struct tty_struct *tty, struct file *filp)
if (C_HUPCL(tty))
sClrDTR(cp);
- if (TTY_DRIVER_FLUSH_BUFFER_EXISTS(tty))
- TTY_DRIVER_FLUSH_BUFFER(tty);
+ rp_flush_buffer(tty);
tty_ldisc_flush(tty);
@@ -1184,7 +1183,8 @@ static void rp_close(struct tty_struct *tty, struct file *filp)
atomic_dec(&rp_num_ports_open);
#ifdef ROCKET_DEBUG_OPEN
- printk(KERN_INFO "rocket mod-- = %d...", atomic_read(&rp_num_ports_open));
+ printk(KERN_INFO "rocket mod-- = %d...\n",
+ atomic_read(&rp_num_ports_open));
printk(KERN_INFO "rp_close ttyR%d complete shutdown\n", info->line);
#endif
@@ -1569,9 +1569,9 @@ static void rp_wait_until_sent(struct tty_struct *tty, int timeout)
orig_jiffies = jiffies;
#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT
- printk(KERN_INFO "In RP_wait_until_sent(%d) (jiff=%lu)...", timeout,
+ printk(KERN_INFO "In RP_wait_until_sent(%d) (jiff=%lu)...\n", timeout,
jiffies);
- printk(KERN_INFO "cps=%d...", info->cps);
+ printk(KERN_INFO "cps=%d...\n", info->cps);
#endif
while (1) {
txcnt = sGetTxCnt(cp);
@@ -1592,7 +1592,8 @@ static void rp_wait_until_sent(struct tty_struct *tty, int timeout)
if (check_time == 0)
check_time = 1;
#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT
- printk(KERN_INFO "txcnt = %d (jiff=%lu,check=%d)...", txcnt, jiffies, check_time);
+ printk(KERN_INFO "txcnt = %d (jiff=%lu,check=%d)...\n", txcnt,
+ jiffies, check_time);
#endif
msleep_interruptible(jiffies_to_msecs(check_time));
if (signal_pending(current))
@@ -1616,7 +1617,7 @@ static void rp_hangup(struct tty_struct *tty)
return;
#if (defined(ROCKET_DEBUG_OPEN) || defined(ROCKET_DEBUG_HANGUP))
- printk(KERN_INFO "rp_hangup of ttyR%d...", info->line);
+ printk(KERN_INFO "rp_hangup of ttyR%d...\n", info->line);
#endif
rp_flush_buffer(tty);
if (info->flags & ROCKET_CLOSING)
@@ -1664,7 +1665,7 @@ static void rp_put_char(struct tty_struct *tty, unsigned char ch)
mutex_lock(&info->write_mtx);
#ifdef ROCKET_DEBUG_WRITE
- printk(KERN_INFO "rp_put_char %c...", ch);
+ printk(KERN_INFO "rp_put_char %c...\n", ch);
#endif
spin_lock_irqsave(&info->slock, flags);
@@ -1709,7 +1710,7 @@ static int rp_write(struct tty_struct *tty,
return -ERESTARTSYS;
#ifdef ROCKET_DEBUG_WRITE
- printk(KERN_INFO "rp_write %d chars...", count);
+ printk(KERN_INFO "rp_write %d chars...\n", count);
#endif
cp = &info->channel;
@@ -1798,7 +1799,7 @@ static int rp_write_room(struct tty_struct *tty)
if (ret < 0)
ret = 0;
#ifdef ROCKET_DEBUG_WRITE
- printk(KERN_INFO "rp_write_room returns %d...", ret);
+ printk(KERN_INFO "rp_write_room returns %d...\n", ret);
#endif
return ret;
}
@@ -1818,7 +1819,7 @@ static int rp_chars_in_buffer(struct tty_struct *tty)
cp = &info->channel;
#ifdef ROCKET_DEBUG_WRITE
- printk(KERN_INFO "rp_chars_in_buffer returns %d...", info->xmit_cnt);
+ printk(KERN_INFO "rp_chars_in_buffer returns %d...\n", info->xmit_cnt);
#endif
return info->xmit_cnt;
}
@@ -2161,14 +2162,11 @@ static __init int register_PCI(int i, struct pci_dev *dev)
for (aiop = 0; aiop < max_num_aiops; aiop++)
ctlp->AiopNumChan[aiop] = ports_per_aiop;
- printk("Comtrol PCI controller #%d ID 0x%x found in bus:slot:fn %s at address %04lx, "
- "%d AIOP(s) (%s)\n", i, dev->device, pci_name(dev),
- rcktpt_io_addr[i], num_aiops, rocketModel[i].modelString);
- printk(KERN_INFO "Installing %s, creating /dev/ttyR%d - %ld\n",
- rocketModel[i].modelString,
- rocketModel[i].startingPortNumber,
- rocketModel[i].startingPortNumber +
- rocketModel[i].numPorts - 1);
+ dev_info(&dev->dev, "comtrol PCI controller #%d found at "
+ "address %04lx, %d AIOP(s) (%s), creating ttyR%d - %ld\n",
+ i, rcktpt_io_addr[i], num_aiops, rocketModel[i].modelString,
+ rocketModel[i].startingPortNumber,
+ rocketModel[i].startingPortNumber + rocketModel[i].numPorts-1);
if (num_aiops <= 0) {
rcktpt_io_addr[i] = 0;
@@ -2191,10 +2189,10 @@ static __init int register_PCI(int i, struct pci_dev *dev)
num_chan = ports_per_aiop;
for (chan = 0; chan < num_chan; chan++)
sPCIModemReset(ctlp, chan, 1);
- mdelay(500);
+ msleep(500);
for (chan = 0; chan < num_chan; chan++)
sPCIModemReset(ctlp, chan, 0);
- mdelay(500);
+ msleep(500);
rmSpeakerReset(ctlp, rocketModel[i].model);
}
return (1);
@@ -2240,7 +2238,9 @@ static int __init init_ISA(int i)
/* Reserve the IO region */
if (!request_region(rcktpt_io_addr[i], 64, "Comtrol RocketPort")) {
- printk(KERN_INFO "Unable to reserve IO region for configured ISA RocketPort at address 0x%lx, board not installed...\n", rcktpt_io_addr[i]);
+ printk(KERN_ERR "Unable to reserve IO region for configured "
+ "ISA RocketPort at address 0x%lx, board not "
+ "installed...\n", rcktpt_io_addr[i]);
rcktpt_io_addr[i] = 0;
return (0);
}
@@ -2309,10 +2309,10 @@ static int __init init_ISA(int i)
total_num_chan = num_chan;
for (chan = 0; chan < num_chan; chan++)
sModemReset(ctlp, chan, 1);
- mdelay(500);
+ msleep(500);
for (chan = 0; chan < num_chan; chan++)
sModemReset(ctlp, chan, 0);
- mdelay(500);
+ msleep(500);
strcpy(rocketModel[i].modelString, "RocketModem ISA");
} else {
strcpy(rocketModel[i].modelString, "RocketPort ISA");
@@ -2480,7 +2480,7 @@ static void rp_cleanup_module(void)
retval = tty_unregister_driver(rocket_driver);
if (retval)
- printk(KERN_INFO "Error %d while trying to unregister "
+ printk(KERN_ERR "Error %d while trying to unregister "
"rocketport driver\n", -retval);
for (i = 0; i < MAX_RP_PORTS; i++)
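
Two of the rocket.c hunks swap mdelay(500) for msleep(500) around the modem resets. mdelay() busy-waits and burns a CPU for the whole duration, which is only justified in atomic context; in process context a delay of that length should sleep instead. A hedged sketch of the rule of thumb (the reset steps are placeholders, not RocketPort code):

	#include <linux/delay.h>

	/* Process context: sleep, letting the scheduler run other tasks. */
	static void example_reset_slow(void)
	{
		/* assert reset ... */
		msleep(500);		/* may sleep; not for atomic context */
		/* deassert reset ... */
	}

	/* Atomic context (IRQ handler, spinlock held): only short busy-waits. */
	static void example_reset_fast(void)
	{
		/* toggle a control bit ... */
		udelay(10);		/* busy-wait; keep it to microseconds */
	}
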
diff --git a/drivers/char/rocket_int.h b/drivers/char/rocket_int.h
index 55b8f2d71a96..f3a75791b811 100644
--- a/drivers/char/rocket_int.h
+++ b/drivers/char/rocket_int.h
@@ -42,7 +42,7 @@ typedef unsigned int DWordIO_t;
static inline void sOutB(unsigned short port, unsigned char value)
{
#ifdef ROCKET_DEBUG_IO
- printk("sOutB(%x, %x)...", port, value);
+ printk(KERN_DEBUG "sOutB(%x, %x)...\n", port, value);
#endif
outb_p(value, port);
}
@@ -50,7 +50,7 @@ static inline void sOutB(unsigned short port, unsigned char value)
static inline void sOutW(unsigned short port, unsigned short value)
{
#ifdef ROCKET_DEBUG_IO
- printk("sOutW(%x, %x)...", port, value);
+ printk(KERN_DEBUG "sOutW(%x, %x)...\n", port, value);
#endif
outw_p(value, port);
}
@@ -58,7 +58,7 @@ static inline void sOutW(unsigned short port, unsigned short value)
static inline void sOutDW(unsigned short port, unsigned long value)
{
#ifdef ROCKET_DEBUG_IO
- printk("sOutDW(%x, %lx)...", port, value);
+ printk(KERN_DEBUG "sOutDW(%x, %lx)...\n", port, value);
#endif
outl_p(cpu_to_le32(value), port);
}
@@ -105,12 +105,6 @@ static inline unsigned short sInW(unsigned short port)
#define AIOPID_NULL -1 /* no AIOP or channel exists */
#define AIOPID_0001 0x0001 /* AIOP release 1 */
-#define NULLDEV -1 /* identifies non-existant device */
-#define NULLCTL -1 /* identifies non-existant controller */
-#define NULLCTLPTR (CONTROLLER_T *)0 /* identifies non-existant controller */
-#define NULLAIOP -1 /* identifies non-existant AIOP */
-#define NULLCHAN -1 /* identifies non-existant channel */
-
/************************************************************************
Global Register Offsets - Direct Access - Fixed values
************************************************************************/
@@ -1187,9 +1181,6 @@ struct r_port {
#define ROCKET_CLOSING 0x40000000 /* Serial port is closing */
#define ROCKET_NORMAL_ACTIVE 0x20000000 /* Normal port is active */
-/* tty subtypes */
-#define SERIAL_TYPE_NORMAL 1
-
/*
* Assigned major numbers for the Comtrol Rocketport
*/
@@ -1240,12 +1231,3 @@ struct r_port {
/* Compact PCI device */
#define PCI_DEVICE_ID_CRP16INTF 0x0903 /* Rocketport Compact PCI 16 port w/external I/F */
-#define TTY_GET_LINE(t) t->index
-#define TTY_DRIVER_MINOR_START(t) t->driver->minor_start
-#define TTY_DRIVER_SUBTYPE(t) t->driver->subtype
-#define TTY_DRIVER_NAME(t) t->driver->name
-#define TTY_DRIVER_NAME_BASE(t) t->driver->name_base
-#define TTY_DRIVER_FLUSH_BUFFER_EXISTS(t) t->driver->flush_buffer
-#define TTY_DRIVER_FLUSH_BUFFER(t) t->driver->flush_buffer(t)
-
-
diff --git a/drivers/char/ser_a2232.c b/drivers/char/ser_a2232.c
index 3c869145bfdc..4ba3aec9e1cd 100644
--- a/drivers/char/ser_a2232.c
+++ b/drivers/char/ser_a2232.c
@@ -653,7 +653,7 @@ static void a2232_init_portstructs(void)
port->gs.closing_wait = 30 * HZ;
port->gs.rd = &a2232_real_driver;
#ifdef NEW_WRITE_LOCKING
- init_MUTEX(&(port->gs.port_write_mutex));
+ mutex_init(&(port->gs.port_write_mutex));
#endif
init_waitqueue_head(&port->gs.open_wait);
init_waitqueue_head(&port->gs.close_wait);
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index f1497cecffd8..df8cd0ca97eb 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -90,8 +90,6 @@
#define STD_COM_FLAGS (0)
-#define SERIAL_TYPE_NORMAL 1
-
static struct tty_driver *cy_serial_driver;
extern int serial_console;
static struct cyclades_port *serial_console_info = NULL;
@@ -359,18 +357,6 @@ static void cy_start(struct tty_struct *tty)
local_irq_restore(flags);
} /* cy_start */
-/*
- * This routine is used by the interrupt handler to schedule
- * processing in the software interrupt portion of the driver
- * (also known as the "bottom half"). This can be called any
- * number of times for any channel without harm.
- */
-static inline void cy_sched_event(struct cyclades_port *info, int event)
-{
- info->event |= 1 << event; /* remember what kind of event and who */
- schedule_work(&info->tqueue);
-} /* cy_sched_event */
-
/* The real interrupt service routines are called
whenever the card wants its hand held--chars
received, out buffer empty, modem change, etc.
@@ -485,10 +471,12 @@ static irqreturn_t cd2401_modem_interrupt(int irq, void *dev_id)
&& (info->flags & ASYNC_CHECK_CD)) {
if (mdm_status & CyDCD) {
/* CP('!'); */
- cy_sched_event(info, Cy_EVENT_OPEN_WAKEUP);
+ wake_up_interruptible(&info->open_wait);
} else {
/* CP('@'); */
- cy_sched_event(info, Cy_EVENT_HANGUP);
+ tty_hangup(info->tty);
+ wake_up_interruptible(&info->open_wait);
+ info->flags &= ~ASYNC_NORMAL_ACTIVE;
}
}
if ((mdm_change & CyCTS)
@@ -498,8 +486,7 @@ static irqreturn_t cd2401_modem_interrupt(int irq, void *dev_id)
/* !!! cy_start isn't used because... */
info->tty->stopped = 0;
base_addr[CyIER] |= CyTxMpty;
- cy_sched_event(info,
- Cy_EVENT_WRITE_WAKEUP);
+ tty_wakeup(info->tty);
}
} else {
if (!(mdm_status & CyCTS)) {
@@ -545,9 +532,6 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id)
info->last_active = jiffies;
if (info->tty == 0) {
base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
- if (info->xmit_cnt < WAKEUP_CHARS) {
- cy_sched_event(info, Cy_EVENT_WRITE_WAKEUP);
- }
base_addr[CyTEOIR] = CyNOTRANS;
return IRQ_HANDLED;
}
@@ -629,9 +613,9 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id)
}
}
- if (info->xmit_cnt < WAKEUP_CHARS) {
- cy_sched_event(info, Cy_EVENT_WRITE_WAKEUP);
- }
+ if (info->xmit_cnt < WAKEUP_CHARS)
+ tty_wakeup(info->tty);
+
base_addr[CyTEOIR] = (char_count != saved_cnt) ? 0 : CyNOTRANS;
return IRQ_HANDLED;
} /* cy_tx_interrupt */
@@ -692,49 +676,6 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
} /* cy_rx_interrupt */
-/*
- * This routine is used to handle the "bottom half" processing for the
- * serial driver, known also the "software interrupt" processing.
- * This processing is done at the kernel interrupt level, after the
- * cy#/_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON. This
- * is where time-consuming activities which can not be done in the
- * interrupt driver proper are done; the interrupt driver schedules
- * them using cy_sched_event(), and they get done here.
- *
- * This is done through one level of indirection--the task queue.
- * When a hardware interrupt service routine wants service by the
- * driver's bottom half, it enqueues the appropriate tq_struct (one
- * per port) to the keventd work queue and sets a request flag
- * that the work queue be processed.
- *
- * Although this may seem unwieldy, it gives the system a way to
- * pass an argument (in this case the pointer to the cyclades_port
- * structure) to the bottom half of the driver. Previous kernels
- * had to poll every port to see if that port needed servicing.
- */
-static void do_softint(struct work_struct *ugly_api)
-{
- struct cyclades_port *info =
- container_of(ugly_api, struct cyclades_port, tqueue);
- struct tty_struct *tty;
-
- tty = info->tty;
- if (!tty)
- return;
-
- if (test_and_clear_bit(Cy_EVENT_HANGUP, &info->event)) {
- tty_hangup(info->tty);
- wake_up_interruptible(&info->open_wait);
- info->flags &= ~ASYNC_NORMAL_ACTIVE;
- }
- if (test_and_clear_bit(Cy_EVENT_OPEN_WAKEUP, &info->event)) {
- wake_up_interruptible(&info->open_wait);
- }
- if (test_and_clear_bit(Cy_EVENT_WRITE_WAKEUP, &info->event)) {
- tty_wakeup(tty);
- }
-} /* do_softint */
-
/* This is called whenever a port becomes active;
interrupts are enabled and DTR & RTS are turned on.
*/
@@ -1745,7 +1686,6 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
if (tty->driver->flush_buffer)
tty->driver->flush_buffer(tty);
tty_ldisc_flush(tty);
- info->event = 0;
info->tty = NULL;
if (info->blocked_open) {
if (info->close_delay) {
@@ -2236,7 +2176,6 @@ static int __init serial167_init(void)
info->rco = baud_co[DefSpeed] >> 5; /* Rx CO */
info->close_delay = 0;
info->x_char = 0;
- info->event = 0;
info->count = 0;
#ifdef SERIAL_DEBUG_COUNT
printk("cyc: %d: setting count to 0\n",
@@ -2245,7 +2184,6 @@ static int __init serial167_init(void)
info->blocked_open = 0;
info->default_threshold = 0;
info->default_timeout = 0;
- INIT_WORK(&info->tqueue, do_softint);
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
/* info->session */
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 455855631aef..c0e08c7bca2f 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -178,9 +178,6 @@ static int sx_poll = HZ;
ASYNC_SPD_HI | ASYNC_SPEED_VHI | ASYNC_SESSION_LOCKOUT | \
ASYNC_PGRP_LOCKOUT | ASYNC_CALLOUT_NOHUP)
-#undef RS_EVENT_WRITE_WAKEUP
-#define RS_EVENT_WRITE_WAKEUP 0
-
static struct tty_driver *specialix_driver;
static struct specialix_board sx_board[SX_NBOARD] = {
@@ -602,17 +599,6 @@ static int sx_probe(struct specialix_board *bp)
* Interrupt processing routines.
* */
-static inline void sx_mark_event(struct specialix_port * port, int event)
-{
- func_enter();
-
- set_bit(event, &port->event);
- schedule_work(&port->tqueue);
-
- func_exit();
-}
-
-
static inline struct specialix_port * sx_get_port(struct specialix_board * bp,
unsigned char const * what)
{
@@ -809,7 +795,7 @@ static inline void sx_transmit(struct specialix_board * bp)
sx_out(bp, CD186x_IER, port->IER);
}
if (port->xmit_cnt <= port->wakeup_chars)
- sx_mark_event(port, RS_EVENT_WRITE_WAKEUP);
+ tty_wakeup(tty);
func_exit();
}
@@ -839,7 +825,7 @@ static inline void sx_check_modem(struct specialix_board * bp)
wake_up_interruptible(&port->open_wait);
} else {
dprintk (SX_DEBUG_SIGNALS, "Sending HUP.\n");
- schedule_work(&port->tqueue_hangup);
+ tty_hangup(tty);
}
}
@@ -849,7 +835,7 @@ static inline void sx_check_modem(struct specialix_board * bp)
tty->hw_stopped = 0;
port->IER |= IER_TXRDY;
if (port->xmit_cnt <= port->wakeup_chars)
- sx_mark_event(port, RS_EVENT_WRITE_WAKEUP);
+ tty_wakeup(tty);
} else {
tty->hw_stopped = 1;
port->IER &= ~IER_TXRDY;
@@ -861,7 +847,7 @@ static inline void sx_check_modem(struct specialix_board * bp)
tty->hw_stopped = 0;
port->IER |= IER_TXRDY;
if (port->xmit_cnt <= port->wakeup_chars)
- sx_mark_event(port, RS_EVENT_WRITE_WAKEUP);
+ tty_wakeup(tty);
} else {
tty->hw_stopped = 1;
port->IER &= ~IER_TXRDY;
@@ -1618,7 +1604,6 @@ static void sx_close(struct tty_struct * tty, struct file * filp)
tty_ldisc_flush(tty);
spin_lock_irqsave(&port->lock, flags);
tty->closing = 0;
- port->event = 0;
port->tty = NULL;
spin_unlock_irqrestore(&port->lock, flags);
if (port->blocked_open) {
@@ -2235,32 +2220,6 @@ static void sx_start(struct tty_struct * tty)
func_exit();
}
-
-/*
- * This routine is called from the work-queue when the interrupt
- * routine has signalled that a hangup has occurred. The path of
- * hangup processing is:
- *
- * serial interrupt routine -> (workqueue) ->
- * do_sx_hangup() -> tty->hangup() -> sx_hangup()
- *
- */
-static void do_sx_hangup(struct work_struct *work)
-{
- struct specialix_port *port =
- container_of(work, struct specialix_port, tqueue_hangup);
- struct tty_struct *tty;
-
- func_enter();
-
- tty = port->tty;
- if (tty)
- tty_hangup(tty); /* FIXME: module removal race here */
-
- func_exit();
-}
-
-
static void sx_hangup(struct tty_struct * tty)
{
struct specialix_port *port = (struct specialix_port *)tty->driver_data;
@@ -2278,7 +2237,6 @@ static void sx_hangup(struct tty_struct * tty)
sx_shutdown_port(bp, port);
spin_lock_irqsave(&port->lock, flags);
- port->event = 0;
bp->count -= port->count;
if (bp->count < 0) {
printk(KERN_ERR "sx%d: sx_hangup: bad board count: %d port: %d\n",
@@ -2320,26 +2278,6 @@ static void sx_set_termios(struct tty_struct * tty, struct ktermios * old_termio
}
}
-
-static void do_softint(struct work_struct *work)
-{
- struct specialix_port *port =
- container_of(work, struct specialix_port, tqueue);
- struct tty_struct *tty;
-
- func_enter();
-
- if(!(tty = port->tty)) {
- func_exit();
- return;
- }
-
- if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &port->event))
- tty_wakeup(tty);
-
- func_exit();
-}
-
static const struct tty_operations sx_ops = {
.open = sx_open,
.close = sx_close,
@@ -2397,8 +2335,6 @@ static int sx_init_drivers(void)
memset(sx_port, 0, sizeof(sx_port));
for (i = 0; i < SX_NPORT * SX_NBOARD; i++) {
sx_port[i].magic = SPECIALIX_MAGIC;
- INIT_WORK(&sx_port[i].tqueue, do_softint);
- INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup);
sx_port[i].close_delay = 50 * HZ/100;
sx_port[i].closing_wait = 3000 * HZ/100;
init_waitqueue_head(&sx_port[i].open_wait);
diff --git a/drivers/char/specialix_io8.h b/drivers/char/specialix_io8.h
index 895bd90de363..3f2f85bdf516 100644
--- a/drivers/char/specialix_io8.h
+++ b/drivers/char/specialix_io8.h
@@ -112,7 +112,6 @@ struct specialix_port {
struct tty_struct * tty;
int count;
int blocked_open;
- ulong event;
int timeout;
int close_delay;
unsigned char * xmit_buf;
@@ -122,8 +121,6 @@ struct specialix_port {
int xmit_cnt;
wait_queue_head_t open_wait;
wait_queue_head_t close_wait;
- struct work_struct tqueue;
- struct work_struct tqueue_hangup;
short wakeup_chars;
short break_length;
unsigned short closing_wait;
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 45758d5b56ef..feac54e32a12 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -145,8 +145,7 @@ static struct stlbrd *stl_brds[STL_MAXBRDS];
*/
#define ASYI_TXBUSY 1
#define ASYI_TXLOW 2
-#define ASYI_DCDCHANGE 3
-#define ASYI_TXFLOWED 4
+#define ASYI_TXFLOWED 3
/*
* Define an array of board names as printable strings. Handy for
@@ -610,6 +609,23 @@ static const struct file_operations stl_fsiomem = {
static struct class *stallion_class;
+static void stl_cd_change(struct stlport *portp)
+{
+ unsigned int oldsigs = portp->sigs;
+
+ if (!portp->tty)
+ return;
+
+ portp->sigs = stl_getsignals(portp);
+
+ if ((portp->sigs & TIOCM_CD) && ((oldsigs & TIOCM_CD) == 0))
+ wake_up_interruptible(&portp->open_wait);
+
+ if ((oldsigs & TIOCM_CD) && ((portp->sigs & TIOCM_CD) == 0))
+ if (portp->flags & ASYNC_CHECK_CD)
+ tty_hangup(portp->tty);
+}
+
/*
* Check for any arguments passed in on the module load command line.
*/
@@ -1771,41 +1787,6 @@ static int stl_echpci64intr(struct stlbrd *brdp)
/*****************************************************************************/
/*
- * Service an off-level request for some channel.
- */
-static void stl_offintr(struct work_struct *work)
-{
- struct stlport *portp = container_of(work, struct stlport, tqueue);
- struct tty_struct *tty;
- unsigned int oldsigs;
-
- pr_debug("stl_offintr(portp=%p)\n", portp);
-
- if (portp == NULL)
- return;
-
- tty = portp->tty;
- if (tty == NULL)
- return;
-
- if (test_bit(ASYI_TXLOW, &portp->istate))
- tty_wakeup(tty);
-
- if (test_bit(ASYI_DCDCHANGE, &portp->istate)) {
- clear_bit(ASYI_DCDCHANGE, &portp->istate);
- oldsigs = portp->sigs;
- portp->sigs = stl_getsignals(portp);
- if ((portp->sigs & TIOCM_CD) && ((oldsigs & TIOCM_CD) == 0))
- wake_up_interruptible(&portp->open_wait);
- if ((oldsigs & TIOCM_CD) && ((portp->sigs & TIOCM_CD) == 0))
- if (portp->flags & ASYNC_CHECK_CD)
- tty_hangup(tty); /* FIXME: module removal race here - AKPM */
- }
-}
-
-/*****************************************************************************/
-
-/*
* Initialize all the ports on a panel.
*/
@@ -1840,7 +1821,6 @@ static int __devinit stl_initports(struct stlbrd *brdp, struct stlpanel *panelp)
portp->baud_base = STL_BAUDBASE;
portp->close_delay = STL_CLOSEDELAY;
portp->closing_wait = 30 * HZ;
- INIT_WORK(&portp->tqueue, stl_offintr);
init_waitqueue_head(&portp->open_wait);
init_waitqueue_head(&portp->close_wait);
portp->stats.brd = portp->brdnr;
@@ -3530,7 +3510,8 @@ static void stl_cd1400txisr(struct stlpanel *panelp, int ioaddr)
if ((len == 0) || ((len < STL_TXBUFLOW) &&
(test_bit(ASYI_TXLOW, &portp->istate) == 0))) {
set_bit(ASYI_TXLOW, &portp->istate);
- schedule_work(&portp->tqueue);
+ if (portp->tty)
+ tty_wakeup(portp->tty);
}
if (len == 0) {
@@ -3546,7 +3527,8 @@ static void stl_cd1400txisr(struct stlpanel *panelp, int ioaddr)
} else {
len = min(len, CD1400_TXFIFOSIZE);
portp->stats.txtotal += len;
- stlen = min(len, ((portp->tx.buf + STL_TXBUFSIZE) - tail));
+ stlen = min_t(unsigned int, len,
+ (portp->tx.buf + STL_TXBUFSIZE) - tail);
outb((TDR + portp->uartaddr), ioaddr);
outsb((ioaddr + EREG_DATA), tail, stlen);
len -= stlen;
@@ -3599,7 +3581,7 @@ static void stl_cd1400rxisr(struct stlpanel *panelp, int ioaddr)
outb((RDCR + portp->uartaddr), ioaddr);
len = inb(ioaddr + EREG_DATA);
if (tty == NULL || (buflen = tty_buffer_request_room(tty, len)) == 0) {
- len = min(len, sizeof(stl_unwanted));
+ len = min_t(unsigned int, len, sizeof(stl_unwanted));
outb((RDSR + portp->uartaddr), ioaddr);
insb((ioaddr + EREG_DATA), &stl_unwanted[0], len);
portp->stats.rxlost += len;
@@ -3692,8 +3674,7 @@ static void stl_cd1400mdmisr(struct stlpanel *panelp, int ioaddr)
outb((MISR + portp->uartaddr), ioaddr);
misr = inb(ioaddr + EREG_DATA);
if (misr & MISR_DCD) {
- set_bit(ASYI_DCDCHANGE, &portp->istate);
- schedule_work(&portp->tqueue);
+ stl_cd_change(portp);
portp->stats.modem++;
}
@@ -4447,7 +4428,8 @@ static void stl_sc26198txisr(struct stlport *portp)
if ((len == 0) || ((len < STL_TXBUFLOW) &&
(test_bit(ASYI_TXLOW, &portp->istate) == 0))) {
set_bit(ASYI_TXLOW, &portp->istate);
- schedule_work(&portp->tqueue);
+ if (portp->tty)
+ tty_wakeup(portp->tty);
}
if (len == 0) {
@@ -4465,7 +4447,8 @@ static void stl_sc26198txisr(struct stlport *portp)
} else {
len = min(len, SC26198_TXFIFOSIZE);
portp->stats.txtotal += len;
- stlen = min(len, ((portp->tx.buf + STL_TXBUFSIZE) - tail));
+ stlen = min_t(unsigned int, len,
+ (portp->tx.buf + STL_TXBUFSIZE) - tail);
outb(GTXFIFO, (ioaddr + XP_ADDR));
outsb((ioaddr + XP_DATA), tail, stlen);
len -= stlen;
@@ -4506,7 +4489,7 @@ static void stl_sc26198rxisr(struct stlport *portp, unsigned int iack)
if ((iack & IVR_TYPEMASK) == IVR_RXDATA) {
if (tty == NULL || (buflen = tty_buffer_request_room(tty, len)) == 0) {
- len = min(len, sizeof(stl_unwanted));
+ len = min_t(unsigned int, len, sizeof(stl_unwanted));
outb(GRXFIFO, (ioaddr + XP_ADDR));
insb((ioaddr + XP_DATA), &stl_unwanted[0], len);
portp->stats.rxlost += len;
@@ -4647,8 +4630,7 @@ static void stl_sc26198otherisr(struct stlport *portp, unsigned int iack)
case CIR_SUBCOS:
ipr = stl_sc26198getreg(portp, IPR);
if (ipr & IPR_DCDCHANGE) {
- set_bit(ASYI_DCDCHANGE, &portp->istate);
- schedule_work(&portp->tqueue);
+ stl_cd_change(portp);
portp->stats.modem++;
}
break;
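
Several stallion.c hunks above replace min() with min_t(unsigned int, ...) where the two operands have different types: a count read from the hardware against sizeof() or a pointer difference. The kernel's min() warns on mismatched types, so picking one explicit comparison type with min_t() is the idiomatic fix. A small illustration with invented names:

	#include <linux/kernel.h>	/* min_t() */

	static unsigned int example_clamp(int hw_count, const unsigned char *tail,
					  const unsigned char *buf_end)
	{
		/*
		 * hw_count is a signed int and the pointer difference is a
		 * ptrdiff_t; min() would warn, min_t() compares both as
		 * unsigned int.
		 */
		return min_t(unsigned int, hw_count, buf_end - tail);
	}
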
diff --git a/drivers/char/sx.h b/drivers/char/sx.h
index 70d9783c7323..87c2defdead7 100644
--- a/drivers/char/sx.h
+++ b/drivers/char/sx.h
@@ -88,8 +88,6 @@ struct vpd_prom {
#define IS_CF_BOARD(board) (board->flags & (SX_CFISA_BOARD | SX_CFPCI_BOARD))
-#define SERIAL_TYPE_NORMAL 1
-
/* The SI processor clock is required to calculate the cc_int_count register
value for the SI cards. */
#define SI_PROCESSOR_CLOCK 25000000
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 905d1f51a7bf..ddc74d1f4f1b 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -85,6 +85,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/ioctl.h>
+#include <linux/synclink.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -110,8 +111,6 @@
#include <asm/uaccess.h>
-#include "linux/synclink.h"
-
#define RCLRVALUE 0xffff
static MGSL_PARAMS default_params = {
@@ -1544,7 +1543,7 @@ static void mgsl_isr_receive_data( struct mgsl_struct *info )
/* mgsl_isr_misc()
*
- * Service a miscellaneos interrupt source.
+ * Service a miscellaneous interrupt source.
*
* Arguments: info pointer to device extension (instance data)
* Return Value: None
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 64e835f62438..1f954acf2bac 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -73,6 +73,7 @@
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/hdlc.h>
+#include <linux/synclink.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -81,8 +82,6 @@
#include <asm/types.h>
#include <asm/uaccess.h>
-#include "linux/synclink.h"
-
#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE))
#define SYNCLINK_GENERIC_HDLC 1
#else
@@ -2040,37 +2039,41 @@ static void bh_transmit(struct slgt_info *info)
tty_wakeup(tty);
}
-static void dsr_change(struct slgt_info *info)
+static void dsr_change(struct slgt_info *info, unsigned short status)
{
- get_signals(info);
+ if (status & BIT3) {
+ info->signals |= SerialSignal_DSR;
+ info->input_signal_events.dsr_up++;
+ } else {
+ info->signals &= ~SerialSignal_DSR;
+ info->input_signal_events.dsr_down++;
+ }
DBGISR(("dsr_change %s signals=%04X\n", info->device_name, info->signals));
if ((info->dsr_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
slgt_irq_off(info, IRQ_DSR);
return;
}
info->icount.dsr++;
- if (info->signals & SerialSignal_DSR)
- info->input_signal_events.dsr_up++;
- else
- info->input_signal_events.dsr_down++;
wake_up_interruptible(&info->status_event_wait_q);
wake_up_interruptible(&info->event_wait_q);
info->pending_bh |= BH_STATUS;
}
-static void cts_change(struct slgt_info *info)
+static void cts_change(struct slgt_info *info, unsigned short status)
{
- get_signals(info);
+ if (status & BIT2) {
+ info->signals |= SerialSignal_CTS;
+ info->input_signal_events.cts_up++;
+ } else {
+ info->signals &= ~SerialSignal_CTS;
+ info->input_signal_events.cts_down++;
+ }
DBGISR(("cts_change %s signals=%04X\n", info->device_name, info->signals));
if ((info->cts_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
slgt_irq_off(info, IRQ_CTS);
return;
}
info->icount.cts++;
- if (info->signals & SerialSignal_CTS)
- info->input_signal_events.cts_up++;
- else
- info->input_signal_events.cts_down++;
wake_up_interruptible(&info->status_event_wait_q);
wake_up_interruptible(&info->event_wait_q);
info->pending_bh |= BH_STATUS;
@@ -2091,20 +2094,21 @@ static void cts_change(struct slgt_info *info)
}
}
-static void dcd_change(struct slgt_info *info)
+static void dcd_change(struct slgt_info *info, unsigned short status)
{
- get_signals(info);
+ if (status & BIT1) {
+ info->signals |= SerialSignal_DCD;
+ info->input_signal_events.dcd_up++;
+ } else {
+ info->signals &= ~SerialSignal_DCD;
+ info->input_signal_events.dcd_down++;
+ }
DBGISR(("dcd_change %s signals=%04X\n", info->device_name, info->signals));
if ((info->dcd_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
slgt_irq_off(info, IRQ_DCD);
return;
}
info->icount.dcd++;
- if (info->signals & SerialSignal_DCD) {
- info->input_signal_events.dcd_up++;
- } else {
- info->input_signal_events.dcd_down++;
- }
#if SYNCLINK_GENERIC_HDLC
if (info->netcount) {
if (info->signals & SerialSignal_DCD)
@@ -2127,20 +2131,21 @@ static void dcd_change(struct slgt_info *info)
}
}
-static void ri_change(struct slgt_info *info)
+static void ri_change(struct slgt_info *info, unsigned short status)
{
- get_signals(info);
+ if (status & BIT0) {
+ info->signals |= SerialSignal_RI;
+ info->input_signal_events.ri_up++;
+ } else {
+ info->signals &= ~SerialSignal_RI;
+ info->input_signal_events.ri_down++;
+ }
DBGISR(("ri_change %s signals=%04X\n", info->device_name, info->signals));
if ((info->ri_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
slgt_irq_off(info, IRQ_RI);
return;
}
- info->icount.dcd++;
- if (info->signals & SerialSignal_RI) {
- info->input_signal_events.ri_up++;
- } else {
- info->input_signal_events.ri_down++;
- }
+ info->icount.rng++;
wake_up_interruptible(&info->status_event_wait_q);
wake_up_interruptible(&info->event_wait_q);
info->pending_bh |= BH_STATUS;
@@ -2191,13 +2196,13 @@ static void isr_serial(struct slgt_info *info)
}
if (status & IRQ_DSR)
- dsr_change(info);
+ dsr_change(info, status);
if (status & IRQ_CTS)
- cts_change(info);
+ cts_change(info, status);
if (status & IRQ_DCD)
- dcd_change(info);
+ dcd_change(info, status);
if (status & IRQ_RI)
- ri_change(info);
+ ri_change(info, status);
}
static void isr_rdma(struct slgt_info *info)
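
In the synclink_gt hunks above, the DSR/CTS/DCD/RI handlers stop re-reading the modem lines with get_signals() and instead decode the status word that the interrupt routine already latched, so the state they record matches the event that raised the interrupt (and the RI path now bumps icount.rng rather than icount.dcd). The pattern, sketched with invented structure fields and bit names rather than the driver's registers:

	#include <linux/types.h>

	#define EX_STAT_DCD	0x02	/* illustrative bit layout */

	struct example_info {
		unsigned int signals;
		unsigned int dcd_up;
		unsigned int dcd_down;
	};

	/* Decode the status word latched by the ISR instead of re-reading hardware. */
	static void example_dcd_change(struct example_info *info, u16 status)
	{
		if (status & EX_STAT_DCD) {
			info->signals |= EX_STAT_DCD;
			info->dcd_up++;
		} else {
			info->signals &= ~EX_STAT_DCD;
			info->dcd_down++;
		}
	}
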
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index c63013b2fc36..f3e7807f78d9 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -66,6 +66,7 @@
#include <linux/termios.h>
#include <linux/workqueue.h>
#include <linux/hdlc.h>
+#include <linux/synclink.h>
#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINKMP_MODULE))
#define SYNCLINK_GENERIC_HDLC 1
@@ -80,8 +81,6 @@
#include <asm/uaccess.h>
-#include "linux/synclink.h"
-
static MGSL_PARAMS default_params = {
MGSL_MODE_HDLC, /* unsigned long mode */
0, /* unsigned char loopback; */
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index 5422f999636f..ce5ebe3b168f 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -505,7 +505,7 @@ static int __init toshiba_init(void)
if (tosh_probe())
return -ENODEV;
- printk(KERN_INFO "Toshiba System Managment Mode driver v" TOSH_VERSION "\n");
+ printk(KERN_INFO "Toshiba System Management Mode driver v" TOSH_VERSION "\n");
/* set the port to use for Fn status if not specified as a parameter */
if (tosh_fn==0x00)
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index c88424a0c89b..a5d8bcb40000 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1031,18 +1031,13 @@ void tpm_remove_hardware(struct device *dev)
spin_unlock(&driver_lock);
- dev_set_drvdata(dev, NULL);
misc_deregister(&chip->vendor.miscdev);
- kfree(chip->vendor.miscdev.name);
sysfs_remove_group(&dev->kobj, chip->vendor.attr_group);
tpm_bios_log_teardown(chip->bios_dir);
- clear_bit(chip->dev_num, dev_mask);
-
- kfree(chip);
-
- put_device(dev);
+ /* write it this way to be explicit (chip->dev == dev) */
+ put_device(chip->dev);
}
EXPORT_SYMBOL_GPL(tpm_remove_hardware);
@@ -1083,6 +1078,26 @@ int tpm_pm_resume(struct device *dev)
EXPORT_SYMBOL_GPL(tpm_pm_resume);
/*
+ * Once all references to platform device are down to 0,
+ * release all allocated structures.
+ * In case vendor provided release function,
+ * call it too.
+ */
+static void tpm_dev_release(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ if (chip->vendor.release)
+ chip->vendor.release(dev);
+
+ chip->release(dev);
+
+ clear_bit(chip->dev_num, dev_mask);
+ kfree(chip->vendor.miscdev.name);
+ kfree(chip);
+}
+
+/*
* Called from tpm_<specific>.c probe function only for devices
* the driver has determined it should claim. Prior to calling
* this function the specific probe function has called pci_enable_device
@@ -1136,23 +1151,21 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend
chip->vendor.miscdev.parent = dev;
chip->dev = get_device(dev);
+ chip->release = dev->release;
+ dev->release = tpm_dev_release;
+ dev_set_drvdata(dev, chip);
if (misc_register(&chip->vendor.miscdev)) {
dev_err(chip->dev,
"unable to misc_register %s, minor %d\n",
chip->vendor.miscdev.name,
chip->vendor.miscdev.minor);
- put_device(dev);
- clear_bit(chip->dev_num, dev_mask);
- kfree(chip);
- kfree(devname);
+ put_device(chip->dev);
return NULL;
}
spin_lock(&driver_lock);
- dev_set_drvdata(dev, chip);
-
list_add(&chip->list, &tpm_chip_list);
spin_unlock(&driver_lock);
@@ -1160,10 +1173,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend
if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
list_del(&chip->list);
misc_deregister(&chip->vendor.miscdev);
- put_device(dev);
- clear_bit(chip->dev_num, dev_mask);
- kfree(chip);
- kfree(devname);
+ put_device(chip->dev);
return NULL;
}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index d15ccddc92eb..e885148b4cfb 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -74,6 +74,7 @@ struct tpm_vendor_specific {
int (*send) (struct tpm_chip *, u8 *, size_t);
void (*cancel) (struct tpm_chip *);
u8 (*status) (struct tpm_chip *);
+ void (*release) (struct device *);
struct miscdevice miscdev;
struct attribute_group *attr_group;
struct list_head list;
@@ -106,6 +107,7 @@ struct tpm_chip {
struct dentry **bios_dir;
struct list_head list;
+ void (*release) (struct device *);
};
#define to_tpm_chip(n) container_of(n, struct tpm_chip, vendor)
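
The tpm.c rework above moves the freeing of the chip structure out of tpm_remove_hardware() and into a release callback installed on the struct device, so the memory stays valid until the last put_device() drops the reference; the device's original release function is saved and chained. A minimal sketch of that ownership pattern, with hypothetical names rather than the TPM code's own:

	#include <linux/device.h>
	#include <linux/slab.h>

	struct example_chip {
		struct device *dev;
		void (*saved_release)(struct device *dev);
	};

	/* Runs only when the last reference to the device is dropped. */
	static void example_dev_release(struct device *dev)
	{
		struct example_chip *chip = dev_get_drvdata(dev);

		if (chip->saved_release)
			chip->saved_release(dev);	/* chain to the original owner */
		kfree(chip);
	}

	static struct example_chip *example_register(struct device *dev)
	{
		struct example_chip *chip = kzalloc(sizeof(*chip), GFP_KERNEL);

		if (!chip)
			return NULL;

		chip->dev = get_device(dev);		/* hold a reference */
		chip->saved_release = dev->release;	/* remember the original callback */
		dev->release = example_dev_release;
		dev_set_drvdata(dev, chip);
		return chip;
	}
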
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 967002a5a1e5..726ee8a0277f 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -611,7 +611,7 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
}
}
-static struct pnp_driver tpm_inf_pnp = {
+static struct pnp_driver tpm_inf_pnp_driver = {
.name = "tpm_inf_pnp",
.driver = {
.owner = THIS_MODULE,
@@ -625,12 +625,12 @@ static struct pnp_driver tpm_inf_pnp = {
static int __init init_inf(void)
{
- return pnp_register_driver(&tpm_inf_pnp);
+ return pnp_register_driver(&tpm_inf_pnp_driver);
}
static void __exit cleanup_inf(void)
{
- pnp_unregister_driver(&tpm_inf_pnp);
+ pnp_unregister_driver(&tpm_inf_pnp_driver);
}
module_init(init_inf);
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index f36fecd3fd26..79c86c47947f 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -138,7 +138,7 @@ EXPORT_SYMBOL(tty_mutex);
extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */
extern int pty_limit; /* Config limit on Unix98 ptys */
static DEFINE_IDR(allocated_ptys);
-static DECLARE_MUTEX(allocated_ptys_lock);
+static DEFINE_MUTEX(allocated_ptys_lock);
static int ptmx_open(struct inode *, struct file *);
#endif
@@ -2571,9 +2571,9 @@ static void release_dev(struct file * filp)
#ifdef CONFIG_UNIX98_PTYS
/* Make this pty number available for reallocation */
if (devpts) {
- down(&allocated_ptys_lock);
+ mutex_lock(&allocated_ptys_lock);
idr_remove(&allocated_ptys, idx);
- up(&allocated_ptys_lock);
+ mutex_unlock(&allocated_ptys_lock);
}
#endif
@@ -2737,24 +2737,24 @@ static int ptmx_open(struct inode * inode, struct file * filp)
nonseekable_open(inode, filp);
/* find a device that is not in use. */
- down(&allocated_ptys_lock);
+ mutex_lock(&allocated_ptys_lock);
if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) {
- up(&allocated_ptys_lock);
+ mutex_unlock(&allocated_ptys_lock);
return -ENOMEM;
}
idr_ret = idr_get_new(&allocated_ptys, NULL, &index);
if (idr_ret < 0) {
- up(&allocated_ptys_lock);
+ mutex_unlock(&allocated_ptys_lock);
if (idr_ret == -EAGAIN)
return -ENOMEM;
return -EIO;
}
if (index >= pty_limit) {
idr_remove(&allocated_ptys, index);
- up(&allocated_ptys_lock);
+ mutex_unlock(&allocated_ptys_lock);
return -EIO;
}
- up(&allocated_ptys_lock);
+ mutex_unlock(&allocated_ptys_lock);
mutex_lock(&tty_mutex);
retval = init_dev(ptm_driver, index, &tty);
@@ -2781,9 +2781,9 @@ out1:
release_dev(filp);
return retval;
out:
- down(&allocated_ptys_lock);
+ mutex_lock(&allocated_ptys_lock);
idr_remove(&allocated_ptys, index);
- up(&allocated_ptys_lock);
+ mutex_unlock(&allocated_ptys_lock);
return retval;
}
#endif
@@ -3721,7 +3721,6 @@ static void initialize_tty_struct(struct tty_struct *tty)
tty->buf.head = tty->buf.tail = NULL;
tty_buffer_init(tty);
INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
- init_MUTEX(&tty->buf.pty_sem);
mutex_init(&tty->termios_mutex);
init_waitqueue_head(&tty->write_wait);
init_waitqueue_head(&tty->read_wait);
@@ -4048,10 +4047,6 @@ void __init console_init(void)
}
}
-#ifdef CONFIG_VT
-extern int vty_init(void);
-#endif
-
static int __init tty_class_init(void)
{
tty_class = class_create(THIS_MODULE, "tty");
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index e34da5c97196..dc17fe3a88bc 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -158,13 +158,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
/* Find the input queue. */
/* FIXME: This is why we want to wean off hvc: we do nothing
* when input comes in. */
- in_vq = vdev->config->find_vq(vdev, NULL);
+ in_vq = vdev->config->find_vq(vdev, 0, NULL);
if (IS_ERR(in_vq)) {
err = PTR_ERR(in_vq);
goto free;
}
- out_vq = vdev->config->find_vq(vdev, NULL);
+ out_vq = vdev->config->find_vq(vdev, 1, NULL);
if (IS_ERR(out_vq)) {
err = PTR_ERR(out_vq);
goto free_in_vq;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 7a5badfb7d84..367be9175061 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2400,13 +2400,15 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
{
struct vc_data *vc = vc_cons[fg_console].d;
unsigned char c;
- static unsigned long printing;
+ static DEFINE_SPINLOCK(printing_lock);
const ushort *start;
ushort cnt = 0;
ushort myx;
/* console busy or not yet initialized */
- if (!printable || test_and_set_bit(0, &printing))
+ if (!printable)
+ return;
+ if (!spin_trylock(&printing_lock))
return;
if (kmsg_redirect && vc_cons_allocated(kmsg_redirect - 1))
@@ -2481,7 +2483,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
notify_update(vc);
quit:
- clear_bit(0, &printing);
+ spin_unlock(&printing_lock);
}
static struct tty_driver *vt_console_device(struct console *c, int *index)
diff --git a/drivers/char/xilinx_hwicap/Makefile b/drivers/char/xilinx_hwicap/Makefile
new file mode 100644
index 000000000000..5491cbc42f43
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Xilinx OPB hwicap driver
+#
+
+obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap_m.o
+
+xilinx_hwicap_m-y := xilinx_hwicap.o fifo_icap.o buffer_icap.o
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c
new file mode 100644
index 000000000000..dfea2bde162b
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/buffer_icap.c
@@ -0,0 +1,380 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * Xilinx products are not intended for use in life support appliances,
+ * devices, or systems. Use in such applications is expressly prohibited.
+ *
+ * (c) Copyright 2003-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#include "buffer_icap.h"
+
+/* Indicates how many bytes will fit in a buffer. (1 BRAM) */
+#define XHI_MAX_BUFFER_BYTES 2048
+#define XHI_MAX_BUFFER_INTS (XHI_MAX_BUFFER_BYTES >> 2)
+
+/* File access and error constants */
+#define XHI_DEVICE_READ_ERROR -1
+#define XHI_DEVICE_WRITE_ERROR -2
+#define XHI_BUFFER_OVERFLOW_ERROR -3
+
+#define XHI_DEVICE_READ 0x1
+#define XHI_DEVICE_WRITE 0x0
+
+/* Constants for checking transfer status */
+#define XHI_CYCLE_DONE 0
+#define XHI_CYCLE_EXECUTING 1
+
+/* buffer_icap register offsets */
+
+/* Size of transfer, read & write */
+#define XHI_SIZE_REG_OFFSET 0x800L
+/* offset into bram, read & write */
+#define XHI_BRAM_OFFSET_REG_OFFSET 0x804L
+/* Read not Configure, direction of transfer. Write only */
+#define XHI_RNC_REG_OFFSET 0x808L
+/* Indicates transfer complete. Read only */
+#define XHI_STATUS_REG_OFFSET 0x80CL
+
+/* Constants for setting the RNC register */
+#define XHI_CONFIGURE 0x0UL
+#define XHI_READBACK 0x1UL
+
+/* Constants for the Done register */
+#define XHI_NOT_FINISHED 0x0UL
+#define XHI_FINISHED 0x1UL
+
+#define XHI_BUFFER_START 0
+
+/**
+ * buffer_icap_get_status: Get the contents of the status register.
+ * @parameter base_address: is the base address of the device
+ *
+ * The status register contains the ICAP status and the done bit.
+ *
+ * D8 - cfgerr
+ * D7 - dalign
+ * D6 - rip
+ * D5 - in_abort_l
+ * D4 - Always 1
+ * D3 - Always 1
+ * D2 - Always 1
+ * D1 - Always 1
+ * D0 - Done bit
+ **/
+static inline u32 buffer_icap_get_status(void __iomem *base_address)
+{
+ return in_be32(base_address + XHI_STATUS_REG_OFFSET);
+}
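As an illustration only (these mask names are invented for this sketch and are not defined by the patch; only the done bit, D0, is consumed by the helpers that follow), the status bits documented above could be decoded symbolically instead of with magic numbers:

	#define EX_ICAP_SR_CFGERR	(1 << 8)	/* D8 - configuration error */
	#define EX_ICAP_SR_DALIGN	(1 << 7)	/* D7 - data alignment */
	#define EX_ICAP_SR_RIP		(1 << 6)	/* D6 - readback in progress */
	#define EX_ICAP_SR_DONE		(1 << 0)	/* D0 - done bit */

	/* Sketch: reuses buffer_icap_get_status() defined just above. */
	static inline bool example_icap_cfg_error(void __iomem *base_address)
	{
		return buffer_icap_get_status(base_address) & EX_ICAP_SR_CFGERR;
	}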
+
+/**
+ * buffer_icap_get_bram: Reads data from the storage buffer bram.
+ * @parameter base_address: contains the base address of the component.
+ * @parameter offset: The word offset from which the data should be read.
+ *
+ * A bram is used as a configuration memory cache. One frame of data can
+ * be stored in this "storage buffer".
+ **/
+static inline u32 buffer_icap_get_bram(void __iomem *base_address,
+ u32 offset)
+{
+ return in_be32(base_address + (offset << 2));
+}
+
+/**
+ * buffer_icap_busy: Return true if the icap device is busy
+ * @parameter base_address: is the base address of the device
+ *
+ * This queries the low order bit of the status register, which
+ * indicates whether the current configuration or readback operation
+ * has completed.
+ **/
+static inline bool buffer_icap_busy(void __iomem *base_address)
+{
+ return (buffer_icap_get_status(base_address) & 1) == XHI_NOT_FINISHED;
+}
+
+/**
+ * buffer_icap_done: Return true if the icap device is not busy
+ * @parameter base_address: is the base address of the device
+ *
+ * This queries the low order bit of the status register, which
+ * indicates whether the current configuration or readback operation
+ * has completed.
+ **/
+static inline bool buffer_icap_done(void __iomem *base_address)
+{
+ return (buffer_icap_get_status(base_address) & 1) == XHI_FINISHED;
+}
+
+/**
+ * buffer_icap_set_size: Set the size register.
+ * @parameter base_address: is the base address of the device
+ * @parameter data: The size in bytes.
+ *
+ * The size register holds the number of 8 bit bytes to transfer between
+ * bram and the icap (or icap to bram).
+ **/
+static inline void buffer_icap_set_size(void __iomem *base_address,
+ u32 data)
+{
+ out_be32(base_address + XHI_SIZE_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_offset: Set the bram offset register.
+ * @parameter base_address: contains the base address of the device.
+ * @parameter data: is the value to be written to the data register.
+ *
+ * The bram offset register holds the starting bram address to transfer
+ * data from during configuration or write data to during readback.
+ **/
+static inline void buffer_icap_set_offset(void __iomem *base_address,
+ u32 data)
+{
+ out_be32(base_address + XHI_BRAM_OFFSET_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_rnc: Set the RNC (Readback not Configure) register.
+ * @parameter base_address: contains the base address of the device.
+ * @parameter data: is the value to be written to the data register.
+ *
+ * The RNC register determines the direction of the data transfer. It
+ * controls whether a configuration or readback take place. Writing to
+ * this register initiates the transfer. A value of 1 initiates a
+ * readback while writing a value of 0 initiates a configuration.
+ **/
+static inline void buffer_icap_set_rnc(void __iomem *base_address,
+ u32 data)
+{
+ out_be32(base_address + XHI_RNC_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_bram: Write data to the storage buffer bram.
+ * @parameter base_address: contains the base address of the component.
+ * @parameter offset: The word offset at which the data should be written.
+ * @parameter data: The value to be written to the bram offset.
+ *
+ * A bram is used as a configuration memory cache. One frame of data can
+ * be stored in this "storage buffer".
+ **/
+static inline void buffer_icap_set_bram(void __iomem *base_address,
+ u32 offset, u32 data)
+{
+ out_be32(base_address + (offset << 2), data);
+}
+
+/**
+ * buffer_icap_device_read: Transfer bytes from ICAP to the storage buffer.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter offset: The storage buffer start address.
+ * @parameter count: The number of words (32 bit) to read from the
+ * device (ICAP).
+ **/
+static int buffer_icap_device_read(struct hwicap_drvdata *drvdata,
+ u32 offset, u32 count)
+{
+
+ s32 retries = 0;
+ void __iomem *base_address = drvdata->base_address;
+
+ if (buffer_icap_busy(base_address))
+ return -EBUSY;
+
+ if ((offset + count) > XHI_MAX_BUFFER_INTS)
+ return -EINVAL;
+
+ /* setSize count*4 to get bytes. */
+ buffer_icap_set_size(base_address, (count << 2));
+ buffer_icap_set_offset(base_address, offset);
+ buffer_icap_set_rnc(base_address, XHI_READBACK);
+
+ while (buffer_icap_busy(base_address)) {
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EBUSY;
+ }
+ return 0;
+
+};
+
+/**
+ * buffer_icap_device_write: Transfer bytes from the storage buffer to the ICAP.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter offset: The storage buffer start address.
+ * @parameter count: The number of words (32 bit) to write to the
+ * device (ICAP).
+ **/
+static int buffer_icap_device_write(struct hwicap_drvdata *drvdata,
+ u32 offset, u32 count)
+{
+
+ s32 retries = 0;
+ void __iomem *base_address = drvdata->base_address;
+
+ if (buffer_icap_busy(base_address))
+ return -EBUSY;
+
+ if ((offset + count) > XHI_MAX_BUFFER_INTS)
+ return -EINVAL;
+
+ /* setSize count*4 to get bytes. */
+ buffer_icap_set_size(base_address, count << 2);
+ buffer_icap_set_offset(base_address, offset);
+ buffer_icap_set_rnc(base_address, XHI_CONFIGURE);
+
+ while (buffer_icap_busy(base_address)) {
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EBUSY;
+ }
+ return 0;
+
+};
+
+/**
+ * buffer_icap_reset: Reset the logic of the icap device.
+ * @parameter drvdata: a pointer to the drvdata.
+ *
+ * Writing to the status register resets the ICAP logic in an internal
+ * version of the core. For the version of the core published in EDK,
+ * this is a noop.
+ **/
+void buffer_icap_reset(struct hwicap_drvdata *drvdata)
+{
+ out_be32(drvdata->base_address + XHI_STATUS_REG_OFFSET, 0xFEFE);
+}
+
+/**
+ * buffer_icap_set_configuration: Load a partial bitstream from system memory.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter data: Kernel address of the partial bitstream.
+ * @parameter size: the size of the partial bitstream in 32 bit words.
+ **/
+int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size)
+{
+ int status;
+ s32 buffer_count = 0;
+ s32 num_writes = 0;
+ bool dirty = 0;
+ u32 i;
+ void __iomem *base_address = drvdata->base_address;
+
+ /* Loop through all the data */
+ for (i = 0, buffer_count = 0; i < size; i++) {
+
+ /* Copy data to bram */
+ buffer_icap_set_bram(base_address, buffer_count, data[i]);
+ dirty = 1;
+
+ if (buffer_count < XHI_MAX_BUFFER_INTS - 1) {
+ buffer_count++;
+ continue;
+ }
+
+ /* Write data to ICAP */
+ status = buffer_icap_device_write(
+ drvdata,
+ XHI_BUFFER_START,
+ XHI_MAX_BUFFER_INTS);
+ if (status != 0) {
+ /* abort. */
+ buffer_icap_reset(drvdata);
+ return status;
+ }
+
+ buffer_count = 0;
+ num_writes++;
+ dirty = 0;
+ }
+
+ /* Write unwritten data to ICAP */
+ if (dirty) {
+ /* Write data to ICAP */
+ status = buffer_icap_device_write(drvdata, XHI_BUFFER_START,
+ buffer_count);
+ if (status != 0) {
+ /* abort. */
+ buffer_icap_reset(drvdata);
+ }
+ return status;
+ }
+
+ return 0;
+};
+
+/**
+ * buffer_icap_get_configuration: Read configuration data from the device.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter data: Address of the data representing the partial bitstream
+ * @parameter size: the size of the partial bitstream in 32 bit words.
+ **/
+int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size)
+{
+ int status;
+ s32 buffer_count = 0;
+ s32 read_count = 0;
+ u32 i;
+ void __iomem *base_address = drvdata->base_address;
+
+ /* Loop through all the data */
+ for (i = 0, buffer_count = XHI_MAX_BUFFER_INTS; i < size; i++) {
+ if (buffer_count == XHI_MAX_BUFFER_INTS) {
+ u32 words_remaining = size - i;
+ u32 words_to_read =
+ words_remaining <
+ XHI_MAX_BUFFER_INTS ? words_remaining :
+ XHI_MAX_BUFFER_INTS;
+
+ /* Read data from ICAP */
+ status = buffer_icap_device_read(
+ drvdata,
+ XHI_BUFFER_START,
+ words_to_read);
+ if (status != 0) {
+ /* abort. */
+ buffer_icap_reset(drvdata);
+ return status;
+ }
+
+ buffer_count = 0;
+ read_count++;
+ }
+
+ /* Copy data from bram */
+ data[i] = buffer_icap_get_bram(base_address, buffer_count);
+ buffer_count++;
+ }
+
+ return 0;
+};
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.h b/drivers/char/xilinx_hwicap/buffer_icap.h
new file mode 100644
index 000000000000..03184959fa00
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/buffer_icap.h
@@ -0,0 +1,57 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * Xilinx products are not intended for use in life support appliances,
+ * devices, or systems. Use in such applications is expressly prohibited.
+ *
+ * (c) Copyright 2003-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_BUFFER_ICAP_H_ /* prevent circular inclusions */
+#define XILINX_BUFFER_ICAP_H_ /* by using protection macros */
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include "xilinx_hwicap.h"
+
+void buffer_icap_reset(struct hwicap_drvdata *drvdata);
+
+/* Loads a partial bitstream from system memory. */
+int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 Size);
+
+/* Reads a partial bitstream from the device back into system memory. */
+int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 Size);
+
+#endif
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c
new file mode 100644
index 000000000000..0988314694a6
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/fifo_icap.c
@@ -0,0 +1,381 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * Xilinx products are not intended for use in life support appliances,
+ * devices, or systems. Use in such applications is expressly prohibited.
+ *
+ * (c) Copyright 2007-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#include "fifo_icap.h"
+
+/* Register offsets for the XHwIcap device. */
+#define XHI_GIER_OFFSET 0x1C /* Device Global Interrupt Enable Reg */
+#define XHI_IPISR_OFFSET 0x20 /* Interrupt Status Register */
+#define XHI_IPIER_OFFSET 0x28 /* Interrupt Enable Register */
+#define XHI_WF_OFFSET 0x100 /* Write FIFO */
+#define XHI_RF_OFFSET 0x104 /* Read FIFO */
+#define XHI_SZ_OFFSET 0x108 /* Size Register */
+#define XHI_CR_OFFSET 0x10C /* Control Register */
+#define XHI_SR_OFFSET 0x110 /* Status Register */
+#define XHI_WFV_OFFSET 0x114 /* Write FIFO Vacancy Register */
+#define XHI_RFO_OFFSET 0x118 /* Read FIFO Occupancy Register */
+
+/* Device Global Interrupt Enable Register (GIER) bit definitions */
+
+#define XHI_GIER_GIE_MASK 0x80000000 /* Global Interrupt enable Mask */
+
+/**
+ * HwIcap Device Interrupt Status/Enable Registers
+ *
+ * Interrupt Status Register (IPISR) : This register holds the
+ * interrupt status flags for the device. These bits are toggle on
+ * write.
+ *
+ * Interrupt Enable Register (IPIER) : This register is used to enable
+ * interrupt sources for the device.
+ * Writing a '1' to a bit enables the corresponding interrupt.
+ * Writing a '0' to a bit disables the corresponding interrupt.
+ *
+ * IPISR/IPIER registers have the same bit definitions and are only defined
+ * once.
+ */
+#define XHI_IPIXR_RFULL_MASK 0x00000008 /* Read FIFO Full */
+#define XHI_IPIXR_WEMPTY_MASK 0x00000004 /* Write FIFO Empty */
+#define XHI_IPIXR_RDP_MASK 0x00000002 /* Read FIFO half full */
+#define XHI_IPIXR_WRP_MASK 0x00000001 /* Write FIFO half full */
+#define XHI_IPIXR_ALL_MASK 0x0000000F /* Mask of all interrupts */
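The driver added by this patch operates in polled mode and never touches these interrupt registers; purely as a sketch of the IPIER/IPISR semantics described above (the helper names are invented here, the register and mask macros are the ones defined in this file):

	/* Enable the "write FIFO empty" interrupt plus the global enable. */
	static inline void example_icap_enable_wempty_irq(void __iomem *base)
	{
		out_be32(base + XHI_IPIER_OFFSET, XHI_IPIXR_WEMPTY_MASK);
		out_be32(base + XHI_GIER_OFFSET, XHI_GIER_GIE_MASK);
	}

	/* Acknowledge a pending "write FIFO empty" interrupt by writing its
	 * bit back to the status register (toggle on write, as noted above). */
	static inline void example_icap_ack_wempty_irq(void __iomem *base)
	{
		out_be32(base + XHI_IPISR_OFFSET, XHI_IPIXR_WEMPTY_MASK);
	}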
+
+/* Control Register (CR) */
+#define XHI_CR_SW_RESET_MASK 0x00000008 /* SW Reset Mask */
+#define XHI_CR_FIFO_CLR_MASK 0x00000004 /* FIFO Clear Mask */
+#define XHI_CR_READ_MASK 0x00000002 /* Read from ICAP to FIFO */
+#define XHI_CR_WRITE_MASK 0x00000001 /* Write from FIFO to ICAP */
+
+/* Status Register (SR) */
+#define XHI_SR_CFGERR_N_MASK 0x00000100 /* Config Error Mask */
+#define XHI_SR_DALIGN_MASK 0x00000080 /* Data Alignment Mask */
+#define XHI_SR_RIP_MASK 0x00000040 /* Read back Mask */
+#define XHI_SR_IN_ABORT_N_MASK 0x00000020 /* Select Map Abort Mask */
+#define XHI_SR_DONE_MASK 0x00000001 /* Done bit Mask */
+
+
+#define XHI_WFO_MAX_VACANCY 1024 /* Max Write FIFO Vacancy, in words */
+#define XHI_RFO_MAX_OCCUPANCY 256 /* Max Read FIFO Occupancy, in words */
+/* The maximum amount we can request from fifo_icap_get_configuration
+   at once, in words. */
+#define XHI_MAX_READ_TRANSACTION_WORDS 0xFFF
+
+
+/**
+ * fifo_icap_fifo_write: Write data to the write FIFO.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter data: the 32-bit value to be written to the FIFO.
+ *
+ * This function will silently fail if the fifo is full.
+ **/
+static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata,
+ u32 data)
+{
+ dev_dbg(drvdata->dev, "fifo_write: %x\n", data);
+ out_be32(drvdata->base_address + XHI_WF_OFFSET, data);
+}
+
+/**
+ * fifo_icap_fifo_read: Read data from the Read FIFO.
+ * @parameter drvdata: a pointer to the drvdata.
+ *
+ * This function will silently fail if the fifo is empty.
+ **/
+static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata)
+{
+ u32 data = in_be32(drvdata->base_address + XHI_RF_OFFSET);
+ dev_dbg(drvdata->dev, "fifo_read: %x\n", data);
+ return data;
+}
+
+/**
+ * fifo_icap_set_read_size: Set the size register.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter data: the size of the following read transaction, in words.
+ **/
+static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata,
+ u32 data)
+{
+ out_be32(drvdata->base_address + XHI_SZ_OFFSET, data);
+}
+
+/**
+ * fifo_icap_start_config: Initiate a configuration (write) to the device.
+ * @parameter drvdata: a pointer to the drvdata.
+ **/
+static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata)
+{
+ out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_WRITE_MASK);
+ dev_dbg(drvdata->dev, "configuration started\n");
+}
+
+/**
+ * fifo_icap_start_readback: Initiate a readback from the device.
+ * @parameter drvdata: a pointer to the drvdata.
+ **/
+static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata)
+{
+ out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_READ_MASK);
+ dev_dbg(drvdata->dev, "readback started\n");
+}
+
+/**
+ * fifo_icap_busy: Return true if the ICAP is still processing a transaction.
+ * @parameter drvdata: a pointer to the drvdata.
+ **/
+static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata)
+{
+ u32 status = in_be32(drvdata->base_address + XHI_SR_OFFSET);
+ dev_dbg(drvdata->dev, "Getting status = %x\n", status);
+ return (status & XHI_SR_DONE_MASK) ? 0 : 1;
+}
+
+/**
+ * fifo_icap_write_fifo_vacancy: Query the write fifo available space.
+ * @parameter drvdata: a pointer to the drvdata.
+ *
+ * Return the number of words that can be safely pushed into the write fifo.
+ **/
+static inline u32 fifo_icap_write_fifo_vacancy(
+ struct hwicap_drvdata *drvdata)
+{
+ return in_be32(drvdata->base_address + XHI_WFV_OFFSET);
+}
+
+/**
+ * fifo_icap_read_fifo_occupancy: Query the read fifo available data.
+ * @parameter drvdata: a pointer to the drvdata.
+ *
+ * Return the number of words that can be safely read from the read fifo.
+ **/
+static inline u32 fifo_icap_read_fifo_occupancy(
+ struct hwicap_drvdata *drvdata)
+{
+ return in_be32(drvdata->base_address + XHI_RFO_OFFSET);
+}
+
+/**
+ * fifo_icap_set_configuration: Send configuration data to the ICAP.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter frame_buffer: a pointer to the data to be written to the
+ * ICAP device.
+ * @parameter num_words: the number of words (32 bit) to write to the ICAP
+ * device.
+ *
+ * This function writes the given user data to the Write FIFO in
+ * polled mode and starts the transfer of the data to
+ * the ICAP device.
+ **/
+int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata,
+ u32 *frame_buffer, u32 num_words)
+{
+
+ u32 write_fifo_vacancy = 0;
+ u32 retries = 0;
+ u32 remaining_words;
+
+ dev_dbg(drvdata->dev, "fifo_set_configuration\n");
+
+ /*
+ * Check if the ICAP device is Busy with the last Read/Write
+ */
+ if (fifo_icap_busy(drvdata))
+ return -EBUSY;
+
+ /*
+ * Set up the buffer pointer and the words to be transferred.
+ */
+ remaining_words = num_words;
+
+ while (remaining_words > 0) {
+ /*
+ * Wait until there is space in the write FIFO.
+ */
+ while (write_fifo_vacancy == 0) {
+ write_fifo_vacancy =
+ fifo_icap_write_fifo_vacancy(drvdata);
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EIO;
+ }
+
+ /*
+ * Write data into the Write FIFO.
+ */
+ while ((write_fifo_vacancy != 0) &&
+ (remaining_words > 0)) {
+ fifo_icap_fifo_write(drvdata, *frame_buffer);
+
+ remaining_words--;
+ write_fifo_vacancy--;
+ frame_buffer++;
+ }
+ /* Start pushing whatever is in the FIFO into the ICAP. */
+ fifo_icap_start_config(drvdata);
+ }
+
+ /* Wait until the write has finished. */
+ while (fifo_icap_busy(drvdata)) {
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ break;
+ }
+
+ dev_dbg(drvdata->dev, "done fifo_set_configuration\n");
+
+ /*
+ * If the requested number of words has not been written to
+ * the device, then indicate failure.
+ */
+ if (remaining_words != 0)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * fifo_icap_get_configuration: Read configuration data from the device.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter frame_buffer: Address of the buffer that receives the
+ * configuration data read from the device.
+ * @parameter num_words: the number of words (32 bit) to read.
+ *
+ * This function reads the specified number of words from the ICAP device in
+ * the polled mode.
+ */
+int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata,
+ u32 *frame_buffer, u32 num_words)
+{
+
+ u32 read_fifo_occupancy = 0;
+ u32 retries = 0;
+ u32 *data = frame_buffer;
+ u32 remaining_words;
+ u32 words_to_read;
+
+ dev_dbg(drvdata->dev, "fifo_get_configuration\n");
+
+ /*
+ * Check if the ICAP device is Busy with the last Write/Read
+ */
+ if (fifo_icap_busy(drvdata))
+ return -EBUSY;
+
+ remaining_words = num_words;
+
+ while (remaining_words > 0) {
+ words_to_read = remaining_words;
+ /* The hardware has a limit on the number of words
+ that can be read at one time. */
+ if (words_to_read > XHI_MAX_READ_TRANSACTION_WORDS)
+ words_to_read = XHI_MAX_READ_TRANSACTION_WORDS;
+
+ remaining_words -= words_to_read;
+
+ fifo_icap_set_read_size(drvdata, words_to_read);
+ fifo_icap_start_readback(drvdata);
+
+ while (words_to_read > 0) {
+ /* Wait until we have some data in the fifo. */
+ while (read_fifo_occupancy == 0) {
+ read_fifo_occupancy =
+ fifo_icap_read_fifo_occupancy(drvdata);
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EIO;
+ }
+
+ if (read_fifo_occupancy > words_to_read)
+ read_fifo_occupancy = words_to_read;
+
+ words_to_read -= read_fifo_occupancy;
+
+ /* Read the data from the Read FIFO. */
+ while (read_fifo_occupancy != 0) {
+ *data++ = fifo_icap_fifo_read(drvdata);
+ read_fifo_occupancy--;
+ }
+ }
+ }
+
+ dev_dbg(drvdata->dev, "done fifo_get_configuration\n");
+
+ return 0;
+}
+
+/**
+ * fifo_icap_reset: Reset the logic of the icap device.
+ * @parameter drvdata: a pointer to the drvdata.
+ *
+ * This function forces the software reset of the complete HWICAP device.
+ * All the registers will return to the default value and the FIFO is also
+ * flushed as a part of this software reset.
+ */
+void fifo_icap_reset(struct hwicap_drvdata *drvdata)
+{
+ u32 reg_data;
+ /*
+ * Reset the device by setting/clearing the RESET bit in the
+ * Control Register.
+ */
+ reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data | XHI_CR_SW_RESET_MASK);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data & (~XHI_CR_SW_RESET_MASK));
+
+}
+
+/**
+ * fifo_icap_flush_fifo: This function flushes the FIFOs in the device.
+ * @parameter drvdata: a pointer to the drvdata.
+ */
+void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata)
+{
+ u32 reg_data;
+ /*
+ * Flush the FIFO by setting/clearing the FIFO Clear bit in the
+ * Control Register.
+ */
+ reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data | XHI_CR_FIFO_CLR_MASK);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data & (~XHI_CR_FIFO_CLR_MASK));
+}
+
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.h b/drivers/char/xilinx_hwicap/fifo_icap.h
new file mode 100644
index 000000000000..4d3068dd0405
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/fifo_icap.h
@@ -0,0 +1,62 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * Xilinx products are not intended for use in life support appliances,
+ * devices, or systems. Use in such applications is expressly prohibited.
+ *
+ * (c) Copyright 2007-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_FIFO_ICAP_H_ /* prevent circular inclusions */
+#define XILINX_FIFO_ICAP_H_ /* by using protection macros */
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include "xilinx_hwicap.h"
+
+/* Reads integers from the device into the storage buffer. */
+int fifo_icap_get_configuration(
+ struct hwicap_drvdata *drvdata,
+ u32 *FrameBuffer,
+ u32 NumWords);
+
+/* Writes integers to the device from the storage buffer. */
+int fifo_icap_set_configuration(
+ struct hwicap_drvdata *drvdata,
+ u32 *FrameBuffer,
+ u32 NumWords);
+
+void fifo_icap_reset(struct hwicap_drvdata *drvdata);
+void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata);
+
+#endif
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
new file mode 100644
index 000000000000..24f6aef0fd3c
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -0,0 +1,904 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * Xilinx products are not intended for use in life support appliances,
+ * devices, or systems. Use in such applications is expressly prohibited.
+ *
+ * (c) Copyright 2002 Xilinx Inc., Systems Engineering Group
+ * (c) Copyright 2004 Xilinx Inc., Systems Engineering Group
+ * (c) Copyright 2007-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+/*
+ * This is the code behind /dev/xilinx_icap -- it allows a user-space
+ * application to use the Xilinx ICAP subsystem.
+ *
+ * The following operations are possible:
+ *
+ * open open the port and initialize for access.
+ * release release port
+ * write Write a bitstream to the configuration processor.
+ * read Read a data stream from the configuration processor.
+ *
+ * After being opened, the port is initialized and accessed to avoid a
+ * corrupted first read which may occur with some hardware. The port
+ * is left in a desynched state, requiring that a synch sequence be
+ * transmitted before any valid configuration data. A user will have
+ * exclusive access to the device while it remains open, and the state
+ * of the ICAP cannot be guaranteed after the device is closed. Note
+ * that a complete reset of the core and the state of the ICAP cannot
+ * be performed on many versions of the cores, hence users of this
+ * device should avoid making inconsistent accesses to the device. In
+ * particular, accessing the read interface, without first generating
+ * a write containing a readback packet can leave the ICAP in an
+ * inaccessible state.
+ *
+ * Note that in order to use the read interface, it is first necessary
+ * to write a request packet to the write interface. i.e., it is not
+ * possible to simply readback the bitstream (or any configuration
+ * bits) from a device without specifically requesting them first.
+ * The code to craft such packets is intended to be part of the
+ * user-space application code that uses this device. The simplest
+ * way to use this interface is simply:
+ *
+ * cp foo.bit /dev/xilinx_icap
+ *
+ * Note that unless foo.bit is an appropriately constructed partial
+ * bitstream, this has a high likelihood of overwriting the design
+ * currently programmed in the FPGA.
+ */
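A minimal user-space sketch of the write-then-read sequence described above, assuming the application has already crafted a readback request packet as the comment requires (the helper name and packet contents are placeholders, not part of this driver):

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	/* request[] is an application-built readback request packet. */
	int example_icap_readback(const uint32_t *request, size_t req_words,
				  uint32_t *out, size_t out_words)
	{
		int fd = open("/dev/xilinx_icap", O_RDWR);

		if (fd < 0)
			return -1;
		/* Write the request packet first, then read the reply. */
		if (write(fd, request, req_words * 4) < 0 ||
		    read(fd, out, out_words * 4) < 0) {
			close(fd);
			return -1;
		}
		return close(fd);
	}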
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <asm/semaphore.h>
+#include <linux/sysctl.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_OF
+/* For open firmware. */
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#endif
+
+#include "xilinx_hwicap.h"
+#include "buffer_icap.h"
+#include "fifo_icap.h"
+
+#define DRIVER_NAME "xilinx_icap"
+
+#define HWICAP_REGS (0x10000)
+
+/* dynamically allocate device number */
+static int xhwicap_major;
+static int xhwicap_minor;
+#define HWICAP_DEVICES 1
+
+module_param(xhwicap_major, int, S_IRUGO);
+module_param(xhwicap_minor, int, S_IRUGO);
+
+/* An array, which is set to true when the device is registered. */
+static bool probed_devices[HWICAP_DEVICES];
+
+static struct class *icap_class;
+
+#define UNIMPLEMENTED 0xFFFF
+
+static const struct config_registers v2_config_registers = {
+ .CRC = 0,
+ .FAR = 1,
+ .FDRI = 2,
+ .FDRO = 3,
+ .CMD = 4,
+ .CTL = 5,
+ .MASK = 6,
+ .STAT = 7,
+ .LOUT = 8,
+ .COR = 9,
+ .MFWR = 10,
+ .FLR = 11,
+ .KEY = 12,
+ .CBC = 13,
+ .IDCODE = 14,
+ .AXSS = UNIMPLEMENTED,
+ .C0R_1 = UNIMPLEMENTED,
+ .CSOB = UNIMPLEMENTED,
+ .WBSTAR = UNIMPLEMENTED,
+ .TIMER = UNIMPLEMENTED,
+ .BOOTSTS = UNIMPLEMENTED,
+ .CTL_1 = UNIMPLEMENTED,
+};
+
+static const struct config_registers v4_config_registers = {
+ .CRC = 0,
+ .FAR = 1,
+ .FDRI = 2,
+ .FDRO = 3,
+ .CMD = 4,
+ .CTL = 5,
+ .MASK = 6,
+ .STAT = 7,
+ .LOUT = 8,
+ .COR = 9,
+ .MFWR = 10,
+ .FLR = UNIMPLEMENTED,
+ .KEY = UNIMPLEMENTED,
+ .CBC = 11,
+ .IDCODE = 12,
+ .AXSS = 13,
+ .C0R_1 = UNIMPLEMENTED,
+ .CSOB = UNIMPLEMENTED,
+ .WBSTAR = UNIMPLEMENTED,
+ .TIMER = UNIMPLEMENTED,
+ .BOOTSTS = UNIMPLEMENTED,
+ .CTL_1 = UNIMPLEMENTED,
+};
+static const struct config_registers v5_config_registers = {
+ .CRC = 0,
+ .FAR = 1,
+ .FDRI = 2,
+ .FDRO = 3,
+ .CMD = 4,
+ .CTL = 5,
+ .MASK = 6,
+ .STAT = 7,
+ .LOUT = 8,
+ .COR = 9,
+ .MFWR = 10,
+ .FLR = UNIMPLEMENTED,
+ .KEY = UNIMPLEMENTED,
+ .CBC = 11,
+ .IDCODE = 12,
+ .AXSS = 13,
+ .C0R_1 = 14,
+ .CSOB = 15,
+ .WBSTAR = 16,
+ .TIMER = 17,
+ .BOOTSTS = 18,
+ .CTL_1 = 19,
+};
+
+/**
+ * hwicap_command_desync: Send a DESYNC command to the ICAP port.
+ * @parameter drvdata: a pointer to the drvdata.
+ *
+ * This command desynchronizes the ICAP. After this command, a
+ * bitstream containing a NULL packet, followed by a SYNCH packet is
+ * required before the ICAP will recognize commands.
+ */
+int hwicap_command_desync(struct hwicap_drvdata *drvdata)
+{
+ u32 buffer[4];
+ u32 index = 0;
+
+ /*
+ * Create the data to be written to the ICAP.
+ */
+ buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1;
+ buffer[index++] = XHI_CMD_DESYNCH;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+
+ /*
+ * Write the data to the FIFO and initiate the transfer of data present
+ * in the FIFO to the ICAP device.
+ */
+ return drvdata->config->set_configuration(drvdata,
+ &buffer[0], index);
+}
+
+/**
+ * hwicap_command_capture: Send a CAPTURE command to the ICAP port.
+ * @parameter drvdata: a pointer to the drvdata.
+ *
+ * This command captures all of the flip flop states so they will be
+ * available during readback. One can use this command instead of
+ * enabling the CAPTURE block in the design.
+ */
+int hwicap_command_capture(struct hwicap_drvdata *drvdata)
+{
+ u32 buffer[7];
+ u32 index = 0;
+
+ /*
+ * Create the data to be written to the ICAP.
+ */
+ buffer[index++] = XHI_DUMMY_PACKET;
+ buffer[index++] = XHI_SYNC_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1;
+ buffer[index++] = XHI_CMD_GCAPTURE;
+ buffer[index++] = XHI_DUMMY_PACKET;
+ buffer[index++] = XHI_DUMMY_PACKET;
+
+ /*
+ * Write the data to the FIFO and initiate the transfer of data
+ * present in the FIFO to the ICAP device.
+ */
+ return drvdata->config->set_configuration(drvdata,
+ &buffer[0], index);
+
+}
+
+/**
+ * hwicap_get_configuration_register: Query a configuration register.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter reg: a constant which represents the configuration
+ * register value to be returned.
+ * Examples: XHI_IDCODE, XHI_FLR.
+ * @parameter RegData: returns the value of the register.
+ *
+ * Sends a query packet to the ICAP and then receives the response.
+ * The icap is left in Synched state.
+ */
+int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
+ u32 reg, u32 *RegData)
+{
+ int status;
+ u32 buffer[6];
+ u32 index = 0;
+
+ /*
+ * Create the data to be written to the ICAP.
+ */
+ buffer[index++] = XHI_DUMMY_PACKET;
+ buffer[index++] = XHI_SYNC_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = hwicap_type_1_read(reg) | 1;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+
+ /*
+ * Write the data to the FIFO and initiate the transfer of data present
+ * in the FIFO to the ICAP device.
+ */
+ status = drvdata->config->set_configuration(drvdata,
+ &buffer[0], index);
+ if (status)
+ return status;
+
+ /*
+ * Read the configuration register
+ */
+ status = drvdata->config->get_configuration(drvdata, RegData, 1);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
+{
+ int status;
+ u32 idcode;
+
+ dev_dbg(drvdata->dev, "initializing\n");
+
+ /* Abort any current transaction, to make sure we have the
+ * ICAP in a good state. */
+ dev_dbg(drvdata->dev, "Reset...\n");
+ drvdata->config->reset(drvdata);
+
+ dev_dbg(drvdata->dev, "Desync...\n");
+ status = hwicap_command_desync(drvdata);
+ if (status)
+ return status;
+
+ /* Attempt to read the IDCODE from ICAP. This
+ * may not be returned correctly, due to the design of the
+ * hardware.
+ */
+ dev_dbg(drvdata->dev, "Reading IDCODE...\n");
+ status = hwicap_get_configuration_register(
+ drvdata, drvdata->config_regs->IDCODE, &idcode);
+ dev_dbg(drvdata->dev, "IDCODE = %x\n", idcode);
+ if (status)
+ return status;
+
+ dev_dbg(drvdata->dev, "Desync...\n");
+ status = hwicap_command_desync(drvdata);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static ssize_t
+hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos)
+{
+ struct hwicap_drvdata *drvdata = file->private_data;
+ ssize_t bytes_to_read = 0;
+ u32 *kbuf;
+ u32 words;
+ u32 bytes_remaining;
+ int status;
+
+ if (down_interruptible(&drvdata->sem))
+ return -ERESTARTSYS;
+
+ if (drvdata->read_buffer_in_use) {
+ /* If there are leftover bytes in the buffer, just */
+ /* return them and don't try to read more from the */
+ /* ICAP device. */
+ bytes_to_read =
+ (count < drvdata->read_buffer_in_use) ? count :
+ drvdata->read_buffer_in_use;
+
+ /* Return the data currently in the read buffer. */
+ if (copy_to_user(buf, drvdata->read_buffer, bytes_to_read)) {
+ status = -EFAULT;
+ goto error;
+ }
+ drvdata->read_buffer_in_use -= bytes_to_read;
+ memcpy(drvdata->read_buffer,
+ drvdata->read_buffer + bytes_to_read, 4 - bytes_to_read);
+ } else {
+ /* Get new data from the ICAP, and return what was requested. */
+ kbuf = (u32 *) get_zeroed_page(GFP_KERNEL);
+ if (!kbuf) {
+ status = -ENOMEM;
+ goto error;
+ }
+
+ /* The ICAP device is only able to read complete */
+ /* words. If a number of bytes that do not correspond */
+ /* to complete words is requested, then we read enough */
+ /* words to get the required number of bytes, and then */
+ /* save the remaining bytes for the next read. */
+
+ /* Determine the number of words to read, rounding up */
+ /* if necessary. */
+ words = ((count + 3) >> 2);
+ bytes_to_read = words << 2;
+
+ if (bytes_to_read > PAGE_SIZE)
+ bytes_to_read = PAGE_SIZE;
+
+ /* Ensure we only read a complete number of words. */
+ bytes_remaining = bytes_to_read & 3;
+ bytes_to_read &= ~3;
+ words = bytes_to_read >> 2;
+
+ status = drvdata->config->get_configuration(drvdata,
+ kbuf, words);
+
+ /* If we didn't read correctly, then bail out. */
+ if (status) {
+ free_page((unsigned long)kbuf);
+ goto error;
+ }
+
+ /* If we fail to return the data to the user, then bail out. */
+ if (copy_to_user(buf, kbuf, bytes_to_read)) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ memcpy(kbuf, drvdata->read_buffer, bytes_remaining);
+ drvdata->read_buffer_in_use = bytes_remaining;
+ free_page((unsigned long)kbuf);
+ }
+ status = bytes_to_read;
+ error:
+ up(&drvdata->sem);
+ return status;
+}
+
+static ssize_t
+hwicap_write(struct file *file, const char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hwicap_drvdata *drvdata = file->private_data;
+ ssize_t written = 0;
+ ssize_t left = count;
+ u32 *kbuf;
+ ssize_t len;
+ ssize_t status;
+
+ if (down_interruptible(&drvdata->sem))
+ return -ERESTARTSYS;
+
+ left += drvdata->write_buffer_in_use;
+
+ /* Only write multiples of 4 bytes. */
+ if (left < 4) {
+ status = 0;
+ goto error;
+ }
+
+ kbuf = (u32 *) __get_free_page(GFP_KERNEL);
+ if (!kbuf) {
+ status = -ENOMEM;
+ goto error;
+ }
+
+ while (left > 3) {
+ /* only write multiples of 4 bytes, so there might */
+ /* be as many as 3 bytes left (at the end). */
+ len = left;
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ len &= ~3;
+
+ if (drvdata->write_buffer_in_use) {
+ memcpy(kbuf, drvdata->write_buffer,
+ drvdata->write_buffer_in_use);
+ if (copy_from_user(
+ (((char *)kbuf) + (drvdata->write_buffer_in_use)),
+ buf + written,
+ len - (drvdata->write_buffer_in_use))) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ } else {
+ if (copy_from_user(kbuf, buf + written, len)) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ }
+
+ status = drvdata->config->set_configuration(drvdata,
+ kbuf, len >> 2);
+
+ if (status) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ if (drvdata->write_buffer_in_use) {
+ len -= drvdata->write_buffer_in_use;
+ left -= drvdata->write_buffer_in_use;
+ drvdata->write_buffer_in_use = 0;
+ }
+ written += len;
+ left -= len;
+ }
+ if ((left > 0) && (left < 4)) {
+ if (!copy_from_user(drvdata->write_buffer,
+ buf + written, left)) {
+ drvdata->write_buffer_in_use = left;
+ written += left;
+ left = 0;
+ }
+ }
+
+ free_page((unsigned long)kbuf);
+ status = written;
+ error:
+ up(&drvdata->sem);
+ return status;
+}
+
+static int hwicap_open(struct inode *inode, struct file *file)
+{
+ struct hwicap_drvdata *drvdata;
+ int status;
+
+ drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev);
+
+ if (down_interruptible(&drvdata->sem))
+ return -ERESTARTSYS;
+
+ if (drvdata->is_open) {
+ status = -EBUSY;
+ goto error;
+ }
+
+ status = hwicap_initialize_hwicap(drvdata);
+ if (status) {
+ dev_err(drvdata->dev, "Failed to open file");
+ goto error;
+ }
+
+ file->private_data = drvdata;
+ drvdata->write_buffer_in_use = 0;
+ drvdata->read_buffer_in_use = 0;
+ drvdata->is_open = 1;
+
+ error:
+ up(&drvdata->sem);
+ return status;
+}
+
+static int hwicap_release(struct inode *inode, struct file *file)
+{
+ struct hwicap_drvdata *drvdata = file->private_data;
+ int i;
+ int status = 0;
+
+ if (down_interruptible(&drvdata->sem))
+ return -ERESTARTSYS;
+
+ if (drvdata->write_buffer_in_use) {
+ /* Flush write buffer. */
+ for (i = drvdata->write_buffer_in_use; i < 4; i++)
+ drvdata->write_buffer[i] = 0;
+
+ status = drvdata->config->set_configuration(drvdata,
+ (u32 *) drvdata->write_buffer, 1);
+ if (status)
+ goto error;
+ }
+
+ status = hwicap_command_desync(drvdata);
+ if (status)
+ goto error;
+
+ error:
+ drvdata->is_open = 0;
+ up(&drvdata->sem);
+ return status;
+}
+
+static struct file_operations hwicap_fops = {
+ .owner = THIS_MODULE,
+ .write = hwicap_write,
+ .read = hwicap_read,
+ .open = hwicap_open,
+ .release = hwicap_release,
+};
+
+static int __devinit hwicap_setup(struct device *dev, int id,
+ const struct resource *regs_res,
+ const struct hwicap_driver_config *config,
+ const struct config_registers *config_regs)
+{
+ dev_t devt;
+ struct hwicap_drvdata *drvdata = NULL;
+ int retval = 0;
+
+ dev_info(dev, "Xilinx icap port driver\n");
+
+ if (id < 0) {
+ for (id = 0; id < HWICAP_DEVICES; id++)
+ if (!probed_devices[id])
+ break;
+ }
+ if (id < 0 || id >= HWICAP_DEVICES) {
+ dev_err(dev, "%s%i too large\n", DRIVER_NAME, id);
+ return -EINVAL;
+ }
+ if (probed_devices[id]) {
+ dev_err(dev, "cannot assign to %s%i; it is already in use\n",
+ DRIVER_NAME, id);
+ return -EBUSY;
+ }
+
+ probed_devices[id] = 1;
+
+ devt = MKDEV(xhwicap_major, xhwicap_minor + id);
+
+ drvdata = kmalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ dev_err(dev, "Couldn't allocate device private record\n");
+ return -ENOMEM;
+ }
+ memset((void *)drvdata, 0, sizeof(struct hwicap_drvdata));
+ dev_set_drvdata(dev, (void *)drvdata);
+
+ if (!regs_res) {
+ dev_err(dev, "Couldn't get registers resource\n");
+ retval = -EFAULT;
+ goto failed1;
+ }
+
+ drvdata->mem_start = regs_res->start;
+ drvdata->mem_end = regs_res->end;
+ drvdata->mem_size = regs_res->end - regs_res->start + 1;
+
+ if (!request_mem_region(drvdata->mem_start,
+ drvdata->mem_size, DRIVER_NAME)) {
+ dev_err(dev, "Couldn't lock memory region at %p\n",
+ (void *)regs_res->start);
+ retval = -EBUSY;
+ goto failed1;
+ }
+
+ drvdata->devt = devt;
+ drvdata->dev = dev;
+ drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size);
+ if (!drvdata->base_address) {
+ dev_err(dev, "ioremap() failed\n");
+ goto failed2;
+ }
+
+ drvdata->config = config;
+ drvdata->config_regs = config_regs;
+
+ init_MUTEX(&drvdata->sem);
+ drvdata->is_open = 0;
+
+ dev_info(dev, "ioremap %lx to %p with size %x\n",
+ (unsigned long int)drvdata->mem_start,
+ drvdata->base_address, drvdata->mem_size);
+
+ cdev_init(&drvdata->cdev, &hwicap_fops);
+ drvdata->cdev.owner = THIS_MODULE;
+ retval = cdev_add(&drvdata->cdev, devt, 1);
+ if (retval) {
+ dev_err(dev, "cdev_add() failed\n");
+ goto failed3;
+ }
+ /* devfs_mk_cdev(devt, S_IFCHR|S_IRUGO|S_IWUGO, DRIVER_NAME); */
+ class_device_create(icap_class, NULL, devt, NULL, DRIVER_NAME);
+ return 0; /* success */
+
+ failed3:
+ iounmap(drvdata->base_address);
+
+ failed2:
+ release_mem_region(regs_res->start, drvdata->mem_size);
+
+ failed1:
+ kfree(drvdata);
+
+ return retval;
+}
+
+static struct hwicap_driver_config buffer_icap_config = {
+ .get_configuration = buffer_icap_get_configuration,
+ .set_configuration = buffer_icap_set_configuration,
+ .reset = buffer_icap_reset,
+};
+
+static struct hwicap_driver_config fifo_icap_config = {
+ .get_configuration = fifo_icap_get_configuration,
+ .set_configuration = fifo_icap_set_configuration,
+ .reset = fifo_icap_reset,
+};
+
+static int __devexit hwicap_remove(struct device *dev)
+{
+ struct hwicap_drvdata *drvdata;
+
+ drvdata = (struct hwicap_drvdata *)dev_get_drvdata(dev);
+
+ if (!drvdata)
+ return 0;
+
+ class_device_destroy(icap_class, drvdata->devt);
+ cdev_del(&drvdata->cdev);
+ iounmap(drvdata->base_address);
+ release_mem_region(drvdata->mem_start, drvdata->mem_size);
+ kfree(drvdata);
+ dev_set_drvdata(dev, NULL);
+ probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0;
+
+ return 0; /* success */
+}
+
+static int __devinit hwicap_drv_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ const struct config_registers *regs;
+ const char *family;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ /* Default to the Virtex-4 register map if the family is not
+ specified. */
+ regs = &v4_config_registers;
+ family = pdev->dev.platform_data;
+
+ if (family) {
+ if (!strcmp(family, "virtex2p")) {
+ regs = &v2_config_registers;
+ } else if (!strcmp(family, "virtex4")) {
+ regs = &v4_config_registers;
+ } else if (!strcmp(family, "virtex5")) {
+ regs = &v5_config_registers;
+ }
+ }
+
+ return hwicap_setup(&pdev->dev, pdev->id, res,
+ &buffer_icap_config, regs);
+}
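For illustration, board code might bind this platform driver and select the Virtex-5 register map by passing the family string as platform_data; the resource addresses below are placeholders and this registration is not part of the patch:

	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	static struct resource example_hwicap_res = {
		.start	= 0x40200000,
		.end	= 0x4020ffff,
		.flags	= IORESOURCE_MEM,
	};

	static struct platform_device example_hwicap_dev = {
		.name		= "xilinx_icap",	/* must match DRIVER_NAME */
		.id		= 0,
		.num_resources	= 1,
		.resource	= &example_hwicap_res,
		.dev = {
			.platform_data = "virtex5",
		},
	};

	/* Registered from board setup code with
	 * platform_device_register(&example_hwicap_dev). */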
+
+static int __devexit hwicap_drv_remove(struct platform_device *pdev)
+{
+ return hwicap_remove(&pdev->dev);
+}
+
+static struct platform_driver hwicap_platform_driver = {
+ .probe = hwicap_drv_probe,
+ .remove = hwicap_drv_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+};
+
+/* ---------------------------------------------------------------------
+ * OF bus binding
+ */
+
+#if defined(CONFIG_OF)
+static int __devinit
+hwicap_of_probe(struct of_device *op, const struct of_device_id *match)
+{
+ struct resource res;
+ const unsigned int *id;
+ const char *family;
+ int rc;
+ const struct hwicap_driver_config *config = match->data;
+ const struct config_registers *regs;
+
+ dev_dbg(&op->dev, "hwicap_of_probe(%p, %p)\n", op, match);
+
+ rc = of_address_to_resource(op->node, 0, &res);
+ if (rc) {
+ dev_err(&op->dev, "invalid address\n");
+ return rc;
+ }
+
+ id = of_get_property(op->node, "port-number", NULL);
+
+ /* Default to the Virtex-4 register map if the family is not
+ specified. */
+ regs = &v4_config_registers;
+ family = of_get_property(op->node, "xlnx,family", NULL);
+
+ if (family) {
+ if (!strcmp(family, "virtex2p")) {
+ regs = &v2_config_registers;
+ } else if (!strcmp(family, "virtex4")) {
+ regs = &v4_config_registers;
+ } else if (!strcmp(family, "virtex5")) {
+ regs = &v5_config_registers;
+ }
+ }
+ return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
+ regs);
+}
+
+static int __devexit hwicap_of_remove(struct of_device *op)
+{
+ return hwicap_remove(&op->dev);
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id __devinit hwicap_of_match[] = {
+ { .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config},
+ { .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config},
+ {},
+};
+MODULE_DEVICE_TABLE(of, hwicap_of_match);
+
+static struct of_platform_driver hwicap_of_driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .match_table = hwicap_of_match,
+ .probe = hwicap_of_probe,
+ .remove = __devexit_p(hwicap_of_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+/* Registration helpers to keep the number of #ifdefs to a minimum */
+static inline int __devinit hwicap_of_register(void)
+{
+ pr_debug("hwicap: calling of_register_platform_driver()\n");
+ return of_register_platform_driver(&hwicap_of_driver);
+}
+
+static inline void __devexit hwicap_of_unregister(void)
+{
+ of_unregister_platform_driver(&hwicap_of_driver);
+}
+#else /* CONFIG_OF */
+/* CONFIG_OF not enabled; do nothing helpers */
+static inline int __devinit hwicap_of_register(void) { return 0; }
+static inline void __devexit hwicap_of_unregister(void) { }
+#endif /* CONFIG_OF */
+
+static int __devinit hwicap_module_init(void)
+{
+ dev_t devt;
+ int retval;
+
+ icap_class = class_create(THIS_MODULE, "xilinx_config");
+
+ if (xhwicap_major) {
+ devt = MKDEV(xhwicap_major, xhwicap_minor);
+ retval = register_chrdev_region(
+ devt,
+ HWICAP_DEVICES,
+ DRIVER_NAME);
+ if (retval < 0)
+ return retval;
+ } else {
+ retval = alloc_chrdev_region(&devt,
+ xhwicap_minor,
+ HWICAP_DEVICES,
+ DRIVER_NAME);
+ if (retval < 0)
+ return retval;
+ xhwicap_major = MAJOR(devt);
+ }
+
+ retval = platform_driver_register(&hwicap_platform_driver);
+
+ if (retval)
+ goto failed1;
+
+ retval = hwicap_of_register();
+
+ if (retval)
+ goto failed2;
+
+ return retval;
+
+ failed2:
+ platform_driver_unregister(&hwicap_platform_driver);
+
+ failed1:
+ unregister_chrdev_region(devt, HWICAP_DEVICES);
+
+ return retval;
+}
+
+static void __devexit hwicap_module_cleanup(void)
+{
+ dev_t devt = MKDEV(xhwicap_major, xhwicap_minor);
+
+ class_destroy(icap_class);
+
+ platform_driver_unregister(&hwicap_platform_driver);
+
+ hwicap_of_unregister();
+
+ unregister_chrdev_region(devt, HWICAP_DEVICES);
+}
+
+module_init(hwicap_module_init);
+module_exit(hwicap_module_cleanup);
+
+MODULE_AUTHOR("Xilinx, Inc; Xilinx Research Labs Group");
+MODULE_DESCRIPTION("Xilinx ICAP Port Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
new file mode 100644
index 000000000000..ae771cac1629
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
@@ -0,0 +1,193 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * Xilinx products are not intended for use in life support appliances,
+ * devices, or systems. Use in such applications is expressly prohibited.
+ *
+ * (c) Copyright 2003-2007 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_HWICAP_H_ /* prevent circular inclusions */
+#define XILINX_HWICAP_H_ /* by using protection macros */
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+
+struct hwicap_drvdata {
+ u32 write_buffer_in_use; /* Always in [0,3] */
+ u8 write_buffer[4];
+ u32 read_buffer_in_use; /* Always in [0,3] */
+ u8 read_buffer[4];
+ u32 mem_start; /* phys. address of the control registers */
+ u32 mem_end; /* phys. address of the control registers */
+ u32 mem_size;
+ void __iomem *base_address;/* virt. address of the control registers */
+
+ struct device *dev;
+ struct cdev cdev; /* Char device structure */
+ dev_t devt;
+
+ const struct hwicap_driver_config *config;
+ const struct config_registers *config_regs;
+ void *private_data;
+ bool is_open;
+ struct semaphore sem;
+};
+
+struct hwicap_driver_config {
+ int (*get_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size);
+ int (*set_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size);
+ void (*reset)(struct hwicap_drvdata *drvdata);
+};
+
+/* Number of times to poll the done register */
+#define XHI_MAX_RETRIES 10
+
+/************ Constant Definitions *************/
+
+#define XHI_PAD_FRAMES 0x1
+
+/* Mask for calculating configuration packet headers */
+#define XHI_WORD_COUNT_MASK_TYPE_1 0x7FFUL
+#define XHI_WORD_COUNT_MASK_TYPE_2 0x1FFFFFUL
+#define XHI_TYPE_MASK 0x7
+#define XHI_REGISTER_MASK 0xF
+#define XHI_OP_MASK 0x3
+
+#define XHI_TYPE_SHIFT 29
+#define XHI_REGISTER_SHIFT 13
+#define XHI_OP_SHIFT 27
+
+#define XHI_TYPE_1 1
+#define XHI_TYPE_2 2
+#define XHI_OP_WRITE 2
+#define XHI_OP_READ 1
+
+/* Address Block Types */
+#define XHI_FAR_CLB_BLOCK 0
+#define XHI_FAR_BRAM_BLOCK 1
+#define XHI_FAR_BRAM_INT_BLOCK 2
+
+struct config_registers {
+ u32 CRC;
+ u32 FAR;
+ u32 FDRI;
+ u32 FDRO;
+ u32 CMD;
+ u32 CTL;
+ u32 MASK;
+ u32 STAT;
+ u32 LOUT;
+ u32 COR;
+ u32 MFWR;
+ u32 FLR;
+ u32 KEY;
+ u32 CBC;
+ u32 IDCODE;
+ u32 AXSS;
+ u32 C0R_1;
+ u32 CSOB;
+ u32 WBSTAR;
+ u32 TIMER;
+ u32 BOOTSTS;
+ u32 CTL_1;
+};
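+
+/*
+ * The v2/v4/v5_config_registers tables selected by the OF probe code are
+ * per-family instances of this structure; registers that a given family
+ * does not provide are simply unused in its table.
+ */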
+
+/* Configuration Commands */
+#define XHI_CMD_NULL 0
+#define XHI_CMD_WCFG 1
+#define XHI_CMD_MFW 2
+#define XHI_CMD_DGHIGH 3
+#define XHI_CMD_RCFG 4
+#define XHI_CMD_START 5
+#define XHI_CMD_RCAP 6
+#define XHI_CMD_RCRC 7
+#define XHI_CMD_AGHIGH 8
+#define XHI_CMD_SWITCH 9
+#define XHI_CMD_GRESTORE 10
+#define XHI_CMD_SHUTDOWN 11
+#define XHI_CMD_GCAPTURE 12
+#define XHI_CMD_DESYNCH 13
+#define XHI_CMD_IPROG 15 /* Only in Virtex5 */
+#define XHI_CMD_CRCC 16 /* Only in Virtex5 */
+#define XHI_CMD_LTIMER 17 /* Only in Virtex5 */
+
+/* Packet constants */
+#define XHI_SYNC_PACKET 0xAA995566UL
+#define XHI_DUMMY_PACKET 0xFFFFFFFFUL
+#define XHI_NOOP_PACKET (XHI_TYPE_1 << XHI_TYPE_SHIFT)
+#define XHI_TYPE_2_READ ((XHI_TYPE_2 << XHI_TYPE_SHIFT) | \
+ (XHI_OP_READ << XHI_OP_SHIFT))
+
+#define XHI_TYPE_2_WRITE ((XHI_TYPE_2 << XHI_TYPE_SHIFT) | \
+ (XHI_OP_WRITE << XHI_OP_SHIFT))
+
+#define XHI_TYPE2_CNT_MASK 0x07FFFFFF
+
+#define XHI_TYPE_1_PACKET_MAX_WORDS 2047UL
+#define XHI_TYPE_1_HEADER_BYTES 4
+#define XHI_TYPE_2_HEADER_BYTES 8
+
+/* Constant to use for CRC check when CRC has been disabled */
+#define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL
+
+/**
+ * hwicap_type_1_read - Generates a Type 1 read packet header.
+ * @Register: the address of the register to be read back.
+ *
+ * Generates a Type 1 read packet header, which is used to indirectly
+ * read registers in the configuration logic. This packet must then
+ * be sent through the icap device, and a return packet received with
+ * the information.
+ **/
+static inline u32 hwicap_type_1_read(u32 Register)
+{
+ return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
+ (Register << XHI_REGISTER_SHIFT) |
+ (XHI_OP_READ << XHI_OP_SHIFT);
+}
+
+/**
+ * hwicap_type_1_write - Generates a Type 1 write packet header.
+ * @Register: the address of the register to be written.
+ **/
+static inline u32 hwicap_type_1_write(u32 Register)
+{
+ return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
+ (Register << XHI_REGISTER_SHIFT) |
+ (XHI_OP_WRITE << XHI_OP_SHIFT);
+}
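+
+/*
+ * Illustration (not part of the API): hwicap_type_1_read(7) evaluates to
+ * (1 << 29) | (7 << 13) | (1 << 27) = 0x2800E000, i.e. a one-word Type 1
+ * read header with a zero word count in the low 11 bits.
+ */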
+
+#endif
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 721f86f4f008..c159ae64eeb2 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -9,9 +9,6 @@ config CPU_FREQ
clock speed, you need to either enable a dynamic cpufreq governor
(see below) after boot, or use a userspace tool.
- To compile this driver as a module, choose M here: the
- module will be called cpufreq.
-
For details, take a look at <file:Documentation/cpu-freq>.
If in doubt, say N.
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b730d6709529..64926aa990db 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -287,7 +287,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
if (!l_p_j_ref_freq) {
l_p_j_ref = loops_per_jiffy;
l_p_j_ref_freq = ci->old;
- dprintk("saving %lu as reference value for loops_per_jiffy;"
+ dprintk("saving %lu as reference value for loops_per_jiffy; "
"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
}
if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
@@ -295,7 +295,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
(val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
ci->new);
- dprintk("scaling loops_per_jiffy to %lu"
+ dprintk("scaling loops_per_jiffy to %lu "
"for frequency %u kHz\n", loops_per_jiffy, ci->new);
}
}
@@ -601,6 +601,31 @@ static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf)
return i;
}
+static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ unsigned int freq = 0;
+ unsigned int ret;
+
+ if (!policy->governor->store_setspeed)
+ return -EINVAL;
+
+ ret = sscanf(buf, "%u", &freq);
+ if (ret != 1)
+ return -EINVAL;
+
+ policy->governor->store_setspeed(policy, freq);
+
+ return count;
+}
+
+static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
+{
+ if (!policy->governor->show_setspeed)
+ return sprintf(buf, "<unsupported>\n");
+
+ return policy->governor->show_setspeed(policy, buf);
+}
#define define_one_ro(_name) \
static struct freq_attr _name = \
@@ -624,6 +649,7 @@ define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);
+define_one_rw(scaling_setspeed);
static struct attribute * default_attrs[] = {
&cpuinfo_min_freq.attr,
@@ -634,6 +660,7 @@ static struct attribute * default_attrs[] = {
&scaling_governor.attr,
&scaling_driver.attr,
&scaling_available_governors.attr,
+ &scaling_setspeed.attr,
NULL
};
@@ -1313,7 +1340,7 @@ static int cpufreq_resume(struct sys_device * sysdev)
struct cpufreq_freqs freqs;
if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
- dprintk("Warning: CPU frequency"
+ dprintk("Warning: CPU frequency "
"is %u, cpufreq assumed %u kHz.\n",
cur_freq, cpu_policy->cur);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index f8cdde4bf6cd..cb2ac01a41a1 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -65,12 +65,12 @@ static struct notifier_block userspace_cpufreq_notifier_block = {
/**
* cpufreq_set - set the CPU frequency
+ * @policy: pointer to policy struct where freq is being set
* @freq: target frequency in kHz
- * @cpu: CPU for which the frequency is to be set
*
* Sets the CPU frequency to freq.
*/
-static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
+static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
{
int ret = -EINVAL;
@@ -102,34 +102,11 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
}
-/************************** sysfs interface ************************/
-static ssize_t show_speed (struct cpufreq_policy *policy, char *buf)
+static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
{
- return sprintf (buf, "%u\n", cpu_cur_freq[policy->cpu]);
+ return sprintf(buf, "%u\n", cpu_cur_freq[policy->cpu]);
}
-static ssize_t
-store_speed (struct cpufreq_policy *policy, const char *buf, size_t count)
-{
- unsigned int freq = 0;
- unsigned int ret;
-
- ret = sscanf (buf, "%u", &freq);
- if (ret != 1)
- return -EINVAL;
-
- cpufreq_set(freq, policy);
-
- return count;
-}
-
-static struct freq_attr freq_attr_scaling_setspeed =
-{
- .attr = { .name = "scaling_setspeed", .mode = 0644 },
- .show = show_speed,
- .store = store_speed,
-};
-
static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
unsigned int event)
{
@@ -142,10 +119,6 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
return -EINVAL;
BUG_ON(!policy->cur);
mutex_lock(&userspace_mutex);
- rc = sysfs_create_file (&policy->kobj,
- &freq_attr_scaling_setspeed.attr);
- if (rc)
- goto start_out;
if (cpus_using_userspace_governor == 0) {
cpufreq_register_notifier(
@@ -160,7 +133,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
cpu_cur_freq[cpu] = policy->cur;
cpu_set_freq[cpu] = policy->cur;
dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
-start_out:
+
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_STOP:
@@ -176,7 +149,6 @@ start_out:
cpu_min_freq[cpu] = 0;
cpu_max_freq[cpu] = 0;
cpu_set_freq[cpu] = 0;
- sysfs_remove_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
dprintk("managing cpu %u stopped\n", cpu);
mutex_unlock(&userspace_mutex);
break;
@@ -211,6 +183,8 @@ start_out:
struct cpufreq_governor cpufreq_gov_userspace = {
.name = "userspace",
.governor = cpufreq_governor_userspace,
+ .store_setspeed = cpufreq_set,
+ .show_setspeed = show_speed,
.owner = THIS_MODULE,
};
EXPORT_SYMBOL(cpufreq_gov_userspace);
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 5409f3afb3f8..ae6cd60d5c14 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -171,7 +171,7 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
static struct cpufreq_frequency_table *show_table[NR_CPUS];
/**
- * show_scaling_governor - show the current policy for the specified CPU
+ * show_available_freqs - show available frequencies for the specified CPU
*/
static ssize_t show_available_freqs (struct cpufreq_policy *policy, char *buf)
{
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 3bed4127d4ad..7dbc4a83c45c 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -1,13 +1,13 @@
config CPU_IDLE
bool "CPU idle PM support"
+ default ACPI
help
CPU idle is a generic framework for supporting software-controlled
idle processor power management. It includes modular cross-platform
governors that can be swapped during runtime.
- If you're using a mobile platform that supports CPU idle PM (e.g.
- an ACPI-capable notebook), you should say Y here.
+ If you're using an ACPI-enabled platform, you should say Y here.
config CPU_IDLE_GOV_LADDER
bool
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index d2fabe7863a9..2c4b2d47973e 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -12,9 +12,10 @@
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
-#include <linux/latency.h>
+#include <linux/pm_qos_params.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
+#include <linux/ktime.h>
#include "cpuidle.h"
@@ -180,6 +181,44 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
EXPORT_SYMBOL_GPL(cpuidle_disable_device);
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
+{
+ ktime_t t1, t2;
+ s64 diff;
+ int ret;
+
+ t1 = ktime_get();
+ local_irq_enable();
+ while (!need_resched())
+ cpu_relax();
+
+ t2 = ktime_get();
+ diff = ktime_to_us(ktime_sub(t2, t1));
+ if (diff > INT_MAX)
+ diff = INT_MAX;
+
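+ /* report how long we polled, in microseconds (clamped to INT_MAX above) */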
+ ret = (int) diff;
+ return ret;
+}
+
+static void poll_idle_init(struct cpuidle_device *dev)
+{
+ struct cpuidle_state *state = &dev->states[0];
+
+ cpuidle_set_statedata(state, NULL);
+
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C0 (poll idle)");
+ state->exit_latency = 0;
+ state->target_residency = 0;
+ state->power_usage = -1;
+ state->flags = CPUIDLE_FLAG_POLL | CPUIDLE_FLAG_TIME_VALID;
+ state->enter = poll_idle;
+}
+#else
+static void poll_idle_init(struct cpuidle_device *dev) {}
+#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
+
/**
* cpuidle_register_device - registers a CPU's idle PM feature
* @dev: the cpu
@@ -198,6 +237,8 @@ int cpuidle_register_device(struct cpuidle_device *dev)
mutex_lock(&cpuidle_lock);
+ poll_idle_init(dev);
+
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
if ((ret = cpuidle_add_sysfs(sys_dev))) {
@@ -265,7 +306,10 @@ static struct notifier_block cpuidle_latency_notifier = {
.notifier_call = cpuidle_latency_notify,
};
-#define latency_notifier_init(x) do { register_latency_notifier(x); } while (0)
+static inline void latency_notifier_init(struct notifier_block *n)
+{
+ pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
+}
#else /* CONFIG_SMP */
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index eb666ecae7c9..ba7b9a6b17a1 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/cpuidle.h>
-#include <linux/latency.h>
+#include <linux/pm_qos_params.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
@@ -81,7 +81,8 @@ static int ladder_select_state(struct cpuidle_device *dev)
/* consider promotion */
if (last_idx < dev->state_count - 1 &&
last_residency > last_state->threshold.promotion_time &&
- dev->states[last_idx + 1].exit_latency <= system_latency_constraint()) {
+ dev->states[last_idx + 1].exit_latency <=
+ pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
last_state->stats.promotion_count++;
last_state->stats.demotion_count = 0;
if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 299d45c3bdd2..78d77c5dc35c 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -8,7 +8,7 @@
#include <linux/kernel.h>
#include <linux/cpuidle.h>
-#include <linux/latency.h>
+#include <linux/pm_qos_params.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
@@ -48,7 +48,7 @@ static int menu_select(struct cpuidle_device *dev)
break;
if (s->target_residency > data->predicted_us)
break;
- if (s->exit_latency > system_latency_constraint())
+ if (s->exit_latency > pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY))
break;
}
diff --git a/drivers/dio/dio-driver.c b/drivers/dio/dio-driver.c
index e4c48e329367..8cd8507b1a8a 100644
--- a/drivers/dio/dio-driver.c
+++ b/drivers/dio/dio-driver.c
@@ -15,16 +15,15 @@
#include <linux/dio.h>
- /**
- * dio_match_device - Tell if a DIO device structure has a matching
- * DIO device id structure
- * @ids: array of DIO device id structures to search in
- * @dev: the DIO device structure to match against
- *
- * Used by a driver to check whether a DIO device present in the
- * system is in its list of supported devices. Returns the matching
- * dio_device_id structure or %NULL if there is no match.
- */
+/**
+ * dio_match_device - Tell if a DIO device structure has a matching DIO device id structure
+ * @ids: array of DIO device id structures to search in
+ * @d: the DIO device structure to match against
+ *
+ * Used by a driver to check whether a DIO device present in the
+ * system is in its list of supported devices. Returns the matching
+ * dio_device_id structure or %NULL if there is no match.
+ */
const struct dio_device_id *
dio_match_device(const struct dio_device_id *ids,
@@ -66,13 +65,13 @@ static int dio_device_probe(struct device *dev)
}
- /**
- * dio_register_driver - register a new DIO driver
- * @drv: the driver structure to register
- *
- * Adds the driver structure to the list of registered drivers
- * Returns zero or a negative error value.
- */
+/**
+ * dio_register_driver - register a new DIO driver
+ * @drv: the driver structure to register
+ *
+ * Adds the driver structure to the list of registered drivers
+ * Returns zero or a negative error value.
+ */
int dio_register_driver(struct dio_driver *drv)
{
@@ -85,15 +84,15 @@ int dio_register_driver(struct dio_driver *drv)
}
- /**
- * dio_unregister_driver - unregister a DIO driver
- * @drv: the driver structure to unregister
- *
- * Deletes the driver structure from the list of registered DIO drivers,
- * gives it a chance to clean up by calling its remove() function for
- * each device it was responsible for, and marks those devices as
- * driverless.
- */
+/**
+ * dio_unregister_driver - unregister a DIO driver
+ * @drv: the driver structure to unregister
+ *
+ * Deletes the driver structure from the list of registered DIO drivers,
+ * gives it a chance to clean up by calling its remove() function for
+ * each device it was responsible for, and marks those devices as
+ * driverless.
+ */
void dio_unregister_driver(struct dio_driver *drv)
{
@@ -101,16 +100,15 @@ void dio_unregister_driver(struct dio_driver *drv)
}
- /**
- * dio_bus_match - Tell if a DIO device structure has a matching DIO
- * device id structure
- * @ids: array of DIO device id structures to search in
- * @dev: the DIO device structure to match against
- *
- * Used by a driver to check whether a DIO device present in the
- * system is in its list of supported devices. Returns the matching
- * dio_device_id structure or %NULL if there is no match.
- */
+/**
+ * dio_bus_match - Tell if a DIO device structure has a matching DIO device id structure
+ * @dev: the DIO device structure to match against
+ * @drv: the &device_driver that points to the array of DIO device id structures to search
+ *
+ * Used by a driver to check whether a DIO device present in the
+ * system is in its list of supported devices. Returns the matching
+ * dio_device_id structure or %NULL if there is no match.
+ */
static int dio_bus_match(struct device *dev, struct device_driver *drv)
{
diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c
index 17502d6efae7..07f274f853d9 100644
--- a/drivers/dio/dio.c
+++ b/drivers/dio/dio.c
@@ -88,8 +88,6 @@ static struct dioname names[] =
#undef DIONAME
#undef DIOFBNAME
-#define NUMNAMES (sizeof(names) / sizeof(struct dioname))
-
static const char *unknowndioname
= "unknown DIO board -- please email <linux-m68k@lists.linux-m68k.org>!";
@@ -97,7 +95,7 @@ static const char *dio_getname(int id)
{
/* return pointer to a constant string describing the board with given ID */
unsigned int i;
- for (i = 0; i < NUMNAMES; i++)
+ for (i = 0; i < ARRAY_SIZE(names); i++)
if (names[i].id == id)
return names[i].name;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c46b7c219ee9..a703deffb795 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -5,6 +5,7 @@
menuconfig DMADEVICES
bool "DMA Engine support"
depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
+ depends on !HIGHMEM64G
help
DMA engines can do asynchronous data transfers without
involving the host CPU. Currently, this framework can be
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index bcf52df30339..29965231b912 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -473,20 +473,22 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
- dma_addr_t addr;
+ dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
int cpu;
- tx = dev->device_prep_dma_memcpy(chan, len, 0);
- if (!tx)
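+ /*
+ * device_prep_dma_memcpy() takes already-mapped DMA addresses, so map
+ * both buffers up front and unmap them again if no descriptor is
+ * available.
+ */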
+ dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
+ dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
+ tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+ if (!tx) {
+ dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+ dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
return -ENOMEM;
+ }
tx->ack = 1;
tx->callback = NULL;
- addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
- tx->tx_set_src(addr, tx, 0);
- addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
- tx->tx_set_dest(addr, tx, 0);
cookie = tx->tx_submit(tx);
cpu = get_cpu();
@@ -517,20 +519,22 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
- dma_addr_t addr;
+ dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
int cpu;
- tx = dev->device_prep_dma_memcpy(chan, len, 0);
- if (!tx)
+ dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
+ dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
+ tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+ if (!tx) {
+ dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+ dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
return -ENOMEM;
+ }
tx->ack = 1;
tx->callback = NULL;
- addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
- tx->tx_set_src(addr, tx, 0);
- addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
- tx->tx_set_dest(addr, tx, 0);
cookie = tx->tx_submit(tx);
cpu = get_cpu();
@@ -563,20 +567,23 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
- dma_addr_t addr;
+ dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
int cpu;
- tx = dev->device_prep_dma_memcpy(chan, len, 0);
- if (!tx)
+ dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+ dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
+ DMA_FROM_DEVICE);
+ tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+ if (!tx) {
+ dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
+ dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
return -ENOMEM;
+ }
tx->ack = 1;
tx->callback = NULL;
- addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
- tx->tx_set_src(addr, tx, 0);
- addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
- tx->tx_set_dest(addr, tx, 0);
cookie = tx->tx_submit(tx);
cpu = get_cpu();
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 45e7b4666c7b..dff38accc5c1 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -159,20 +159,6 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
return device->common.chancnt;
}
-static void ioat_set_src(dma_addr_t addr,
- struct dma_async_tx_descriptor *tx,
- int index)
-{
- tx_to_ioat_desc(tx)->src = addr;
-}
-
-static void ioat_set_dest(dma_addr_t addr,
- struct dma_async_tx_descriptor *tx,
- int index)
-{
- tx_to_ioat_desc(tx)->dst = addr;
-}
-
/**
* ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
* descriptors to hw
@@ -415,8 +401,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
memset(desc, 0, sizeof(*desc));
dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
- desc_sw->async_tx.tx_set_src = ioat_set_src;
- desc_sw->async_tx.tx_set_dest = ioat_set_dest;
switch (ioat_chan->device->version) {
case IOAT_VER_1_2:
desc_sw->async_tx.tx_submit = ioat1_tx_submit;
@@ -714,8 +698,10 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
struct dma_chan *chan,
+ dma_addr_t dma_dest,
+ dma_addr_t dma_src,
size_t len,
- int int_en)
+ unsigned long flags)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
struct ioat_desc_sw *new;
@@ -726,6 +712,8 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
if (new) {
new->len = len;
+ new->dst = dma_dest;
+ new->src = dma_src;
return &new->async_tx;
} else
return NULL;
@@ -733,8 +721,10 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
struct dma_chan *chan,
+ dma_addr_t dma_dest,
+ dma_addr_t dma_src,
size_t len,
- int int_en)
+ unsigned long flags)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
struct ioat_desc_sw *new;
@@ -749,6 +739,8 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
if (new) {
new->len = len;
+ new->dst = dma_dest;
+ new->src = dma_src;
return &new->async_tx;
} else
return NULL;
@@ -1045,7 +1037,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
u8 *dest;
struct dma_chan *dma_chan;
struct dma_async_tx_descriptor *tx;
- dma_addr_t addr;
+ dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
int err = 0;
@@ -1073,7 +1065,12 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
goto out;
}
- tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
+ dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
+ DMA_TO_DEVICE);
+ dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
+ DMA_FROM_DEVICE);
+ tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
+ IOAT_TEST_SIZE, 0);
if (!tx) {
dev_err(&device->pdev->dev,
"Self-test prep failed, disabling\n");
@@ -1082,12 +1079,6 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
}
async_tx_ack(tx);
- addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
- DMA_TO_DEVICE);
- tx->tx_set_src(addr, tx, 0);
- addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
- DMA_FROM_DEVICE);
- tx->tx_set_dest(addr, tx, 0);
tx->callback = ioat_dma_test_callback;
tx->callback_param = (void *)0x8086;
cookie = tx->tx_submit(tx);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index e5c62b75f36f..3986d54492bd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -284,7 +284,7 @@ iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
int slots_per_op)
{
struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
- struct list_head chain = LIST_HEAD_INIT(chain);
+ LIST_HEAD(chain);
int slots_found, retry = 0;
/* start search from the last allocated descrtiptor
@@ -443,17 +443,6 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
return cookie;
}
-static void
-iop_adma_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
- int index)
-{
- struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
- struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
-
- /* to do: support transfers lengths > IOP_ADMA_MAX_BYTE_COUNT */
- iop_desc_set_dest_addr(sw_desc->group_head, iop_chan, addr);
-}
-
static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
@@ -486,7 +475,6 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = iop_adma_tx_submit;
- slot->async_tx.tx_set_dest = iop_adma_set_dest;
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
INIT_LIST_HEAD(&slot->async_tx.tx_list);
@@ -547,18 +535,9 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
return sw_desc ? &sw_desc->async_tx : NULL;
}
-static void
-iop_adma_memcpy_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
- int index)
-{
- struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
- struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
- iop_desc_set_memcpy_src_addr(grp_start, addr);
-}
-
static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
+iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -576,11 +555,12 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
if (sw_desc) {
grp_start = sw_desc->group_head;
- iop_desc_init_memcpy(grp_start, int_en);
+ iop_desc_init_memcpy(grp_start, flags);
iop_desc_set_byte_count(grp_start, iop_chan, len);
+ iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
+ iop_desc_set_memcpy_src_addr(grp_start, dma_src);
sw_desc->unmap_src_cnt = 1;
sw_desc->unmap_len = len;
- sw_desc->async_tx.tx_set_src = iop_adma_memcpy_set_src;
}
spin_unlock_bh(&iop_chan->lock);
@@ -588,8 +568,8 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
}
static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
- int int_en)
+iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
+ int value, size_t len, unsigned long flags)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -607,9 +587,10 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
if (sw_desc) {
grp_start = sw_desc->group_head;
- iop_desc_init_memset(grp_start, int_en);
+ iop_desc_init_memset(grp_start, flags);
iop_desc_set_byte_count(grp_start, iop_chan, len);
iop_desc_set_block_fill_val(grp_start, value);
+ iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
sw_desc->unmap_src_cnt = 1;
sw_desc->unmap_len = len;
}
@@ -618,19 +599,10 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
return sw_desc ? &sw_desc->async_tx : NULL;
}
-static void
-iop_adma_xor_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
- int index)
-{
- struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
- struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
- iop_desc_set_xor_src_addr(grp_start, index, addr);
-}
-
static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
- int int_en)
+iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
+ unsigned long flags)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -641,39 +613,32 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
dev_dbg(iop_chan->device->common.dev,
- "%s src_cnt: %d len: %u int_en: %d\n",
- __FUNCTION__, src_cnt, len, int_en);
+ "%s src_cnt: %d len: %u flags: %lx\n",
+ __FUNCTION__, src_cnt, len, flags);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
if (sw_desc) {
grp_start = sw_desc->group_head;
- iop_desc_init_xor(grp_start, src_cnt, int_en);
+ iop_desc_init_xor(grp_start, src_cnt, flags);
iop_desc_set_byte_count(grp_start, iop_chan, len);
+ iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
sw_desc->unmap_src_cnt = src_cnt;
sw_desc->unmap_len = len;
- sw_desc->async_tx.tx_set_src = iop_adma_xor_set_src;
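+ /* sources are passed in directly, so program them all at prep time */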
+ while (src_cnt--)
+ iop_desc_set_xor_src_addr(grp_start, src_cnt,
+ dma_src[src_cnt]);
}
spin_unlock_bh(&iop_chan->lock);
return sw_desc ? &sw_desc->async_tx : NULL;
}
-static void
-iop_adma_xor_zero_sum_set_src(dma_addr_t addr,
- struct dma_async_tx_descriptor *tx,
- int index)
-{
- struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
- struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
- iop_desc_set_zero_sum_src_addr(grp_start, index, addr);
-}
-
static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
- size_t len, u32 *result, int int_en)
+iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
+ unsigned int src_cnt, size_t len, u32 *result,
+ unsigned long flags)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -690,14 +655,16 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
if (sw_desc) {
grp_start = sw_desc->group_head;
- iop_desc_init_zero_sum(grp_start, src_cnt, int_en);
+ iop_desc_init_zero_sum(grp_start, src_cnt, flags);
iop_desc_set_zero_sum_byte_count(grp_start, len);
grp_start->xor_check_result = result;
pr_debug("\t%s: grp_start->xor_check_result: %p\n",
__FUNCTION__, grp_start->xor_check_result);
sw_desc->unmap_src_cnt = src_cnt;
sw_desc->unmap_len = len;
- sw_desc->async_tx.tx_set_src = iop_adma_xor_zero_sum_set_src;
+ while (src_cnt--)
+ iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
+ dma_src[src_cnt]);
}
spin_unlock_bh(&iop_chan->lock);
@@ -882,13 +849,12 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
goto out;
}
- tx = iop_adma_prep_dma_memcpy(dma_chan, IOP_ADMA_TEST_SIZE, 1);
dest_dma = dma_map_single(dma_chan->device->dev, dest,
IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
- iop_adma_set_dest(dest_dma, tx, 0);
src_dma = dma_map_single(dma_chan->device->dev, src,
IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
- iop_adma_memcpy_set_src(src_dma, tx, 0);
+ tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
+ IOP_ADMA_TEST_SIZE, 1);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
@@ -929,6 +895,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
struct page *dest;
struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
+ dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
dma_addr_t dma_addr, dest_dma;
struct dma_async_tx_descriptor *tx;
struct dma_chan *dma_chan;
@@ -981,17 +948,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
}
/* test xor */
- tx = iop_adma_prep_dma_xor(dma_chan, IOP_ADMA_NUM_SRC_TEST,
- PAGE_SIZE, 1);
dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
- iop_adma_set_dest(dest_dma, tx, 0);
-
- for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
- dma_addr = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0,
- PAGE_SIZE, DMA_TO_DEVICE);
- iop_adma_xor_set_src(dma_addr, tx, i);
- }
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
+ dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+ 0, PAGE_SIZE, DMA_TO_DEVICE);
+ tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
+ IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
@@ -1032,13 +995,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
zero_sum_result = 1;
- tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
- PAGE_SIZE, &zero_sum_result, 1);
- for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
- dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
- 0, PAGE_SIZE, DMA_TO_DEVICE);
- iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
- }
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+ dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+ zero_sum_srcs[i], 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+ IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+ &zero_sum_result, 1);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
@@ -1060,10 +1023,9 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
}
/* test memset */
- tx = iop_adma_prep_dma_memset(dma_chan, 0, PAGE_SIZE, 1);
dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
- iop_adma_set_dest(dma_addr, tx, 0);
+ tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
@@ -1089,13 +1051,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
/* test for non-zero parity sum */
zero_sum_result = 0;
- tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
- PAGE_SIZE, &zero_sum_result, 1);
- for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
- dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
- 0, PAGE_SIZE, DMA_TO_DEVICE);
- iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
- }
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+ dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+ zero_sum_srcs[i], 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+ IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+ &zero_sum_result, 1);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 98b6b4fb4257..2b382990fe58 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -97,7 +97,7 @@ config EDAC_I82975X
config EDAC_I3000
tristate "Intel 3000/3010"
- depends on EDAC_MM_EDAC && PCI && X86_32
+ depends on EDAC_MM_EDAC && PCI && X86
help
Support for error detection and correction on the Intel
3000 and 3010 server chipsets.
@@ -123,6 +123,20 @@ config EDAC_I5000
Support for error detection and correction the Intel
Greekcreek/Blackford chipsets.
+config EDAC_MPC85XX
+ tristate "Freescale MPC85xx"
+ depends on EDAC_MM_EDAC && FSL_SOC && MPC85xx
+ help
+ Support for error detection and correction on the Freescale
+ MPC8560, MPC8540, and MPC8548.
+
+config EDAC_MV64X60
+ tristate "Marvell MV64x60"
+ depends on EDAC_MM_EDAC && MV64X60
+ help
+ Support for error detection and correction on the Marvell
+ MV64360 and MV64460 chipsets.
+
config EDAC_PASEMI
tristate "PA Semi PWRficient"
depends on EDAC_MM_EDAC && PCI
@@ -131,5 +145,12 @@ config EDAC_PASEMI
Support for error detection and correction on PA Semi
PWRficient.
+config EDAC_CELL
+ tristate "Cell Broadband Engine memory controller"
+ depends on EDAC_MM_EDAC && PPC_CELL_NATIVE
+ help
+ Support for error detection and correction on the
+ Cell Broadband Engine internal memory controller
+ on platforms without a hypervisor.
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 02c09f0ff157..83807731d4a9 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -28,4 +28,7 @@ obj-$(CONFIG_EDAC_I3000) += i3000_edac.o
obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
+obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac.o
+obj-$(CONFIG_EDAC_MV64X60) += mv64x60_edac.o
+obj-$(CONFIG_EDAC_CELL) += cell_edac.o
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
new file mode 100644
index 000000000000..b54112ffd282
--- /dev/null
+++ b/drivers/edac/cell_edac.c
@@ -0,0 +1,258 @@
+/*
+ * Cell MIC driver for ECC counting
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ */
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/stop_machine.h>
+#include <linux/io.h>
+#include <asm/machdep.h>
+#include <asm/cell-regs.h>
+
+#include "edac_core.h"
+
+struct cell_edac_priv
+{
+ struct cbe_mic_tm_regs __iomem *regs;
+ int node;
+ int chanmask;
+#ifdef DEBUG
+ u64 prev_fir;
+#endif
+};
+
+static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
+{
+ struct cell_edac_priv *priv = mci->pvt_info;
+ struct csrow_info *csrow = &mci->csrows[0];
+ unsigned long address, pfn, offset;
+
+ dev_dbg(mci->dev, "ECC CE err on node %d, channel %d, ar = 0x%016lx\n",
+ priv->node, chan, ar);
+
+ /* Address decoding is likely a bit bogus; needs double-checking */
+ address = (ar & 0xffffffffe0000000ul) >> 29;
+ if (priv->chanmask == 0x3)
+ address = (address << 1) | chan;
+ pfn = address >> PAGE_SHIFT;
+ offset = address & ~PAGE_MASK;
+
+ /* TODO: Decoding of the error address */
+ edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
+ 0, 0, chan, "");
+}
+
+static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
+{
+ struct cell_edac_priv *priv = mci->pvt_info;
+ struct csrow_info *csrow = &mci->csrows[0];
+ unsigned long address, pfn, offset;
+
+ dev_dbg(mci->dev, "ECC UE err on node %d, channel %d, ar = 0x%016lx\n",
+ priv->node, chan, ar);
+
+ /* Address decoding is likely a bit bogus, to dbl check */
+ address = (ar & 0xffffffffe0000000ul) >> 29;
+ if (priv->chanmask == 0x3)
+ address = (address << 1) | chan;
+ pfn = address >> PAGE_SHIFT;
+ offset = address & ~PAGE_MASK;
+
+ /* TODO: Decoding of the error address */
+ edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
+}
+
+static void cell_edac_check(struct mem_ctl_info *mci)
+{
+ struct cell_edac_priv *priv = mci->pvt_info;
+ u64 fir, addreg, clear = 0;
+
+ fir = in_be64(&priv->regs->mic_fir);
+#ifdef DEBUG
+ if (fir != priv->prev_fir) {
+ dev_dbg(mci->dev, "fir change : 0x%016lx\n", fir);
+ priv->prev_fir = fir;
+ }
+#endif
+ if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_SINGLE_0_ERR)) {
+ addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
+ clear |= CBE_MIC_FIR_ECC_SINGLE_0_RESET;
+ cell_edac_count_ce(mci, 0, addreg);
+ }
+ if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_SINGLE_1_ERR)) {
+ addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
+ clear |= CBE_MIC_FIR_ECC_SINGLE_1_RESET;
+ cell_edac_count_ce(mci, 1, addreg);
+ }
+ if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_MULTI_0_ERR)) {
+ addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
+ clear |= CBE_MIC_FIR_ECC_MULTI_0_RESET;
+ cell_edac_count_ue(mci, 0, addreg);
+ }
+ if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_MULTI_1_ERR)) {
+ addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
+ clear |= CBE_MIC_FIR_ECC_MULTI_1_RESET;
+ cell_edac_count_ue(mci, 1, addreg);
+ }
+
+ /* The procedure for clearing FIR bits is a bit ... weird */
+ if (clear) {
+ fir &= ~(CBE_MIC_FIR_ECC_ERR_MASK | CBE_MIC_FIR_ECC_SET_MASK);
+ fir |= CBE_MIC_FIR_ECC_RESET_MASK;
+ fir &= ~clear;
+ out_be64(&priv->regs->mic_fir, fir);
+ (void)in_be64(&priv->regs->mic_fir);
+
+ mb(); /* sync up */
+#ifdef DEBUG
+ fir = in_be64(&priv->regs->mic_fir);
+ dev_dbg(mci->dev, "fir clear : 0x%016lx\n", fir);
+#endif
+ }
+}
+
+static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
+{
+ struct csrow_info *csrow = &mci->csrows[0];
+ struct cell_edac_priv *priv = mci->pvt_info;
+ struct device_node *np;
+
+ for (np = NULL;
+ (np = of_find_node_by_name(np, "memory")) != NULL;) {
+ struct resource r;
+
+ /* We "know" that the Cell firmware only creates one entry
+ * in the "memory" nodes. If that changes, this code will
+ * need to be adapted.
+ */
+ if (of_address_to_resource(np, 0, &r))
+ continue;
+ if (of_node_to_nid(np) != priv->node)
+ continue;
+ csrow->first_page = r.start >> PAGE_SHIFT;
+ csrow->nr_pages = (r.end - r.start + 1) >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ csrow->mtype = MEM_XDR;
+ csrow->edac_mode = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ dev_dbg(mci->dev,
+ "Initialized on node %d, chanmask=0x%x,"
+ " first_page=0x%lx, nr_pages=0x%x\n",
+ priv->node, priv->chanmask,
+ csrow->first_page, csrow->nr_pages);
+ break;
+ }
+}
+
+static int __devinit cell_edac_probe(struct platform_device *pdev)
+{
+ struct cbe_mic_tm_regs __iomem *regs;
+ struct mem_ctl_info *mci;
+ struct cell_edac_priv *priv;
+ u64 reg;
+ int rc, chanmask;
+
+ regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
+ if (regs == NULL)
+ return -ENODEV;
+
+ /* Get channel population */
+ reg = in_be64(&regs->mic_mnt_cfg);
+ dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016lx\n", reg);
+ chanmask = 0;
+ if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP)
+ chanmask |= 0x1;
+ if (reg & CBE_MIC_MNT_CFG_CHAN_1_POP)
+ chanmask |= 0x2;
+ if (chanmask == 0) {
+ dev_warn(&pdev->dev,
+ "Yuck ! No channel populated ? Aborting !\n");
+ return -ENODEV;
+ }
+ dev_dbg(&pdev->dev, "Initial FIR = 0x%016lx\n",
+ in_be64(&regs->mic_fir));
+
+ /* Allocate & init EDAC MC data structure */
+ mci = edac_mc_alloc(sizeof(struct cell_edac_priv), 1,
+ chanmask == 3 ? 2 : 1, pdev->id);
+ if (mci == NULL)
+ return -ENOMEM;
+ priv = mci->pvt_info;
+ priv->regs = regs;
+ priv->node = pdev->id;
+ priv->chanmask = chanmask;
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_XDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ mci->mod_name = "cell_edac";
+ mci->ctl_name = "MIC";
+ mci->dev_name = pdev->dev.bus_id;
+ mci->edac_check = cell_edac_check;
+ cell_edac_init_csrows(mci);
+
+ /* Register with EDAC core */
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to register with EDAC core\n");
+ edac_mc_free(mci);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int __devexit cell_edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
+ if (mci)
+ edac_mc_free(mci);
+ return 0;
+}
+
+static struct platform_driver cell_edac_driver = {
+ .driver = {
+ .name = "cbe-mic",
+ .owner = THIS_MODULE,
+ },
+ .probe = cell_edac_probe,
+ .remove = cell_edac_remove,
+};
+
+static int __init cell_edac_init(void)
+{
+ /* Sanity check registers data structure */
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_df_ecc_address_0) != 0xf8);
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_df_ecc_address_1) != 0x1b8);
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_df_config) != 0x218);
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_fir) != 0x230);
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_mnt_cfg) != 0x210);
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_exc) != 0x208);
+
+ return platform_driver_register(&cell_edac_driver);
+}
+
+static void __exit cell_edac_exit(void)
+{
+ platform_driver_unregister(&cell_edac_driver);
+}
+
+module_init(cell_edac_init);
+module_exit(cell_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("ECC counting for Cell MIC");
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 2d23e304f5ec..a9aa845dbe74 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -136,6 +136,7 @@ enum mem_type {
MEM_DDR2, /* DDR2 RAM */
MEM_FB_DDR2, /* fully buffered DDR2 */
MEM_RDDR2, /* Registered DDR2 RAM */
+ MEM_XDR, /* Rambus XDR */
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -152,6 +153,7 @@ enum mem_type {
#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
+#define MEM_FLAG_XDR BIT(MEM_XDR)
/* chipset Error Detection and Correction capabilities and mode */
enum edac_type {
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index f3690a697cf9..b9552bc03dea 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -155,6 +155,10 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
dev_ctl->instances = dev_inst;
dev_ctl->pvt_info = pvt;
+ /* Default logging of CEs and UEs */
+ dev_ctl->log_ce = 1;
+ dev_ctl->log_ue = 1;
+
/* Name of this edac device */
snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name);
@@ -436,7 +440,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
*/
if (edac_dev->poll_msec == 1000)
queue_delayed_work(edac_workqueue, &edac_dev->work,
- round_jiffies(edac_dev->delay));
+ round_jiffies_relative(edac_dev->delay));
else
queue_delayed_work(edac_workqueue, &edac_dev->work,
edac_dev->delay);
@@ -468,7 +472,7 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
*/
if (edac_dev->poll_msec == 1000)
queue_delayed_work(edac_workqueue, &edac_dev->work,
- round_jiffies(edac_dev->delay));
+ round_jiffies_relative(edac_dev->delay));
else
queue_delayed_work(edac_workqueue, &edac_dev->work,
edac_dev->delay);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 9aac88027fb3..021d18795145 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -73,7 +73,8 @@ static const char *mem_types[] = {
[MEM_RMBS] = "RMBS",
[MEM_DDR2] = "Unbuffered-DDR2",
[MEM_FB_DDR2] = "FullyBuffered-DDR2",
- [MEM_RDDR2] = "Registered-DDR2"
+ [MEM_RDDR2] = "Registered-DDR2",
+ [MEM_XDR] = "XDR"
};
static const char *dev_types[] = {
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 5dee9f50414b..32be43576a8e 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -73,7 +73,7 @@ EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info);
*
* Last action on the pci control structure.
*
- * call the remove sysfs informaton, which will unregister
+ * call the remove sysfs information, which will unregister
* this control struct's kobj. When that kobj's ref count
* goes to zero, its release function will be call and then
* kfree() the memory.
@@ -246,7 +246,7 @@ static void edac_pci_workq_function(struct work_struct *work_req)
/* if we are on a one second period, then use round */
msec = edac_pci_get_poll_msec();
if (msec == 1000)
- delay = round_jiffies(msecs_to_jiffies(msec));
+ delay = round_jiffies_relative(msecs_to_jiffies(msec));
else
delay = msecs_to_jiffies(msec);
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 5b075da99145..71c3195d3704 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -558,8 +558,10 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
debugf4("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id);
- /* check the status reg for errors */
- if (status) {
+ /* check the status reg for errors on boards NOT marked as broken;
+ * if broken, we cannot trust any of the status bits
+ */
+ if (status && !dev->broken_parity_status) {
if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
edac_printk(KERN_CRIT, EDAC_PCI,
"Signaled System Error on %s\n",
@@ -593,8 +595,10 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
debugf4("PCI SEC_STATUS= 0x%04x %s\n", status, dev->dev.bus_id);
- /* check the secondary status reg for errors */
- if (status) {
+ /* check the secondary status reg for errors
+ * on boards NOT marked as broken
+ */
+ if (status && !dev->broken_parity_status) {
if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
"Signaled System Error on %s\n",
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index e895f9f887ab..5d4292811c14 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -14,6 +14,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
+#include <linux/edac.h>
#include "edac_core.h"
#define I3000_REVISION "1.1"
@@ -30,105 +31,139 @@
#define I3000_MCHBAR_MASK 0xffffc000
#define I3000_MMR_WINDOW_SIZE 16384
-#define I3000_EDEAP 0x70 /* Extended DRAM Error Address Pointer (8b)
- *
- * 7:1 reserved
- * 0 bit 32 of address
- */
-#define I3000_DEAP 0x58 /* DRAM Error Address Pointer (32b)
- *
- * 31:7 address
- * 6:1 reserved
- * 0 Error channel 0/1
- */
-#define I3000_DEAP_GRAIN (1 << 7)
-#define I3000_DEAP_PFN(edeap, deap) ((((edeap) & 1) << (32 - PAGE_SHIFT)) | \
- ((deap) >> PAGE_SHIFT))
-#define I3000_DEAP_OFFSET(deap) ((deap) & ~(I3000_DEAP_GRAIN-1) & ~PAGE_MASK)
-#define I3000_DEAP_CHANNEL(deap) ((deap) & 1)
-
-#define I3000_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
- *
- * 7:0 DRAM ECC Syndrome
- */
-
-#define I3000_ERRSTS 0xc8 /* Error Status Register (16b)
- *
- * 15:12 reserved
- * 11 MCH Thermal Sensor Event for SMI/SCI/SERR
- * 10 reserved
- * 9 LOCK to non-DRAM Memory Flag (LCKF)
- * 8 Received Refresh Timeout Flag (RRTOF)
- * 7:2 reserved
- * 1 Multiple-bit DRAM ECC Error Flag (DMERR)
- * 0 Single-bit DRAM ECC Error Flag (DSERR)
- */
+#define I3000_EDEAP 0x70 /* Extended DRAM Error Address Pointer (8b)
+ *
+ * 7:1 reserved
+ * 0 bit 32 of address
+ */
+#define I3000_DEAP 0x58 /* DRAM Error Address Pointer (32b)
+ *
+ * 31:7 address
+ * 6:1 reserved
+ * 0 Error channel 0/1
+ */
+#define I3000_DEAP_GRAIN (1 << 7)
+
+/*
+ * Helper functions to decode the DEAP/EDEAP hardware registers.
+ *
+ * The type promotion here is deliberate; we're deriving an
+ * unsigned long pfn and offset from hardware regs which are u8/u32.
+ */
+
+static inline unsigned long deap_pfn(u8 edeap, u32 deap)
+{
+ deap >>= PAGE_SHIFT;
+ deap |= (edeap & 1) << (32 - PAGE_SHIFT);
+ return deap;
+}
+
+static inline unsigned long deap_offset(u32 deap)
+{
+ return deap & ~(I3000_DEAP_GRAIN - 1) & ~PAGE_MASK;
+}
+
+static inline int deap_channel(u32 deap)
+{
+ return deap & 1;
+}
+
+#define I3000_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
+ *
+ * 7:0 DRAM ECC Syndrome
+ */
+
+#define I3000_ERRSTS 0xc8 /* Error Status Register (16b)
+ *
+ * 15:12 reserved
+ * 11 MCH Thermal Sensor Event
+ * for SMI/SCI/SERR
+ * 10 reserved
+ * 9 LOCK to non-DRAM Memory Flag (LCKF)
+ * 8 Received Refresh Timeout Flag (RRTOF)
+ * 7:2 reserved
+ * 1 Multi-bit DRAM ECC Error Flag (DMERR)
+ * 0 Single-bit DRAM ECC Error Flag (DSERR)
+ */
#define I3000_ERRSTS_BITS 0x0b03 /* bits which indicate errors */
#define I3000_ERRSTS_UE 0x0002
#define I3000_ERRSTS_CE 0x0001
-#define I3000_ERRCMD 0xca /* Error Command (16b)
- *
- * 15:12 reserved
- * 11 SERR on MCH Thermal Sensor Event (TSESERR)
- * 10 reserved
- * 9 SERR on LOCK to non-DRAM Memory (LCKERR)
- * 8 SERR on DRAM Refresh Timeout (DRTOERR)
- * 7:2 reserved
- * 1 SERR Multiple-Bit DRAM ECC Error (DMERR)
- * 0 SERR on Single-Bit ECC Error (DSERR)
- */
+#define I3000_ERRCMD 0xca /* Error Command (16b)
+ *
+ * 15:12 reserved
+ * 11 SERR on MCH Thermal Sensor Event
+ * (TSESERR)
+ * 10 reserved
+ * 9 SERR on LOCK to non-DRAM Memory
+ * (LCKERR)
+ * 8 SERR on DRAM Refresh Timeout
+ * (DRTOERR)
+ * 7:2 reserved
+ * 1 SERR Multi-Bit DRAM ECC Error
+ * (DMERR)
+ * 0 SERR on Single-Bit ECC Error
+ * (DSERR)
+ */
/* Intel MMIO register space - device 0 function 0 - MMR space */
#define I3000_DRB_SHIFT 25 /* 32MiB grain */
-#define I3000_C0DRB 0x100 /* Channel 0 DRAM Rank Boundary (8b x 4)
- *
- * 7:0 Channel 0 DRAM Rank Boundary Address
- */
-#define I3000_C1DRB 0x180 /* Channel 1 DRAM Rank Boundary (8b x 4)
- *
- * 7:0 Channel 1 DRAM Rank Boundary Address
- */
-
-#define I3000_C0DRA 0x108 /* Channel 0 DRAM Rank Attribute (8b x 2)
- *
- * 7 reserved
- * 6:4 DRAM odd Rank Attribute
- * 3 reserved
- * 2:0 DRAM even Rank Attribute
- *
- * Each attribute defines the page
- * size of the corresponding rank:
- * 000: unpopulated
- * 001: reserved
- * 010: 4 KB
- * 011: 8 KB
- * 100: 16 KB
- * Others: reserved
- */
-#define I3000_C1DRA 0x188 /* Channel 1 DRAM Rank Attribute (8b x 2) */
-#define ODD_RANK_ATTRIB(dra) (((dra) & 0x70) >> 4)
-#define EVEN_RANK_ATTRIB(dra) ((dra) & 0x07)
-
-#define I3000_C0DRC0 0x120 /* DRAM Controller Mode 0 (32b)
- *
- * 31:30 reserved
- * 29 Initialization Complete (IC)
- * 28:11 reserved
- * 10:8 Refresh Mode Select (RMS)
- * 7 reserved
- * 6:4 Mode Select (SMS)
- * 3:2 reserved
- * 1:0 DRAM Type (DT)
- */
-
-#define I3000_C0DRC1 0x124 /* DRAM Controller Mode 1 (32b)
- *
- * 31 Enhanced Addressing Enable (ENHADE)
- * 30:0 reserved
- */
+#define I3000_C0DRB 0x100 /* Channel 0 DRAM Rank Boundary (8b x 4)
+ *
+ * 7:0 Channel 0 DRAM Rank Boundary Address
+ */
+#define I3000_C1DRB 0x180 /* Channel 1 DRAM Rank Boundary (8b x 4)
+ *
+ * 7:0 Channel 1 DRAM Rank Boundary Address
+ */
+
+#define I3000_C0DRA 0x108 /* Channel 0 DRAM Rank Attribute (8b x 2)
+ *
+ * 7 reserved
+ * 6:4 DRAM odd Rank Attribute
+ * 3 reserved
+ * 2:0 DRAM even Rank Attribute
+ *
+ * Each attribute defines the page
+ * size of the corresponding rank:
+ * 000: unpopulated
+ * 001: reserved
+ * 010: 4 KB
+ * 011: 8 KB
+ * 100: 16 KB
+ * Others: reserved
+ */
+#define I3000_C1DRA 0x188 /* Channel 1 DRAM Rank Attribute (8b x 2) */
+
+static inline unsigned char odd_rank_attrib(unsigned char dra)
+{
+ return (dra & 0x70) >> 4;
+}
+
+static inline unsigned char even_rank_attrib(unsigned char dra)
+{
+ return dra & 0x07;
+}
+
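Again for illustration only, a standalone sketch that feeds an invented C0DRA byte through the two helpers below and maps the result onto the page-size table from the comment above:

    #include <stdio.h>

    static unsigned char odd_rank_attrib(unsigned char dra)
    {
        return (dra & 0x70) >> 4;
    }

    static unsigned char even_rank_attrib(unsigned char dra)
    {
        return dra & 0x07;
    }

    int main(void)
    {
        /* attribute encodings: 0 unpopulated, 2/3/4 = 4/8/16 KB */
        static const char * const page_size[8] = {
            "unpopulated", "reserved", "4 KB", "8 KB", "16 KB",
            "reserved", "reserved", "reserved",
        };
        unsigned char dra = 0x23;       /* invented register value */

        printf("odd rank:  %s\n", page_size[odd_rank_attrib(dra)]);  /* 4 KB */
        printf("even rank: %s\n", page_size[even_rank_attrib(dra)]); /* 8 KB */
        return 0;
    }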
+#define I3000_C0DRC0 0x120 /* DRAM Controller Mode 0 (32b)
+ *
+ * 31:30 reserved
+ * 29 Initialization Complete (IC)
+ * 28:11 reserved
+ * 10:8 Refresh Mode Select (RMS)
+ * 7 reserved
+ * 6:4 Mode Select (SMS)
+ * 3:2 reserved
+ * 1:0 DRAM Type (DT)
+ */
+
+#define I3000_C0DRC1 0x124 /* DRAM Controller Mode 1 (32b)
+ *
+ * 31 Enhanced Addressing Enable (ENHADE)
+ * 30:0 reserved
+ */
enum i3000p_chips {
I3000 = 0,
@@ -187,7 +222,8 @@ static void i3000_get_error_info(struct mem_ctl_info *mci,
pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
}
- /* Clear any error bits.
+ /*
+ * Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
@@ -198,8 +234,8 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
struct i3000_error_info *info,
int handle_errors)
{
- int row, multi_chan;
- int pfn, offset, channel;
+ int row, multi_chan, channel;
+ unsigned long pfn, offset;
multi_chan = mci->csrows[0].nr_channels - 1;
@@ -214,9 +250,9 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
info->errsts = info->errsts2;
}
- pfn = I3000_DEAP_PFN(info->edeap, info->deap);
- offset = I3000_DEAP_OFFSET(info->deap);
- channel = I3000_DEAP_CHANNEL(info->deap);
+ pfn = deap_pfn(info->edeap, info->deap);
+ offset = deap_offset(info->deap);
+ channel = deap_channel(info->deap);
row = edac_mc_find_csrow_by_page(mci, pfn);
@@ -245,16 +281,18 @@ static int i3000_is_interleaved(const unsigned char *c0dra,
{
int i;
- /* If the channels aren't populated identically then
+ /*
+ * If the channels aren't populated identically then
* we're not interleaved.
*/
for (i = 0; i < I3000_RANKS_PER_CHANNEL / 2; i++)
- if (ODD_RANK_ATTRIB(c0dra[i]) != ODD_RANK_ATTRIB(c1dra[i]) ||
- EVEN_RANK_ATTRIB(c0dra[i]) !=
- EVEN_RANK_ATTRIB(c1dra[i]))
+ if (odd_rank_attrib(c0dra[i]) != odd_rank_attrib(c1dra[i]) ||
+ even_rank_attrib(c0dra[i]) !=
+ even_rank_attrib(c1dra[i]))
return 0;
- /* If the rank boundaries for the two channels are different
+ /*
+ * If the rank boundaries for the two channels are different
* then we're not interleaved.
*/
for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++)
@@ -288,6 +326,15 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
return -ENODEV;
}
+ switch (edac_op_state) {
+ case EDAC_OPSTATE_POLL:
+ case EDAC_OPSTATE_NMI:
+ break;
+ default:
+ edac_op_state = EDAC_OPSTATE_POLL;
+ break;
+ }
+
c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */
c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */
c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */
@@ -300,7 +347,8 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
iounmap(window);
- /* Figure out how many channels we have.
+ /*
+ * Figure out how many channels we have.
*
* If we have what the datasheet calls "asymmetric channels"
* (essentially the same as what was called "virtual single
@@ -363,7 +411,8 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
csrow->edac_mode = EDAC_UNKNOWN;
}
- /* Clear any error bits.
+ /*
+ * Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
@@ -390,7 +439,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
debugf3("MC: %s(): success\n", __func__);
return 0;
- fail:
+fail:
if (mci)
edac_mc_free(mci);
@@ -409,7 +458,7 @@ static int __devinit i3000_init_one(struct pci_dev *pdev,
return -EIO;
rc = i3000_probe1(pdev, ent->driver_data);
- if (mci_pdev == NULL)
+ if (!mci_pdev)
mci_pdev = pci_dev_get(pdev);
return rc;
@@ -424,7 +473,8 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
if (i3000_pci)
edac_pci_release_generic_ctl(i3000_pci);
- if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+ mci = edac_mc_del_mc(&pdev->dev);
+ if (!mci)
return;
edac_mc_free(mci);
@@ -457,7 +507,7 @@ static int __init i3000_init(void)
if (pci_rc < 0)
goto fail0;
- if (mci_pdev == NULL) {
+ if (!mci_pdev) {
i3000_registered = 0;
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_3000_HB, NULL);
@@ -504,3 +554,6 @@ module_exit(i3000_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akamai Technologies Arthur Ulfeldt/Jason Uhlenkott");
MODULE_DESCRIPTION("MC support for Intel 3000 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index a1f24c42d5ff..5a852017c17a 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -351,7 +351,7 @@ struct i5000_pvt {
u16 b1_ambpresent0; /* Branch 1, Channel 8 */
u16 b1_ambpresent1; /* Branch 1, Channel 1 */
- /* DIMM infomation matrix, allocating architecture maximums */
+ /* DIMM information matrix, allocating architecture maximums */
struct i5000_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
/* Actual values for this controller */
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
new file mode 100644
index 000000000000..065732ddf40c
--- /dev/null
+++ b/drivers/edac/mpc85xx_edac.c
@@ -0,0 +1,1043 @@
+/*
+ * Freescale MPC85xx Memory Controller kernel module
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/edac.h>
+
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <asm/mpc85xx.h>
+#include "edac_module.h"
+#include "edac_core.h"
+#include "mpc85xx_edac.h"
+
+static int edac_dev_idx;
+static int edac_pci_idx;
+static int edac_mc_idx;
+
+static u32 orig_ddr_err_disable;
+static u32 orig_ddr_err_sbe;
+
+/*
+ * PCI Err defines
+ */
+#ifdef CONFIG_PCI
+static u32 orig_pci_err_cap_dr;
+static u32 orig_pci_err_en;
+#endif
+
+static u32 orig_l2_err_disable;
+static u32 orig_hid1;
+
+static const char *mpc85xx_ctl_name = "MPC85xx";
+
+/************************ MC SYSFS parts ***********************************/
+
+static ssize_t mpc85xx_mc_inject_data_hi_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->mc_vbase +
+ MPC85XX_MC_DATA_ERR_INJECT_HI));
+}
+
+static ssize_t mpc85xx_mc_inject_data_lo_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->mc_vbase +
+ MPC85XX_MC_DATA_ERR_INJECT_LO));
+}
+
+static ssize_t mpc85xx_mc_inject_ctrl_show(struct mem_ctl_info *mci, char *data)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
+}
+
+static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static struct mcidev_sysfs_attribute mpc85xx_mc_sysfs_attributes[] = {
+ {
+ .attr = {
+ .name = "inject_data_hi",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_mc_inject_data_hi_show,
+ .store = mpc85xx_mc_inject_data_hi_store},
+ {
+ .attr = {
+ .name = "inject_data_lo",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_mc_inject_data_lo_show,
+ .store = mpc85xx_mc_inject_data_lo_store},
+ {
+ .attr = {
+ .name = "inject_ctrl",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_mc_inject_ctrl_show,
+ .store = mpc85xx_mc_inject_ctrl_store},
+
+ /* End of list */
+ {
+ .attr = {.name = NULL}
+ }
+};
+
+static void mpc85xx_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
+{
+ mci->mc_driver_sysfs_attributes = mpc85xx_mc_sysfs_attributes;
+}
+
+/**************************** PCI Err device ***************************/
+#ifdef CONFIG_PCI
+
+static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
+{
+ struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
+
+ /* master aborts can happen during PCI config cycles */
+ if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
+ return;
+ }
+
+ printk(KERN_ERR "PCI error(s) detected\n");
+ printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect);
+
+ printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
+ printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
+ printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
+ printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
+ printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));
+
+ /* clear error bits */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
+
+ if (err_detect & PCI_EDE_PERR_MASK)
+ edac_pci_handle_pe(pci, pci->ctl_name);
+
+ if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
+ edac_pci_handle_npe(pci, pci->ctl_name);
+}
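For illustration only, a standalone sketch of the masking logic used in mpc85xx_pci_check() above, run over a few invented ERR_DR values; the PCI_EDE_* constants mirror the ones added in mpc85xx_edac.h later in this patch:

    #include <stdio.h>

    #define PCI_EDE_MST_ABRT   0x00000040
    #define PCI_EDE_TGT_PERR   0x00000080
    #define PCI_EDE_MST_PERR   0x00000100
    #define PCI_EDE_ADDR_PERR  0x00000400
    #define PCI_EDE_MULTI_ERR  0x80000000
    #define PCI_EDE_PERR_MASK  (PCI_EDE_TGT_PERR | PCI_EDE_MST_PERR | \
                                PCI_EDE_ADDR_PERR)

    static void classify(unsigned int err_detect)
    {
        /* only master abort (plus the multi-error flag)? just clear it */
        if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
            printf("%#010x: config-cycle master abort, cleared silently\n",
                   err_detect);
            return;
        }
        if (err_detect & PCI_EDE_PERR_MASK)
            printf("%#010x: parity error reported\n", err_detect);
        if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
            printf("%#010x: non-parity error reported\n", err_detect);
    }

    int main(void)
    {
        classify(PCI_EDE_MST_ABRT | PCI_EDE_MULTI_ERR); /* ignored */
        classify(PCI_EDE_ADDR_PERR);                    /* parity only */
        classify(PCI_EDE_MST_ABRT | PCI_EDE_TGT_PERR);  /* both paths fire */
        return 0;
    }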
+
+static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
+{
+ struct edac_pci_ctl_info *pci = dev_id;
+ struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
+
+ if (!err_detect)
+ return IRQ_NONE;
+
+ mpc85xx_pci_check(pci);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mpc85xx_pci_err_probe(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci;
+ struct mpc85xx_pci_pdata *pdata;
+ struct resource *r;
+ int res = 0;
+
+ if (!devres_open_group(&pdev->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
+ if (!pci)
+ return -ENOMEM;
+
+ pdata = pci->pvt_info;
+ pdata->name = "mpc85xx_pci_err";
+ pdata->irq = NO_IRQ;
+ platform_set_drvdata(pdev, pci);
+ pci->dev = &pdev->dev;
+ pci->mod_name = EDAC_MOD_STR;
+ pci->ctl_name = pdata->name;
+ pci->dev_name = pdev->dev.bus_id;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ pci->edac_check = mpc85xx_pci_check;
+
+ pdata->edac_idx = edac_pci_idx++;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "PCI err regs\n", __func__);
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, r->start,
+ r->end - r->start + 1, pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->pci_vbase = devm_ioremap(&pdev->dev, r->start,
+ r->end - r->start + 1);
+ if (!pdata->pci_vbase) {
+ printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ orig_pci_err_cap_dr =
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
+
+ /* PCI master abort is expected during config cycles */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
+
+ orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
+
+ /* disable master abort reporting */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
+
+ /* clear error bits */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
+
+ if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
+ debugf3("%s(): failed edac_pci_add_device()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev, pdata->irq,
+ mpc85xx_pci_isr, IRQF_DISABLED,
+ "[EDAC] PCI err", pci);
+ if (res < 0) {
+ printk(KERN_ERR
+ "%s: Unable to request irq %d for "
+ "MPC85xx PCI err\n", __func__, pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
+ pdata->irq);
+ }
+
+ devres_remove_group(&pdev->dev, mpc85xx_pci_err_probe);
+ debugf3("%s(): success\n", __func__);
+ printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
+
+ return 0;
+
+err2:
+ edac_pci_del_device(&pdev->dev);
+err:
+ edac_pci_free_ctl_info(pci);
+ devres_release_group(&pdev->dev, mpc85xx_pci_err_probe);
+ return res;
+}
+
+static int mpc85xx_pci_err_remove(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
+ struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+
+ debugf0("%s()\n", __func__);
+
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
+ orig_pci_err_cap_dr);
+
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);
+
+ edac_pci_del_device(pci->dev);
+
+ if (edac_op_state == EDAC_OPSTATE_INT)
+ irq_dispose_mapping(pdata->irq);
+
+ edac_pci_free_ctl_info(pci);
+
+ return 0;
+}
+
+static struct platform_driver mpc85xx_pci_err_driver = {
+ .probe = mpc85xx_pci_err_probe,
+ .remove = __devexit_p(mpc85xx_pci_err_remove),
+ .driver = {
+ .name = "mpc85xx_pci_err",
+ }
+};
+
+#endif /* CONFIG_PCI */
+
+/**************************** L2 Err device ***************************/
+
+/************************ L2 SYSFS parts ***********************************/
+
+static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info
+ *edac_dev, char *data)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI));
+}
+
+static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info
+ *edac_dev, char *data)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO));
+}
+
+static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info
+ *edac_dev, char *data)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL));
+}
+
+static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info
+ *edac_dev, const char *data,
+ size_t count)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info
+ *edac_dev, const char *data,
+ size_t count)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info
+ *edac_dev, const char *data,
+ size_t count)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = {
+ {
+ .attr = {
+ .name = "inject_data_hi",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_l2_inject_data_hi_show,
+ .store = mpc85xx_l2_inject_data_hi_store},
+ {
+ .attr = {
+ .name = "inject_data_lo",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_l2_inject_data_lo_show,
+ .store = mpc85xx_l2_inject_data_lo_store},
+ {
+ .attr = {
+ .name = "inject_ctrl",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_l2_inject_ctrl_show,
+ .store = mpc85xx_l2_inject_ctrl_store},
+
+ /* End of list */
+ {
+ .attr = {.name = NULL}
+ }
+};
+
+static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info
+ *edac_dev)
+{
+ edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes;
+}
+
+/***************************** L2 ops ***********************************/
+
+static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
+
+ if (!(err_detect & L2_EDE_MASK))
+ return;
+
+ printk(KERN_ERR "ECC Error in CPU L2 cache\n");
+ printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect);
+ printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
+ printk(KERN_ERR "L2 Error Capture Data Low Register: 0x%08x\n",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
+ printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
+ printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
+ printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));
+
+ /* clear error detect register */
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect);
+
+ if (err_detect & L2_EDE_CE_MASK)
+ edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
+
+ if (err_detect & L2_EDE_UE_MASK)
+ edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *edac_dev = dev_id;
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
+
+ if (!(err_detect & L2_EDE_MASK))
+ return IRQ_NONE;
+
+ mpc85xx_l2_check(edac_dev);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mpc85xx_l2_err_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct edac_device_ctl_info *edac_dev;
+ struct mpc85xx_l2_pdata *pdata;
+ struct resource r;
+ int res;
+
+ if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
+ "cpu", 1, "L", 1, 2, NULL, 0,
+ edac_dev_idx);
+ if (!edac_dev) {
+ devres_release_group(&op->dev, mpc85xx_l2_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = edac_dev->pvt_info;
+ pdata->name = "mpc85xx_l2_err";
+ pdata->irq = NO_IRQ;
+ edac_dev->dev = &op->dev;
+ dev_set_drvdata(edac_dev->dev, edac_dev);
+ edac_dev->ctl_name = pdata->name;
+ edac_dev->dev_name = pdata->name;
+
+ res = of_address_to_resource(op->node, 0, &r);
+ if (res) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "L2 err regs\n", __func__);
+ goto err;
+ }
+
+ /* we only need the error registers */
+ r.start += 0xe00;
+
+ if (!devm_request_mem_region(&op->dev, r.start,
+ r.end - r.start + 1, pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->l2_vbase = devm_ioremap(&op->dev, r.start, r.end - r.start + 1);
+ if (!pdata->l2_vbase) {
+ printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0);
+
+ orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS);
+
+ /* clear the err_dis */
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0);
+
+ edac_dev->mod_name = EDAC_MOD_STR;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ edac_dev->edac_check = mpc85xx_l2_check;
+
+ mpc85xx_set_l2_sysfs_attributes(edac_dev);
+
+ pdata->edac_idx = edac_dev_idx++;
+
+ if (edac_device_add_device(edac_dev) > 0) {
+ debugf3("%s(): failed edac_device_add_device()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = irq_of_parse_and_map(op->node, 0);
+ res = devm_request_irq(&op->dev, pdata->irq,
+ mpc85xx_l2_isr, IRQF_DISABLED,
+ "[EDAC] L2 err", edac_dev);
+ if (res < 0) {
+ printk(KERN_ERR
+ "%s: Unable to request irq %d for "
+ "MPC85xx L2 err\n", __func__, pdata->irq);
+ irq_dispose_mapping(pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n",
+ pdata->irq);
+
+ edac_dev->op_state = OP_RUNNING_INTERRUPT;
+
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK);
+ }
+
+ devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
+
+ debugf3("%s(): success\n", __func__);
+ printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");
+
+ return 0;
+
+err2:
+ edac_device_del_device(&op->dev);
+err:
+ devres_release_group(&op->dev, mpc85xx_l2_err_probe);
+ edac_device_free_ctl_info(edac_dev);
+ return res;
+}
+
+static int mpc85xx_l2_err_remove(struct of_device *op)
+{
+ struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+
+ debugf0("%s()\n", __func__);
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
+ irq_dispose_mapping(pdata->irq);
+ }
+
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
+ edac_device_del_device(&op->dev);
+ edac_device_free_ctl_info(edac_dev);
+ return 0;
+}
+
+static struct of_device_id mpc85xx_l2_err_of_match[] = {
+ {
+ .compatible = "fsl,8540-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,8541-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,8544-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,8548-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,8555-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,8568-l2-cache-controller",
+ },
+ {},
+};
+
+static struct of_platform_driver mpc85xx_l2_err_driver = {
+ .owner = THIS_MODULE,
+ .name = "mpc85xx_l2_err",
+ .match_table = mpc85xx_l2_err_of_match,
+ .probe = mpc85xx_l2_err_probe,
+ .remove = mpc85xx_l2_err_remove,
+ .driver = {
+ .name = "mpc85xx_l2_err",
+ .owner = THIS_MODULE,
+ },
+};
+
+/**************************** MC Err device ***************************/
+
+static void mpc85xx_mc_check(struct mem_ctl_info *mci)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ struct csrow_info *csrow;
+ u32 err_detect;
+ u32 syndrome;
+ u32 err_addr;
+ u32 pfn;
+ int row_index;
+
+ err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
+ if (!err_detect)
+ return;
+
+ mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
+ err_detect);
+
+ /* no more processing if not ECC bit errors */
+ if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
+ return;
+ }
+
+ syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC);
+ err_addr = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS);
+ pfn = err_addr >> PAGE_SHIFT;
+
+ for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
+ csrow = &mci->csrows[row_index];
+ if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
+ break;
+ }
+
+ mpc85xx_mc_printk(mci, KERN_ERR, "Capture Data High: %#8.8x\n",
+ in_be32(pdata->mc_vbase +
+ MPC85XX_MC_CAPTURE_DATA_HI));
+ mpc85xx_mc_printk(mci, KERN_ERR, "Capture Data Low: %#8.8x\n",
+ in_be32(pdata->mc_vbase +
+ MPC85XX_MC_CAPTURE_DATA_LO));
+ mpc85xx_mc_printk(mci, KERN_ERR, "syndrome: %#8.8x\n", syndrome);
+ mpc85xx_mc_printk(mci, KERN_ERR, "err addr: %#8.8x\n", err_addr);
+ mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
+
+ /* we are out of range */
+ if (row_index == mci->nr_csrows)
+ mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
+
+ if (err_detect & DDR_EDE_SBE)
+ edac_mc_handle_ce(mci, pfn, err_addr & PAGE_MASK,
+ syndrome, row_index, 0, mci->ctl_name);
+
+ if (err_detect & DDR_EDE_MBE)
+ edac_mc_handle_ue(mci, pfn, err_addr & PAGE_MASK,
+ row_index, mci->ctl_name);
+
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
+}
+
+static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
+{
+ struct mem_ctl_info *mci = dev_id;
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
+ if (!err_detect)
+ return IRQ_NONE;
+
+ mpc85xx_mc_check(mci);
+
+ return IRQ_HANDLED;
+}
+
+static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ struct csrow_info *csrow;
+ u32 sdram_ctl;
+ u32 sdtype;
+ enum mem_type mtype;
+ u32 cs_bnds;
+ int index;
+
+ sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
+
+ sdtype = sdram_ctl & DSC_SDTYPE_MASK;
+ if (sdram_ctl & DSC_RD_EN) {
+ switch (sdtype) {
+ case DSC_SDTYPE_DDR:
+ mtype = MEM_RDDR;
+ break;
+ case DSC_SDTYPE_DDR2:
+ mtype = MEM_RDDR2;
+ break;
+ default:
+ mtype = MEM_UNKNOWN;
+ break;
+ }
+ } else {
+ switch (sdtype) {
+ case DSC_SDTYPE_DDR:
+ mtype = MEM_DDR;
+ break;
+ case DSC_SDTYPE_DDR2:
+ mtype = MEM_DDR2;
+ break;
+ default:
+ mtype = MEM_UNKNOWN;
+ break;
+ }
+ }
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ u32 start;
+ u32 end;
+
+ csrow = &mci->csrows[index];
+ cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
+ (index * MPC85XX_MC_CS_BNDS_OFS));
+ start = (cs_bnds & 0xfff0000) << 4;
+ end = ((cs_bnds & 0xfff) << 20);
+ if (start)
+ start |= 0xfffff;
+ if (end)
+ end |= 0xfffff;
+
+ if (start == end)
+ continue; /* not populated */
+
+ csrow->first_page = start >> PAGE_SHIFT;
+ csrow->last_page = end >> PAGE_SHIFT;
+ csrow->nr_pages = csrow->last_page + 1 - csrow->first_page;
+ csrow->grain = 8;
+ csrow->mtype = mtype;
+ csrow->dtype = DEV_UNKNOWN;
+ if (sdram_ctl & DSC_X32_EN)
+ csrow->dtype = DEV_X32;
+ csrow->edac_mode = EDAC_SECDED;
+ }
+}
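An illustrative, standalone walk through the CS_BNDS arithmetic in mpc85xx_init_csrows() above, assuming 4 KiB pages; the bounds value is invented and describes a 32 MiB rank starting at address 0:

    #include <stdio.h>

    #define PAGE_SHIFT  12

    int main(void)
    {
        unsigned int cs_bnds = 0x0000001f;      /* invented CS_BNDS contents */
        unsigned int start, end;

        start = (cs_bnds & 0xfff0000) << 4;     /* start address, 1 MiB units */
        end = (cs_bnds & 0xfff) << 20;          /* end address, 1 MiB units */
        if (start)
            start |= 0xfffff;
        if (end)
            end |= 0xfffff;                     /* make the end bound inclusive */

        if (start == end) {
            printf("rank not populated\n");
            return 0;
        }

        /* prints: first_page=0 last_page=0x1fff nr_pages=8192 */
        printf("first_page=%#x last_page=%#x nr_pages=%u\n",
               start >> PAGE_SHIFT, end >> PAGE_SHIFT,
               (end >> PAGE_SHIFT) + 1 - (start >> PAGE_SHIFT));
        return 0;
    }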
+
+static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct mem_ctl_info *mci;
+ struct mpc85xx_mc_pdata *pdata;
+ struct resource r;
+ u32 sdram_ctl;
+ int res;
+
+ if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx);
+ if (!mci) {
+ devres_release_group(&op->dev, mpc85xx_mc_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = mci->pvt_info;
+ pdata->name = "mpc85xx_mc_err";
+ pdata->irq = NO_IRQ;
+ mci->dev = &op->dev;
+ pdata->edac_idx = edac_mc_idx++;
+ dev_set_drvdata(mci->dev, mci);
+ mci->ctl_name = pdata->name;
+ mci->dev_name = pdata->name;
+
+ res = of_address_to_resource(op->node, 0, &r);
+ if (res) {
+ printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
+ __func__);
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&op->dev, r.start,
+ r.end - r.start + 1, pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->mc_vbase = devm_ioremap(&op->dev, r.start, r.end - r.start + 1);
+ if (!pdata->mc_vbase) {
+ printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
+ if (!(sdram_ctl & DSC_ECC_EN)) {
+ /* no ECC */
+ printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
+ res = -ENODEV;
+ goto err;
+ }
+
+ debugf3("%s(): init mci\n", __func__);
+ mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
+ MEM_FLAG_DDR | MEM_FLAG_DDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = MPC85XX_REVISION;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ mci->edac_check = mpc85xx_mc_check;
+
+ mci->ctl_page_to_phys = NULL;
+
+ mci->scrub_mode = SCRUB_SW_SRC;
+
+ mpc85xx_set_mc_sysfs_attributes(mci);
+
+ mpc85xx_init_csrows(mci);
+
+#ifdef CONFIG_EDAC_DEBUG
+ edac_mc_register_mcidev_debug((struct attribute **)debug_attr);
+#endif
+
+ /* store the original error disable bits */
+ orig_ddr_err_disable =
+ in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0);
+
+ /* clear all error bits */
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN,
+ DDR_EIE_MBEE | DDR_EIE_SBEE);
+
+ /* store the original error management threshold */
+ orig_ddr_err_sbe = in_be32(pdata->mc_vbase +
+ MPC85XX_MC_ERR_SBE) & 0xff0000;
+
+ /* set threshold to 1 error per interrupt */
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);
+
+ /* register interrupts */
+ pdata->irq = irq_of_parse_and_map(op->node, 0);
+ res = devm_request_irq(&op->dev, pdata->irq,
+ mpc85xx_mc_isr, IRQF_DISABLED,
+ "[EDAC] MC err", mci);
+ if (res < 0) {
+ printk(KERN_ERR "%s: Unable to request irq %d for "
+ "MPC85xx DRAM ERR\n", __func__, pdata->irq);
+ irq_dispose_mapping(pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n",
+ pdata->irq);
+ }
+
+ devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
+ debugf3("%s(): success\n", __func__);
+ printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");
+
+ return 0;
+
+err2:
+ edac_mc_del_mc(&op->dev);
+err:
+ devres_release_group(&op->dev, mpc85xx_mc_err_probe);
+ edac_mc_free(mci);
+ return res;
+}
+
+static int mpc85xx_mc_err_remove(struct of_device *op)
+{
+ struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+
+ debugf0("%s()\n", __func__);
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
+ irq_dispose_mapping(pdata->irq);
+ }
+
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE,
+ orig_ddr_err_disable);
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);
+
+ edac_mc_del_mc(&op->dev);
+ edac_mc_free(mci);
+ return 0;
+}
+
+static struct of_device_id mpc85xx_mc_err_of_match[] = {
+ {
+ .compatible = "fsl,8540-memory-controller",
+ },
+ {
+ .compatible = "fsl,8541-memory-controller",
+ },
+ {
+ .compatible = "fsl,8544-memory-controller",
+ },
+ {
+ .compatible = "fsl,8548-memory-controller",
+ },
+ {
+ .compatible = "fsl,8555-memory-controller",
+ },
+ {
+ .compatible = "fsl,8568-memory-controller",
+ },
+ {},
+};
+
+static struct of_platform_driver mpc85xx_mc_err_driver = {
+ .owner = THIS_MODULE,
+ .name = "mpc85xx_mc_err",
+ .match_table = mpc85xx_mc_err_of_match,
+ .probe = mpc85xx_mc_err_probe,
+ .remove = mpc85xx_mc_err_remove,
+ .driver = {
+ .name = "mpc85xx_mc_err",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mpc85xx_mc_init(void)
+{
+ int res = 0;
+
+ printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
+ "(C) 2006 MontaVista Software\n");
+
+ /* make sure error reporting method is sane */
+ switch (edac_op_state) {
+ case EDAC_OPSTATE_POLL:
+ case EDAC_OPSTATE_INT:
+ break;
+ default:
+ edac_op_state = EDAC_OPSTATE_INT;
+ break;
+ }
+
+ res = of_register_platform_driver(&mpc85xx_mc_err_driver);
+ if (res)
+ printk(KERN_WARNING EDAC_MOD_STR "MC fails to register\n");
+
+ res = of_register_platform_driver(&mpc85xx_l2_err_driver);
+ if (res)
+ printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n");
+
+#ifdef CONFIG_PCI
+ res = platform_driver_register(&mpc85xx_pci_err_driver);
+ if (res)
+ printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n");
+#endif
+
+ /*
+ * need to clear HID1[RFXE] to disable machine check int
+ * so we can catch it
+ */
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ orig_hid1 = mfspr(SPRN_HID1);
+ mtspr(SPRN_HID1, (orig_hid1 & ~0x20000));
+ }
+
+ return 0;
+}
+
+module_init(mpc85xx_mc_init);
+
+static void __exit mpc85xx_mc_exit(void)
+{
+ mtspr(SPRN_HID1, orig_hid1);
+#ifdef CONFIG_PCI
+ platform_driver_unregister(&mpc85xx_pci_err_driver);
+#endif
+ of_unregister_platform_driver(&mpc85xx_l2_err_driver);
+ of_unregister_platform_driver(&mpc85xx_mc_err_driver);
+}
+
+module_exit(mpc85xx_mc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("MontaVista Software, Inc.");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state,
+ "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
new file mode 100644
index 000000000000..135b3539a030
--- /dev/null
+++ b/drivers/edac/mpc85xx_edac.h
@@ -0,0 +1,162 @@
+/*
+ * Freescale MPC85xx Memory Controller kernel module
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#ifndef _MPC85XX_EDAC_H_
+#define _MPC85XX_EDAC_H_
+
+#define MPC85XX_REVISION " Ver: 2.0.0 " __DATE__
+#define EDAC_MOD_STR "MPC85xx_edac"
+
+#define mpc85xx_printk(level, fmt, arg...) \
+ edac_printk(level, "MPC85xx", fmt, ##arg)
+
+#define mpc85xx_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "MPC85xx", fmt, ##arg)
+
+/*
+ * DRAM error defines
+ */
+
+/* DDR_SDRAM_CFG */
+#define MPC85XX_MC_DDR_SDRAM_CFG 0x0110
+#define MPC85XX_MC_CS_BNDS_0 0x0000
+#define MPC85XX_MC_CS_BNDS_1 0x0008
+#define MPC85XX_MC_CS_BNDS_2 0x0010
+#define MPC85XX_MC_CS_BNDS_3 0x0018
+#define MPC85XX_MC_CS_BNDS_OFS 0x0008
+
+#define MPC85XX_MC_DATA_ERR_INJECT_HI 0x0e00
+#define MPC85XX_MC_DATA_ERR_INJECT_LO 0x0e04
+#define MPC85XX_MC_ECC_ERR_INJECT 0x0e08
+#define MPC85XX_MC_CAPTURE_DATA_HI 0x0e20
+#define MPC85XX_MC_CAPTURE_DATA_LO 0x0e24
+#define MPC85XX_MC_CAPTURE_ECC 0x0e28
+#define MPC85XX_MC_ERR_DETECT 0x0e40
+#define MPC85XX_MC_ERR_DISABLE 0x0e44
+#define MPC85XX_MC_ERR_INT_EN 0x0e48
+#define MPC85XX_MC_CAPTURE_ATRIBUTES 0x0e4c
+#define MPC85XX_MC_CAPTURE_ADDRESS 0x0e50
+#define MPC85XX_MC_ERR_SBE 0x0e58
+
+#define DSC_MEM_EN 0x80000000
+#define DSC_ECC_EN 0x20000000
+#define DSC_RD_EN 0x10000000
+
+#define DSC_SDTYPE_MASK 0x07000000
+
+#define DSC_SDTYPE_DDR 0x02000000
+#define DSC_SDTYPE_DDR2 0x03000000
+#define DSC_X32_EN 0x00000020
+
+/* Err_Int_En */
+#define DDR_EIE_MSEE 0x1 /* memory select */
+#define DDR_EIE_SBEE 0x4 /* single-bit ECC error */
+#define DDR_EIE_MBEE 0x8 /* multi-bit ECC error */
+
+/* Err_Detect */
+#define DDR_EDE_MSE 0x1 /* memory select */
+#define DDR_EDE_SBE 0x4 /* single-bit ECC error */
+#define DDR_EDE_MBE 0x8 /* multi-bit ECC error */
+#define DDR_EDE_MME 0x80000000 /* multiple memory errors */
+
+/* Err_Disable */
+#define DDR_EDI_MSED 0x1 /* memory select disable */
+#define DDR_EDI_SBED 0x4 /* single-bit ECC error disable */
+#define DDR_EDI_MBED 0x8 /* multi-bit ECC error disable */
+
+/*
+ * L2 Err defines
+ */
+#define MPC85XX_L2_ERRINJHI 0x0000
+#define MPC85XX_L2_ERRINJLO 0x0004
+#define MPC85XX_L2_ERRINJCTL 0x0008
+#define MPC85XX_L2_CAPTDATAHI 0x0020
+#define MPC85XX_L2_CAPTDATALO 0x0024
+#define MPC85XX_L2_CAPTECC 0x0028
+#define MPC85XX_L2_ERRDET 0x0040
+#define MPC85XX_L2_ERRDIS 0x0044
+#define MPC85XX_L2_ERRINTEN 0x0048
+#define MPC85XX_L2_ERRATTR 0x004c
+#define MPC85XX_L2_ERRADDR 0x0050
+#define MPC85XX_L2_ERRCTL 0x0058
+
+/* Error Interrupt Enable */
+#define L2_EIE_L2CFGINTEN 0x1
+#define L2_EIE_SBECCINTEN 0x4
+#define L2_EIE_MBECCINTEN 0x8
+#define L2_EIE_TPARINTEN 0x10
+#define L2_EIE_MASK (L2_EIE_L2CFGINTEN | L2_EIE_SBECCINTEN | \
+ L2_EIE_MBECCINTEN | L2_EIE_TPARINTEN)
+
+/* Error Detect */
+#define L2_EDE_L2CFGERR 0x1
+#define L2_EDE_SBECCERR 0x4
+#define L2_EDE_MBECCERR 0x8
+#define L2_EDE_TPARERR 0x10
+#define L2_EDE_MULL2ERR 0x80000000
+
+#define L2_EDE_CE_MASK L2_EDE_SBECCERR
+#define L2_EDE_UE_MASK (L2_EDE_L2CFGERR | L2_EDE_MBECCERR | \
+ L2_EDE_TPARERR)
+#define L2_EDE_MASK (L2_EDE_L2CFGERR | L2_EDE_SBECCERR | \
+ L2_EDE_MBECCERR | L2_EDE_TPARERR | L2_EDE_MULL2ERR)
+
+/*
+ * PCI Err defines
+ */
+#define PCI_EDE_TOE 0x00000001
+#define PCI_EDE_SCM 0x00000002
+#define PCI_EDE_IRMSV 0x00000004
+#define PCI_EDE_ORMSV 0x00000008
+#define PCI_EDE_OWMSV 0x00000010
+#define PCI_EDE_TGT_ABRT 0x00000020
+#define PCI_EDE_MST_ABRT 0x00000040
+#define PCI_EDE_TGT_PERR 0x00000080
+#define PCI_EDE_MST_PERR 0x00000100
+#define PCI_EDE_RCVD_SERR 0x00000200
+#define PCI_EDE_ADDR_PERR 0x00000400
+#define PCI_EDE_MULTI_ERR 0x80000000
+
+#define PCI_EDE_PERR_MASK (PCI_EDE_TGT_PERR | PCI_EDE_MST_PERR | \
+ PCI_EDE_ADDR_PERR)
+
+#define MPC85XX_PCI_ERR_DR 0x0000
+#define MPC85XX_PCI_ERR_CAP_DR 0x0004
+#define MPC85XX_PCI_ERR_EN 0x0008
+#define MPC85XX_PCI_ERR_ATTRIB 0x000c
+#define MPC85XX_PCI_ERR_ADDR 0x0010
+#define MPC85XX_PCI_ERR_EXT_ADDR 0x0014
+#define MPC85XX_PCI_ERR_DL 0x0018
+#define MPC85XX_PCI_ERR_DH 0x001c
+#define MPC85XX_PCI_GAS_TIMR 0x0020
+#define MPC85XX_PCI_PCIX_TIMR 0x0024
+
+struct mpc85xx_mc_pdata {
+ char *name;
+ int edac_idx;
+ void __iomem *mc_vbase;
+ int irq;
+};
+
+struct mpc85xx_l2_pdata {
+ char *name;
+ int edac_idx;
+ void __iomem *l2_vbase;
+ int irq;
+};
+
+struct mpc85xx_pci_pdata {
+ char *name;
+ int edac_idx;
+ void __iomem *pci_vbase;
+ int irq;
+};
+
+#endif
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
new file mode 100644
index 000000000000..bf071f140a05
--- /dev/null
+++ b/drivers/edac/mv64x60_edac.c
@@ -0,0 +1,855 @@
+/*
+ * Marvell MV64x60 Memory Controller kernel module for PPC platforms
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+#include "mv64x60_edac.h"
+
+static const char *mv64x60_ctl_name = "MV64x60";
+static int edac_dev_idx;
+static int edac_pci_idx;
+static int edac_mc_idx;
+
+/*********************** PCI err device **********************************/
+#ifdef CONFIG_PCI
+static void mv64x60_pci_check(struct edac_pci_ctl_info *pci)
+{
+ struct mv64x60_pci_pdata *pdata = pci->pvt_info;
+ u32 cause;
+
+ cause = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
+ if (!cause)
+ return;
+
+ printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose);
+ printk(KERN_ERR "Cause register: 0x%08x\n", cause);
+ printk(KERN_ERR "Address Low: 0x%08x\n",
+ in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO));
+ printk(KERN_ERR "Address High: 0x%08x\n",
+ in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI));
+ printk(KERN_ERR "Attribute: 0x%08x\n",
+ in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR));
+ printk(KERN_ERR "Command: 0x%08x\n",
+ in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD));
+ out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, ~cause);
+
+ if (cause & MV64X60_PCI_PE_MASK)
+ edac_pci_handle_pe(pci, pci->ctl_name);
+
+ if (!(cause & MV64X60_PCI_PE_MASK))
+ edac_pci_handle_npe(pci, pci->ctl_name);
+}
+
+static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id)
+{
+ struct edac_pci_ctl_info *pci = dev_id;
+ struct mv64x60_pci_pdata *pdata = pci->pvt_info;
+ u32 val;
+
+ val = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
+ if (!val)
+ return IRQ_NONE;
+
+ mv64x60_pci_check(pci);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci;
+ struct mv64x60_pci_pdata *pdata;
+ struct resource *r;
+ int res = 0;
+
+ if (!devres_open_group(&pdev->dev, mv64x60_pci_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mv64x60_pci_err");
+ if (!pci)
+ return -ENOMEM;
+
+ pdata = pci->pvt_info;
+
+ pdata->pci_hose = pdev->id;
+ pdata->name = "mv64x60_pci_err";
+ pdata->irq = NO_IRQ;
+ platform_set_drvdata(pdev, pci);
+ pci->dev = &pdev->dev;
+ pci->dev_name = pdev->dev.bus_id;
+ pci->mod_name = EDAC_MOD_STR;
+ pci->ctl_name = pdata->name;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ pci->edac_check = mv64x60_pci_check;
+
+ pdata->edac_idx = edac_pci_idx++;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "PCI err regs\n", __func__);
+ res = -ENOENT;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->pci_vbase = devm_ioremap(&pdev->dev,
+ r->start,
+ r->end - r->start + 1);
+ if (!pdata->pci_vbase) {
+ printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, 0);
+ out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 0);
+ out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK,
+ MV64X60_PCIx_ERR_MASK_VAL);
+
+ if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
+ debugf3("%s(): failed edac_pci_add_device()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev,
+ pdata->irq,
+ mv64x60_pci_isr,
+ IRQF_DISABLED,
+ "[EDAC] PCI err",
+ pci);
+ if (res < 0) {
+ printk(KERN_ERR "%s: Unable to request irq %d for "
+ "MV64x60 PCI ERR\n", __func__, pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
+ pdata->irq);
+ }
+
+ devres_remove_group(&pdev->dev, mv64x60_pci_err_probe);
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+
+ return 0;
+
+err2:
+ edac_pci_del_device(&pdev->dev);
+err:
+ edac_pci_free_ctl_info(pci);
+ devres_release_group(&pdev->dev, mv64x60_pci_err_probe);
+ return res;
+}
+
+static int mv64x60_pci_err_remove(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
+
+ debugf0("%s()\n", __func__);
+
+ edac_pci_del_device(&pdev->dev);
+
+ edac_pci_free_ctl_info(pci);
+
+ return 0;
+}
+
+static struct platform_driver mv64x60_pci_err_driver = {
+ .probe = mv64x60_pci_err_probe,
+ .remove = __devexit_p(mv64x60_pci_err_remove),
+ .driver = {
+ .name = "mv64x60_pci_err",
+ }
+};
+
+#endif /* CONFIG_PCI */
+
+/*********************** SRAM err device **********************************/
+static void mv64x60_sram_check(struct edac_device_ctl_info *edac_dev)
+{
+ struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
+ u32 cause;
+
+ cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
+ if (!cause)
+ return;
+
+ printk(KERN_ERR "Error in internal SRAM\n");
+ printk(KERN_ERR "Cause register: 0x%08x\n", cause);
+ printk(KERN_ERR "Address Low: 0x%08x\n",
+ in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO));
+ printk(KERN_ERR "Address High: 0x%08x\n",
+ in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI));
+ printk(KERN_ERR "Data Low: 0x%08x\n",
+ in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO));
+ printk(KERN_ERR "Data High: 0x%08x\n",
+ in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI));
+ printk(KERN_ERR "Parity: 0x%08x\n",
+ in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY));
+ out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
+
+ edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *edac_dev = dev_id;
+ struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
+ u32 cause;
+
+ cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
+ if (!cause)
+ return IRQ_NONE;
+
+ mv64x60_sram_check(edac_dev);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev;
+ struct mv64x60_sram_pdata *pdata;
+ struct resource *r;
+ int res = 0;
+
+ if (!devres_open_group(&pdev->dev, mv64x60_sram_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
+ "sram", 1, NULL, 0, 0, NULL, 0,
+ edac_dev_idx);
+ if (!edac_dev) {
+ devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = edac_dev->pvt_info;
+ pdata->name = "mv64x60_sram_err";
+ pdata->irq = NO_IRQ;
+ edac_dev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, edac_dev);
+ edac_dev->dev_name = pdev->dev.bus_id;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "SRAM err regs\n", __func__);
+ res = -ENOENT;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->sram_vbase = devm_ioremap(&pdev->dev,
+ r->start,
+ r->end - r->start + 1);
+ if (!pdata->sram_vbase) {
+ printk(KERN_ERR "%s: Unable to setup SRAM err regs\n",
+ __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ /* setup SRAM err registers */
+ out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
+
+ edac_dev->mod_name = EDAC_MOD_STR;
+ edac_dev->ctl_name = pdata->name;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ edac_dev->edac_check = mv64x60_sram_check;
+
+ pdata->edac_idx = edac_dev_idx++;
+
+ if (edac_device_add_device(edac_dev) > 0) {
+ debugf3("%s(): failed edac_device_add_device()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev,
+ pdata->irq,
+ mv64x60_sram_isr,
+ IRQF_DISABLED,
+ "[EDAC] SRAM err",
+ edac_dev);
+ if (res < 0) {
+ printk(KERN_ERR
+ "%s: Unable to request irq %d for "
+ "MV64x60 SRAM ERR\n", __func__, pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for SRAM Err\n",
+ pdata->irq);
+ }
+
+ devres_remove_group(&pdev->dev, mv64x60_sram_err_probe);
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+
+ return 0;
+
+err2:
+ edac_device_del_device(&pdev->dev);
+err:
+ devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
+ edac_device_free_ctl_info(edac_dev);
+ return res;
+}
+
+static int mv64x60_sram_err_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
+
+ debugf0("%s()\n", __func__);
+
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(edac_dev);
+
+ return 0;
+}
+
+static struct platform_driver mv64x60_sram_err_driver = {
+ .probe = mv64x60_sram_err_probe,
+ .remove = mv64x60_sram_err_remove,
+ .driver = {
+ .name = "mv64x60_sram_err",
+ }
+};
+
+/*********************** CPU err device **********************************/
+static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev)
+{
+ struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
+ u32 cause;
+
+ cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
+ MV64x60_CPU_CAUSE_MASK;
+ if (!cause)
+ return;
+
+ printk(KERN_ERR "Error on CPU interface\n");
+ printk(KERN_ERR "Cause register: 0x%08x\n", cause);
+ printk(KERN_ERR "Address Low: 0x%08x\n",
+ in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO));
+ printk(KERN_ERR "Address High: 0x%08x\n",
+ in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI));
+ printk(KERN_ERR "Data Low: 0x%08x\n",
+ in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO));
+ printk(KERN_ERR "Data High: 0x%08x\n",
+ in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI));
+ printk(KERN_ERR "Parity: 0x%08x\n",
+ in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY));
+ out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
+
+ edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *edac_dev = dev_id;
+ struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
+ u32 cause;
+
+ cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
+ MV64x60_CPU_CAUSE_MASK;
+ if (!cause)
+ return IRQ_NONE;
+
+ mv64x60_cpu_check(edac_dev);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev;
+ struct resource *r;
+ struct mv64x60_cpu_pdata *pdata;
+ int res = 0;
+
+ if (!devres_open_group(&pdev->dev, mv64x60_cpu_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
+ "cpu", 1, NULL, 0, 0, NULL, 0,
+ edac_dev_idx);
+ if (!edac_dev) {
+ devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = edac_dev->pvt_info;
+ pdata->name = "mv64x60_cpu_err";
+ pdata->irq = NO_IRQ;
+ edac_dev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, edac_dev);
+ edac_dev->dev_name = pdev->dev.bus_id;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "CPU err regs\n", __func__);
+ res = -ENOENT;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev,
+ r->start,
+ r->end - r->start + 1);
+ if (!pdata->cpu_vbase[0]) {
+ printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "CPU err regs\n", __func__);
+ res = -ENOENT;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev,
+ r->start,
+ r->end - r->start + 1);
+ if (!pdata->cpu_vbase[1]) {
+ printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ /* setup CPU err registers */
+ out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
+ out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0);
+ out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0x000000ff);
+
+ edac_dev->mod_name = EDAC_MOD_STR;
+ edac_dev->ctl_name = pdata->name;
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ edac_dev->edac_check = mv64x60_cpu_check;
+
+ pdata->edac_idx = edac_dev_idx++;
+
+ if (edac_device_add_device(edac_dev) > 0) {
+ debugf3("%s(): failed edac_device_add_device()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev,
+ pdata->irq,
+ mv64x60_cpu_isr,
+ IRQF_DISABLED,
+ "[EDAC] CPU err",
+ edac_dev);
+ if (res < 0) {
+ printk(KERN_ERR
+ "%s: Unable to request irq %d for MV64x60 "
+ "CPU ERR\n", __func__, pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR
+ " acquired irq %d for CPU Err\n", pdata->irq);
+ }
+
+ devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe);
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+
+ return 0;
+
+err2:
+ edac_device_del_device(&pdev->dev);
+err:
+ devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
+ edac_device_free_ctl_info(edac_dev);
+ return res;
+}
+
+static int mv64x60_cpu_err_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
+
+ debugf0("%s()\n", __func__);
+
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(edac_dev);
+ return 0;
+}
+
+static struct platform_driver mv64x60_cpu_err_driver = {
+ .probe = mv64x60_cpu_err_probe,
+ .remove = mv64x60_cpu_err_remove,
+ .driver = {
+ .name = "mv64x60_cpu_err",
+ }
+};
+
+/*********************** DRAM err device **********************************/
+
+static void mv64x60_mc_check(struct mem_ctl_info *mci)
+{
+ struct mv64x60_mc_pdata *pdata = mci->pvt_info;
+ u32 reg;
+ u32 err_addr;
+ u32 sdram_ecc;
+ u32 comp_ecc;
+ u32 syndrome;
+
+ reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
+ if (!reg)
+ return;
+
+ err_addr = reg & ~0x3;
+ sdram_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD);
+ comp_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC);
+ syndrome = sdram_ecc ^ comp_ecc;
+
+ /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
+ if (!(reg & 0x1))
+ edac_mc_handle_ce(mci, err_addr >> PAGE_SHIFT,
+ err_addr & PAGE_MASK, syndrome, 0, 0,
+ mci->ctl_name);
+ else /* 2 bit error, UE */
+ edac_mc_handle_ue(mci, err_addr >> PAGE_SHIFT,
+ err_addr & PAGE_MASK, 0, mci->ctl_name);
+
+ /* clear the error */
+ out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
+}
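One more illustration-only sketch: how mv64x60_mc_check() above derives the error address and syndrome and picks the correctable or uncorrectable path from bit 0 of the capture register; all values are invented:

    #include <stdio.h>

    int main(void)
    {
        unsigned int err_addr_reg = 0x01f00483; /* invented, bit 0 set: UE */
        unsigned int sdram_ecc = 0x5a;          /* ECC read back from DRAM */
        unsigned int comp_ecc = 0x58;           /* ECC recomputed from data */

        unsigned int err_addr = err_addr_reg & ~0x3;
        unsigned int syndrome = sdram_ecc ^ comp_ecc;

        printf("error address %#x, syndrome %#x\n", err_addr, syndrome);
        if (!(err_addr_reg & 0x1))
            printf("single-bit error, corrected by hardware\n");
        else
            printf("double-bit error, uncorrectable\n");
        return 0;
    }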
+
+static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id)
+{
+ struct mem_ctl_info *mci = dev_id;
+ struct mv64x60_mc_pdata *pdata = mci->pvt_info;
+ u32 reg;
+
+ reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
+ if (!reg)
+ return IRQ_NONE;
+
+ /* writing 0's to the ECC err addr in check function clears irq */
+ mv64x60_mc_check(mci);
+
+ return IRQ_HANDLED;
+}
+
+static void get_total_mem(struct mv64x60_mc_pdata *pdata)
+{
+ struct device_node *np = NULL;
+ const unsigned int *reg;
+
+ np = of_find_node_by_type(NULL, "memory");
+ if (!np)
+ return;
+
+ reg = get_property(np, "reg", NULL);
+
+ pdata->total_mem = reg[1];
+}
+
+static void mv64x60_init_csrows(struct mem_ctl_info *mci,
+ struct mv64x60_mc_pdata *pdata)
+{
+ struct csrow_info *csrow;
+ u32 devtype;
+ u32 ctl;
+
+ get_total_mem(pdata);
+
+ ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
+
+ csrow = &mci->csrows[0];
+ csrow->first_page = 0;
+ csrow->nr_pages = pdata->total_mem >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ csrow->grain = 8;
+
+ csrow->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
+
+ devtype = (ctl >> 20) & 0x3;
+ switch (devtype) {
+ case 0x0:
+ csrow->dtype = DEV_X32;
+ break;
+ case 0x2: /* could be X8 too, but no way to tell */
+ csrow->dtype = DEV_X16;
+ break;
+ case 0x3:
+ csrow->dtype = DEV_X4;
+ break;
+ default:
+ csrow->dtype = DEV_UNKNOWN;
+ break;
+ }
+
+ csrow->edac_mode = EDAC_SECDED;
+}
+
+static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct mv64x60_mc_pdata *pdata;
+ struct resource *r;
+ u32 ctl;
+ int res = 0;
+
+ if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ mci = edac_mc_alloc(sizeof(struct mv64x60_mc_pdata), 1, 1, edac_mc_idx);
+ if (!mci) {
+ printk(KERN_ERR "%s: No memory for MC err\n", __func__);
+ devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = mci->pvt_info;
+ mci->dev = &pdev->dev;
+ platform_set_drvdata(pdev, mci);
+ pdata->name = "mv64x60_mc_err";
+ pdata->irq = NO_IRQ;
+ mci->dev_name = pdev->dev.bus_id;
+ pdata->edac_idx = edac_mc_idx++;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "MC err regs\n", __func__);
+ res = -ENOENT;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->mc_vbase = devm_ioremap(&pdev->dev,
+ r->start,
+ r->end - r->start + 1);
+ if (!pdata->mc_vbase) {
+ printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
+ if (!(ctl & MV64X60_SDRAM_ECC)) {
+ /* Non-ECC RAM? */
+ printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
+ res = -ENODEV;
+ goto err;
+ }
+
+ debugf3("%s(): init mci\n", __func__);
+ mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = MV64x60_REVISION;
+ mci->ctl_name = mv64x60_ctl_name;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ mci->edac_check = mv64x60_mc_check;
+
+ mci->ctl_page_to_phys = NULL;
+
+ mci->scrub_mode = SCRUB_SW_SRC;
+
+ mv64x60_init_csrows(mci, pdata);
+
+ /* setup MC registers */
+ out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
+ ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
+ ctl = (ctl & 0xff00ffff) | 0x10000;
+ out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ /* acquire interrupt that reports errors */
+ pdata->irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev,
+ pdata->irq,
+ mv64x60_mc_isr,
+ IRQF_DISABLED,
+ "[EDAC] MC err",
+ mci);
+ if (res < 0) {
+ printk(KERN_ERR "%s: Unable to request irq %d for "
+ "MV64x60 DRAM ERR\n", __func__, pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC Err\n",
+ pdata->irq);
+ }
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+
+ return 0;
+
+err2:
+ edac_mc_del_mc(&pdev->dev);
+err:
+ devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
+ edac_mc_free(mci);
+ return res;
+}
+
+static int mv64x60_mc_err_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ debugf0("%s()\n", __func__);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+ return 0;
+}
+
+static struct platform_driver mv64x60_mc_err_driver = {
+ .probe = mv64x60_mc_err_probe,
+ .remove = mv64x60_mc_err_remove,
+ .driver = {
+ .name = "mv64x60_mc_err",
+ }
+};
+
+static int __init mv64x60_edac_init(void)
+{
+ int ret = 0;
+
+ printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n");
+ printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n");
+ /* make sure error reporting method is sane */
+ switch (edac_op_state) {
+ case EDAC_OPSTATE_POLL:
+ case EDAC_OPSTATE_INT:
+ break;
+ default:
+ edac_op_state = EDAC_OPSTATE_INT;
+ break;
+ }
+
+ ret = platform_driver_register(&mv64x60_mc_err_driver);
+ if (ret)
+ printk(KERN_WARNING EDAC_MOD_STR "MC err failed to register\n");
+
+ ret = platform_driver_register(&mv64x60_cpu_err_driver);
+ if (ret)
+ printk(KERN_WARNING EDAC_MOD_STR
+ "CPU err failed to register\n");
+
+ ret = platform_driver_register(&mv64x60_sram_err_driver);
+ if (ret)
+ printk(KERN_WARNING EDAC_MOD_STR
+ "SRAM err failed to register\n");
+
+#ifdef CONFIG_PCI
+ ret = platform_driver_register(&mv64x60_pci_err_driver);
+ if (ret)
+ printk(KERN_WARNING EDAC_MOD_STR
+ "PCI err failed to register\n");
+#endif
+
+ return ret;
+}
+module_init(mv64x60_edac_init);
+
+static void __exit mv64x60_edac_exit(void)
+{
+#ifdef CONFIG_PCI
+ platform_driver_unregister(&mv64x60_pci_err_driver);
+#endif
+ platform_driver_unregister(&mv64x60_sram_err_driver);
+ platform_driver_unregister(&mv64x60_cpu_err_driver);
+ platform_driver_unregister(&mv64x60_mc_err_driver);
+}
+module_exit(mv64x60_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Montavista Software, Inc.");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state,
+ "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
diff --git a/drivers/edac/mv64x60_edac.h b/drivers/edac/mv64x60_edac.h
new file mode 100644
index 000000000000..e042e2daa8f4
--- /dev/null
+++ b/drivers/edac/mv64x60_edac.h
@@ -0,0 +1,114 @@
+/*
+ * EDAC defs for Marvell MV64x60 bridge chip
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#ifndef _MV64X60_EDAC_H_
+#define _MV64X60_EDAC_H_
+
+#define MV64x60_REVISION " Ver: 2.0.0 " __DATE__
+#define EDAC_MOD_STR "MV64x60_edac"
+
+#define mv64x60_printk(level, fmt, arg...) \
+ edac_printk(level, "MV64x60", fmt, ##arg)
+
+#define mv64x60_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "MV64x60", fmt, ##arg)
+
+/* CPU Error Report Registers */
+#define MV64x60_CPU_ERR_ADDR_LO 0x00 /* 0x0070 */
+#define MV64x60_CPU_ERR_ADDR_HI 0x08 /* 0x0078 */
+#define MV64x60_CPU_ERR_DATA_LO 0x00 /* 0x0128 */
+#define MV64x60_CPU_ERR_DATA_HI 0x08 /* 0x0130 */
+#define MV64x60_CPU_ERR_PARITY 0x10 /* 0x0138 */
+#define MV64x60_CPU_ERR_CAUSE 0x18 /* 0x0140 */
+#define MV64x60_CPU_ERR_MASK 0x20 /* 0x0148 */
+
+#define MV64x60_CPU_CAUSE_MASK 0x07ffffff
+
+/* SRAM Error Report Registers */
+#define MV64X60_SRAM_ERR_CAUSE 0x08 /* 0x0388 */
+#define MV64X60_SRAM_ERR_ADDR_LO 0x10 /* 0x0390 */
+#define MV64X60_SRAM_ERR_ADDR_HI 0x78 /* 0x03f8 */
+#define MV64X60_SRAM_ERR_DATA_LO 0x18 /* 0x0398 */
+#define MV64X60_SRAM_ERR_DATA_HI 0x20 /* 0x03a0 */
+#define MV64X60_SRAM_ERR_PARITY 0x28 /* 0x03a8 */
+
+/* SDRAM Controller Registers */
+#define MV64X60_SDRAM_CONFIG 0x00 /* 0x1400 */
+#define MV64X60_SDRAM_ERR_DATA_HI 0x40 /* 0x1440 */
+#define MV64X60_SDRAM_ERR_DATA_LO 0x44 /* 0x1444 */
+#define MV64X60_SDRAM_ERR_ECC_RCVD 0x48 /* 0x1448 */
+#define MV64X60_SDRAM_ERR_ECC_CALC 0x4c /* 0x144c */
+#define MV64X60_SDRAM_ERR_ADDR 0x50 /* 0x1450 */
+#define MV64X60_SDRAM_ERR_ECC_CNTL 0x54 /* 0x1454 */
+#define MV64X60_SDRAM_ERR_ECC_ERR_CNT 0x58 /* 0x1458 */
+
+#define MV64X60_SDRAM_REGISTERED 0x20000
+#define MV64X60_SDRAM_ECC 0x40000
+
+#ifdef CONFIG_PCI
+/*
+ * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of
+ * errata FEr-#11 and FEr-##16 for the 64460, it should be 0 on that chip as
+ * well. IOW, don't set bit 0.
+ */
+#define MV64X60_PCIx_ERR_MASK_VAL 0x00a50c24
+
+/* Register offsets from PCIx error address low register */
+#define MV64X60_PCI_ERROR_ADDR_LO 0x00
+#define MV64X60_PCI_ERROR_ADDR_HI 0x04
+#define MV64X60_PCI_ERROR_ATTR 0x08
+#define MV64X60_PCI_ERROR_CMD 0x10
+#define MV64X60_PCI_ERROR_CAUSE 0x18
+#define MV64X60_PCI_ERROR_MASK 0x1c
+
+#define MV64X60_PCI_ERR_SWrPerr 0x0002
+#define MV64X60_PCI_ERR_SRdPerr 0x0004
+#define MV64X60_PCI_ERR_MWrPerr 0x0020
+#define MV64X60_PCI_ERR_MRdPerr 0x0040
+
+#define MV64X60_PCI_PE_MASK (MV64X60_PCI_ERR_SWrPerr | \
+ MV64X60_PCI_ERR_SRdPerr | \
+ MV64X60_PCI_ERR_MWrPerr | \
+ MV64X60_PCI_ERR_MRdPerr)
+
+struct mv64x60_pci_pdata {
+ int pci_hose;
+ void __iomem *pci_vbase;
+ char *name;
+ int irq;
+ int edac_idx;
+};
+
+#endif /* CONFIG_PCI */
+
+struct mv64x60_mc_pdata {
+ void __iomem *mc_vbase;
+ int total_mem;
+ char *name;
+ int irq;
+ int edac_idx;
+};
+
+struct mv64x60_cpu_pdata {
+ void __iomem *cpu_vbase[2];
+ char *name;
+ int irq;
+ int edac_idx;
+};
+
+struct mv64x60_sram_pdata {
+ void __iomem *sram_vbase;
+ char *name;
+ int irq;
+ int edac_idx;
+};
+
+#endif
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 18cdcb3ae1ca..1636806ec55e 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -658,4 +658,5 @@ MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR("Dell Inc.");
MODULE_LICENSE("GPL");
-
+/* Any System or BIOS claiming to be by Dell */
+MODULE_ALIAS("dmi:*:[bs]vnD[Ee][Ll][Ll]*:*");
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index 313c99cbdc62..e880d6c8d896 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/device.h>
-#include <linux/autoconf.h>
struct dmi_device_attribute{
struct device_attribute dev_attr;
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 9008ed5ef4ce..e0bade732376 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -489,12 +489,3 @@ int dmi_get_year(int field)
return year;
}
-
-/**
- * dmi_get_slot - return dmi_ident[slot]
- * @slot: index into dmi_ident[]
- */
-char *dmi_get_slot(int slot)
-{
- return(dmi_ident[slot]);
-}
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index d168223db159..744011989044 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -11,7 +11,7 @@
*
* This code takes information provided by BIOS EDD calls
* fn41 - Check Extensions Present and
- * fn48 - Get Device Parametes with EDD extensions
+ * fn48 - Get Device Parameters with EDD extensions
* made in setup.S, copied to safe structures in setup.c,
* and presents it in sysfs.
*
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
new file mode 100644
index 000000000000..bbd28342e771
--- /dev/null
+++ b/drivers/gpio/Kconfig
@@ -0,0 +1,73 @@
+#
+# GPIO infrastructure and expanders
+#
+
+config HAVE_GPIO_LIB
+ bool
+ help
+ Platforms select gpiolib if they use this infrastructure
+ for all their GPIOs, usually starting with ones integrated
+ into SOC processors.
+
+menu "GPIO Support"
+ depends on HAVE_GPIO_LIB
+
+config DEBUG_GPIO
+ bool "Debug GPIO calls"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here to add some extra checks and diagnostics to GPIO calls.
+ The checks help ensure that GPIOs have been properly initialized
+	  before they are used and that sleeping calls are not made from
+ nonsleeping contexts. They can make bitbanged serial protocols
+ slower. The diagnostics help catch the type of setup errors
+ that are most common when setting up new platforms or boards.
+
+# put expanders in the right section, in alphabetical order
+
+comment "I2C GPIO expanders:"
+
+config GPIO_PCA953X
+ tristate "PCA953x I/O ports"
+ depends on I2C
+ help
+ Say yes here to support the PCA9534 (8-bit), PCA9535 (16-bit),
+ PCA9536 (4-bit), PCA9537 (4-bit), PCA9538 (8-bit), and PCA9539
+ (16-bit) I/O ports. These parts are made by NXP and TI.
+
+ This driver can also be built as a module. If so, the module
+ will be called pca953x.
+
+config GPIO_PCF857X
+ tristate "PCF857x, PCA857x, and PCA967x I2C GPIO expanders"
+ depends on I2C
+ help
+ Say yes here to provide access to most "quasi-bidirectional" I2C
+ GPIO expanders used for additional digital outputs or inputs.
+ Most of these parts are from NXP, though TI is a second source for
+ some of them. Compatible models include:
+
+ 8 bits: pcf8574, pcf8574a, pca8574, pca8574a,
+ pca9670, pca9672, pca9674, pca9674a
+
+ 16 bits: pcf8575, pcf8575c, pca8575,
+ pca9671, pca9673, pca9675
+
+ Your board setup code will need to declare the expanders in
+ use, and assign numbers to the GPIOs they expose. Those GPIOs
+ can then be used from drivers and other kernel code, just like
+ other GPIOs, but only accessible from task contexts.
+
+ This driver provides an in-kernel interface to those GPIOs using
+ platform-neutral GPIO calls.
+
+comment "SPI GPIO expanders:"
+
+config GPIO_MCP23S08
+ tristate "Microchip MCP23S08 I/O expander"
+ depends on SPI_MASTER
+ help
+ SPI driver for Microchip MCP23S08 I/O expander. This provides
+ a GPIO interface supporting inputs and outputs.
+
+endmenu
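
The GPIO_PCF857X help text above says board setup code must declare the expanders in use and assign numbers to their GPIOs. Purely as an illustrative sketch (not part of this patch), a board file might do that roughly as follows; the bus number, address, gpio_base and n_latch values are invented, the platform-data fields are the ones consumed by drivers/gpio/pcf857x.c later in this series, and the driver_name/type split is assumed to be how new-style I2C binding is described in this kernel generation, before i2c_device_id exists.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/i2c/pcf857x.h>

/* hypothetical board fragment: one pcf8574 at 0x20 on I2C bus 0 */
static struct pcf857x_platform_data board_pcf8574 = {
	.gpio_base	= 128,	/* expander pins become GPIOs 128..135 */
	.n_latch	= 0,	/* keep the all-ones reset latch */
};

static struct i2c_board_info board_expanders[] __initdata = {
	{
		I2C_BOARD_INFO("pcf8574", 0x20),
		.driver_name	= "pcf857x",	/* assumed binding field */
		.platform_data	= &board_pcf8574,
	},
};

/* called from the machine's early init, before the I2C adapter registers */
static void __init board_declare_expanders(void)
{
	i2c_register_board_info(0, board_expanders,
				ARRAY_SIZE(board_expanders));
}
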
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
new file mode 100644
index 000000000000..fdde9923cf33
--- /dev/null
+++ b/drivers/gpio/Makefile
@@ -0,0 +1,9 @@
+# gpio support: dedicated expander chips, etc
+
+ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
+
+obj-$(CONFIG_HAVE_GPIO_LIB) += gpiolib.o
+
+obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
+obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
+obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
new file mode 100644
index 000000000000..d8db2f8ee411
--- /dev/null
+++ b/drivers/gpio/gpiolib.c
@@ -0,0 +1,567 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+
+#include <asm/gpio.h>
+
+
+/* Optional implementation infrastructure for GPIO interfaces.
+ *
+ * Platforms may want to use this if they tend to use very many GPIOs
+ * that aren't part of a System-On-Chip core; or across I2C/SPI/etc.
+ *
+ * When kernel footprint or instruction count is an issue, simpler
+ * implementations may be preferred. The GPIO programming interface
+ * allows for inlining speed-critical get/set operations for common
+ * cases, so that access to SOC-integrated GPIOs can sometimes cost
+ * only an instruction or two per bit.
+ */
+
+
+/* When debugging, extend minimal trust to callers and platform code.
+ * Also emit diagnostic messages that may help initial bringup, when
+ * board setup or driver bugs are most common.
+ *
+ * Otherwise, minimize overhead in what may be bitbanging codepaths.
+ */
+#ifdef DEBUG
+#define extra_checks 1
+#else
+#define extra_checks 0
+#endif
+
+/* gpio_lock prevents conflicts during gpio_desc[] table updates.
+ * While any GPIO is requested, its gpio_chip is not removable;
+ * each GPIO's "requested" flag serves as a lock and refcount.
+ */
+static DEFINE_SPINLOCK(gpio_lock);
+
+struct gpio_desc {
+ struct gpio_chip *chip;
+ unsigned long flags;
+/* flag symbols are bit numbers */
+#define FLAG_REQUESTED 0
+#define FLAG_IS_OUT 1
+
+#ifdef CONFIG_DEBUG_FS
+ const char *label;
+#endif
+};
+static struct gpio_desc gpio_desc[ARCH_NR_GPIOS];
+
+static inline void desc_set_label(struct gpio_desc *d, const char *label)
+{
+#ifdef CONFIG_DEBUG_FS
+ d->label = label;
+#endif
+}
+
+/* Warn when drivers omit gpio_request() calls -- legal but ill-advised
+ * when setting direction, and otherwise illegal. Until board setup code
+ * and drivers use explicit requests everywhere (which won't happen when
+ * those calls have no teeth) we can't avoid autorequesting. This nag
+ * message should motivate switching to explicit requests...
+ */
+static void gpio_ensure_requested(struct gpio_desc *desc)
+{
+ if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
+ pr_warning("GPIO-%d autorequested\n", (int)(desc - gpio_desc));
+ desc_set_label(desc, "[auto]");
+ }
+}
+
+/* caller holds gpio_lock *OR* gpio is marked as requested */
+static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
+{
+ return gpio_desc[gpio].chip;
+}
+
+/**
+ * gpiochip_add() - register a gpio_chip
+ * @chip: the chip to register, with chip->base initialized
+ * Context: potentially before irqs or kmalloc will work
+ *
+ * Returns a negative errno if the chip can't be registered, such as
+ * because the chip->base is invalid or already associated with a
+ * different chip. Otherwise it returns zero as a success code.
+ */
+int gpiochip_add(struct gpio_chip *chip)
+{
+ unsigned long flags;
+ int status = 0;
+ unsigned id;
+
+ /* NOTE chip->base negative is reserved to mean a request for
+ * dynamic allocation. We don't currently support that.
+ */
+
+ if (chip->base < 0 || (chip->base + chip->ngpio) >= ARCH_NR_GPIOS) {
+ status = -EINVAL;
+ goto fail;
+ }
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ /* these GPIO numbers must not be managed by another gpio_chip */
+ for (id = chip->base; id < chip->base + chip->ngpio; id++) {
+ if (gpio_desc[id].chip != NULL) {
+ status = -EBUSY;
+ break;
+ }
+ }
+ if (status == 0) {
+ for (id = chip->base; id < chip->base + chip->ngpio; id++) {
+ gpio_desc[id].chip = chip;
+ gpio_desc[id].flags = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+fail:
+ /* failures here can mean systems won't boot... */
+ if (status)
+ pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n",
+ chip->base, chip->base + chip->ngpio,
+ chip->label ? : "generic");
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpiochip_add);
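
For orientation, a minimal sketch of the registration gpiochip_add() expects, using a hypothetical 32-pin memory-mapped bank with stubbed accessors; only the fields this file reads (the four methods plus label, base, ngpio and can_sleep) are filled in, and nothing here comes from a real platform.

#include <linux/init.h>
#include <asm/gpio.h>

/* hypothetical SOC bank; the stubs stand in for real register I/O */
static int soc_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
{
	return 0;		/* would program the direction register */
}

static int soc_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	return 0;		/* would read and mask the pin-level register */
}

static int soc_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
{
	return 0;		/* would set the level, then the direction */
}

static void soc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	/* would write the set/clear register */
}

static struct gpio_chip soc_bank0 = {
	.label			= "soc-bank0",
	.direction_input	= soc_gpio_dir_in,
	.get			= soc_gpio_get,
	.direction_output	= soc_gpio_dir_out,
	.set			= soc_gpio_set,
	.base			= 0,		/* GPIOs 0..31 */
	.ngpio			= 32,
	.can_sleep		= 0,		/* plain register I/O */
};

/* registered before anything can request these GPIOs */
static int __init soc_bank0_register(void)
{
	return gpiochip_add(&soc_bank0);
}
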
+
+/**
+ * gpiochip_remove() - unregister a gpio_chip
+ * @chip: the chip to unregister
+ *
+ * A gpio_chip with any GPIOs still requested may not be removed.
+ */
+int gpiochip_remove(struct gpio_chip *chip)
+{
+ unsigned long flags;
+ int status = 0;
+ unsigned id;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ for (id = chip->base; id < chip->base + chip->ngpio; id++) {
+ if (test_bit(FLAG_REQUESTED, &gpio_desc[id].flags)) {
+ status = -EBUSY;
+ break;
+ }
+ }
+ if (status == 0) {
+ for (id = chip->base; id < chip->base + chip->ngpio; id++)
+ gpio_desc[id].chip = NULL;
+ }
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpiochip_remove);
+
+
+/* These "optional" allocation calls help prevent drivers from stomping
+ * on each other, and help provide better diagnostics in debugfs.
+ * They're called even less than the "set direction" calls.
+ */
+int gpio_request(unsigned gpio, const char *label)
+{
+ struct gpio_desc *desc;
+ int status = -EINVAL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ if (gpio >= ARCH_NR_GPIOS)
+ goto done;
+ desc = &gpio_desc[gpio];
+ if (desc->chip == NULL)
+ goto done;
+
+ /* NOTE: gpio_request() can be called in early boot,
+ * before IRQs are enabled.
+ */
+
+ if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
+ desc_set_label(desc, label ? : "?");
+ status = 0;
+ } else
+ status = -EBUSY;
+
+done:
+ if (status)
+ pr_debug("gpio_request: gpio-%d (%s) status %d\n",
+ gpio, label ? : "?", status);
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpio_request);
+
+void gpio_free(unsigned gpio)
+{
+ unsigned long flags;
+ struct gpio_desc *desc;
+
+ if (gpio >= ARCH_NR_GPIOS) {
+ WARN_ON(extra_checks);
+ return;
+ }
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ desc = &gpio_desc[gpio];
+ if (desc->chip && test_and_clear_bit(FLAG_REQUESTED, &desc->flags))
+ desc_set_label(desc, NULL);
+ else
+ WARN_ON(extra_checks);
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gpio_free);
+
+
+/**
+ * gpiochip_is_requested - return string iff signal was requested
+ * @chip: controller managing the signal
+ * @offset: of signal within controller's 0..(ngpio - 1) range
+ *
+ * Returns NULL if the GPIO is not currently requested, else a string.
+ * If debugfs support is enabled, the string returned is the label passed
+ * to gpio_request(); otherwise it is a meaningless constant.
+ *
+ * This function is for use by GPIO controller drivers. The label can
+ * help with diagnostics, and knowing that the signal is used as a GPIO
+ * can help avoid accidentally multiplexing it to another controller.
+ */
+const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned gpio = chip->base + offset;
+
+ if (gpio >= ARCH_NR_GPIOS || gpio_desc[gpio].chip != chip)
+ return NULL;
+ if (test_bit(FLAG_REQUESTED, &gpio_desc[gpio].flags) == 0)
+ return NULL;
+#ifdef CONFIG_DEBUG_FS
+ return gpio_desc[gpio].label;
+#else
+ return "?";
+#endif
+}
+EXPORT_SYMBOL_GPL(gpiochip_is_requested);
+
+
+/* Drivers MUST set GPIO direction before making get/set calls. In
+ * some cases this is done in early boot, before IRQs are enabled.
+ *
+ * As a rule these aren't called more than once (except for drivers
+ * using the open-drain emulation idiom) so these are natural places
+ * to accumulate extra debugging checks. Note that we can't (yet)
+ * rely on gpio_request() having been called beforehand.
+ */
+
+int gpio_direction_input(unsigned gpio)
+{
+ unsigned long flags;
+ struct gpio_chip *chip;
+ struct gpio_desc *desc = &gpio_desc[gpio];
+ int status = -EINVAL;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ if (gpio >= ARCH_NR_GPIOS)
+ goto fail;
+ chip = desc->chip;
+ if (!chip || !chip->get || !chip->direction_input)
+ goto fail;
+ gpio -= chip->base;
+ if (gpio >= chip->ngpio)
+ goto fail;
+ gpio_ensure_requested(desc);
+
+ /* now we know the gpio is valid and chip won't vanish */
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ might_sleep_if(extra_checks && chip->can_sleep);
+
+ status = chip->direction_input(chip, gpio);
+ if (status == 0)
+ clear_bit(FLAG_IS_OUT, &desc->flags);
+ return status;
+fail:
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ if (status)
+ pr_debug("%s: gpio-%d status %d\n",
+ __FUNCTION__, gpio, status);
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpio_direction_input);
+
+int gpio_direction_output(unsigned gpio, int value)
+{
+ unsigned long flags;
+ struct gpio_chip *chip;
+ struct gpio_desc *desc = &gpio_desc[gpio];
+ int status = -EINVAL;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ if (gpio >= ARCH_NR_GPIOS)
+ goto fail;
+ chip = desc->chip;
+ if (!chip || !chip->set || !chip->direction_output)
+ goto fail;
+ gpio -= chip->base;
+ if (gpio >= chip->ngpio)
+ goto fail;
+ gpio_ensure_requested(desc);
+
+ /* now we know the gpio is valid and chip won't vanish */
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ might_sleep_if(extra_checks && chip->can_sleep);
+
+ status = chip->direction_output(chip, gpio, value);
+ if (status == 0)
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ return status;
+fail:
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ if (status)
+ pr_debug("%s: gpio-%d status %d\n",
+ __FUNCTION__, gpio, status);
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpio_direction_output);
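
Putting the rules above together (request the GPIO, then set its direction before any value access), a hedged consumer-side sketch; GPIO 42 and the label are examples, and gpio_set_value() is assumed to be mapped to the gpiolib implementation by the platform's <asm/gpio.h>, as gpiolib platforms do.

#include <asm/gpio.h>

#define DEMO_LED_GPIO	42	/* illustrative GPIO number */

static int demo_led_setup(void)
{
	int status;

	status = gpio_request(DEMO_LED_GPIO, "demo-led");
	if (status < 0)
		return status;

	/* direction must be set before value calls; start driven low */
	status = gpio_direction_output(DEMO_LED_GPIO, 0);
	if (status < 0) {
		gpio_free(DEMO_LED_GPIO);
		return status;
	}

	gpio_set_value(DEMO_LED_GPIO, 1);	/* now safe to toggle */
	return 0;
}

static void demo_led_release(void)
{
	gpio_set_value(DEMO_LED_GPIO, 0);
	gpio_free(DEMO_LED_GPIO);
}
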
+
+
+/* I/O calls are only valid after configuration completed; the relevant
+ * "is this a valid GPIO" error checks should already have been done.
+ *
+ * "Get" operations are often inlinable as reading a pin value register,
+ * and masking the relevant bit in that register.
+ *
+ * When "set" operations are inlinable, they involve writing that mask to
+ * one register to set a low value, or a different register to set it high.
+ * Otherwise locking is needed, so there may be little value to inlining.
+ *
+ *------------------------------------------------------------------------
+ *
+ * IMPORTANT!!! The hot paths -- get/set value -- assume that callers
+ * have requested the GPIO. That can include implicit requesting by
+ * a direction setting call. Marking a gpio as requested locks its chip
+ * in memory, guaranteeing that these table lookups need no more locking
+ * and that gpiochip_remove() will fail.
+ *
+ * REVISIT when debugging, consider adding some instrumentation to ensure
+ * that the GPIO was actually requested.
+ */
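
As a purely illustrative example of the inlining mentioned above, a platform's <asm/gpio.h> might fold a constant GPIO number on the SOC bank into a single register read and mask, falling back to the gpiolib call otherwise; the register names and base address below are invented.

/* hypothetical fragment of a platform's <asm/gpio.h> */
#define SOC_GPIO_VIRT_BASE	((void __iomem *)0xfe100000)	/* illustrative */
#define SOC_GPIO_PIN_LEVEL	0x08				/* illustrative */

static inline int gpio_get_value(unsigned gpio)
{
	if (__builtin_constant_p(gpio) && gpio < 32)
		return __raw_readl(SOC_GPIO_VIRT_BASE + SOC_GPIO_PIN_LEVEL) &
			(1 << gpio);
	return __gpio_get_value(gpio);	/* expanders, or non-constant pins */
}
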
+
+/**
+ * __gpio_get_value() - return a gpio's value
+ * @gpio: gpio whose value will be returned
+ * Context: any
+ *
+ * This is used directly or indirectly to implement gpio_get_value().
+ * It returns the zero or nonzero value provided by the associated
+ * gpio_chip.get() method; or zero if no such method is provided.
+ */
+int __gpio_get_value(unsigned gpio)
+{
+ struct gpio_chip *chip;
+
+ chip = gpio_to_chip(gpio);
+ WARN_ON(extra_checks && chip->can_sleep);
+ return chip->get ? chip->get(chip, gpio - chip->base) : 0;
+}
+EXPORT_SYMBOL_GPL(__gpio_get_value);
+
+/**
+ * __gpio_set_value() - assign a gpio's value
+ * @gpio: gpio whose value will be assigned
+ * @value: value to assign
+ * Context: any
+ *
+ * This is used directly or indirectly to implement gpio_set_value().
+ * It invokes the associated gpio_chip.set() method.
+ */
+void __gpio_set_value(unsigned gpio, int value)
+{
+ struct gpio_chip *chip;
+
+ chip = gpio_to_chip(gpio);
+ WARN_ON(extra_checks && chip->can_sleep);
+ chip->set(chip, gpio - chip->base, value);
+}
+EXPORT_SYMBOL_GPL(__gpio_set_value);
+
+/**
+ * __gpio_cansleep() - report whether gpio value access will sleep
+ * @gpio: gpio in question
+ * Context: any
+ *
+ * This is used directly or indirectly to implement gpio_cansleep(). It
+ * returns nonzero if access reading or writing the GPIO value can sleep.
+ */
+int __gpio_cansleep(unsigned gpio)
+{
+ struct gpio_chip *chip;
+
+ /* only call this on GPIOs that are valid! */
+ chip = gpio_to_chip(gpio);
+
+ return chip->can_sleep;
+}
+EXPORT_SYMBOL_GPL(__gpio_cansleep);
+
+
+
+/* There's no value in making it easy to inline GPIO calls that may sleep.
+ * Common examples include ones connected to I2C or SPI chips.
+ */
+
+int gpio_get_value_cansleep(unsigned gpio)
+{
+ struct gpio_chip *chip;
+
+ might_sleep_if(extra_checks);
+ chip = gpio_to_chip(gpio);
+ return chip->get(chip, gpio - chip->base);
+}
+EXPORT_SYMBOL_GPL(gpio_get_value_cansleep);
+
+void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+ struct gpio_chip *chip;
+
+ might_sleep_if(extra_checks);
+ chip = gpio_to_chip(gpio);
+ chip->set(chip, gpio - chip->base, value);
+}
+EXPORT_SYMBOL_GPL(gpio_set_value_cansleep);
+
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+
+static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ unsigned i;
+ unsigned gpio = chip->base;
+ struct gpio_desc *gdesc = &gpio_desc[gpio];
+ int is_out;
+
+ for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) {
+ if (!test_bit(FLAG_REQUESTED, &gdesc->flags))
+ continue;
+
+ is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
+ seq_printf(s, " gpio-%-3d (%-12s) %s %s",
+ gpio, gdesc->label,
+ is_out ? "out" : "in ",
+ chip->get
+ ? (chip->get(chip, i) ? "hi" : "lo")
+ : "? ");
+
+ if (!is_out) {
+ int irq = gpio_to_irq(gpio);
+ struct irq_desc *desc = irq_desc + irq;
+
+ /* This races with request_irq(), set_irq_type(),
+ * and set_irq_wake() ... but those are "rare".
+ *
+ * More significantly, trigger type flags aren't
+ * currently maintained by genirq.
+ */
+ if (irq >= 0 && desc->action) {
+ char *trigger;
+
+ switch (desc->status & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_NONE:
+ trigger = "(default)";
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ trigger = "edge-falling";
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ trigger = "edge-rising";
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ trigger = "edge-both";
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ trigger = "level-high";
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ trigger = "level-low";
+ break;
+ default:
+ trigger = "?trigger?";
+ break;
+ }
+
+ seq_printf(s, " irq-%d %s%s",
+ irq, trigger,
+ (desc->status & IRQ_WAKEUP)
+ ? " wakeup" : "");
+ }
+ }
+
+ seq_printf(s, "\n");
+ }
+}
+
+static int gpiolib_show(struct seq_file *s, void *unused)
+{
+ struct gpio_chip *chip = NULL;
+ unsigned gpio;
+ int started = 0;
+
+ /* REVISIT this isn't locked against gpio_chip removal ... */
+
+ for (gpio = 0; gpio < ARCH_NR_GPIOS; gpio++) {
+ if (chip == gpio_desc[gpio].chip)
+ continue;
+ chip = gpio_desc[gpio].chip;
+ if (!chip)
+ continue;
+
+ seq_printf(s, "%sGPIOs %d-%d, %s%s:\n",
+ started ? "\n" : "",
+ chip->base, chip->base + chip->ngpio - 1,
+ chip->label ? : "generic",
+ chip->can_sleep ? ", can sleep" : "");
+ started = 1;
+ if (chip->dbg_show)
+ chip->dbg_show(s, chip);
+ else
+ gpiolib_dbg_show(s, chip);
+ }
+ return 0;
+}
+
+static int gpiolib_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, gpiolib_show, NULL);
+}
+
+static struct file_operations gpiolib_operations = {
+ .open = gpiolib_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init gpiolib_debugfs_init(void)
+{
+ /* /sys/kernel/debug/gpio */
+ (void) debugfs_create_file("gpio", S_IFREG | S_IRUGO,
+ NULL, NULL, &gpiolib_operations);
+ return 0;
+}
+subsys_initcall(gpiolib_debugfs_init);
+
+#endif /* DEBUG_FS */
diff --git a/drivers/gpio/mcp23s08.c b/drivers/gpio/mcp23s08.c
new file mode 100644
index 000000000000..bb60e8c1a1f0
--- /dev/null
+++ b/drivers/gpio/mcp23s08.c
@@ -0,0 +1,357 @@
+/*
+ * mcp23s08.c - SPI gpio expander driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/mcp23s08.h>
+
+#include <asm/gpio.h>
+
+
+/* Registers are all 8 bits wide.
+ *
+ * The mcp23s17 has twice as many bits, and can be configured to work
+ * with either 16 bit registers or with two adjacent 8 bit banks.
+ *
+ * Also, there are I2C versions of both chips.
+ */
+#define MCP_IODIR 0x00 /* init/reset: all ones */
+#define MCP_IPOL 0x01
+#define MCP_GPINTEN 0x02
+#define MCP_DEFVAL 0x03
+#define MCP_INTCON 0x04
+#define MCP_IOCON 0x05
+# define IOCON_SEQOP (1 << 5)
+# define IOCON_HAEN (1 << 3)
+# define IOCON_ODR (1 << 2)
+# define IOCON_INTPOL (1 << 1)
+#define MCP_GPPU 0x06
+#define MCP_INTF 0x07
+#define MCP_INTCAP 0x08
+#define MCP_GPIO 0x09
+#define MCP_OLAT 0x0a
+
+struct mcp23s08 {
+ struct spi_device *spi;
+ u8 addr;
+
+ /* lock protects the cached values */
+ struct mutex lock;
+ u8 cache[11];
+
+ struct gpio_chip chip;
+
+ struct work_struct work;
+};
+
+static int mcp23s08_read(struct mcp23s08 *mcp, unsigned reg)
+{
+ u8 tx[2], rx[1];
+ int status;
+
+ tx[0] = mcp->addr | 0x01;
+ tx[1] = reg;
+ status = spi_write_then_read(mcp->spi, tx, sizeof tx, rx, sizeof rx);
+ return (status < 0) ? status : rx[0];
+}
+
+static int mcp23s08_write(struct mcp23s08 *mcp, unsigned reg, u8 val)
+{
+ u8 tx[3];
+
+ tx[0] = mcp->addr;
+ tx[1] = reg;
+ tx[2] = val;
+ return spi_write_then_read(mcp->spi, tx, sizeof tx, NULL, 0);
+}
+
+static int
+mcp23s08_read_regs(struct mcp23s08 *mcp, unsigned reg, u8 *vals, unsigned n)
+{
+ u8 tx[2];
+
+ if ((n + reg) > sizeof mcp->cache)
+ return -EINVAL;
+ tx[0] = mcp->addr | 0x01;
+ tx[1] = reg;
+ return spi_write_then_read(mcp->spi, tx, sizeof tx, vals, n);
+}
+
+/*----------------------------------------------------------------------*/
+
+static int mcp23s08_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip);
+ int status;
+
+ mutex_lock(&mcp->lock);
+ mcp->cache[MCP_IODIR] |= (1 << offset);
+ status = mcp23s08_write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]);
+ mutex_unlock(&mcp->lock);
+ return status;
+}
+
+static int mcp23s08_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip);
+ int status;
+
+ mutex_lock(&mcp->lock);
+
+ /* REVISIT reading this clears any IRQ ... */
+ status = mcp23s08_read(mcp, MCP_GPIO);
+ if (status < 0)
+ status = 0;
+ else {
+ mcp->cache[MCP_GPIO] = status;
+ status = !!(status & (1 << offset));
+ }
+ mutex_unlock(&mcp->lock);
+ return status;
+}
+
+static int __mcp23s08_set(struct mcp23s08 *mcp, unsigned mask, int value)
+{
+ u8 olat = mcp->cache[MCP_OLAT];
+
+ if (value)
+ olat |= mask;
+ else
+ olat &= ~mask;
+ mcp->cache[MCP_OLAT] = olat;
+ return mcp23s08_write(mcp, MCP_OLAT, olat);
+}
+
+static void mcp23s08_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip);
+ u8 mask = 1 << offset;
+
+ mutex_lock(&mcp->lock);
+ __mcp23s08_set(mcp, mask, value);
+ mutex_unlock(&mcp->lock);
+}
+
+static int
+mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip);
+ u8 mask = 1 << offset;
+ int status;
+
+ mutex_lock(&mcp->lock);
+ status = __mcp23s08_set(mcp, mask, value);
+ if (status == 0) {
+ mcp->cache[MCP_IODIR] &= ~mask;
+ status = mcp23s08_write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]);
+ }
+ mutex_unlock(&mcp->lock);
+ return status;
+}
+
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+
+/*
+ * This shows more info than the generic gpio dump code:
+ * pullups, deglitching, open drain drive.
+ */
+static void mcp23s08_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ struct mcp23s08 *mcp;
+ char bank;
+	int t;		/* signed: mcp23s08_read_regs() result is checked below */
+ unsigned mask;
+
+ mcp = container_of(chip, struct mcp23s08, chip);
+
+ /* NOTE: we only handle one bank for now ... */
+ bank = '0' + ((mcp->addr >> 1) & 0x3);
+
+ mutex_lock(&mcp->lock);
+ t = mcp23s08_read_regs(mcp, 0, mcp->cache, sizeof mcp->cache);
+ if (t < 0) {
+ seq_printf(s, " I/O ERROR %d\n", t);
+ goto done;
+ }
+
+ for (t = 0, mask = 1; t < 8; t++, mask <<= 1) {
+ const char *label;
+
+ label = gpiochip_is_requested(chip, t);
+ if (!label)
+ continue;
+
+ seq_printf(s, " gpio-%-3d P%c.%d (%-12s) %s %s %s",
+ chip->base + t, bank, t, label,
+ (mcp->cache[MCP_IODIR] & mask) ? "in " : "out",
+ (mcp->cache[MCP_GPIO] & mask) ? "hi" : "lo",
+ (mcp->cache[MCP_GPPU] & mask) ? " " : "up");
+ /* NOTE: ignoring the irq-related registers */
+ seq_printf(s, "\n");
+ }
+done:
+ mutex_unlock(&mcp->lock);
+}
+
+#else
+#define mcp23s08_dbg_show NULL
+#endif
+
+/*----------------------------------------------------------------------*/
+
+static int mcp23s08_probe(struct spi_device *spi)
+{
+ struct mcp23s08 *mcp;
+ struct mcp23s08_platform_data *pdata;
+ int status;
+ int do_update = 0;
+
+ pdata = spi->dev.platform_data;
+ if (!pdata || pdata->slave > 3 || !pdata->base)
+ return -ENODEV;
+
+ mcp = kzalloc(sizeof *mcp, GFP_KERNEL);
+ if (!mcp)
+ return -ENOMEM;
+
+ mutex_init(&mcp->lock);
+
+ mcp->spi = spi;
+ mcp->addr = 0x40 | (pdata->slave << 1);
+
+	mcp->chip.label = "mcp23s08";
+
+ mcp->chip.direction_input = mcp23s08_direction_input;
+ mcp->chip.get = mcp23s08_get;
+ mcp->chip.direction_output = mcp23s08_direction_output;
+ mcp->chip.set = mcp23s08_set;
+ mcp->chip.dbg_show = mcp23s08_dbg_show;
+
+ mcp->chip.base = pdata->base;
+ mcp->chip.ngpio = 8;
+ mcp->chip.can_sleep = 1;
+
+ spi_set_drvdata(spi, mcp);
+
+ /* verify MCP_IOCON.SEQOP = 0, so sequential reads work */
+ status = mcp23s08_read(mcp, MCP_IOCON);
+ if (status < 0)
+ goto fail;
+ if (status & IOCON_SEQOP) {
+ status &= ~IOCON_SEQOP;
+ status = mcp23s08_write(mcp, MCP_IOCON, (u8) status);
+ if (status < 0)
+ goto fail;
+ }
+
+ /* configure ~100K pullups */
+ status = mcp23s08_write(mcp, MCP_GPPU, pdata->pullups);
+ if (status < 0)
+ goto fail;
+
+ status = mcp23s08_read_regs(mcp, 0, mcp->cache, sizeof mcp->cache);
+ if (status < 0)
+ goto fail;
+
+ /* disable inverter on input */
+ if (mcp->cache[MCP_IPOL] != 0) {
+ mcp->cache[MCP_IPOL] = 0;
+ do_update = 1;
+ }
+
+ /* disable irqs */
+ if (mcp->cache[MCP_GPINTEN] != 0) {
+ mcp->cache[MCP_GPINTEN] = 0;
+ do_update = 1;
+ }
+
+ if (do_update) {
+ u8 tx[4];
+
+ tx[0] = mcp->addr;
+ tx[1] = MCP_IPOL;
+ memcpy(&tx[2], &mcp->cache[MCP_IPOL], sizeof(tx) - 2);
+ status = spi_write_then_read(mcp->spi, tx, sizeof tx, NULL, 0);
+
+ /* FIXME check status... */
+ }
+
+ status = gpiochip_add(&mcp->chip);
+
+ /* NOTE: these chips have a relatively sane IRQ framework, with
+ * per-signal masking and level/edge triggering. It's not yet
+ * handled here...
+ */
+
+ if (pdata->setup) {
+ status = pdata->setup(spi, mcp->chip.base,
+ mcp->chip.ngpio, pdata->context);
+ if (status < 0)
+ dev_dbg(&spi->dev, "setup --> %d\n", status);
+ }
+
+ return 0;
+
+fail:
+ kfree(mcp);
+ return status;
+}
+
+static int mcp23s08_remove(struct spi_device *spi)
+{
+ struct mcp23s08 *mcp = spi_get_drvdata(spi);
+ struct mcp23s08_platform_data *pdata = spi->dev.platform_data;
+ int status = 0;
+
+ if (pdata->teardown) {
+ status = pdata->teardown(spi,
+ mcp->chip.base, mcp->chip.ngpio,
+ pdata->context);
+ if (status < 0) {
+ dev_err(&spi->dev, "%s --> %d\n", "teardown", status);
+ return status;
+ }
+ }
+
+ status = gpiochip_remove(&mcp->chip);
+ if (status == 0)
+ kfree(mcp);
+ else
+ dev_err(&spi->dev, "%s --> %d\n", "remove", status);
+ return status;
+}
+
+static struct spi_driver mcp23s08_driver = {
+ .probe = mcp23s08_probe,
+ .remove = mcp23s08_remove,
+ .driver = {
+ .name = "mcp23s08",
+ .owner = THIS_MODULE,
+ },
+};
+
+/*----------------------------------------------------------------------*/
+
+static int __init mcp23s08_init(void)
+{
+ return spi_register_driver(&mcp23s08_driver);
+}
+module_init(mcp23s08_init);
+
+static void __exit mcp23s08_exit(void)
+{
+ spi_unregister_driver(&mcp23s08_driver);
+}
+module_exit(mcp23s08_exit);
+
+MODULE_LICENSE("GPL");
+
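
mcp23s08_probe() refuses to bind unless platform data supplies a hardware slave address (0..3) and a GPIO base, so the part has to be described by board code. A hedged sketch of such a declaration; the SPI bus, chip select, clock rate, GPIO base and pullup mask are illustrative, while the platform-data fields are the ones the probe above reads.

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/mcp23s08.h>

/* hypothetical board fragment: one MCP23S08 on SPI bus 1, chip select 2 */
static struct mcp23s08_platform_data board_mcp23s08 = {
	.slave		= 0,		/* matches the A1:A0 address pins */
	.base		= 160,		/* expander pins become GPIOs 160..167 */
	.pullups	= 0x0f,		/* ~100K pullups on the low four pins */
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "mcp23s08",
		.platform_data	= &board_mcp23s08,
		.max_speed_hz	= 1000000,	/* illustrative */
		.bus_num	= 1,
		.chip_select	= 2,
	},
};

/* called once from the machine's init code */
static void __init board_declare_spi_expander(void)
{
	spi_register_board_info(board_spi_devices,
				ARRAY_SIZE(board_spi_devices));
}
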
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
new file mode 100644
index 000000000000..92583cd4bffd
--- /dev/null
+++ b/drivers/gpio/pca953x.c
@@ -0,0 +1,308 @@
+/*
+ * pca953x.c - 4/8/16 bit I/O ports
+ *
+ * Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
+ * Copyright (C) 2007 Marvell International Ltd.
+ *
+ * Derived from drivers/i2c/chips/pca9539.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pca953x.h>
+
+#include <asm/gpio.h>
+
+#define PCA953X_INPUT 0
+#define PCA953X_OUTPUT 1
+#define PCA953X_INVERT 2
+#define PCA953X_DIRECTION 3
+
+/* This is temporary - in 2.6.26 i2c_driver_data should replace it. */
+struct pca953x_desc {
+ char name[I2C_NAME_SIZE];
+ unsigned long driver_data;
+};
+
+static const struct pca953x_desc pca953x_descs[] = {
+ { "pca9534", 8, },
+ { "pca9535", 16, },
+ { "pca9536", 4, },
+ { "pca9537", 4, },
+ { "pca9538", 8, },
+ { "pca9539", 16, },
+ /* REVISIT several pca955x parts should work here too */
+};
+
+struct pca953x_chip {
+ unsigned gpio_start;
+ uint16_t reg_output;
+ uint16_t reg_direction;
+
+ struct i2c_client *client;
+ struct gpio_chip gpio_chip;
+};
+
+/* NOTE: we can't currently rely on fault codes to come from SMBus
+ * calls, so we map all errors to EIO here and return zero otherwise.
+ */
+static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
+{
+ int ret;
+
+ if (chip->gpio_chip.ngpio <= 8)
+ ret = i2c_smbus_write_byte_data(chip->client, reg, val);
+ else
+ ret = i2c_smbus_write_word_data(chip->client, reg << 1, val);
+
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "failed writing register\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int pca953x_read_reg(struct pca953x_chip *chip, int reg, uint16_t *val)
+{
+ int ret;
+
+ if (chip->gpio_chip.ngpio <= 8)
+ ret = i2c_smbus_read_byte_data(chip->client, reg);
+ else
+ ret = i2c_smbus_read_word_data(chip->client, reg << 1);
+
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "failed reading register\n");
+ return -EIO;
+ }
+
+ *val = (uint16_t)ret;
+ return 0;
+}
+
+static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
+{
+ struct pca953x_chip *chip;
+ uint16_t reg_val;
+ int ret;
+
+ chip = container_of(gc, struct pca953x_chip, gpio_chip);
+
+ reg_val = chip->reg_direction | (1u << off);
+ ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
+ if (ret)
+ return ret;
+
+ chip->reg_direction = reg_val;
+ return 0;
+}
+
+static int pca953x_gpio_direction_output(struct gpio_chip *gc,
+ unsigned off, int val)
+{
+ struct pca953x_chip *chip;
+ uint16_t reg_val;
+ int ret;
+
+ chip = container_of(gc, struct pca953x_chip, gpio_chip);
+
+ /* set output level */
+ if (val)
+ reg_val = chip->reg_output | (1u << off);
+ else
+ reg_val = chip->reg_output & ~(1u << off);
+
+ ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
+ if (ret)
+ return ret;
+
+ chip->reg_output = reg_val;
+
+ /* then direction */
+ reg_val = chip->reg_direction & ~(1u << off);
+ ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
+ if (ret)
+ return ret;
+
+ chip->reg_direction = reg_val;
+ return 0;
+}
+
+static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
+{
+ struct pca953x_chip *chip;
+ uint16_t reg_val;
+ int ret;
+
+ chip = container_of(gc, struct pca953x_chip, gpio_chip);
+
+ ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val);
+ if (ret < 0) {
+ /* NOTE: diagnostic already emitted; that's all we should
+ * do unless gpio_*_value_cansleep() calls become different
+ * from their nonsleeping siblings (and report faults).
+ */
+ return 0;
+ }
+
+ return (reg_val & (1u << off)) ? 1 : 0;
+}
+
+static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
+{
+ struct pca953x_chip *chip;
+ uint16_t reg_val;
+ int ret;
+
+ chip = container_of(gc, struct pca953x_chip, gpio_chip);
+
+ if (val)
+ reg_val = chip->reg_output | (1u << off);
+ else
+ reg_val = chip->reg_output & ~(1u << off);
+
+ ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
+ if (ret)
+ return;
+
+ chip->reg_output = reg_val;
+}
+
+static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
+{
+ struct gpio_chip *gc;
+
+ gc = &chip->gpio_chip;
+
+ gc->direction_input = pca953x_gpio_direction_input;
+ gc->direction_output = pca953x_gpio_direction_output;
+ gc->get = pca953x_gpio_get_value;
+ gc->set = pca953x_gpio_set_value;
+
+ gc->base = chip->gpio_start;
+ gc->ngpio = gpios;
+ gc->label = chip->client->name;
+}
+
+static int __devinit pca953x_probe(struct i2c_client *client)
+{
+ struct pca953x_platform_data *pdata;
+ struct pca953x_chip *chip;
+ int ret, i;
+ const struct pca953x_desc *id = NULL;
+
+ pdata = client->dev.platform_data;
+ if (pdata == NULL)
+ return -ENODEV;
+
+ /* this loop vanishes when we get i2c_device_id */
+ for (i = 0; i < ARRAY_SIZE(pca953x_descs); i++)
+ if (!strcmp(pca953x_descs[i].name, client->name)) {
+ id = pca953x_descs + i;
+ break;
+ }
+ if (!id)
+ return -ENODEV;
+
+ chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
+ if (chip == NULL)
+ return -ENOMEM;
+
+ chip->client = client;
+
+ chip->gpio_start = pdata->gpio_base;
+
+ /* initialize cached registers from their original values.
+ * we can't share this chip with another i2c master.
+ */
+ pca953x_setup_gpio(chip, id->driver_data);
+
+ ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output);
+ if (ret)
+ goto out_failed;
+
+ ret = pca953x_read_reg(chip, PCA953X_DIRECTION, &chip->reg_direction);
+ if (ret)
+ goto out_failed;
+
+ /* set platform specific polarity inversion */
+ ret = pca953x_write_reg(chip, PCA953X_INVERT, pdata->invert);
+ if (ret)
+ goto out_failed;
+
+
+ ret = gpiochip_add(&chip->gpio_chip);
+ if (ret)
+ goto out_failed;
+
+ if (pdata->setup) {
+ ret = pdata->setup(client, chip->gpio_chip.base,
+ chip->gpio_chip.ngpio, pdata->context);
+ if (ret < 0)
+ dev_warn(&client->dev, "setup failed, %d\n", ret);
+ }
+
+ i2c_set_clientdata(client, chip);
+ return 0;
+
+out_failed:
+ kfree(chip);
+ return ret;
+}
+
+static int pca953x_remove(struct i2c_client *client)
+{
+ struct pca953x_platform_data *pdata = client->dev.platform_data;
+ struct pca953x_chip *chip = i2c_get_clientdata(client);
+ int ret = 0;
+
+ if (pdata->teardown) {
+ ret = pdata->teardown(client, chip->gpio_chip.base,
+ chip->gpio_chip.ngpio, pdata->context);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s failed, %d\n",
+ "teardown", ret);
+ return ret;
+ }
+ }
+
+ ret = gpiochip_remove(&chip->gpio_chip);
+ if (ret) {
+ dev_err(&client->dev, "%s failed, %d\n",
+ "gpiochip_remove()", ret);
+ return ret;
+ }
+
+ kfree(chip);
+ return 0;
+}
+
+static struct i2c_driver pca953x_driver = {
+ .driver = {
+ .name = "pca953x",
+ },
+ .probe = pca953x_probe,
+ .remove = pca953x_remove,
+};
+
+static int __init pca953x_init(void)
+{
+ return i2c_add_driver(&pca953x_driver);
+}
+module_init(pca953x_init);
+
+static void __exit pca953x_exit(void)
+{
+ i2c_del_driver(&pca953x_driver);
+}
+module_exit(pca953x_exit);
+
+MODULE_AUTHOR("eric miao <eric.miao@marvell.com>");
+MODULE_DESCRIPTION("GPIO expander driver for PCA953x");
+MODULE_LICENSE("GPL");
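
Until i2c_device_id arrives, pca953x_probe() identifies the part by matching client->name against the pca953x_descs table and requires platform data for the GPIO base, so the board has to declare both. A hedged sketch; the address, GPIO base and invert mask are invented, and the driver_name/type split is assumed to be how new-style binding is expressed in this kernel generation.

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>

/* hypothetical board fragment: one PCA9539 at I2C address 0x74 */
static struct pca953x_platform_data board_pca9539 = {
	.gpio_base	= 144,		/* 16 pins become GPIOs 144..159 */
	.invert		= 0,		/* no polarity inversion */
};

static struct i2c_board_info board_ioexp[] __initdata = {
	{
		I2C_BOARD_INFO("pca9539", 0x74),
		.driver_name	= "pca953x",	/* assumed binding field */
		.platform_data	= &board_pca9539,
	},
};

/* passed to i2c_register_board_info() from the board's early init code */
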
diff --git a/drivers/gpio/pcf857x.c b/drivers/gpio/pcf857x.c
new file mode 100644
index 000000000000..c6b3b5378384
--- /dev/null
+++ b/drivers/gpio/pcf857x.c
@@ -0,0 +1,330 @@
+/*
+ * pcf857x - driver for pcf857x, pca857x, and pca967x I2C GPIO expanders
+ *
+ * Copyright (C) 2007 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pcf857x.h>
+
+#include <asm/gpio.h>
+
+
+/*
+ * The pcf857x, pca857x, and pca967x chips only expose one read and one
+ * write register. Writing a "one" bit (to match the reset state) lets
+ * that pin be used as an input; it's not an open-drain model, but acts
+ * a bit like one. This is described as "quasi-bidirectional"; read the
+ * chip documentation for details.
+ *
+ * Many other I2C GPIO expander chips (like the pca953x models) have
+ * more complex register models and more conventional circuitry using
+ * push/pull drivers. They often use the same 0x20..0x27 addresses as
+ * pcf857x parts, making the "legacy" I2C driver model problematic.
+ */
+struct pcf857x {
+ struct gpio_chip chip;
+ struct i2c_client *client;
+ unsigned out; /* software latch */
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* Talk to 8-bit I/O expander */
+
+static int pcf857x_input8(struct gpio_chip *chip, unsigned offset)
+{
+ struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
+
+ gpio->out |= (1 << offset);
+ return i2c_smbus_write_byte(gpio->client, gpio->out);
+}
+
+static int pcf857x_get8(struct gpio_chip *chip, unsigned offset)
+{
+ struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
+ s32 value;
+
+ value = i2c_smbus_read_byte(gpio->client);
+ return (value < 0) ? 0 : (value & (1 << offset));
+}
+
+static int pcf857x_output8(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
+ unsigned bit = 1 << offset;
+
+ if (value)
+ gpio->out |= bit;
+ else
+ gpio->out &= ~bit;
+ return i2c_smbus_write_byte(gpio->client, gpio->out);
+}
+
+static void pcf857x_set8(struct gpio_chip *chip, unsigned offset, int value)
+{
+ pcf857x_output8(chip, offset, value);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Talk to 16-bit I/O expander */
+
+static int i2c_write_le16(struct i2c_client *client, u16 word)
+{
+ u8 buf[2] = { word & 0xff, word >> 8, };
+ int status;
+
+ status = i2c_master_send(client, buf, 2);
+ return (status < 0) ? status : 0;
+}
+
+static int i2c_read_le16(struct i2c_client *client)
+{
+ u8 buf[2];
+ int status;
+
+ status = i2c_master_recv(client, buf, 2);
+ if (status < 0)
+ return status;
+ return (buf[1] << 8) | buf[0];
+}
+
+static int pcf857x_input16(struct gpio_chip *chip, unsigned offset)
+{
+ struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
+
+ gpio->out |= (1 << offset);
+ return i2c_write_le16(gpio->client, gpio->out);
+}
+
+static int pcf857x_get16(struct gpio_chip *chip, unsigned offset)
+{
+ struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
+ int value;
+
+ value = i2c_read_le16(gpio->client);
+ return (value < 0) ? 0 : (value & (1 << offset));
+}
+
+static int pcf857x_output16(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
+ unsigned bit = 1 << offset;
+
+ if (value)
+ gpio->out |= bit;
+ else
+ gpio->out &= ~bit;
+ return i2c_write_le16(gpio->client, gpio->out);
+}
+
+static void pcf857x_set16(struct gpio_chip *chip, unsigned offset, int value)
+{
+ pcf857x_output16(chip, offset, value);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int pcf857x_probe(struct i2c_client *client)
+{
+ struct pcf857x_platform_data *pdata;
+ struct pcf857x *gpio;
+ int status;
+
+ pdata = client->dev.platform_data;
+ if (!pdata)
+ return -ENODEV;
+
+ /* Allocate, initialize, and register this gpio_chip. */
+ gpio = kzalloc(sizeof *gpio, GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ gpio->chip.base = pdata->gpio_base;
+ gpio->chip.can_sleep = 1;
+
+ /* NOTE: the OnSemi jlc1562b is also largely compatible with
+ * these parts, notably for output. It has a low-resolution
+ * DAC instead of pin change IRQs; and its inputs can be the
+ * result of comparators.
+ */
+
+ /* 8574 addresses are 0x20..0x27; 8574a uses 0x38..0x3f;
+	 * 9670, 9672, 9674, and 9674a use quite a variety.
+ *
+ * NOTE: we don't distinguish here between *4 and *4a parts.
+ */
+ if (strcmp(client->name, "pcf8574") == 0
+ || strcmp(client->name, "pca8574") == 0
+ || strcmp(client->name, "pca9670") == 0
+ || strcmp(client->name, "pca9672") == 0
+ || strcmp(client->name, "pca9674") == 0
+ ) {
+ gpio->chip.ngpio = 8;
+ gpio->chip.direction_input = pcf857x_input8;
+ gpio->chip.get = pcf857x_get8;
+ gpio->chip.direction_output = pcf857x_output8;
+ gpio->chip.set = pcf857x_set8;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE))
+ status = -EIO;
+
+ /* fail if there's no chip present */
+ else
+ status = i2c_smbus_read_byte(client);
+
+ /* '75/'75c addresses are 0x20..0x27, just like the '74;
+ * the '75c doesn't have a current source pulling high.
+	 * 9671, 9673, and 9675 use quite a variety of addresses.
+ *
+ * NOTE: we don't distinguish here between '75 and '75c parts.
+ */
+ } else if (strcmp(client->name, "pcf8575") == 0
+ || strcmp(client->name, "pca8575") == 0
+ || strcmp(client->name, "pca9671") == 0
+ || strcmp(client->name, "pca9673") == 0
+ || strcmp(client->name, "pca9675") == 0
+ ) {
+ gpio->chip.ngpio = 16;
+ gpio->chip.direction_input = pcf857x_input16;
+ gpio->chip.get = pcf857x_get16;
+ gpio->chip.direction_output = pcf857x_output16;
+ gpio->chip.set = pcf857x_set16;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ status = -EIO;
+
+ /* fail if there's no chip present */
+ else
+ status = i2c_read_le16(client);
+
+ } else
+ status = -ENODEV;
+
+ if (status < 0)
+ goto fail;
+
+ gpio->chip.label = client->name;
+
+ gpio->client = client;
+ i2c_set_clientdata(client, gpio);
+
+ /* NOTE: these chips have strange "quasi-bidirectional" I/O pins.
+ * We can't actually know whether a pin is configured (a) as output
+ * and driving the signal low, or (b) as input and reporting a low
+ * value ... without knowing the last value written since the chip
+ * came out of reset (if any). We can't read the latched output.
+ *
+ * In short, the only reliable solution for setting up pin direction
+ * is to do it explicitly. The setup() method can do that, but it
+ * may cause transient glitching since it can't know the last value
+ * written (some pins may need to be driven low).
+ *
+ * Using pdata->n_latch avoids that trouble. When left initialized
+ * to zero, our software copy of the "latch" then matches the chip's
+ * all-ones reset state. Otherwise it flags pins to be driven low.
+ */
+ gpio->out = ~pdata->n_latch;
+
+ status = gpiochip_add(&gpio->chip);
+ if (status < 0)
+ goto fail;
+
+ /* NOTE: these chips can issue "some pin-changed" IRQs, which we
+ * don't yet even try to use. Among other issues, the relevant
+ * genirq state isn't available to modular drivers; and most irq
+ * methods can't be called from sleeping contexts.
+ */
+
+ dev_info(&client->dev, "gpios %d..%d on a %s%s\n",
+ gpio->chip.base,
+ gpio->chip.base + gpio->chip.ngpio - 1,
+ client->name,
+ client->irq ? " (irq ignored)" : "");
+
+ /* Let platform code set up the GPIOs and their users.
+ * Now is the first time anyone could use them.
+ */
+ if (pdata->setup) {
+ status = pdata->setup(client,
+ gpio->chip.base, gpio->chip.ngpio,
+ pdata->context);
+ if (status < 0)
+ dev_warn(&client->dev, "setup --> %d\n", status);
+ }
+
+ return 0;
+
+fail:
+ dev_dbg(&client->dev, "probe error %d for '%s'\n",
+ status, client->name);
+ kfree(gpio);
+ return status;
+}
+
+static int pcf857x_remove(struct i2c_client *client)
+{
+ struct pcf857x_platform_data *pdata = client->dev.platform_data;
+ struct pcf857x *gpio = i2c_get_clientdata(client);
+ int status = 0;
+
+ if (pdata->teardown) {
+ status = pdata->teardown(client,
+ gpio->chip.base, gpio->chip.ngpio,
+ pdata->context);
+ if (status < 0) {
+ dev_err(&client->dev, "%s --> %d\n",
+ "teardown", status);
+ return status;
+ }
+ }
+
+ status = gpiochip_remove(&gpio->chip);
+ if (status == 0)
+ kfree(gpio);
+ else
+ dev_err(&client->dev, "%s --> %d\n", "remove", status);
+ return status;
+}
+
+static struct i2c_driver pcf857x_driver = {
+ .driver = {
+ .name = "pcf857x",
+ .owner = THIS_MODULE,
+ },
+ .probe = pcf857x_probe,
+ .remove = pcf857x_remove,
+};
+
+static int __init pcf857x_init(void)
+{
+ return i2c_add_driver(&pcf857x_driver);
+}
+module_init(pcf857x_init);
+
+static void __exit pcf857x_exit(void)
+{
+ i2c_del_driver(&pcf857x_driver);
+}
+module_exit(pcf857x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Brownell");
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 86c66c345f8b..0c94770b7f83 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -905,7 +905,7 @@ static ssize_t applesmc_key_at_index_store(struct device *dev,
}
static struct led_classdev applesmc_backlight = {
- .name = "smc:kbd_backlight",
+ .name = "smc::kbd_backlight",
.default_trigger = "nand-disk",
.brightness_set = applesmc_brightness_set,
};
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index bd7082c2443d..b21593f93586 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -54,8 +54,8 @@ config PCF8575
hardware. If unsure, say N.
config SENSORS_PCA9539
- tristate "Philips PCA9539 16-bit I/O port"
- depends on EXPERIMENTAL
+ tristate "Philips PCA9539 16-bit I/O port (DEPRECATED)"
+	depends on EXPERIMENTAL && GPIO_PCA953X = "n"
help
If you say yes here you get support for the Philips PCA9539
16-bit I/O port.
@@ -63,6 +63,9 @@ config SENSORS_PCA9539
This driver can also be built as a module. If so, the module
will be called pca9539.
+ This driver is deprecated and will be dropped soon. Use
+	  drivers/gpio/pca953x.c instead.
+
config SENSORS_PCF8591
tristate "Philips PCF8591"
depends on EXPERIMENTAL
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 45b26ed351cf..ab8fb257528e 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -1009,6 +1009,15 @@ config BLK_DEV_Q40IDE
normally be on; disable it only if you are running a custom hard
drive subsystem through an expansion card.
+config BLK_DEV_PALMCHIP_BK3710
+ tristate "Palmchip bk3710 IDE controller support"
+ depends on ARCH_DAVINCI
+ select BLK_DEV_IDEDMA_PCI
+ help
+	  Say Y here if you want to support the on-chip IDE controller on the
+	  TI DaVinci SoC.
+
+
config BLK_DEV_MPC8xx_IDE
tristate "MPC8xx IDE support"
depends on 8xx && (LWMON || IVMS8 || IVML24 || TQM8xxL) && IDE=y && BLK_DEV_IDE=y && !PPC_MERGE
diff --git a/drivers/ide/arm/Makefile b/drivers/ide/arm/Makefile
index 5f63ad216862..936e7b0237f5 100644
--- a/drivers/ide/arm/Makefile
+++ b/drivers/ide/arm/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o
obj-$(CONFIG_BLK_DEV_IDE_BAST) += bast-ide.o
+obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o
ifeq ($(CONFIG_IDE_ARM), m)
obj-m += ide_arm.o
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index fb00f3827ecd..e816b0ffcfe6 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -365,7 +365,7 @@ static void icside_dma_timeout(ide_drive_t *drive)
if (icside_dma_test_irq(drive))
return;
- ide_dump_status(drive, "DMA timeout", HWIF(drive)->INB(IDE_STATUS_REG));
+ ide_dump_status(drive, "DMA timeout", ide_read_status(drive));
icside_dma_end(drive);
}
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
new file mode 100644
index 000000000000..c3069970a012
--- /dev/null
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -0,0 +1,395 @@
+/*
+ * Palmchip bk3710 IDE controller
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software, Inc., <source@mvista.com>
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ----------------------------------------------------------------------------
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/hdreg.h>
+#include <linux/ide.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+/* Offset of the primary interface registers */
+#define IDE_PALM_ATA_PRI_REG_OFFSET 0x1F0
+
+/* Primary Control Offset */
+#define IDE_PALM_ATA_PRI_CTL_OFFSET 0x3F6
+
+/*
+ * PalmChip 3710 IDE Controller UDMA timing structure Definition
+ */
+struct palm_bk3710_udmatiming {
+ unsigned int rptime; /* Ready to pause time */
+ unsigned int cycletime; /* Cycle Time */
+};
+
+#define BK3710_BMICP 0x00
+#define BK3710_BMISP 0x02
+#define BK3710_BMIDTP 0x04
+#define BK3710_BMICS 0x08
+#define BK3710_BMISS 0x0A
+#define BK3710_BMIDTS 0x0C
+#define BK3710_IDETIMP 0x40
+#define BK3710_IDETIMS 0x42
+#define BK3710_SIDETIM 0x44
+#define BK3710_SLEWCTL 0x45
+#define BK3710_IDESTATUS 0x47
+#define BK3710_UDMACTL 0x48
+#define BK3710_UDMATIM 0x4A
+#define BK3710_MISCCTL 0x50
+#define BK3710_REGSTB 0x54
+#define BK3710_REGRCVR 0x58
+#define BK3710_DATSTB 0x5C
+#define BK3710_DATRCVR 0x60
+#define BK3710_DMASTB 0x64
+#define BK3710_DMARCVR 0x68
+#define BK3710_UDMASTB 0x6C
+#define BK3710_UDMATRP 0x70
+#define BK3710_UDMAENV 0x74
+#define BK3710_IORDYTMP 0x78
+#define BK3710_IORDYTMS 0x7C
+
+#include "../ide-timing.h"
+
+static long ide_palm_clk;
+
+static const struct palm_bk3710_udmatiming palm_bk3710_udmatimings[6] = {
+ {160, 240}, /* UDMA Mode 0 */
+ {125, 160}, /* UDMA Mode 1 */
+ {100, 120}, /* UDMA Mode 2 */
+ {100, 90}, /* UDMA Mode 3 */
+ {85, 60}, /* UDMA Mode 4 */
+};
+
+static struct clk *ideclkp;
+
+static void palm_bk3710_setudmamode(void __iomem *base, unsigned int dev,
+ unsigned int mode)
+{
+ u8 tenv, trp, t0;
+ u32 val32;
+ u16 val16;
+
+ /* DMA Data Setup */
+ t0 = (palm_bk3710_udmatimings[mode].cycletime + ide_palm_clk - 1)
+ / ide_palm_clk - 1;
+ tenv = (20 + ide_palm_clk - 1) / ide_palm_clk - 1;
+ trp = (palm_bk3710_udmatimings[mode].rptime + ide_palm_clk - 1)
+ / ide_palm_clk - 1;
+
+ /* udmatim Register */
+ val16 = readw(base + BK3710_UDMATIM) & (dev ? 0xFF0F : 0xFFF0);
+ val16 |= (mode << (dev ? 4 : 0));
+ writew(val16, base + BK3710_UDMATIM);
+
+ /* udmastb Ultra DMA Access Strobe Width */
+ val32 = readl(base + BK3710_UDMASTB) & (0xFF << (dev ? 0 : 8));
+ val32 |= (t0 << (dev ? 8 : 0));
+ writel(val32, base + BK3710_UDMASTB);
+
+ /* udmatrp Ultra DMA Ready to Pause Time */
+ val32 = readl(base + BK3710_UDMATRP) & (0xFF << (dev ? 0 : 8));
+ val32 |= (trp << (dev ? 8 : 0));
+ writel(val32, base + BK3710_UDMATRP);
+
+ /* udmaenv Ultra DMA Envelope Time */
+ val32 = readl(base + BK3710_UDMAENV) & (0xFF << (dev ? 0 : 8));
+ val32 |= (tenv << (dev ? 8 : 0));
+ writel(val32, base + BK3710_UDMAENV);
+
+ /* Enable UDMA for Device */
+ val16 = readw(base + BK3710_UDMACTL) | (1 << dev);
+ writew(val16, base + BK3710_UDMACTL);
+}
+
+static void palm_bk3710_setdmamode(void __iomem *base, unsigned int dev,
+ unsigned short min_cycle,
+ unsigned int mode)
+{
+ u8 td, tkw, t0;
+ u32 val32;
+ u16 val16;
+ struct ide_timing *t;
+ int cycletime;
+
+ t = ide_timing_find_mode(mode);
+ cycletime = max_t(int, t->cycle, min_cycle);
+
+ /* DMA Data Setup */
+ t0 = (cycletime + ide_palm_clk - 1) / ide_palm_clk;
+ td = (t->active + ide_palm_clk - 1) / ide_palm_clk;
+ tkw = t0 - td - 1;
+ td -= 1;
+
+ val32 = readl(base + BK3710_DMASTB) & (0xFF << (dev ? 0 : 8));
+ val32 |= (td << (dev ? 8 : 0));
+ writel(val32, base + BK3710_DMASTB);
+
+ val32 = readl(base + BK3710_DMARCVR) & (0xFF << (dev ? 0 : 8));
+ val32 |= (tkw << (dev ? 8 : 0));
+ writel(val32, base + BK3710_DMARCVR);
+
+ /* Disable UDMA for Device */
+ val16 = readw(base + BK3710_UDMACTL) & ~(1 << dev);
+ writew(val16, base + BK3710_UDMACTL);
+}
+
+static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
+ unsigned int dev, unsigned int cycletime,
+ unsigned int mode)
+{
+ u8 t2, t2i, t0;
+ u32 val32;
+ struct ide_timing *t;
+
+ /* PIO Data Setup */
+ t0 = (cycletime + ide_palm_clk - 1) / ide_palm_clk;
+ t2 = (ide_timing_find_mode(XFER_PIO_0 + mode)->active +
+ ide_palm_clk - 1) / ide_palm_clk;
+
+ t2i = t0 - t2 - 1;
+ t2 -= 1;
+
+ val32 = readl(base + BK3710_DATSTB) & (0xFF << (dev ? 0 : 8));
+ val32 |= (t2 << (dev ? 8 : 0));
+ writel(val32, base + BK3710_DATSTB);
+
+ val32 = readl(base + BK3710_DATRCVR) & (0xFF << (dev ? 0 : 8));
+ val32 |= (t2i << (dev ? 8 : 0));
+ writel(val32, base + BK3710_DATRCVR);
+
+ if (mate && mate->present) {
+ u8 mode2 = ide_get_best_pio_mode(mate, 255, 4);
+
+ if (mode2 < mode)
+ mode = mode2;
+ }
+
+ /* TASKFILE Setup */
+ t = ide_timing_find_mode(XFER_PIO_0 + mode);
+ t0 = (t->cyc8b + ide_palm_clk - 1) / ide_palm_clk;
+ t2 = (t->act8b + ide_palm_clk - 1) / ide_palm_clk;
+
+ t2i = t0 - t2 - 1;
+ t2 -= 1;
+
+ val32 = readl(base + BK3710_REGSTB) & (0xFF << (dev ? 0 : 8));
+ val32 |= (t2 << (dev ? 8 : 0));
+ writel(val32, base + BK3710_REGSTB);
+
+ val32 = readl(base + BK3710_REGRCVR) & (0xFF << (dev ? 0 : 8));
+ val32 |= (t2i << (dev ? 8 : 0));
+ writel(val32, base + BK3710_REGRCVR);
+}
+
+static void palm_bk3710_set_dma_mode(ide_drive_t *drive, u8 xferspeed)
+{
+ int is_slave = drive->dn & 1;
+ void __iomem *base = (void *)drive->hwif->dma_base;
+
+ if (xferspeed >= XFER_UDMA_0) {
+ palm_bk3710_setudmamode(base, is_slave,
+ xferspeed - XFER_UDMA_0);
+ } else {
+ palm_bk3710_setdmamode(base, is_slave, drive->id->eide_dma_min,
+ xferspeed);
+ }
+}
+
+static void palm_bk3710_set_pio_mode(ide_drive_t *drive, u8 pio)
+{
+ unsigned int cycle_time;
+ int is_slave = drive->dn & 1;
+ ide_drive_t *mate;
+ void __iomem *base = (void *)drive->hwif->dma_base;
+
+ /*
+ * Obtain the drive PIO data for tuning the Palm Chip registers
+ */
+ cycle_time = ide_pio_cycle_time(drive, pio);
+ mate = ide_get_paired_drive(drive);
+ palm_bk3710_setpiomode(base, mate, is_slave, cycle_time, pio);
+}
+
+static void __devinit palm_bk3710_chipinit(void __iomem *base)
+{
+ /*
+ * Enable reset_en of the ATA controller so that when the ATA signals
+ * are brought out by writing into the device config, the por_n signal
+ * is not 'Z' and has a stable value.
+ */
+ writel(0x0300, base + BK3710_MISCCTL);
+
+ /* wait for some time and deassert the reset of ATA Device. */
+ mdelay(100);
+
+ /* Deassert the Reset */
+ writel(0x0200, base + BK3710_MISCCTL);
+
+ /*
+ * Program the IDETIMP Register Value based on the following assumptions
+ *
+ * (ATA_IDETIMP_IDEEN , ENABLE ) |
+ * (ATA_IDETIMP_SLVTIMEN , DISABLE) |
+ * (ATA_IDETIMP_RDYSMPL , 70NS) |
+ * (ATA_IDETIMP_RDYRCVRY , 50NS) |
+ * (ATA_IDETIMP_DMAFTIM1 , PIOCOMP) |
+ * (ATA_IDETIMP_PREPOST1 , DISABLE) |
+ * (ATA_IDETIMP_RDYSEN1 , DISABLE) |
+ * (ATA_IDETIMP_PIOFTIM1 , DISABLE) |
+ * (ATA_IDETIMP_DMAFTIM0 , PIOCOMP) |
+ * (ATA_IDETIMP_PREPOST0 , DISABLE) |
+ * (ATA_IDETIMP_RDYSEN0 , DISABLE) |
+ * (ATA_IDETIMP_PIOFTIM0 , DISABLE)
+ */
+ writew(0xB388, base + BK3710_IDETIMP);
+
+ /*
+ * Configure SIDETIM Register
+ * (ATA_SIDETIM_RDYSMPS1 ,120NS ) |
+ * (ATA_SIDETIM_RDYRCYS1 ,120NS )
+ */
+ writeb(0, base + BK3710_SIDETIM);
+
+ /*
+ * UDMACTL Ultra-ATA DMA Control
+ * (ATA_UDMACTL_UDMAP1 , 0 ) |
+ * (ATA_UDMACTL_UDMAP0 , 0 )
+ *
+ */
+ writew(0, base + BK3710_UDMACTL);
+
+ /*
+ * MISCCTL Miscellaneous Control Register
+ * (ATA_MISCCTL_RSTMODEP , 1) |
+ * (ATA_MISCCTL_RESETP , 0) |
+ * (ATA_MISCCTL_TIMORIDE , 1)
+ */
+ writel(0x201, base + BK3710_MISCCTL);
+
+ /*
+ * IORDYTMP IORDY Timer for Primary Register
+ * (ATA_IORDYTMP_IORDYTMP , 0xffff )
+ */
+ writel(0xFFFF, base + BK3710_IORDYTMP);
+
+ /*
+ * Configure BMISP Register
+ * (ATA_BMISP_DMAEN1 , DISABLE ) |
+ * (ATA_BMISP_DMAEN0 , DISABLE ) |
+ * (ATA_BMISP_IORDYINT , CLEAR) |
+ * (ATA_BMISP_INTRSTAT , CLEAR) |
+ * (ATA_BMISP_DMAERROR , CLEAR)
+ */
+ writew(0, base + BK3710_BMISP);
+
+ palm_bk3710_setpiomode(base, NULL, 0, 600, 0);
+ palm_bk3710_setpiomode(base, NULL, 1, 600, 0);
+}
+static int __devinit palm_bk3710_probe(struct platform_device *pdev)
+{
+ hw_regs_t ide_ctlr_info;
+ int index = 0;
+ int pribase;
+ struct clk *clkp;
+ struct resource *mem, *irq;
+ ide_hwif_t *hwif;
+ void __iomem *base;
+
+ clkp = clk_get(NULL, "IDECLK");
+ if (IS_ERR(clkp))
+ return -ENODEV;
+
+ ideclkp = clkp;
+ clk_enable(ideclkp);
+ ide_palm_clk = clk_get_rate(ideclkp)/100000;
+ ide_palm_clk = (10000/ide_palm_clk) + 1;
+ /* Register the IDE interface with Linux ATA Interface */
+ memset(&ide_ctlr_info, 0, sizeof(ide_ctlr_info));
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem == NULL) {
+ printk(KERN_ERR "failed to get memory region resource\n");
+ return -ENODEV;
+ }
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq == NULL) {
+ printk(KERN_ERR "failed to get IRQ resource\n");
+ return -ENODEV;
+ }
+
+ base = (void *)mem->start;
+
+ /* Configure the Palm Chip controller */
+ palm_bk3710_chipinit(base);
+
+ pribase = mem->start + IDE_PALM_ATA_PRI_REG_OFFSET;
+ for (index = 0; index < IDE_NR_PORTS - 2; index++)
+ ide_ctlr_info.io_ports[index] = pribase + index;
+ ide_ctlr_info.io_ports[IDE_CONTROL_OFFSET] = mem->start +
+ IDE_PALM_ATA_PRI_CTL_OFFSET;
+ ide_ctlr_info.irq = irq->start;
+ ide_ctlr_info.chipset = ide_palm3710;
+
+ if (ide_register_hw(&ide_ctlr_info, NULL, &hwif) < 0) {
+ printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n");
+ return -ENODEV;
+ }
+
+ hwif->set_pio_mode = &palm_bk3710_set_pio_mode;
+ hwif->set_dma_mode = &palm_bk3710_set_dma_mode;
+ hwif->mmio = 1;
+ default_hwif_mmiops(hwif);
+ hwif->cbl = ATA_CBL_PATA80;
+ hwif->ultra_mask = 0x1f; /* Ultra DMA Mode 4 Max
+ (input clk 99MHz) */
+ hwif->mwdma_mask = 0x7;
+ hwif->drives[0].autotune = 1;
+ hwif->drives[1].autotune = 1;
+
+ ide_setup_dma(hwif, mem->start);
+
+ return 0;
+}
+
+static struct platform_driver platform_bk_driver = {
+ .driver = {
+ .name = "palm_bk3710",
+ },
+ .probe = palm_bk3710_probe,
+ .remove = NULL,
+};
+
+static int __init palm_bk3710_init(void)
+{
+ return platform_driver_register(&platform_bk_driver);
+}
+
+module_init(palm_bk3710_init);
+MODULE_LICENSE("GPL");
+
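Every timing helper in the new palm_bk3710.c follows the same pattern: a nanosecond value is converted into a count of IDE clock periods with a rounding-up division, and the count is written into the per-device field of a shared timing register (low byte for the master, high byte for the slave in most of them). The following is a minimal standalone sketch of that arithmetic; the helper name is made up, and the 99 MHz clock is taken from the comment in the probe code.

/*
 * Sketch only, not part of the patch: nanoseconds -> IDE clock counts,
 * using the same rounding-up division as palm_bk3710_setudmamode() and
 * the other timing helpers above.
 */
static unsigned int bk3710_ns_to_clocks(unsigned int ns, unsigned int clk_ns)
{
	return (ns + clk_ns - 1) / clk_ns;
}

/*
 * Worked example with a 99 MHz IDE clock, as derived in palm_bk3710_probe():
 * 99000000 Hz / 100000 = 990, then 10000 / 990 + 1 = 11 ns per clock.
 * A UDMA4 cycle time of 60 ns needs bk3710_ns_to_clocks(60, 11) = 6 clocks,
 * and 6 - 1 = 5 is the value programmed into the UDMASTB field for that
 * device.
 */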
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index 00587a8c2ba1..e79bf8f9b7db 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -753,6 +753,25 @@ static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed)
cris_ide_set_speed(TYPE_DMA, 0, strobe, hold);
}
+static void __init cris_setup_ports(hw_regs_t *hw, unsigned long base)
+{
+ int i;
+
+ memset(hw, 0, sizeof(*hw));
+
+ for (i = 0; i <= 7; i++)
+ hw->io_ports[i] = base + cris_ide_reg_addr(i, 0, 1);
+
+ /*
+ * the IDE control register is at ATA address 6,
+ * with CS1 active instead of CS0
+ */
+ hw->io_ports[IDE_CONTROL_OFFSET] = base + cris_ide_reg_addr(6, 1, 0);
+
+ hw->irq = ide_default_irq(0);
+ hw->ack_intr = cris_ide_ack_intr;
+}
+
static const struct ide_port_info cris_port_info __initdata = {
.chipset = ide_etrax100,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA |
@@ -765,24 +784,16 @@ static const struct ide_port_info cris_port_info __initdata = {
static int __init init_e100_ide(void)
{
hw_regs_t hw;
- int ide_offsets[IDE_NR_PORTS], h, i;
+ int h;
u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
printk("ide: ETRAX FS built-in ATA DMA controller\n");
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
- ide_offsets[i] = cris_ide_reg_addr(i, 0, 1);
-
- /* the IDE control register is at ATA address 6, with CS1 active instead of CS0 */
- ide_offsets[IDE_CONTROL_OFFSET] = cris_ide_reg_addr(6, 1, 0);
-
for (h = 0; h < 4; h++) {
ide_hwif_t *hwif = NULL;
- ide_setup_ports(&hw, cris_ide_base_address(h),
- ide_offsets,
- 0, 0, cris_ide_ack_intr,
- ide_default_irq(0));
+ cris_setup_ports(&hw, cris_ide_base_address(h));
+
hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
if (hwif == NULL)
continue;
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 25aaeae1e830..e07b189f3ec8 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -171,7 +171,7 @@ err:
static acpi_handle ide_acpi_hwif_get_handle(ide_hwif_t *hwif)
{
struct device *dev = hwif->gendev.parent;
- acpi_handle dev_handle;
+ acpi_handle uninitialized_var(dev_handle);
acpi_integer pcidevfn;
acpi_handle chan_handle;
int err;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index ee4d458e2bbf..5e42c19a03e3 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -295,7 +295,8 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
int stat, err, sense_key;
/* Check for errors. */
- stat = HWIF(drive)->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
+
if (stat_ret)
*stat_ret = stat;
@@ -303,7 +304,7 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
return 0;
/* Get the IDE error register. */
- err = HWIF(drive)->INB(IDE_ERROR_REG);
+ err = ide_read_error(drive);
sense_key = err >> 4;
if (rq == NULL) {
@@ -692,7 +693,7 @@ int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw)
/* Some drives (ASUS) seem to tell us that status
* info is available. just get it and ignore.
*/
- (void) HWIF(drive)->INB(IDE_STATUS_REG);
+ (void)ide_read_status(drive);
return 0;
} else {
/* Drive wants a command packet, or invalid ireason... */
@@ -1326,7 +1327,7 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
if (blk_fs_request(rq)) {
if (info->cd_flags & IDE_CD_FLAG_SEEKING) {
unsigned long elapsed = jiffies - info->start_seek;
- int stat = HWIF(drive)->INB(IDE_STATUS_REG);
+ int stat = ide_read_status(drive);
if ((stat & SEEK_STAT) != SEEK_STAT) {
if (elapsed < IDECD_SEEK_TIMEOUT) {
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 3cf59f2c3928..a4bb32883c6b 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -147,7 +147,8 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
u8 stat = 0, dma_stat = 0;
dma_stat = HWIF(drive)->ide_dma_end(drive);
- stat = HWIF(drive)->INB(IDE_STATUS_REG); /* get drive status */
+ stat = ide_read_status(drive);
+
if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
if (!dma_stat) {
struct request *rq = HWGROUP(drive)->rq;
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index f8fe6ee128f3..faf22d716f80 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -465,7 +465,7 @@ static void idefloppy_retry_pc(ide_drive_t *drive)
idefloppy_pc_t *pc;
struct request *rq;
- (void)drive->hwif->INB(IDE_ERROR_REG);
+ (void)ide_read_error(drive);
pc = idefloppy_next_pc_storage(drive);
rq = idefloppy_next_rq_storage(drive);
idefloppy_create_request_sense_cmd(pc);
@@ -501,7 +501,7 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
}
/* Clear the interrupt */
- stat = drive->hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
/* No more interrupts */
if ((stat & DRQ_STAT) == 0) {
@@ -1246,7 +1246,7 @@ static int idefloppy_get_format_progress(ide_drive_t *drive, int __user *arg)
u8 stat;
local_irq_save(flags);
- stat = drive->hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
local_irq_restore(flags);
progress_indication = ((stat & SEEK_STAT) == 0) ? 0 : 0x10000;
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index be469dbbe8fb..709b9e4d2871 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -20,8 +20,14 @@ static int __init ide_generic_init(void)
if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET])
ide_get_lock(NULL, NULL); /* for atari only */
- for (i = 0; i < MAX_HWIFS; i++)
- idx[i] = ide_hwifs[i].present ? 0xff : i;
+ for (i = 0; i < MAX_HWIFS; i++) {
+ ide_hwif_t *hwif = &ide_hwifs[i];
+
+ if (hwif->io_ports[IDE_DATA_OFFSET] && !hwif->present)
+ idx[i] = i;
+ else
+ idx[i] = 0xff;
+ }
ide_device_add_all(idx, NULL);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 4bddef0c0b96..3addbe478d26 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -466,7 +466,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
return ide_stopped;
}
- if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
+ if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
rq->errors |= ERROR_RESET;
if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
@@ -493,7 +493,7 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u
/* add decoding error stuff */
}
- if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
+ if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
/* force an abort */
hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);
@@ -821,9 +821,8 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
#ifdef DEBUG
printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
- ide_end_drive_cmd(drive,
- hwif->INB(IDE_STATUS_REG),
- hwif->INB(IDE_ERROR_REG));
+ ide_end_drive_cmd(drive, ide_read_status(drive), ide_read_error(drive));
+
return ide_stopped;
}
@@ -1231,7 +1230,7 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
(void)HWIF(drive)->ide_dma_end(drive);
ret = ide_error(drive, "dma timeout error",
- hwif->INB(IDE_STATUS_REG));
+ ide_read_status(drive));
} else {
printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
hwif->dma_timeout(drive);
@@ -1355,7 +1354,8 @@ void ide_timer_expiry (unsigned long data)
startstop = ide_dma_timeout_retry(drive, wait);
} else
startstop =
- ide_error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG));
+ ide_error(drive, "irq timeout",
+ ide_read_status(drive));
}
drive->service_time = jiffies - drive->service_start;
spin_lock_irq(&ide_lock);
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index a95178f5e1bb..c32e759df208 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -430,10 +430,10 @@ int drive_is_ready (ide_drive_t *drive)
* about possible isa-pnp and pci-pnp issues yet.
*/
if (IDE_CONTROL_REG)
- stat = hwif->INB(IDE_ALTSTATUS_REG);
+ stat = ide_read_altstatus(drive);
else
/* Note: this may clear a pending IRQ!! */
- stat = hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if (stat & BUSY_STAT)
/* drive busy: definitely not interrupting */
@@ -458,23 +458,24 @@ EXPORT_SYMBOL(drive_is_ready);
*/
static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat)
{
- ide_hwif_t *hwif = drive->hwif;
unsigned long flags;
int i;
u8 stat;
udelay(1); /* spec allows drive 400ns to assert "BUSY" */
- if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
+ stat = ide_read_status(drive);
+
+ if (stat & BUSY_STAT) {
local_irq_set(flags);
timeout += jiffies;
- while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
+ while ((stat = ide_read_status(drive)) & BUSY_STAT) {
if (time_after(jiffies, timeout)) {
/*
* One last read after the timeout in case
* heavy interrupt load made us not make any
* progress during the timeout..
*/
- stat = hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if (!(stat & BUSY_STAT))
break;
@@ -494,7 +495,9 @@ static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long ti
*/
for (i = 0; i < 10; i++) {
udelay(1);
- if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), good, bad)) {
+ stat = ide_read_status(drive);
+
+ if (OK_STAT(stat, good, bad)) {
*rstat = stat;
return 0;
}
@@ -617,6 +620,7 @@ int ide_driveid_update(ide_drive_t *drive)
ide_hwif_t *hwif = drive->hwif;
struct hd_driveid *id;
unsigned long timeout, flags;
+ u8 stat;
/*
* Re-read drive->id for possible DMA mode
@@ -633,10 +637,15 @@ int ide_driveid_update(ide_drive_t *drive)
SELECT_MASK(drive, 0);
return 0; /* drive timed-out */
}
+
msleep(50); /* give drive a breather */
- } while (hwif->INB(IDE_ALTSTATUS_REG) & BUSY_STAT);
+ stat = ide_read_altstatus(drive);
+ } while (stat & BUSY_STAT);
+
msleep(50); /* wait for IRQ and DRQ_STAT */
- if (!OK_STAT(hwif->INB(IDE_STATUS_REG),DRQ_STAT,BAD_R_STAT)) {
+ stat = ide_read_status(drive);
+
+ if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
SELECT_MASK(drive, 0);
printk("%s: CHECK for good STATUS\n", drive->name);
return 0;
@@ -649,7 +658,7 @@ int ide_driveid_update(ide_drive_t *drive)
return 0;
}
ata_input_data(drive, id, SECTOR_WORDS);
- (void) hwif->INB(IDE_STATUS_REG); /* clear drive IRQ */
+ (void)ide_read_status(drive); /* clear drive IRQ */
local_irq_enable();
local_irq_restore(flags);
ide_fix_driveid(id);
@@ -850,17 +859,16 @@ static ide_startstop_t do_reset1 (ide_drive_t *, int);
static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
{
ide_hwgroup_t *hwgroup = HWGROUP(drive);
- ide_hwif_t *hwif = HWIF(drive);
u8 stat;
SELECT_DRIVE(drive);
udelay (10);
+ stat = ide_read_status(drive);
- if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG), 0, BUSY_STAT)) {
+ if (OK_STAT(stat, 0, BUSY_STAT))
printk("%s: ATAPI reset complete\n", drive->name);
- } else {
+ else {
if (time_before(jiffies, hwgroup->poll_timeout)) {
- BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
/* continue polling */
return ide_started;
@@ -898,9 +906,10 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
}
}
- if (!OK_STAT(tmp = hwif->INB(IDE_STATUS_REG), 0, BUSY_STAT)) {
+ tmp = ide_read_status(drive);
+
+ if (!OK_STAT(tmp, 0, BUSY_STAT)) {
if (time_before(jiffies, hwgroup->poll_timeout)) {
- BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
/* continue polling */
return ide_started;
@@ -909,7 +918,9 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
drive->failures++;
} else {
printk("%s: reset: ", hwif->name);
- if ((tmp = hwif->INB(IDE_ERROR_REG)) == 1) {
+ tmp = ide_read_error(drive);
+
+ if (tmp == 1) {
printk("success\n");
drive->failures = 0;
} else {
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index b42940d8bf70..1ff676cc6473 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -578,7 +578,7 @@ u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
}
printk("}\n");
if ((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT) {
- err = drive->hwif->INB(IDE_ERROR_REG);
+ err = ide_read_error(drive);
printk("%s: %s: error=0x%02x ", drive->name, msg, err);
if (drive->media == ide_disk)
ide_dump_ata_error(drive, err);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 9c07bdb68d1a..6daea896c5db 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -264,8 +264,7 @@ err_misc:
static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
{
ide_hwif_t *hwif = HWIF(drive);
- int rc;
- unsigned long hd_status;
+ int use_altstatus = 0, rc;
unsigned long timeout;
u8 s = 0, a = 0;
@@ -273,19 +272,17 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
msleep(50);
if (IDE_CONTROL_REG) {
- a = hwif->INB(IDE_ALTSTATUS_REG);
- s = hwif->INB(IDE_STATUS_REG);
- if ((a ^ s) & ~INDEX_STAT) {
- printk(KERN_INFO "%s: probing with STATUS(0x%02x) instead of "
- "ALTSTATUS(0x%02x)\n", drive->name, s, a);
+ a = ide_read_altstatus(drive);
+ s = ide_read_status(drive);
+ if ((a ^ s) & ~INDEX_STAT)
/* ancient Seagate drives, broken interfaces */
- hd_status = IDE_STATUS_REG;
- } else {
+ printk(KERN_INFO "%s: probing with STATUS(0x%02x) "
+ "instead of ALTSTATUS(0x%02x)\n",
+ drive->name, s, a);
+ else
/* use non-intrusive polling */
- hd_status = IDE_ALTSTATUS_REG;
- }
- } else
- hd_status = IDE_STATUS_REG;
+ use_altstatus = 1;
+ }
/* set features register for atapi
* identify command to be sure of reply
@@ -306,11 +303,15 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
}
/* give drive a breather */
msleep(50);
- } while ((hwif->INB(hd_status)) & BUSY_STAT);
+ s = use_altstatus ? ide_read_altstatus(drive)
+ : ide_read_status(drive);
+ } while (s & BUSY_STAT);
/* wait for IRQ and DRQ_STAT */
msleep(50);
- if (OK_STAT((hwif->INB(IDE_STATUS_REG)), DRQ_STAT, BAD_R_STAT)) {
+ s = ide_read_status(drive);
+
+ if (OK_STAT(s, DRQ_STAT, BAD_R_STAT)) {
unsigned long flags;
/* local CPU only; some systems need this */
@@ -320,7 +321,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
/* drive responded with ID */
rc = 0;
/* clear drive IRQ */
- (void) hwif->INB(IDE_STATUS_REG);
+ (void)ide_read_status(drive);
local_irq_restore(flags);
} else {
/* drive refused ID */
@@ -367,7 +368,7 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
ide_set_irq(drive, 0);
/* clear drive IRQ */
- (void) hwif->INB(IDE_STATUS_REG);
+ (void)ide_read_status(drive);
udelay(5);
irq = probe_irq_off(cookie);
if (!hwif->irq) {
@@ -455,7 +456,9 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
return 3;
}
- if (OK_STAT((hwif->INB(IDE_STATUS_REG)), READY_STAT, BUSY_STAT) ||
+ stat = ide_read_status(drive);
+
+ if (OK_STAT(stat, READY_STAT, BUSY_STAT) ||
drive->present || cmd == WIN_PIDENTIFY) {
/* send cmd and wait */
if ((rc = try_to_identify(drive, cmd))) {
@@ -463,7 +466,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
rc = try_to_identify(drive,cmd);
}
- stat = hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if (stat == (BUSY_STAT | READY_STAT))
return 4;
@@ -482,7 +485,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
}
/* ensure drive IRQ is clear */
- stat = hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if (rc == 1)
printk(KERN_ERR "%s: no response (status = 0x%02x)\n",
@@ -496,7 +499,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
SELECT_DRIVE(&hwif->drives[0]);
msleep(50);
/* ensure drive irq is clear */
- (void) hwif->INB(IDE_STATUS_REG);
+ (void)ide_read_status(drive);
}
return rc;
}
@@ -521,7 +524,7 @@ static void enable_nest (ide_drive_t *drive)
msleep(50);
- stat = hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if (!OK_STAT(stat, 0, BAD_STAT))
printk(KERN_CONT "failed (status = 0x%02x)\n", stat);
@@ -1046,7 +1049,7 @@ static int init_irq (ide_hwif_t *hwif)
*/
if (!match || match->irq != hwif->irq) {
int sa = 0;
-#if defined(__mc68000__) || defined(CONFIG_APUS)
+#if defined(__mc68000__)
sa = IRQF_SHARED;
#endif /* __mc68000__ || CONFIG_APUS */
@@ -1069,7 +1072,7 @@ static int init_irq (ide_hwif_t *hwif)
hwif->rqsize = 65536;
}
-#if !defined(__mc68000__) && !defined(CONFIG_APUS)
+#if !defined(__mc68000__)
printk("%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
hwif->io_ports[IDE_DATA_OFFSET],
hwif->io_ports[IDE_DATA_OFFSET]+7,
@@ -1077,7 +1080,7 @@ static int init_irq (ide_hwif_t *hwif)
#else
printk("%s at 0x%08lx on irq %d", hwif->name,
hwif->io_ports[IDE_DATA_OFFSET], hwif->irq);
-#endif /* __mc68000__ && CONFIG_APUS */
+#endif /* __mc68000__ */
if (match)
printk(" (%sed with %s)",
hwif->sharing_irq ? "shar" : "serializ", match->name);
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 975c0ff0f438..bab88ca7f7ec 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -65,6 +65,7 @@ static int proc_ide_read_imodel
case ide_4drives: name = "4drives"; break;
case ide_pmac: name = "mac-io"; break;
case ide_au1xxx: name = "au1xxx"; break;
+ case ide_palm3710: name = "palm3710"; break;
case ide_etrax100: name = "etrax100"; break;
case ide_acorn: name = "acorn"; break;
default: name = "(unknown)"; break;
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index bf40d8c824ad..49dd2e7bae7a 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -15,7 +15,7 @@
* Documentation/ide/ChangeLog.ide-tape.1995-2002
*/
-#define IDETAPE_VERSION "1.19"
+#define IDETAPE_VERSION "1.20"
#include <linux/module.h>
#include <linux/types.h>
@@ -39,63 +39,70 @@
#include <scsi/scsi.h>
#include <asm/byteorder.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
+#include <linux/irq.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
#include <asm/unaligned.h>
#include <linux/mtio.h>
+enum {
+ /* output errors only */
+ DBG_ERR = (1 << 0),
+ /* output all sense key/asc */
+ DBG_SENSE = (1 << 1),
+ /* info regarding all chrdev-related procedures */
+ DBG_CHRDEV = (1 << 2),
+ /* all remaining procedures */
+ DBG_PROCS = (1 << 3),
+ /* buffer alloc info (pc_stack & rq_stack) */
+ DBG_PCRQ_STACK = (1 << 4),
+};
+
+/* define to see debug info */
+#define IDETAPE_DEBUG_LOG 0
+
+#if IDETAPE_DEBUG_LOG
+#define debug_log(lvl, fmt, args...) \
+{ \
+ if (tape->debug_mask & lvl) \
+ printk(KERN_INFO "ide-tape: " fmt, ## args); \
+}
+#else
+#define debug_log(lvl, fmt, args...) do {} while (0)
+#endif
+
/**************************** Tunable parameters *****************************/
/*
- * Pipelined mode parameters.
+ * Pipelined mode parameters.
*
- * We try to use the minimum number of stages which is enough to
- * keep the tape constantly streaming. To accomplish that, we implement
- * a feedback loop around the maximum number of stages:
+ * We try to use the minimum number of stages which is enough to keep the tape
+ * constantly streaming. To accomplish that, we implement a feedback loop around
+ * the maximum number of stages:
*
- * We start from MIN maximum stages (we will not even use MIN stages
- * if we don't need them), increment it by RATE*(MAX-MIN)
- * whenever we sense that the pipeline is empty, until we reach
- * the optimum value or until we reach MAX.
+ * We start from MIN maximum stages (we will not even use MIN stages if we don't
+ * need them), increment it by RATE*(MAX-MIN) whenever we sense that the
+ * pipeline is empty, until we reach the optimum value or until we reach MAX.
*
- * Setting the following parameter to 0 is illegal: the pipelined mode
- * cannot be disabled (calculate_speeds() divides by tape->max_stages.)
+ * Setting the following parameter to 0 is illegal: the pipelined mode cannot be
+ * disabled (idetape_calculate_speeds() divides by tape->max_stages.)
*/
#define IDETAPE_MIN_PIPELINE_STAGES 1
#define IDETAPE_MAX_PIPELINE_STAGES 400
#define IDETAPE_INCREASE_STAGES_RATE 20
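A rough standalone restatement of the feedback step described above; the function name is made up, and the body mirrors the inline code that now sits in idetape_end_request() further down in this patch.

/* Sketch only, not part of the patch: grow the pipeline when it runs empty. */
static int idetape_grow_max_stages(int max_stages, int min_pipeline,
				   int max_pipeline)
{
	int step = (max_pipeline - min_pipeline) / 10;

	/* grow by at least one stage, then clamp to the configured limits */
	max_stages += (step > 1) ? step : 1;
	if (max_stages < min_pipeline)
		max_stages = min_pipeline;
	if (max_stages > max_pipeline)
		max_stages = max_pipeline;
	return max_stages;
}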
/*
- * The following are used to debug the driver:
- *
- * Setting IDETAPE_DEBUG_LOG to 1 will log driver flow control.
+ * After each failed packet command we issue a request sense command and retry
+ * the packet command IDETAPE_MAX_PC_RETRIES times.
*
- * Setting them to 0 will restore normal operation mode:
- *
- * 1. Disable logging normal successful operations.
- * 2. Disable self-sanity checks.
- * 3. Errors will still be logged, of course.
- *
- * All the #if DEBUG code will be removed some day, when the driver
- * is verified to be stable enough. This will make it much more
- * esthetic.
- */
-#define IDETAPE_DEBUG_LOG 0
-
-/*
- * After each failed packet command we issue a request sense command
- * and retry the packet command IDETAPE_MAX_PC_RETRIES times.
- *
- * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
+ * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
*/
#define IDETAPE_MAX_PC_RETRIES 3
/*
- * With each packet command, we allocate a buffer of
- * IDETAPE_PC_BUFFER_SIZE bytes. This is used for several packet
- * commands (Not for READ/WRITE commands).
+ * With each packet command, we allocate a buffer of IDETAPE_PC_BUFFER_SIZE
+ * bytes. This is used for several packet commands (not for READ/WRITE commands).
*/
#define IDETAPE_PC_BUFFER_SIZE 256
@@ -114,48 +121,39 @@
#define IDETAPE_WAIT_CMD (900*HZ)
/*
- * The following parameter is used to select the point in the internal
- * tape fifo in which we will start to refill the buffer. Decreasing
- * the following parameter will improve the system's latency and
- * interactive response, while using a high value might improve system
- * throughput.
+ * The following parameter is used to select the point in the internal tape fifo
+ * in which we will start to refill the buffer. Decreasing the following
+ * parameter will improve the system's latency and interactive response, while
+ * using a high value might improve system throughput.
*/
-#define IDETAPE_FIFO_THRESHOLD 2
+#define IDETAPE_FIFO_THRESHOLD 2
/*
- * DSC polling parameters.
- *
- * Polling for DSC (a single bit in the status register) is a very
- * important function in ide-tape. There are two cases in which we
- * poll for DSC:
+ * DSC polling parameters.
*
- * 1. Before a read/write packet command, to ensure that we
- * can transfer data from/to the tape's data buffers, without
- * causing an actual media access. In case the tape is not
- * ready yet, we take out our request from the device
- * request queue, so that ide.c will service requests from
- * the other device on the same interface meanwhile.
+ * Polling for DSC (a single bit in the status register) is a very important
+ * function in ide-tape. There are two cases in which we poll for DSC:
*
- * 2. After the successful initialization of a "media access
- * packet command", which is a command which can take a long
- * time to complete (it can be several seconds or even an hour).
+ * 1. Before a read/write packet command, to ensure that we can transfer data
+ * from/to the tape's data buffers, without causing an actual media access.
+ * In case the tape is not ready yet, we take out our request from the device
+ * request queue, so that ide.c could service requests from the other device
+ * on the same interface in the meantime.
*
- * Again, we postpone our request in the middle to free the bus
- * for the other device. The polling frequency here should be
- * lower than the read/write frequency since those media access
- * commands are slow. We start from a "fast" frequency -
- * IDETAPE_DSC_MA_FAST (one second), and if we don't receive DSC
- * after IDETAPE_DSC_MA_THRESHOLD (5 minutes), we switch it to a
- * lower frequency - IDETAPE_DSC_MA_SLOW (1 minute).
+ * 2. After the successful initialization of a "media access packet command",
+ * which is a command that can take a long time to complete (the interval can
+ * range from several seconds to even an hour). Again, we postpone our request
+ * in the middle to free the bus for the other device. The polling frequency
+ * here should be lower than the read/write frequency since those media access
+ * commands are slow. We start from a "fast" frequency - IDETAPE_DSC_MA_FAST
+ * (1 second), and if we don't receive DSC after IDETAPE_DSC_MA_THRESHOLD
+ * (5 min), we switch it to a lower frequency - IDETAPE_DSC_MA_SLOW (1 min).
*
- * We also set a timeout for the timer, in case something goes wrong.
- * The timeout should be longer then the maximum execution time of a
- * tape operation.
- */
-
-/*
- * DSC timings.
+ * We also set a timeout for the timer, in case something goes wrong. The
+ * timeout should be longer than the maximum execution time of a tape operation.
*/
+
+/* DSC timings. */
#define IDETAPE_DSC_RW_MIN 5*HZ/100 /* 50 msec */
#define IDETAPE_DSC_RW_MAX 40*HZ/100 /* 400 msec */
#define IDETAPE_DSC_RW_TIMEOUT 2*60*HZ /* 2 minutes */
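A sketch of the frequency selection the DSC comment above describes; the helper is hypothetical and assumes jiffies-based timestamps plus the IDETAPE_DSC_MA_* constants the comment refers to.

/* Sketch only, not part of the patch: poll fast at first, then drop to the
 * slow rate once the threshold has passed without seeing DSC. */
static unsigned long idetape_dsc_ma_interval(unsigned long polling_start)
{
	if (time_after(jiffies, polling_start + IDETAPE_DSC_MA_THRESHOLD))
		return IDETAPE_DSC_MA_SLOW;
	return IDETAPE_DSC_MA_FAST;
}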
@@ -166,19 +164,15 @@
/*************************** End of tunable parameters ***********************/
-/*
- * Read/Write error simulation
- */
+/* Read/Write error simulation */
#define SIMULATE_ERRORS 0
-/*
- * For general magnetic tape device compatibility.
- */
-typedef enum {
- idetape_direction_none,
- idetape_direction_read,
- idetape_direction_write
-} idetape_chrdev_direction_t;
+/* tape directions */
+enum {
+ IDETAPE_DIR_NONE = (1 << 0),
+ IDETAPE_DIR_READ = (1 << 1),
+ IDETAPE_DIR_WRITE = (1 << 2),
+};
struct idetape_bh {
u32 b_size;
@@ -187,24 +181,32 @@ struct idetape_bh {
char *b_data;
};
-/*
- * Our view of a packet command.
- */
typedef struct idetape_packet_command_s {
- u8 c[12]; /* Actual packet bytes */
- int retries; /* On each retry, we increment retries */
- int error; /* Error code */
- int request_transfer; /* Bytes to transfer */
- int actually_transferred; /* Bytes actually transferred */
- int buffer_size; /* Size of our data buffer */
+ /* Actual packet bytes */
+ u8 c[12];
+ /* On each retry, we increment retries */
+ int retries;
+ /* Error code */
+ int error;
+ /* Bytes to transfer */
+ int request_transfer;
+ /* Bytes actually transferred */
+ int actually_transferred;
+ /* Size of our data buffer */
+ int buffer_size;
struct idetape_bh *bh;
char *b_data;
int b_count;
- u8 *buffer; /* Data buffer */
- u8 *current_position; /* Pointer into the above buffer */
- ide_startstop_t (*callback) (ide_drive_t *); /* Called when this packet command is completed */
- u8 pc_buffer[IDETAPE_PC_BUFFER_SIZE]; /* Temporary buffer */
- unsigned long flags; /* Status/Action bit flags: long for set_bit */
+ /* Data buffer */
+ u8 *buffer;
+ /* Pointer into the above buffer */
+ u8 *current_position;
+ /* Called when this packet command is completed */
+ ide_startstop_t (*callback) (ide_drive_t *);
+ /* Temporary buffer */
+ u8 pc_buffer[IDETAPE_PC_BUFFER_SIZE];
+ /* Status/Action bit flags: long for set_bit */
+ unsigned long flags;
} idetape_pc_t;
/*
@@ -223,9 +225,7 @@ typedef struct idetape_packet_command_s {
/* Data direction */
#define PC_WRITING 5
-/*
- * A pipeline stage.
- */
+/* A pipeline stage. */
typedef struct idetape_stage_s {
struct request rq; /* The corresponding request */
struct idetape_bh *bh; /* The data buffers */
@@ -233,9 +233,8 @@ typedef struct idetape_stage_s {
} idetape_stage_t;
/*
- * Most of our global data which we need to save even as we leave the
- * driver due to an interrupt or a timer event is stored in a variable
- * of type idetape_tape_t, defined below.
+ * Most of our global data which we need to save even as we leave the driver due
+ * to an interrupt or a timer event is stored in the struct defined below.
*/
typedef struct ide_tape_obj {
ide_drive_t *drive;
@@ -271,15 +270,14 @@ typedef struct ide_tape_obj {
int rq_stack_index;
/*
- * DSC polling variables.
+ * DSC polling variables.
*
- * While polling for DSC we use postponed_rq to postpone the
- * current request so that ide.c will be able to service
- * pending requests on the other device. Note that at most
- * we will have only one DSC (usually data transfer) request
- * in the device request queue. Additional requests can be
- * queued in our internal pipeline, but they will be visible
- * to ide.c only one at a time.
+ * While polling for DSC we use postponed_rq to postpone the current
+ * request so that ide.c will be able to service pending requests on the
+ * other device. Note that at most we will have only one DSC (usually
+ * data transfer) request in the device request queue. Additional
+ * requests can be queued in our internal pipeline, but they will be
+ * visible to ide.c only one at a time.
*/
struct request *postponed_rq;
/* The time in which we started polling for DSC */
@@ -287,73 +285,57 @@ typedef struct ide_tape_obj {
/* Timer used to poll for dsc */
struct timer_list dsc_timer;
/* Read/Write dsc polling frequency */
- unsigned long best_dsc_rw_frequency;
- /* The current polling frequency */
- unsigned long dsc_polling_frequency;
- /* Maximum waiting time */
+ unsigned long best_dsc_rw_freq;
+ unsigned long dsc_poll_freq;
unsigned long dsc_timeout;
- /*
- * Read position information
- */
+ /* Read position information */
u8 partition;
/* Current block */
- unsigned int first_frame_position;
- unsigned int last_frame_position;
- unsigned int blocks_in_buffer;
+ unsigned int first_frame;
- /*
- * Last error information
- */
+ /* Last error information */
u8 sense_key, asc, ascq;
- /*
- * Character device operation
- */
+ /* Character device operation */
unsigned int minor;
/* device name */
char name[4];
/* Current character device data transfer direction */
- idetape_chrdev_direction_t chrdev_direction;
+ u8 chrdev_dir;
- /*
- * Device information
- */
- /* Usually 512 or 1024 bytes */
- unsigned short tape_block_size;
+ /* tape block size, usually 512 or 1024 bytes */
+ unsigned short blk_size;
int user_bs_factor;
/* Copy of the tape's Capabilities and Mechanical Page */
u8 caps[20];
/*
- * Active data transfer request parameters.
- *
- * At most, there is only one ide-tape originated data transfer
- * request in the device request queue. This allows ide.c to
- * easily service requests from the other device when we
- * postpone our active request. In the pipelined operation
- * mode, we use our internal pipeline structure to hold
- * more data requests.
+ * Active data transfer request parameters.
*
- * The data buffer size is chosen based on the tape's
- * recommendation.
+ * At most, there is only one ide-tape originated data transfer request
+ * in the device request queue. This allows ide.c to easily service
+ * requests from the other device when we postpone our active request.
+ * In the pipelined operation mode, we use our internal pipeline
+ * structure to hold more data requests. The data buffer size is chosen
+ * based on the tape's recommendation.
*/
- /* Pointer to the request which is waiting in the device request queue */
- struct request *active_data_request;
- /* Data buffer size (chosen based on the tape's recommendation */
+ /* ptr to the request which is waiting in the device request queue */
+ struct request *active_data_rq;
+ /* Data buffer size chosen based on the tape's recommendation */
int stage_size;
idetape_stage_t *merge_stage;
int merge_stage_size;
struct idetape_bh *bh;
char *b_data;
int b_count;
-
+
/*
- * Pipeline parameters.
+ * Pipeline parameters.
*
- * To accomplish non-pipelined mode, we simply set the following
- * variables to zero (or NULL, where appropriate).
+ * To accomplish non-pipelined mode, we simply set the following
+ * variables to zero (or NULL, where appropriate).
*/
/* Number of currently used stages */
int nr_stages;
@@ -378,20 +360,13 @@ typedef struct ide_tape_obj {
/* Status/Action flags: long for set_bit */
unsigned long flags;
/* protects the ide-tape queue */
- spinlock_t spinlock;
+ spinlock_t lock;
- /*
- * Measures average tape speed
- */
+ /* Measures average tape speed */
unsigned long avg_time;
int avg_size;
int avg_speed;
- char vendor_id[10];
- char product_id[18];
- char firmware_revision[6];
- int firmware_revision_num;
-
/* the door is currently locked */
int door_locked;
/* the tape hardware is write protected */
@@ -400,11 +375,9 @@ typedef struct ide_tape_obj {
char write_prot;
/*
- * Limit the number of times a request can
- * be postponed, to avoid an infinite postpone
- * deadlock.
+ * Limit the number of times a request can be postponed, to avoid an
+ * infinite postpone deadlock.
*/
- /* request postpone count limit */
int postpone_cnt;
/*
@@ -419,30 +392,19 @@ typedef struct ide_tape_obj {
int tape_head;
int last_tape_head;
- /*
- * Speed control at the tape buffers input/output
- */
+ /* Speed control at the tape buffers input/output */
unsigned long insert_time;
int insert_size;
int insert_speed;
int max_insert_speed;
int measure_insert_time;
- /*
- * Measure tape still time, in milliseconds
- */
- unsigned long tape_still_time_begin;
- int tape_still_time;
-
- /*
- * Speed regulation negative feedback loop
- */
+ /* Speed regulation negative feedback loop */
int speed_control;
int pipeline_head_speed;
int controlled_pipeline_head_speed;
int uncontrolled_pipeline_head_speed;
int controlled_last_pipeline_head;
- int uncontrolled_last_pipeline_head;
unsigned long uncontrolled_pipeline_head_time;
unsigned long controlled_pipeline_head_time;
int controlled_previous_pipeline_head;
@@ -451,18 +413,7 @@ typedef struct ide_tape_obj {
unsigned long uncontrolled_previous_head_time;
int restart_speed_control_req;
- /*
- * Debug_level determines amount of debugging output;
- * can be changed using /proc/ide/hdx/settings
- * 0 : almost no debugging output
- * 1 : 0+output errors only
- * 2 : 1+output all sensekey/asc
- * 3 : 2+follow all chrdev related procedures
- * 4 : 3+follow all procedures
- * 5 : 4+include pc_stack rq_stack info
- * 6 : 5+USE_COUNT updates
- */
- int debug_level;
+ u32 debug_mask;
} idetape_tape_t;
static DEFINE_MUTEX(idetape_ref_mutex);
@@ -495,9 +446,7 @@ static void ide_tape_put(struct ide_tape_obj *tape)
mutex_unlock(&idetape_ref_mutex);
}
-/*
- * Tape door status
- */
+/* Tape door status */
#define DOOR_UNLOCKED 0
#define DOOR_LOCKED 1
#define DOOR_EXPLICITLY_LOCKED 2
@@ -517,30 +466,23 @@ static void ide_tape_put(struct ide_tape_obj *tape)
/* 0 = no tape is loaded, so we don't rewind after ejecting */
#define IDETAPE_MEDIUM_PRESENT 9
-/*
- * Some defines for the READ BUFFER command
- */
+/* A define for the READ BUFFER command */
#define IDETAPE_RETRIEVE_FAULTY_BLOCK 6
-/*
- * Some defines for the SPACE command
- */
+/* Some defines for the SPACE command */
#define IDETAPE_SPACE_OVER_FILEMARK 1
#define IDETAPE_SPACE_TO_EOD 3
-/*
- * Some defines for the LOAD UNLOAD command
- */
+/* Some defines for the LOAD UNLOAD command */
#define IDETAPE_LU_LOAD_MASK 1
#define IDETAPE_LU_RETENSION_MASK 2
#define IDETAPE_LU_EOT_MASK 4
/*
- * Special requests for our block device strategy routine.
+ * Special requests for our block device strategy routine.
*
- * In order to service a character device command, we add special
- * requests to the tail of our block device request queue and wait
- * for their completion.
+ * In order to service a character device command, we add special requests to
+ * the tail of our block device request queue and wait for their completion.
*/
enum {
@@ -551,55 +493,20 @@ enum {
REQ_IDETAPE_READ_BUFFER = (1 << 4),
};
-/*
- * Error codes which are returned in rq->errors to the higher part
- * of the driver.
- */
+/* Error codes returned in rq->errors to the higher part of the driver. */
#define IDETAPE_ERROR_GENERAL 101
#define IDETAPE_ERROR_FILEMARK 102
#define IDETAPE_ERROR_EOD 103
-/*
- * The following is used to format the general configuration word of
- * the ATAPI IDENTIFY DEVICE command.
- */
-struct idetape_id_gcw {
- unsigned packet_size :2; /* Packet Size */
- unsigned reserved234 :3; /* Reserved */
- unsigned drq_type :2; /* Command packet DRQ type */
- unsigned removable :1; /* Removable media */
- unsigned device_type :5; /* Device type */
- unsigned reserved13 :1; /* Reserved */
- unsigned protocol :2; /* Protocol type */
-};
-
-/*
- * READ POSITION packet command - Data Format (From Table 6-57)
- */
-typedef struct {
- unsigned reserved0_10 :2; /* Reserved */
- unsigned bpu :1; /* Block Position Unknown */
- unsigned reserved0_543 :3; /* Reserved */
- unsigned eop :1; /* End Of Partition */
- unsigned bop :1; /* Beginning Of Partition */
- u8 partition; /* Partition Number */
- u8 reserved2, reserved3; /* Reserved */
- u32 first_block; /* First Block Location */
- u32 last_block; /* Last Block Location (Optional) */
- u8 reserved12; /* Reserved */
- u8 blocks_in_buffer[3]; /* Blocks In Buffer - (Optional) */
- u32 bytes_in_buffer; /* Bytes In Buffer (Optional) */
-} idetape_read_position_result_t;
-
/* Structures related to the SELECT SENSE / MODE SENSE packet commands. */
#define IDETAPE_BLOCK_DESCRIPTOR 0
#define IDETAPE_CAPABILITIES_PAGE 0x2a
/*
- * The variables below are used for the character device interface.
- * Additional state variables are defined in our ide_drive_t structure.
+ * The variables below are used for the character device interface. Additional
+ * state variables are defined in our ide_drive_t structure.
*/
-static struct ide_tape_obj * idetape_devs[MAX_HWIFS * MAX_DRIVES];
+static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
#define ide_tape_f(file) ((file)->private_data)
@@ -616,23 +523,17 @@ static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
}
/*
- * Function declarations
- *
- */
-static int idetape_chrdev_release (struct inode *inode, struct file *filp);
-static void idetape_write_release (ide_drive_t *drive, unsigned int minor);
-
-/*
* Too bad. The drive wants to send us data which we are not ready to accept.
* Just throw it away.
*/
-static void idetape_discard_data (ide_drive_t *drive, unsigned int bcount)
+static void idetape_discard_data(ide_drive_t *drive, unsigned int bcount)
{
while (bcount--)
(void) HWIF(drive)->INB(IDE_DATA_REG);
}
-static void idetape_input_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsigned int bcount)
+static void idetape_input_buffers(ide_drive_t *drive, idetape_pc_t *pc,
+ unsigned int bcount)
{
struct idetape_bh *bh = pc->bh;
int count;
@@ -644,8 +545,11 @@ static void idetape_input_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsigne
idetape_discard_data(drive, bcount);
return;
}
- count = min((unsigned int)(bh->b_size - atomic_read(&bh->b_count)), bcount);
- HWIF(drive)->atapi_input_bytes(drive, bh->b_data + atomic_read(&bh->b_count), count);
+ count = min(
+ (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
+ bcount);
+ HWIF(drive)->atapi_input_bytes(drive, bh->b_data +
+ atomic_read(&bh->b_count), count);
bcount -= count;
atomic_add(count, &bh->b_count);
if (atomic_read(&bh->b_count) == bh->b_size) {
@@ -657,15 +561,16 @@ static void idetape_input_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsigne
pc->bh = bh;
}
-static void idetape_output_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsigned int bcount)
+static void idetape_output_buffers(ide_drive_t *drive, idetape_pc_t *pc,
+ unsigned int bcount)
{
struct idetape_bh *bh = pc->bh;
int count;
while (bcount) {
if (bh == NULL) {
- printk(KERN_ERR "ide-tape: bh == NULL in "
- "idetape_output_buffers\n");
+ printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
+ __func__);
return;
}
count = min((unsigned int)pc->b_count, (unsigned int)bcount);
@@ -674,7 +579,8 @@ static void idetape_output_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsign
pc->b_data += count;
pc->b_count -= count;
if (!pc->b_count) {
- pc->bh = bh = bh->b_reqnext;
+ bh = bh->b_reqnext;
+ pc->bh = bh;
if (bh) {
pc->b_data = bh->b_data;
pc->b_count = atomic_read(&bh->b_count);
@@ -683,7 +589,7 @@ static void idetape_output_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsign
}
}
-static void idetape_update_buffers (idetape_pc_t *pc)
+static void idetape_update_buffers(idetape_pc_t *pc)
{
struct idetape_bh *bh = pc->bh;
int count;
@@ -693,8 +599,8 @@ static void idetape_update_buffers (idetape_pc_t *pc)
return;
while (bcount) {
if (bh == NULL) {
- printk(KERN_ERR "ide-tape: bh == NULL in "
- "idetape_update_buffers\n");
+ printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
+ __func__);
return;
}
count = min((unsigned int)bh->b_size, (unsigned int)bcount);
@@ -712,17 +618,14 @@ static void idetape_update_buffers (idetape_pc_t *pc)
* driver. A storage space for a maximum of IDETAPE_PC_STACK packet
* commands is allocated at initialization time.
*/
-static idetape_pc_t *idetape_next_pc_storage (ide_drive_t *drive)
+static idetape_pc_t *idetape_next_pc_storage(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 5)
- printk(KERN_INFO "ide-tape: pc_stack_index=%d\n",
- tape->pc_stack_index);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PCRQ_STACK, "pc_stack_index=%d\n", tape->pc_stack_index);
+
if (tape->pc_stack_index == IDETAPE_PC_STACK)
- tape->pc_stack_index=0;
+ tape->pc_stack_index = 0;
return (&tape->pc_stack[tape->pc_stack_index++]);
}
@@ -731,32 +634,26 @@ static idetape_pc_t *idetape_next_pc_storage (ide_drive_t *drive)
* Since we queue packet commands in the request queue, we need to
* allocate a request, along with the allocation of a packet command.
*/
-
+
/**************************************************************
* *
* This should get fixed to use kmalloc(.., GFP_ATOMIC) *
* followed later on by kfree(). -ml *
* *
**************************************************************/
-
-static struct request *idetape_next_rq_storage (ide_drive_t *drive)
+
+static struct request *idetape_next_rq_storage(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 5)
- printk(KERN_INFO "ide-tape: rq_stack_index=%d\n",
- tape->rq_stack_index);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PCRQ_STACK, "rq_stack_index=%d\n", tape->rq_stack_index);
+
if (tape->rq_stack_index == IDETAPE_PC_STACK)
- tape->rq_stack_index=0;
+ tape->rq_stack_index = 0;
return (&tape->rq_stack[tape->rq_stack_index++]);
}
-/*
- * idetape_init_pc initializes a packet command.
- */
-static void idetape_init_pc (idetape_pc_t *pc)
+static void idetape_init_pc(idetape_pc_t *pc)
{
memset(pc->c, 0, 12);
pc->retries = 0;
@@ -780,22 +677,14 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
tape->sense_key = sense[2] & 0xF;
tape->asc = sense[12];
tape->ascq = sense[13];
-#if IDETAPE_DEBUG_LOG
- /*
- * Without debugging, we only log an error if we decided to give up
- * retrying.
- */
- if (tape->debug_level >= 1)
- printk(KERN_INFO "ide-tape: pc = %x, sense key = %x, "
- "asc = %x, ascq = %x\n",
- pc->c[0], tape->sense_key,
- tape->asc, tape->ascq);
-#endif /* IDETAPE_DEBUG_LOG */
+
+ debug_log(DBG_ERR, "pc = %x, sense key = %x, asc = %x, ascq = %x\n",
+ pc->c[0], tape->sense_key, tape->asc, tape->ascq);
/* Correct pc->actually_transferred by asking the tape. */
if (test_bit(PC_DMA_ERROR, &pc->flags)) {
pc->actually_transferred = pc->request_transfer -
- tape->tape_block_size *
+ tape->blk_size *
be32_to_cpu(get_unaligned((u32 *)&sense[3]));
idetape_update_buffers(pc);
}
@@ -843,50 +732,24 @@ static void idetape_activate_next_stage(ide_drive_t *drive)
idetape_stage_t *stage = tape->next_stage;
struct request *rq = &stage->rq;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_active_next_stage\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
+
if (stage == NULL) {
- printk(KERN_ERR "ide-tape: bug: Trying to activate a non existing stage\n");
+ printk(KERN_ERR "ide-tape: bug: Trying to activate a non"
+ " existing stage\n");
return;
}
rq->rq_disk = tape->disk;
rq->buffer = NULL;
rq->special = (void *)stage->bh;
- tape->active_data_request = rq;
+ tape->active_data_rq = rq;
tape->active_stage = stage;
tape->next_stage = stage->next;
}
-/*
- * idetape_increase_max_pipeline_stages is a part of the feedback
- * loop which tries to find the optimum number of stages. In the
- * feedback loop, we are starting from a minimum maximum number of
- * stages, and if we sense that the pipeline is empty, we try to
- * increase it, until we reach the user compile time memory limit.
- */
-static void idetape_increase_max_pipeline_stages (ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- int increase = (tape->max_pipeline - tape->min_pipeline) / 10;
-
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk (KERN_INFO "ide-tape: Reached idetape_increase_max_pipeline_stages\n");
-#endif /* IDETAPE_DEBUG_LOG */
-
- tape->max_stages += max(increase, 1);
- tape->max_stages = max(tape->max_stages, tape->min_pipeline);
- tape->max_stages = min(tape->max_stages, tape->max_pipeline);
-}
-
-/*
- * idetape_kfree_stage calls kfree to completely free a stage, along with
- * its related buffers.
- */
-static void __idetape_kfree_stage (idetape_stage_t *stage)
+/* Free a stage along with its related buffers completely. */
+static void __idetape_kfree_stage(idetape_stage_t *stage)
{
struct idetape_bh *prev_bh, *bh = stage->bh;
int size;
@@ -907,30 +770,29 @@ static void __idetape_kfree_stage (idetape_stage_t *stage)
kfree(stage);
}
-static void idetape_kfree_stage (idetape_tape_t *tape, idetape_stage_t *stage)
+static void idetape_kfree_stage(idetape_tape_t *tape, idetape_stage_t *stage)
{
__idetape_kfree_stage(stage);
}
/*
- * idetape_remove_stage_head removes tape->first_stage from the pipeline.
- * The caller should avoid race conditions.
+ * Remove tape->first_stage from the pipeline. The caller should avoid race
+ * conditions.
*/
-static void idetape_remove_stage_head (ide_drive_t *drive)
+static void idetape_remove_stage_head(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
idetape_stage_t *stage;
-
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_remove_stage_head\n");
-#endif /* IDETAPE_DEBUG_LOG */
+
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
+
if (tape->first_stage == NULL) {
printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n");
return;
}
if (tape->active_stage == tape->first_stage) {
- printk(KERN_ERR "ide-tape: bug: Trying to free our active pipeline stage\n");
+ printk(KERN_ERR "ide-tape: bug: Trying to free our active "
+ "pipeline stage\n");
return;
}
stage = tape->first_stage;
@@ -940,9 +802,11 @@ static void idetape_remove_stage_head (ide_drive_t *drive)
if (tape->first_stage == NULL) {
tape->last_stage = NULL;
if (tape->next_stage != NULL)
- printk(KERN_ERR "ide-tape: bug: tape->next_stage != NULL\n");
+ printk(KERN_ERR "ide-tape: bug: tape->next_stage !="
+ " NULL\n");
if (tape->nr_stages)
- printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 now\n");
+ printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 "
+ "now\n");
}
}
@@ -957,10 +821,8 @@ static void idetape_abort_pipeline(ide_drive_t *drive,
idetape_stage_t *stage = new_last_stage->next;
idetape_stage_t *nstage;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: %s: idetape_abort_pipeline called\n", tape->name);
-#endif
+ debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__);
+
while (stage) {
nstage = stage->next;
idetape_kfree_stage(tape, stage);
@@ -975,8 +837,8 @@ static void idetape_abort_pipeline(ide_drive_t *drive,
}
/*
- * idetape_end_request is used to finish servicing a request, and to
- * insert a pending pipeline request into the main device queue.
+ * Finish servicing a request and insert a pending pipeline request into the
+ * main device queue.
*/
static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
{
@@ -987,15 +849,12 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
int remove_stage = 0;
idetape_stage_t *active_stage;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_end_request\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
switch (uptodate) {
- case 0: error = IDETAPE_ERROR_GENERAL; break;
- case 1: error = 0; break;
- default: error = uptodate;
+ case 0: error = IDETAPE_ERROR_GENERAL; break;
+ case 1: error = 0; break;
+ default: error = uptodate;
}
rq->errors = error;
if (error)
@@ -1006,20 +865,21 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
return 0;
}
- spin_lock_irqsave(&tape->spinlock, flags);
+ spin_lock_irqsave(&tape->lock, flags);
/* The request was a pipelined data transfer request */
- if (tape->active_data_request == rq) {
+ if (tape->active_data_rq == rq) {
active_stage = tape->active_stage;
tape->active_stage = NULL;
- tape->active_data_request = NULL;
+ tape->active_data_rq = NULL;
tape->nr_pending_stages--;
if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
remove_stage = 1;
if (error) {
set_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
if (error == IDETAPE_ERROR_EOD)
- idetape_abort_pipeline(drive, active_stage);
+ idetape_abort_pipeline(drive,
+ active_stage);
}
} else if (rq->cmd[0] & REQ_IDETAPE_READ) {
if (error == IDETAPE_ERROR_EOD) {
@@ -1030,48 +890,57 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
if (tape->next_stage != NULL) {
idetape_activate_next_stage(drive);
+ /* Insert the next request into the request queue. */
+ (void)ide_do_drive_cmd(drive, tape->active_data_rq,
+ ide_end);
+ } else if (!error) {
/*
- * Insert the next request into the request queue.
+ * This is a part of the feedback loop which tries to
+			 * find the optimum number of stages. We start with
+			 * the maximum number of stages at its minimum value
+			 * and, whenever we sense that the pipeline is empty,
+			 * we try to increase it, until we reach the
+			 * compile-time memory limit.
*/
- (void) ide_do_drive_cmd(drive, tape->active_data_request, ide_end);
- } else if (!error) {
- idetape_increase_max_pipeline_stages(drive);
+ int i = (tape->max_pipeline - tape->min_pipeline) / 10;
+
+ tape->max_stages += max(i, 1);
+ tape->max_stages = max(tape->max_stages,
+ tape->min_pipeline);
+ tape->max_stages = min(tape->max_stages,
+ tape->max_pipeline);
}
}
ide_end_drive_cmd(drive, 0, 0);
-// blkdev_dequeue_request(rq);
-// drive->rq = NULL;
-// end_that_request_last(rq);
if (remove_stage)
idetape_remove_stage_head(drive);
- if (tape->active_data_request == NULL)
+ if (tape->active_data_rq == NULL)
clear_bit(IDETAPE_PIPELINE_ACTIVE, &tape->flags);
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ spin_unlock_irqrestore(&tape->lock, flags);
return 0;
}
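The grow-and-clamp arithmetic that replaced idetape_increase_max_pipeline_stages() above can be tried in isolation with the stand-alone sketch below; the struct and the numbers are invented for illustration and only the arithmetic mirrors the driver code.

#include <stdio.h>

/* Illustrative only: mirrors the grow-and-clamp step performed when the
 * pipeline runs empty; min/max_pipeline stand in for the driver's limits. */
struct pipeline {
	int max_stages;		/* current ceiling on allocated stages */
	int min_pipeline;	/* lower bound for that ceiling */
	int max_pipeline;	/* compile-time upper bound */
};

static void grow_pipeline(struct pipeline *p)
{
	int inc = (p->max_pipeline - p->min_pipeline) / 10;

	if (inc < 1)
		inc = 1;
	p->max_stages += inc;
	if (p->max_stages < p->min_pipeline)
		p->max_stages = p->min_pipeline;
	if (p->max_stages > p->max_pipeline)
		p->max_stages = p->max_pipeline;
}

int main(void)
{
	struct pipeline p = { 10, 10, 40 };
	int i;

	for (i = 0; i < 12; i++)
		grow_pipeline(&p);
	printf("max_stages = %d\n", p.max_stages);	/* grows by 3, clamped at 40 */
	return 0;
}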
-static ide_startstop_t idetape_request_sense_callback (ide_drive_t *drive)
+static ide_startstop_t idetape_request_sense_callback(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_request_sense_callback\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
+
if (!tape->pc->error) {
idetape_analyze_error(drive, tape->pc->buffer);
idetape_end_request(drive, 1, 0);
} else {
- printk(KERN_ERR "ide-tape: Error in REQUEST SENSE itself - Aborting request!\n");
+ printk(KERN_ERR "ide-tape: Error in REQUEST SENSE itself - "
+ "Aborting request!\n");
idetape_end_request(drive, 0, 0);
}
return ide_stopped;
}
-static void idetape_create_request_sense_cmd (idetape_pc_t *pc)
+static void idetape_create_request_sense_cmd(idetape_pc_t *pc)
{
- idetape_init_pc(pc);
+ idetape_init_pc(pc);
pc->c[0] = REQUEST_SENSE;
pc->c[4] = 20;
pc->request_transfer = 20;
@@ -1086,25 +955,22 @@ static void idetape_init_rq(struct request *rq, u8 cmd)
}
/*
- * idetape_queue_pc_head generates a new packet command request in front
- * of the request queue, before the current request, so that it will be
- * processed immediately, on the next pass through the driver.
- *
- * idetape_queue_pc_head is called from the request handling part of
- * the driver (the "bottom" part). Safe storage for the request should
- * be allocated with idetape_next_pc_storage and idetape_next_rq_storage
- * before calling idetape_queue_pc_head.
+ * Generate a new packet command request in front of the request queue, before
+ * the current request, so that it will be processed immediately, on the next
+ * pass through the driver. The function below is called from the request
+ * handling part of the driver (the "bottom" part). Safe storage for the request
+ * should be allocated with idetape_next_{pc,rq}_storage() prior to that.
*
- * Memory for those requests is pre-allocated at initialization time, and
- * is limited to IDETAPE_PC_STACK requests. We assume that we have enough
- * space for the maximum possible number of inter-dependent packet commands.
+ * Memory for those requests is pre-allocated at initialization time, and is
+ * limited to IDETAPE_PC_STACK requests. We assume that we have enough space for
+ * the maximum possible number of inter-dependent packet commands.
*
- * The higher level of the driver - The ioctl handler and the character
- * device handling functions should queue request to the lower level part
- * and wait for their completion using idetape_queue_pc_tail or
- * idetape_queue_rw_tail.
+ * The higher level of the driver - The ioctl handler and the character device
+ * handling functions should queue requests to the lower level part and wait for
+ * their completion using idetape_queue_pc_tail or idetape_queue_rw_tail.
*/
-static void idetape_queue_pc_head (ide_drive_t *drive, idetape_pc_t *pc,struct request *rq)
+static void idetape_queue_pc_head(ide_drive_t *drive, idetape_pc_t *pc,
+ struct request *rq)
{
struct ide_tape_obj *tape = drive->driver_data;
@@ -1125,7 +991,7 @@ static ide_startstop_t idetape_retry_pc (ide_drive_t *drive)
idetape_pc_t *pc;
struct request *rq;
- (void)drive->hwif->INB(IDE_ERROR_REG);
+ (void)ide_read_error(drive);
pc = idetape_next_pc_storage(drive);
rq = idetape_next_rq_storage(drive);
idetape_create_request_sense_cmd(pc);
@@ -1135,50 +1001,46 @@ static ide_startstop_t idetape_retry_pc (ide_drive_t *drive)
}
/*
- * idetape_postpone_request postpones the current request so that
- * ide.c will be able to service requests from another device on
- * the same hwgroup while we are polling for DSC.
+ * Postpone the current request so that ide.c will be able to service requests
+ * from another device on the same hwgroup while we are polling for DSC.
*/
-static void idetape_postpone_request (ide_drive_t *drive)
+static void idetape_postpone_request(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: idetape_postpone_request\n");
-#endif
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
+
tape->postponed_rq = HWGROUP(drive)->rq;
- ide_stall_queue(drive, tape->dsc_polling_frequency);
+ ide_stall_queue(drive, tape->dsc_poll_freq);
}
+typedef void idetape_io_buf(ide_drive_t *, idetape_pc_t *, unsigned int);
+
/*
- * idetape_pc_intr is the usual interrupt handler which will be called
- * during a packet command. We will transfer some of the data (as
- * requested by the drive) and will re-point interrupt handler to us.
- * When data transfer is finished, we will act according to the
- * algorithm described before idetape_issue_packet_command.
- *
+ * This is the usual interrupt handler which will be called during a packet
+ * command. We will transfer some of the data (as requested by the drive) and
+ * will re-point interrupt handler to us. When data transfer is finished, we
+ * will act according to the algorithm described before
+ * idetape_issue_pc.
*/
-static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
idetape_tape_t *tape = drive->driver_data;
idetape_pc_t *pc = tape->pc;
+ xfer_func_t *xferfunc;
+ idetape_io_buf *iobuf;
unsigned int temp;
#if SIMULATE_ERRORS
- static int error_sim_count = 0;
+ static int error_sim_count;
#endif
u16 bcount;
u8 stat, ireason;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_pc_intr "
- "interrupt handler\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s - interrupt handler\n", __func__);
/* Clear the interrupt */
- stat = hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if (test_bit(PC_DMA_IN_PROGRESS, &pc->flags)) {
if (hwif->ide_dma_end(drive) || (stat & ERR_STAT)) {
@@ -1208,20 +1070,16 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
pc->actually_transferred = pc->request_transfer;
idetape_update_buffers(pc);
}
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: DMA finished\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "DMA finished\n");
+
}
/* No more interrupts */
if ((stat & DRQ_STAT) == 0) {
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: Packet command completed, %d bytes transferred\n", pc->actually_transferred);
-#endif /* IDETAPE_DEBUG_LOG */
- clear_bit(PC_DMA_IN_PROGRESS, &pc->flags);
+ debug_log(DBG_SENSE, "Packet command completed, %d bytes"
+ " transferred\n", pc->actually_transferred);
+ clear_bit(PC_DMA_IN_PROGRESS, &pc->flags);
local_irq_enable();
#if SIMULATE_ERRORS
@@ -1236,19 +1094,16 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
stat &= ~ERR_STAT;
if ((stat & ERR_STAT) || test_bit(PC_DMA_ERROR, &pc->flags)) {
/* Error detected */
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 1)
- printk(KERN_INFO "ide-tape: %s: I/O error\n",
- tape->name);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_ERR, "%s: I/O error\n", tape->name);
+
if (pc->c[0] == REQUEST_SENSE) {
- printk(KERN_ERR "ide-tape: I/O error in request sense command\n");
+ printk(KERN_ERR "ide-tape: I/O error in request"
+ " sense command\n");
return ide_do_reset(drive);
}
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 1)
- printk(KERN_INFO "ide-tape: [cmd %x]: check condition\n", pc->c[0]);
-#endif
+ debug_log(DBG_ERR, "[cmd %x]: check condition\n",
+ pc->c[0]);
+
/* Retry operation */
return idetape_retry_pc(drive);
}
@@ -1257,7 +1112,7 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
(stat & SEEK_STAT) == 0) {
/* Media access command */
tape->dsc_polling_start = jiffies;
- tape->dsc_polling_frequency = IDETAPE_DSC_MA_FAST;
+ tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
/* Allow ide.c to handle other requests */
idetape_postpone_request(drive);
@@ -1282,7 +1137,7 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
ireason = hwif->INB(IDE_IREASON_REG);
if (ireason & CD) {
- printk(KERN_ERR "ide-tape: CoD != 0 in idetape_pc_intr\n");
+ printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
return ide_do_reset(drive);
}
if (((ireason & IO) == IO) == test_bit(PC_WRITING, &pc->flags)) {
@@ -1298,86 +1153,76 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
temp = pc->actually_transferred + bcount;
if (temp > pc->request_transfer) {
if (temp > pc->buffer_size) {
- printk(KERN_ERR "ide-tape: The tape wants to send us more data than expected - discarding data\n");
+ printk(KERN_ERR "ide-tape: The tape wants to "
+ "send us more data than expected "
+ "- discarding data\n");
idetape_discard_data(drive, bcount);
- ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
+ ide_set_handler(drive, &idetape_pc_intr,
+ IDETAPE_WAIT_CMD, NULL);
return ide_started;
}
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_NOTICE "ide-tape: The tape wants to send us more data than expected - allowing transfer\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_SENSE, "The tape wants to send us more "
+ "data than expected - allowing transfer\n");
}
- }
- if (test_bit(PC_WRITING, &pc->flags)) {
- if (pc->bh != NULL)
- idetape_output_buffers(drive, pc, bcount);
- else
- /* Write the current buffer */
- hwif->atapi_output_bytes(drive, pc->current_position,
- bcount);
+ iobuf = &idetape_input_buffers;
+ xferfunc = hwif->atapi_input_bytes;
} else {
- if (pc->bh != NULL)
- idetape_input_buffers(drive, pc, bcount);
- else
- /* Read the current buffer */
- hwif->atapi_input_bytes(drive, pc->current_position,
- bcount);
+ iobuf = &idetape_output_buffers;
+ xferfunc = hwif->atapi_output_bytes;
}
+
+ if (pc->bh)
+ iobuf(drive, pc, bcount);
+ else
+ xferfunc(drive, pc->current_position, bcount);
+
/* Update the current position */
pc->actually_transferred += bcount;
pc->current_position += bcount;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: [cmd %x] transferred %d bytes "
- "on that interrupt\n", pc->c[0], bcount);
-#endif
+
+ debug_log(DBG_SENSE, "[cmd %x] transferred %d bytes on that intr.\n",
+ pc->c[0], bcount);
+
/* And set the interrupt handler again */
ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
return ide_started;
}
/*
- * Packet Command Interface
- *
- * The current Packet Command is available in tape->pc, and will not
- * change until we finish handling it. Each packet command is associated
- * with a callback function that will be called when the command is
- * finished.
+ * Packet Command Interface
*
- * The handling will be done in three stages:
+ * The current Packet Command is available in tape->pc, and will not change
+ * until we finish handling it. Each packet command is associated with a
+ * callback function that will be called when the command is finished.
*
- * 1. idetape_issue_packet_command will send the packet command to the
- * drive, and will set the interrupt handler to idetape_pc_intr.
+ * The handling will be done in three stages:
*
- * 2. On each interrupt, idetape_pc_intr will be called. This step
- * will be repeated until the device signals us that no more
- * interrupts will be issued.
+ * 1. idetape_issue_pc will send the packet command to the drive, and will set
+ * the interrupt handler to idetape_pc_intr.
*
- * 3. ATAPI Tape media access commands have immediate status with a
- * delayed process. In case of a successful initiation of a
- * media access packet command, the DSC bit will be set when the
- * actual execution of the command is finished.
- * Since the tape drive will not issue an interrupt, we have to
- * poll for this event. In this case, we define the request as
- * "low priority request" by setting rq_status to
- * IDETAPE_RQ_POSTPONED, set a timer to poll for DSC and exit
- * the driver.
+ * 2. On each interrupt, idetape_pc_intr will be called. This step will be
+ * repeated until the device signals us that no more interrupts will be issued.
*
- * ide.c will then give higher priority to requests which
- * originate from the other device, until will change rq_status
- * to RQ_ACTIVE.
+ * 3. ATAPI Tape media access commands have immediate status with a delayed
+ * process. In case of a successful initiation of a media access packet command,
+ * the DSC bit will be set when the actual execution of the command is finished.
+ * Since the tape drive will not issue an interrupt, we have to poll for this
+ * event. In this case, we define the request as "low priority request" by
+ * setting rq_status to IDETAPE_RQ_POSTPONED, set a timer to poll for DSC and
+ * exit the driver.
*
- * 4. When the packet command is finished, it will be checked for errors.
+ * ide.c will then give higher priority to requests which originate from the
+ * other device, until we change rq_status to RQ_ACTIVE.
*
- * 5. In case an error was found, we queue a request sense packet
- * command in front of the request queue and retry the operation
- * up to IDETAPE_MAX_PC_RETRIES times.
+ * 4. When the packet command is finished, it will be checked for errors.
*
- * 6. In case no error was found, or we decided to give up and not
- * to retry again, the callback function will be called and then
- * we will handle the next request.
+ * 5. In case an error was found, we queue a request sense packet command in
+ * front of the request queue and retry the operation up to
+ * IDETAPE_MAX_PC_RETRIES times.
*
+ * 6. In case no error was found, or we decided to give up and not to retry
+ * again, the callback function will be called and then we will handle the next
+ * request.
*/
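The retry policy described in steps 4-6 above can be sketched outside the kernel as below; the fake device, the MAX_PC_RETRIES value and the return codes are invented, and only the control flow (request sense on error, bounded retries, then giving up) follows the driver.

#include <stdio.h>
#include <stdbool.h>

#define MAX_PC_RETRIES	3	/* stand-in for IDETAPE_MAX_PC_RETRIES */

/* Hypothetical device: fails the first 'failures' attempts of a command. */
struct fake_dev { int failures; };

static bool issue_cmd(struct fake_dev *d)
{
	if (d->failures > 0) {
		d->failures--;
		return false;		/* CHECK CONDITION */
	}
	return true;
}

/* Mirrors the flow only: on error a "request sense" runs first, then the
 * command is retried until MAX_PC_RETRIES is exceeded, then we give up. */
static int run_packet_command(struct fake_dev *d)
{
	int retries = 0;

	for (;;) {
		if (issue_cmd(d)) {
			printf("completed after %d retries\n", retries);
			return 0;	/* callback would run here */
		}
		printf("error -> queue REQUEST SENSE, analyze, retry\n");
		if (++retries > MAX_PC_RETRIES) {
			printf("giving up\n");
			return -1;	/* callback sees pc->error */
		}
	}
}

int main(void)
{
	struct fake_dev ok = { 2 };	/* succeeds on the third attempt */
	struct fake_dev bad = { 10 };	/* aborts after MAX_PC_RETRIES */

	run_packet_command(&ok);
	run_packet_command(&bad);
	return 0;
}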
static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
{
@@ -1388,8 +1233,9 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
ide_startstop_t startstop;
u8 ireason;
- if (ide_wait_stat(&startstop,drive,DRQ_STAT,BUSY_STAT,WAIT_READY)) {
- printk(KERN_ERR "ide-tape: Strange, packet command initiated yet DRQ isn't asserted\n");
+ if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) {
+ printk(KERN_ERR "ide-tape: Strange, packet command initiated "
+ "yet DRQ isn't asserted\n");
return startstop;
}
ireason = hwif->INB(IDE_IREASON_REG);
@@ -1422,7 +1268,7 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
return ide_started;
}
-static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape_pc_t *pc)
+static ide_startstop_t idetape_issue_pc(ide_drive_t *drive, idetape_pc_t *pc)
{
ide_hwif_t *hwif = drive->hwif;
idetape_tape_t *tape = drive->driver_data;
@@ -1443,9 +1289,9 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
test_bit(PC_ABORT, &pc->flags)) {
/*
- * We will "abort" retrying a packet command in case
- * a legitimate error code was received (crossing a
- * filemark, or end of the media, for example).
+		 * We will "abort" retrying a packet command in case a
+		 * legitimate error code was received (crossing a filemark, or
+		 * end of the media, for example).
*/
if (!test_bit(PC_ABORT, &pc->flags)) {
if (!(pc->c[0] == TEST_UNIT_READY &&
@@ -1464,10 +1310,7 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
tape->failed_pc = NULL;
return pc->callback(drive);
}
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: Retry number - %d, cmd = %02X\n", pc->retries, pc->c[0]);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
pc->retries++;
/* We haven't transferred any data yet */
@@ -1499,31 +1342,24 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
}
}
-/*
- * General packet command callback function.
- */
-static ide_startstop_t idetape_pc_callback (ide_drive_t *drive)
+static ide_startstop_t idetape_pc_callback(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
-
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_pc_callback\n");
-#endif /* IDETAPE_DEBUG_LOG */
+
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
idetape_end_request(drive, tape->pc->error ? 0 : 1, 0);
return ide_stopped;
}
-/*
- * A mode sense command is used to "sense" tape parameters.
- */
-static void idetape_create_mode_sense_cmd (idetape_pc_t *pc, u8 page_code)
+/* A mode sense command is used to "sense" tape parameters. */
+static void idetape_create_mode_sense_cmd(idetape_pc_t *pc, u8 page_code)
{
idetape_init_pc(pc);
pc->c[0] = MODE_SENSE;
if (page_code != IDETAPE_BLOCK_DESCRIPTOR)
- pc->c[1] = 8; /* DBD = 1 - Don't return block descriptors */
+ /* DBD = 1 - Don't return block descriptors */
+ pc->c[1] = 8;
pc->c[2] = page_code;
/*
* Changed pc->c[3] to 0 (255 will at best return unused info).
@@ -1533,7 +1369,8 @@ static void idetape_create_mode_sense_cmd (idetape_pc_t *pc, u8 page_code)
* and return an error when 255 is used.
*/
pc->c[3] = 0;
- pc->c[4] = 255; /* (We will just discard data in that case) */
+ /* We will just discard data in that case */
+ pc->c[4] = 255;
if (page_code == IDETAPE_BLOCK_DESCRIPTOR)
pc->request_transfer = 12;
else if (page_code == IDETAPE_CAPABILITIES_PAGE)
@@ -1543,62 +1380,77 @@ static void idetape_create_mode_sense_cmd (idetape_pc_t *pc, u8 page_code)
pc->callback = &idetape_pc_callback;
}
-static void calculate_speeds(ide_drive_t *drive)
+static void idetape_calculate_speeds(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
- int full = 125, empty = 75;
- if (time_after(jiffies, tape->controlled_pipeline_head_time + 120 * HZ)) {
- tape->controlled_previous_pipeline_head = tape->controlled_last_pipeline_head;
- tape->controlled_previous_head_time = tape->controlled_pipeline_head_time;
+ if (time_after(jiffies,
+ tape->controlled_pipeline_head_time + 120 * HZ)) {
+ tape->controlled_previous_pipeline_head =
+ tape->controlled_last_pipeline_head;
+ tape->controlled_previous_head_time =
+ tape->controlled_pipeline_head_time;
tape->controlled_last_pipeline_head = tape->pipeline_head;
tape->controlled_pipeline_head_time = jiffies;
}
if (time_after(jiffies, tape->controlled_pipeline_head_time + 60 * HZ))
- tape->controlled_pipeline_head_speed = (tape->pipeline_head - tape->controlled_last_pipeline_head) * 32 * HZ / (jiffies - tape->controlled_pipeline_head_time);
+ tape->controlled_pipeline_head_speed = (tape->pipeline_head -
+ tape->controlled_last_pipeline_head) * 32 * HZ /
+ (jiffies - tape->controlled_pipeline_head_time);
else if (time_after(jiffies, tape->controlled_previous_head_time))
- tape->controlled_pipeline_head_speed = (tape->pipeline_head - tape->controlled_previous_pipeline_head) * 32 * HZ / (jiffies - tape->controlled_previous_head_time);
+ tape->controlled_pipeline_head_speed = (tape->pipeline_head -
+ tape->controlled_previous_pipeline_head) * 32 *
+ HZ / (jiffies - tape->controlled_previous_head_time);
- if (tape->nr_pending_stages < tape->max_stages /*- 1 */) {
+	if (tape->nr_pending_stages < tape->max_stages /*- 1 */) {
/* -1 for read mode error recovery */
- if (time_after(jiffies, tape->uncontrolled_previous_head_time + 10 * HZ)) {
+ if (time_after(jiffies, tape->uncontrolled_previous_head_time +
+ 10 * HZ)) {
tape->uncontrolled_pipeline_head_time = jiffies;
- tape->uncontrolled_pipeline_head_speed = (tape->pipeline_head - tape->uncontrolled_previous_pipeline_head) * 32 * HZ / (jiffies - tape->uncontrolled_previous_head_time);
+ tape->uncontrolled_pipeline_head_speed =
+ (tape->pipeline_head -
+ tape->uncontrolled_previous_pipeline_head) *
+ 32 * HZ / (jiffies -
+ tape->uncontrolled_previous_head_time);
}
} else {
tape->uncontrolled_previous_head_time = jiffies;
tape->uncontrolled_previous_pipeline_head = tape->pipeline_head;
- if (time_after(jiffies, tape->uncontrolled_pipeline_head_time + 30 * HZ)) {
+ if (time_after(jiffies, tape->uncontrolled_pipeline_head_time +
+ 30 * HZ))
tape->uncontrolled_pipeline_head_time = jiffies;
- }
+
}
- tape->pipeline_head_speed = max(tape->uncontrolled_pipeline_head_speed, tape->controlled_pipeline_head_speed);
- if (tape->speed_control == 0) {
- tape->max_insert_speed = 5000;
- } else if (tape->speed_control == 1) {
+ tape->pipeline_head_speed = max(tape->uncontrolled_pipeline_head_speed,
+ tape->controlled_pipeline_head_speed);
+
+ if (tape->speed_control == 1) {
if (tape->nr_pending_stages >= tape->max_stages / 2)
tape->max_insert_speed = tape->pipeline_head_speed +
- (1100 - tape->pipeline_head_speed) * 2 * (tape->nr_pending_stages - tape->max_stages / 2) / tape->max_stages;
+ (1100 - tape->pipeline_head_speed) * 2 *
+ (tape->nr_pending_stages - tape->max_stages / 2)
+ / tape->max_stages;
else
tape->max_insert_speed = 500 +
- (tape->pipeline_head_speed - 500) * 2 * tape->nr_pending_stages / tape->max_stages;
+ (tape->pipeline_head_speed - 500) * 2 *
+ tape->nr_pending_stages / tape->max_stages;
+
if (tape->nr_pending_stages >= tape->max_stages * 99 / 100)
tape->max_insert_speed = 5000;
- } else if (tape->speed_control == 2) {
- tape->max_insert_speed = tape->pipeline_head_speed * empty / 100 +
- (tape->pipeline_head_speed * full / 100 - tape->pipeline_head_speed * empty / 100) * tape->nr_pending_stages / tape->max_stages;
} else
tape->max_insert_speed = tape->speed_control;
+
tape->max_insert_speed = max(tape->max_insert_speed, 500);
}
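A stand-alone worked example of the speed_control == 1 policy above, assuming a pipeline head speed of 1000 KB/s and 100 stages; the numbers are invented, only the formula matches the driver code.

#include <stdio.h>

static int max_insert_speed(int head_speed, int pending, int max_stages)
{
	int speed;

	if (pending >= max_stages / 2)
		speed = head_speed + (1100 - head_speed) * 2 *
			(pending - max_stages / 2) / max_stages;
	else
		speed = 500 + (head_speed - 500) * 2 * pending / max_stages;

	if (pending >= max_stages * 99 / 100)
		speed = 5000;
	return speed > 500 ? speed : 500;
}

int main(void)
{
	/* head speed 1000 KB/s, 100 stages total */
	printf("%d\n", max_insert_speed(1000, 25, 100));	/* 750 */
	printf("%d\n", max_insert_speed(1000, 75, 100));	/* 1050 */
	printf("%d\n", max_insert_speed(1000, 99, 100));	/* 5000 */
	return 0;
}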
-static ide_startstop_t idetape_media_access_finished (ide_drive_t *drive)
+static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
idetape_pc_t *pc = tape->pc;
u8 stat;
- stat = drive->hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
+
if (stat & SEEK_STAT) {
if (stat & ERR_STAT) {
/* Error detected */
@@ -1618,14 +1470,14 @@ static ide_startstop_t idetape_media_access_finished (ide_drive_t *drive)
return pc->callback(drive);
}
-static ide_startstop_t idetape_rw_callback (ide_drive_t *drive)
+static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
struct request *rq = HWGROUP(drive)->rq;
- int blocks = tape->pc->actually_transferred / tape->tape_block_size;
+ int blocks = tape->pc->actually_transferred / tape->blk_size;
- tape->avg_size += blocks * tape->tape_block_size;
- tape->insert_size += blocks * tape->tape_block_size;
+ tape->avg_size += blocks * tape->blk_size;
+ tape->insert_size += blocks * tape->blk_size;
if (tape->insert_size > 1024 * 1024)
tape->measure_insert_time = 1;
if (tape->measure_insert_time) {
@@ -1634,19 +1486,17 @@ static ide_startstop_t idetape_rw_callback (ide_drive_t *drive)
tape->insert_size = 0;
}
if (time_after(jiffies, tape->insert_time))
- tape->insert_speed = tape->insert_size / 1024 * HZ / (jiffies - tape->insert_time);
+ tape->insert_speed = tape->insert_size / 1024 * HZ /
+ (jiffies - tape->insert_time);
if (time_after_eq(jiffies, tape->avg_time + HZ)) {
- tape->avg_speed = tape->avg_size * HZ / (jiffies - tape->avg_time) / 1024;
+ tape->avg_speed = tape->avg_size * HZ /
+ (jiffies - tape->avg_time) / 1024;
tape->avg_size = 0;
tape->avg_time = jiffies;
}
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_rw_callback\n");
-#endif /* IDETAPE_DEBUG_LOG */
-
- tape->first_frame_position += blocks;
+ tape->first_frame += blocks;
rq->current_nr_sectors -= blocks;
if (!tape->pc->error)
@@ -1656,7 +1506,8 @@ static ide_startstop_t idetape_rw_callback (ide_drive_t *drive)
return ide_stopped;
}
-static void idetape_create_read_cmd(idetape_tape_t *tape, idetape_pc_t *pc, unsigned int length, struct idetape_bh *bh)
+static void idetape_create_read_cmd(idetape_tape_t *tape, idetape_pc_t *pc,
+ unsigned int length, struct idetape_bh *bh)
{
idetape_init_pc(pc);
pc->c[0] = READ_6;
@@ -1666,12 +1517,14 @@ static void idetape_create_read_cmd(idetape_tape_t *tape, idetape_pc_t *pc, unsi
pc->bh = bh;
atomic_set(&bh->b_count, 0);
pc->buffer = NULL;
- pc->request_transfer = pc->buffer_size = length * tape->tape_block_size;
+ pc->buffer_size = length * tape->blk_size;
+ pc->request_transfer = pc->buffer_size;
if (pc->request_transfer == tape->stage_size)
set_bit(PC_DMA_RECOMMENDED, &pc->flags);
}
-static void idetape_create_read_buffer_cmd(idetape_tape_t *tape, idetape_pc_t *pc, unsigned int length, struct idetape_bh *bh)
+static void idetape_create_read_buffer_cmd(idetape_tape_t *tape,
+ idetape_pc_t *pc, struct idetape_bh *bh)
{
int size = 32768;
struct idetape_bh *p = bh;
@@ -1689,10 +1542,12 @@ static void idetape_create_read_buffer_cmd(idetape_tape_t *tape, idetape_pc_t *p
atomic_set(&p->b_count, 0);
p = p->b_reqnext;
}
- pc->request_transfer = pc->buffer_size = size;
+ pc->request_transfer = size;
+ pc->buffer_size = size;
}
-static void idetape_create_write_cmd(idetape_tape_t *tape, idetape_pc_t *pc, unsigned int length, struct idetape_bh *bh)
+static void idetape_create_write_cmd(idetape_tape_t *tape, idetape_pc_t *pc,
+ unsigned int length, struct idetape_bh *bh)
{
idetape_init_pc(pc);
pc->c[0] = WRITE_6;
@@ -1704,14 +1559,12 @@ static void idetape_create_write_cmd(idetape_tape_t *tape, idetape_pc_t *pc, uns
pc->b_data = bh->b_data;
pc->b_count = atomic_read(&bh->b_count);
pc->buffer = NULL;
- pc->request_transfer = pc->buffer_size = length * tape->tape_block_size;
+ pc->buffer_size = length * tape->blk_size;
+ pc->request_transfer = pc->buffer_size;
if (pc->request_transfer == tape->stage_size)
set_bit(PC_DMA_RECOMMENDED, &pc->flags);
}
-/*
- * idetape_do_request is our request handling function.
- */
static ide_startstop_t idetape_do_request(ide_drive_t *drive,
struct request *rq, sector_t block)
{
@@ -1720,30 +1573,22 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
struct request *postponed_rq = tape->postponed_rq;
u8 stat;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: sector: %ld, "
- "nr_sectors: %ld, current_nr_sectors: %d\n",
+ debug_log(DBG_SENSE, "sector: %ld, nr_sectors: %ld,"
+ " current_nr_sectors: %d\n",
rq->sector, rq->nr_sectors, rq->current_nr_sectors);
-#endif /* IDETAPE_DEBUG_LOG */
if (!blk_special_request(rq)) {
- /*
- * We do not support buffer cache originated requests.
- */
+ /* We do not support buffer cache originated requests. */
printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
"request queue (%d)\n", drive->name, rq->cmd_type);
ide_end_request(drive, 0, 0);
return ide_stopped;
}
- /*
- * Retry a failed packet command
- */
- if (tape->failed_pc != NULL &&
- tape->pc->c[0] == REQUEST_SENSE) {
- return idetape_issue_packet_command(drive, tape->failed_pc);
- }
+ /* Retry a failed packet command */
+ if (tape->failed_pc && tape->pc->c[0] == REQUEST_SENSE)
+ return idetape_issue_pc(drive, tape->failed_pc);
+
if (postponed_rq != NULL)
if (rq != postponed_rq) {
printk(KERN_ERR "ide-tape: ide-tape.c bug - "
@@ -1758,7 +1603,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
* If the tape is still busy, postpone our request and service
* the other device meanwhile.
*/
- stat = drive->hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
set_bit(IDETAPE_IGNORE_DSC, &tape->flags);
@@ -1768,16 +1613,15 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
drive->post_reset = 0;
}
- if (tape->tape_still_time > 100 && tape->tape_still_time < 200)
- tape->measure_insert_time = 1;
if (time_after(jiffies, tape->insert_time))
- tape->insert_speed = tape->insert_size / 1024 * HZ / (jiffies - tape->insert_time);
- calculate_speeds(drive);
+ tape->insert_speed = tape->insert_size / 1024 * HZ /
+ (jiffies - tape->insert_time);
+ idetape_calculate_speeds(drive);
if (!test_and_clear_bit(IDETAPE_IGNORE_DSC, &tape->flags) &&
(stat & SEEK_STAT) == 0) {
if (postponed_rq == NULL) {
tape->dsc_polling_start = jiffies;
- tape->dsc_polling_frequency = tape->best_dsc_rw_frequency;
+ tape->dsc_poll_freq = tape->best_dsc_rw_freq;
tape->dsc_timeout = jiffies + IDETAPE_DSC_RW_TIMEOUT;
} else if (time_after(jiffies, tape->dsc_timeout)) {
printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
@@ -1788,8 +1632,10 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
} else {
return ide_do_reset(drive);
}
- } else if (time_after(jiffies, tape->dsc_polling_start + IDETAPE_DSC_MA_THRESHOLD))
- tape->dsc_polling_frequency = IDETAPE_DSC_MA_SLOW;
+ } else if (time_after(jiffies,
+ tape->dsc_polling_start +
+ IDETAPE_DSC_MA_THRESHOLD))
+ tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
idetape_postpone_request(drive);
return ide_stopped;
}
@@ -1797,20 +1643,23 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
tape->buffer_head++;
tape->postpone_cnt = 0;
pc = idetape_next_pc_storage(drive);
- idetape_create_read_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special);
+ idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
+ (struct idetape_bh *)rq->special);
goto out;
}
if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
tape->buffer_head++;
tape->postpone_cnt = 0;
pc = idetape_next_pc_storage(drive);
- idetape_create_write_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special);
+ idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
+ (struct idetape_bh *)rq->special);
goto out;
}
if (rq->cmd[0] & REQ_IDETAPE_READ_BUFFER) {
tape->postpone_cnt = 0;
pc = idetape_next_pc_storage(drive);
- idetape_create_read_buffer_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special);
+ idetape_create_read_buffer_cmd(tape, pc,
+ (struct idetape_bh *)rq->special);
goto out;
}
if (rq->cmd[0] & REQ_IDETAPE_PC1) {
@@ -1825,49 +1674,51 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
}
BUG();
out:
- return idetape_issue_packet_command(drive, pc);
+ return idetape_issue_pc(drive, pc);
}
-/*
- * Pipeline related functions
- */
-static inline int idetape_pipeline_active (idetape_tape_t *tape)
+/* Pipeline related functions */
+static inline int idetape_pipeline_active(idetape_tape_t *tape)
{
int rc1, rc2;
rc1 = test_bit(IDETAPE_PIPELINE_ACTIVE, &tape->flags);
- rc2 = (tape->active_data_request != NULL);
+ rc2 = (tape->active_data_rq != NULL);
return rc1;
}
/*
- * idetape_kmalloc_stage uses __get_free_page to allocate a pipeline
- * stage, along with all the necessary small buffers which together make
- * a buffer of size tape->stage_size (or a bit more). We attempt to
- * combine sequential pages as much as possible.
+ * The function below uses __get_free_page to allocate a pipeline stage, along
+ * with all the necessary small buffers which together make a buffer of size
+ * tape->stage_size (or a bit more). We attempt to combine sequential pages as
+ * much as possible.
*
- * Returns a pointer to the new allocated stage, or NULL if we
- * can't (or don't want to) allocate a stage.
+ * It returns a pointer to the newly allocated stage, or NULL if we can't (or
+ * don't want to) allocate a stage.
*
- * Pipeline stages are optional and are used to increase performance.
- * If we can't allocate them, we'll manage without them.
+ * Pipeline stages are optional and are used to increase performance. If we
+ * can't allocate them, we'll manage without them.
*/
-static idetape_stage_t *__idetape_kmalloc_stage (idetape_tape_t *tape, int full, int clear)
+static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full,
+ int clear)
{
idetape_stage_t *stage;
struct idetape_bh *prev_bh, *bh;
int pages = tape->pages_per_stage;
char *b_data = NULL;
- if ((stage = kmalloc(sizeof (idetape_stage_t),GFP_KERNEL)) == NULL)
+ stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL);
+ if (!stage)
return NULL;
stage->next = NULL;
- bh = stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
+ stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
+ bh = stage->bh;
if (bh == NULL)
goto abort;
bh->b_reqnext = NULL;
- if ((bh->b_data = (char *) __get_free_page (GFP_KERNEL)) == NULL)
+ bh->b_data = (char *) __get_free_page(GFP_KERNEL);
+ if (!bh->b_data)
goto abort;
if (clear)
memset(bh->b_data, 0, PAGE_SIZE);
@@ -1875,7 +1726,8 @@ static idetape_stage_t *__idetape_kmalloc_stage (idetape_tape_t *tape, int full,
atomic_set(&bh->b_count, full ? bh->b_size : 0);
while (--pages) {
- if ((b_data = (char *) __get_free_page (GFP_KERNEL)) == NULL)
+ b_data = (char *) __get_free_page(GFP_KERNEL);
+ if (!b_data)
goto abort;
if (clear)
memset(b_data, 0, PAGE_SIZE);
@@ -1893,7 +1745,8 @@ static idetape_stage_t *__idetape_kmalloc_stage (idetape_tape_t *tape, int full,
continue;
}
prev_bh = bh;
- if ((bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL)) == NULL) {
+ bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
+ if (!bh) {
free_page((unsigned long) b_data);
goto abort;
}
@@ -1912,14 +1765,11 @@ abort:
return NULL;
}
-static idetape_stage_t *idetape_kmalloc_stage (idetape_tape_t *tape)
+static idetape_stage_t *idetape_kmalloc_stage(idetape_tape_t *tape)
{
idetape_stage_t *cache_stage = tape->cache_stage;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_kmalloc_stage\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
if (tape->nr_stages >= tape->max_stages)
return NULL;
@@ -1930,7 +1780,8 @@ static idetape_stage_t *idetape_kmalloc_stage (idetape_tape_t *tape)
return __idetape_kmalloc_stage(tape, 0, 0);
}
-static int idetape_copy_stage_from_user (idetape_tape_t *tape, idetape_stage_t *stage, const char __user *buf, int n)
+static int idetape_copy_stage_from_user(idetape_tape_t *tape,
+ idetape_stage_t *stage, const char __user *buf, int n)
{
struct idetape_bh *bh = tape->bh;
int count;
@@ -1938,12 +1789,15 @@ static int idetape_copy_stage_from_user (idetape_tape_t *tape, idetape_stage_t *
while (n) {
if (bh == NULL) {
- printk(KERN_ERR "ide-tape: bh == NULL in "
- "idetape_copy_stage_from_user\n");
+ printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
+ __func__);
return 1;
}
- count = min((unsigned int)(bh->b_size - atomic_read(&bh->b_count)), (unsigned int)n);
- if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf, count))
+ count = min((unsigned int)
+ (bh->b_size - atomic_read(&bh->b_count)),
+ (unsigned int)n);
+ if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
+ count))
ret = 1;
n -= count;
atomic_add(count, &bh->b_count);
@@ -1958,7 +1812,8 @@ static int idetape_copy_stage_from_user (idetape_tape_t *tape, idetape_stage_t *
return ret;
}
-static int idetape_copy_stage_to_user (idetape_tape_t *tape, char __user *buf, idetape_stage_t *stage, int n)
+static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
+ idetape_stage_t *stage, int n)
{
struct idetape_bh *bh = tape->bh;
int count;
@@ -1966,8 +1821,8 @@ static int idetape_copy_stage_to_user (idetape_tape_t *tape, char __user *buf, i
while (n) {
if (bh == NULL) {
- printk(KERN_ERR "ide-tape: bh == NULL in "
- "idetape_copy_stage_to_user\n");
+ printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
+ __func__);
return 1;
}
count = min(tape->b_count, n);
@@ -1978,7 +1833,8 @@ static int idetape_copy_stage_to_user (idetape_tape_t *tape, char __user *buf, i
tape->b_count -= count;
buf += count;
if (!tape->b_count) {
- tape->bh = bh = bh->b_reqnext;
+ bh = bh->b_reqnext;
+ tape->bh = bh;
if (bh) {
tape->b_data = bh->b_data;
tape->b_count = atomic_read(&bh->b_count);
@@ -1988,12 +1844,12 @@ static int idetape_copy_stage_to_user (idetape_tape_t *tape, char __user *buf, i
return ret;
}
-static void idetape_init_merge_stage (idetape_tape_t *tape)
+static void idetape_init_merge_stage(idetape_tape_t *tape)
{
struct idetape_bh *bh = tape->merge_stage->bh;
-
+
tape->bh = bh;
- if (tape->chrdev_direction == idetape_direction_write)
+ if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
atomic_set(&bh->b_count, 0);
else {
tape->b_data = bh->b_data;
@@ -2001,7 +1857,7 @@ static void idetape_init_merge_stage (idetape_tape_t *tape)
}
}
-static void idetape_switch_buffers (idetape_tape_t *tape, idetape_stage_t *stage)
+static void idetape_switch_buffers(idetape_tape_t *tape, idetape_stage_t *stage)
{
struct idetape_bh *tmp;
@@ -2011,87 +1867,76 @@ static void idetape_switch_buffers (idetape_tape_t *tape, idetape_stage_t *stage
idetape_init_merge_stage(tape);
}
-/*
- * idetape_add_stage_tail adds a new stage at the end of the pipeline.
- */
-static void idetape_add_stage_tail (ide_drive_t *drive,idetape_stage_t *stage)
+/* Add a new stage at the end of the pipeline. */
+static void idetape_add_stage_tail(ide_drive_t *drive, idetape_stage_t *stage)
{
idetape_tape_t *tape = drive->driver_data;
unsigned long flags;
-
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk (KERN_INFO "ide-tape: Reached idetape_add_stage_tail\n");
-#endif /* IDETAPE_DEBUG_LOG */
- spin_lock_irqsave(&tape->spinlock, flags);
+
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
+
+ spin_lock_irqsave(&tape->lock, flags);
stage->next = NULL;
if (tape->last_stage != NULL)
- tape->last_stage->next=stage;
+ tape->last_stage->next = stage;
else
- tape->first_stage = tape->next_stage=stage;
+		tape->first_stage = tape->next_stage = stage;
tape->last_stage = stage;
if (tape->next_stage == NULL)
tape->next_stage = tape->last_stage;
tape->nr_stages++;
tape->nr_pending_stages++;
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ spin_unlock_irqrestore(&tape->lock, flags);
}
-/*
- * idetape_wait_for_request installs a completion in a pending request
- * and sleeps until it is serviced.
- *
- * The caller should ensure that the request will not be serviced
- * before we install the completion (usually by disabling interrupts).
+/*
+ * Install a completion in a pending request and sleep until it is serviced.
+ * The caller should ensure that the request will not be serviced before we
+ * install the completion (usually by disabling interrupts).
*/
-static void idetape_wait_for_request (ide_drive_t *drive, struct request *rq)
+static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq)
{
DECLARE_COMPLETION_ONSTACK(wait);
idetape_tape_t *tape = drive->driver_data;
if (rq == NULL || !blk_special_request(rq)) {
- printk (KERN_ERR "ide-tape: bug: Trying to sleep on non-valid request\n");
+ printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid"
+ " request\n");
return;
}
rq->end_io_data = &wait;
rq->end_io = blk_end_sync_rq;
- spin_unlock_irq(&tape->spinlock);
+ spin_unlock_irq(&tape->lock);
wait_for_completion(&wait);
/* The stage and its struct request have been deallocated */
- spin_lock_irq(&tape->spinlock);
+ spin_lock_irq(&tape->lock);
}
-static ide_startstop_t idetape_read_position_callback (ide_drive_t *drive)
+static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
- idetape_read_position_result_t *result;
-
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_read_position_callback\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ u8 *readpos = tape->pc->buffer;
+
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
if (!tape->pc->error) {
- result = (idetape_read_position_result_t *) tape->pc->buffer;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: BOP - %s\n",result->bop ? "Yes":"No");
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: EOP - %s\n",result->eop ? "Yes":"No");
-#endif /* IDETAPE_DEBUG_LOG */
- if (result->bpu) {
- printk(KERN_INFO "ide-tape: Block location is unknown to the tape\n");
+ debug_log(DBG_SENSE, "BOP - %s\n",
+ (readpos[0] & 0x80) ? "Yes" : "No");
+ debug_log(DBG_SENSE, "EOP - %s\n",
+ (readpos[0] & 0x40) ? "Yes" : "No");
+
+ if (readpos[0] & 0x4) {
+			printk(KERN_INFO "ide-tape: Block location is unknown"
+					 " to the tape\n");
clear_bit(IDETAPE_ADDRESS_VALID, &tape->flags);
idetape_end_request(drive, 0, 0);
} else {
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: Block Location - %u\n", ntohl(result->first_block));
-#endif /* IDETAPE_DEBUG_LOG */
- tape->partition = result->partition;
- tape->first_frame_position = ntohl(result->first_block);
- tape->last_frame_position = ntohl(result->last_block);
- tape->blocks_in_buffer = result->blocks_in_buffer[2];
+ debug_log(DBG_SENSE, "Block Location - %u\n",
+ be32_to_cpu(*(u32 *)&readpos[4]));
+
+ tape->partition = readpos[1];
+ tape->first_frame =
+ be32_to_cpu(*(u32 *)&readpos[4]);
set_bit(IDETAPE_ADDRESS_VALID, &tape->flags);
idetape_end_request(drive, 1, 0);
}
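The raw READ POSITION buffer layout assumed by the callback above (BOP/EOP/BPU bits in byte 0, partition in byte 1, big-endian first block in bytes 4-7) can be decoded with the stand-alone sketch below; the sample bytes are made up.

#include <stdio.h>
#include <stdint.h>

/* Illustrative decoder for a READ POSITION result buffer; only the byte/bit
 * layout follows what the callback reads. */
static void decode_read_position(const uint8_t *p)
{
	uint32_t first_block = ((uint32_t)p[4] << 24) | ((uint32_t)p[5] << 16) |
			       ((uint32_t)p[6] << 8) | p[7];

	printf("BOP=%s EOP=%s\n", (p[0] & 0x80) ? "yes" : "no",
	       (p[0] & 0x40) ? "yes" : "no");
	if (p[0] & 0x04) {
		printf("block position unknown\n");
		return;
	}
	printf("partition=%u first block=%u\n", p[1], first_block);
}

int main(void)
{
	uint8_t buf[20] = { 0x00, 0x01, 0, 0, 0x00, 0x00, 0x01, 0x2c };

	decode_read_position(buf);	/* partition 1, first block 300 */
	return 0;
}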
@@ -2102,14 +1947,11 @@ static ide_startstop_t idetape_read_position_callback (ide_drive_t *drive)
}
/*
- * idetape_create_write_filemark_cmd will:
- *
- * 1. Write a filemark if write_filemark=1.
- * 2. Flush the device buffers without writing a filemark
- * if write_filemark=0.
- *
+ * Write a filemark if write_filemark=1. Flush the device buffers without
+ * writing a filemark otherwise.
*/
-static void idetape_create_write_filemark_cmd (ide_drive_t *drive, idetape_pc_t *pc,int write_filemark)
+static void idetape_create_write_filemark_cmd(ide_drive_t *drive,
+ idetape_pc_t *pc, int write_filemark)
{
idetape_init_pc(pc);
pc->c[0] = WRITE_FILEMARKS;
@@ -2126,26 +1968,19 @@ static void idetape_create_test_unit_ready_cmd(idetape_pc_t *pc)
}
/*
- * idetape_queue_pc_tail is based on the following functions:
- *
- * ide_do_drive_cmd from ide.c
- * cdrom_queue_request and cdrom_queue_packet_command from ide-cd.c
+ * We add a special packet command request to the tail of the request queue, and
+ * wait for it to be serviced. This is not to be called from within the request
+ * handling part of the driver! We allocate data on the stack here and it is
+ * valid until the request is finished. This is not the case for the bottom part
+ * of the driver, where we are always leaving the functions to wait for an
+ * interrupt or a timer event.
*
- * We add a special packet command request to the tail of the request
- * queue, and wait for it to be serviced.
- *
- * This is not to be called from within the request handling part
- * of the driver ! We allocate here data in the stack, and it is valid
- * until the request is finished. This is not the case for the bottom
- * part of the driver, where we are always leaving the functions to wait
- * for an interrupt or a timer event.
- *
- * From the bottom part of the driver, we should allocate safe memory
- * using idetape_next_pc_storage and idetape_next_rq_storage, and add
- * the request to the request list without waiting for it to be serviced !
- * In that case, we usually use idetape_queue_pc_head.
+ * From the bottom part of the driver, we should allocate safe memory using
+ * idetape_next_pc_storage() and idetape_next_rq_storage(), and add the request
+ * to the request list without waiting for it to be serviced! In that case, we
+ * usually use idetape_queue_pc_head().
*/
-static int __idetape_queue_pc_tail (ide_drive_t *drive, idetape_pc_t *pc)
+static int __idetape_queue_pc_tail(ide_drive_t *drive, idetape_pc_t *pc)
{
struct ide_tape_obj *tape = drive->driver_data;
struct request rq;
@@ -2156,7 +1991,8 @@ static int __idetape_queue_pc_tail (ide_drive_t *drive, idetape_pc_t *pc)
return ide_do_drive_cmd(drive, &rq, ide_wait);
}
-static void idetape_create_load_unload_cmd (ide_drive_t *drive, idetape_pc_t *pc,int cmd)
+static void idetape_create_load_unload_cmd(ide_drive_t *drive, idetape_pc_t *pc,
+ int cmd)
{
idetape_init_pc(pc);
pc->c[0] = START_STOP;
@@ -2171,9 +2007,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
idetape_pc_t pc;
int load_attempted = 0;
- /*
- * Wait for the tape to become ready
- */
+ /* Wait for the tape to become ready */
set_bit(IDETAPE_MEDIUM_PRESENT, &tape->flags);
timeout += jiffies;
while (time_before(jiffies, timeout)) {
@@ -2181,10 +2015,12 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
if (!__idetape_queue_pc_tail(drive, &pc))
return 0;
if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
- || (tape->asc == 0x3A)) { /* no media */
+ || (tape->asc == 0x3A)) {
+ /* no media */
if (load_attempted)
return -ENOMEDIUM;
- idetape_create_load_unload_cmd(drive, &pc, IDETAPE_LU_LOAD_MASK);
+ idetape_create_load_unload_cmd(drive, &pc,
+ IDETAPE_LU_LOAD_MASK);
__idetape_queue_pc_tail(drive, &pc);
load_attempted = 1;
/* not about to be ready */
@@ -2196,24 +2032,25 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
return -EIO;
}
-static int idetape_queue_pc_tail (ide_drive_t *drive,idetape_pc_t *pc)
+static int idetape_queue_pc_tail(ide_drive_t *drive, idetape_pc_t *pc)
{
return __idetape_queue_pc_tail(drive, pc);
}
-static int idetape_flush_tape_buffers (ide_drive_t *drive)
+static int idetape_flush_tape_buffers(ide_drive_t *drive)
{
idetape_pc_t pc;
int rc;
idetape_create_write_filemark_cmd(drive, &pc, 0);
- if ((rc = idetape_queue_pc_tail(drive, &pc)))
+ rc = idetape_queue_pc_tail(drive, &pc);
+ if (rc)
return rc;
idetape_wait_ready(drive, 60 * 5 * HZ);
return 0;
}
-static void idetape_create_read_position_cmd (idetape_pc_t *pc)
+static void idetape_create_read_position_cmd(idetape_pc_t *pc)
{
idetape_init_pc(pc);
pc->c[0] = READ_POSITION;
@@ -2221,25 +2058,23 @@ static void idetape_create_read_position_cmd (idetape_pc_t *pc)
pc->callback = &idetape_read_position_callback;
}
-static int idetape_read_position (ide_drive_t *drive)
+static int idetape_read_position(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
idetape_pc_t pc;
int position;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_read_position\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
idetape_create_read_position_cmd(&pc);
if (idetape_queue_pc_tail(drive, &pc))
return -1;
- position = tape->first_frame_position;
+ position = tape->first_frame;
return position;
}
-static void idetape_create_locate_cmd (ide_drive_t *drive, idetape_pc_t *pc, unsigned int block, u8 partition, int skip)
+static void idetape_create_locate_cmd(ide_drive_t *drive, idetape_pc_t *pc,
+ unsigned int block, u8 partition, int skip)
{
idetape_init_pc(pc);
pc->c[0] = POSITION_TO_ELEMENT;
@@ -2250,7 +2085,8 @@ static void idetape_create_locate_cmd (ide_drive_t *drive, idetape_pc_t *pc, uns
pc->callback = &idetape_pc_callback;
}
-static int idetape_create_prevent_cmd (ide_drive_t *drive, idetape_pc_t *pc, int prevent)
+static int idetape_create_prevent_cmd(ide_drive_t *drive, idetape_pc_t *pc,
+ int prevent)
{
idetape_tape_t *tape = drive->driver_data;
@@ -2265,17 +2101,17 @@ static int idetape_create_prevent_cmd (ide_drive_t *drive, idetape_pc_t *pc, int
return 1;
}
-static int __idetape_discard_read_pipeline (ide_drive_t *drive)
+static int __idetape_discard_read_pipeline(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
unsigned long flags;
int cnt;
- if (tape->chrdev_direction != idetape_direction_read)
+ if (tape->chrdev_dir != IDETAPE_DIR_READ)
return 0;
/* Remove merge stage. */
- cnt = tape->merge_stage_size / tape->tape_block_size;
+ cnt = tape->merge_stage_size / tape->blk_size;
if (test_and_clear_bit(IDETAPE_FILEMARK, &tape->flags))
++cnt; /* Filemarks count as 1 sector */
tape->merge_stage_size = 0;
@@ -2286,22 +2122,22 @@ static int __idetape_discard_read_pipeline (ide_drive_t *drive)
/* Clear pipeline flags. */
clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
- tape->chrdev_direction = idetape_direction_none;
+ tape->chrdev_dir = IDETAPE_DIR_NONE;
/* Remove pipeline stages. */
if (tape->first_stage == NULL)
return 0;
- spin_lock_irqsave(&tape->spinlock, flags);
+ spin_lock_irqsave(&tape->lock, flags);
tape->next_stage = NULL;
if (idetape_pipeline_active(tape))
- idetape_wait_for_request(drive, tape->active_data_request);
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ idetape_wait_for_request(drive, tape->active_data_rq);
+ spin_unlock_irqrestore(&tape->lock, flags);
while (tape->first_stage != NULL) {
struct request *rq_ptr = &tape->first_stage->rq;
- cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
+ cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
++cnt;
idetape_remove_stage_head(drive);
@@ -2312,21 +2148,19 @@ static int __idetape_discard_read_pipeline (ide_drive_t *drive)
}
/*
- * idetape_position_tape positions the tape to the requested block
- * using the LOCATE packet command. A READ POSITION command is then
- * issued to check where we are positioned.
- *
- * Like all higher level operations, we queue the commands at the tail
- * of the request queue and wait for their completion.
- *
+ * Position the tape to the requested block using the LOCATE packet command.
+ * A READ POSITION command is then issued to check where we are positioned. Like
+ * all higher level operations, we queue the commands at the tail of the request
+ * queue and wait for their completion.
*/
-static int idetape_position_tape (ide_drive_t *drive, unsigned int block, u8 partition, int skip)
+static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
+ u8 partition, int skip)
{
idetape_tape_t *tape = drive->driver_data;
int retval;
idetape_pc_t pc;
- if (tape->chrdev_direction == idetape_direction_read)
+ if (tape->chrdev_dir == IDETAPE_DIR_READ)
__idetape_discard_read_pipeline(drive);
idetape_wait_ready(drive, 60 * 5 * HZ);
idetape_create_locate_cmd(drive, &pc, block, partition, skip);
@@ -2338,7 +2172,8 @@ static int idetape_position_tape (ide_drive_t *drive, unsigned int block, u8 par
return (idetape_queue_pc_tail(drive, &pc));
}
-static void idetape_discard_read_pipeline (ide_drive_t *drive, int restore_position)
+static void idetape_discard_read_pipeline(ide_drive_t *drive,
+ int restore_position)
{
idetape_tape_t *tape = drive->driver_data;
int cnt;
@@ -2349,35 +2184,37 @@ static void idetape_discard_read_pipeline (ide_drive_t *drive, int restore_posit
position = idetape_read_position(drive);
seek = position > cnt ? position - cnt : 0;
if (idetape_position_tape(drive, seek, 0, 0)) {
- printk(KERN_INFO "ide-tape: %s: position_tape failed in discard_pipeline()\n", tape->name);
+ printk(KERN_INFO "ide-tape: %s: position_tape failed in"
+ " discard_pipeline()\n", tape->name);
return;
}
}
}
/*
- * idetape_queue_rw_tail generates a read/write request for the block
- * device interface and wait for it to be serviced.
+ * Generate a read/write request for the block device interface and wait for it
+ * to be serviced.
*/
-static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks, struct idetape_bh *bh)
+static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
+ struct idetape_bh *bh)
{
idetape_tape_t *tape = drive->driver_data;
struct request rq;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: idetape_queue_rw_tail: cmd=%d\n",cmd);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
+
if (idetape_pipeline_active(tape)) {
- printk(KERN_ERR "ide-tape: bug: the pipeline is active in idetape_queue_rw_tail\n");
+ printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
+ __func__);
return (0);
}
idetape_init_rq(&rq, cmd);
rq.rq_disk = tape->disk;
rq.special = (void *)bh;
- rq.sector = tape->first_frame_position;
- rq.nr_sectors = rq.current_nr_sectors = blocks;
+ rq.sector = tape->first_frame;
+ rq.nr_sectors = blocks;
+ rq.current_nr_sectors = blocks;
(void) ide_do_drive_cmd(drive, &rq, ide_wait);
if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
@@ -2387,14 +2224,11 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks, struct
idetape_init_merge_stage(tape);
if (rq.errors == IDETAPE_ERROR_GENERAL)
return -EIO;
- return (tape->tape_block_size * (blocks-rq.current_nr_sectors));
+ return (tape->blk_size * (blocks-rq.current_nr_sectors));
}
-/*
- * idetape_insert_pipeline_into_queue is used to start servicing the
- * pipeline stages, starting from tape->next_stage.
- */
-static void idetape_insert_pipeline_into_queue (ide_drive_t *drive)
+/* Start servicing the pipeline stages, starting from tape->next_stage. */
+static void idetape_plug_pipeline(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
@@ -2403,19 +2237,20 @@ static void idetape_insert_pipeline_into_queue (ide_drive_t *drive)
if (!idetape_pipeline_active(tape)) {
set_bit(IDETAPE_PIPELINE_ACTIVE, &tape->flags);
idetape_activate_next_stage(drive);
- (void) ide_do_drive_cmd(drive, tape->active_data_request, ide_end);
+ (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
}
}
-static void idetape_create_inquiry_cmd (idetape_pc_t *pc)
+static void idetape_create_inquiry_cmd(idetape_pc_t *pc)
{
idetape_init_pc(pc);
pc->c[0] = INQUIRY;
- pc->c[4] = pc->request_transfer = 254;
+ pc->c[4] = 254;
+ pc->request_transfer = 254;
pc->callback = &idetape_pc_callback;
}
-static void idetape_create_rewind_cmd (ide_drive_t *drive, idetape_pc_t *pc)
+static void idetape_create_rewind_cmd(ide_drive_t *drive, idetape_pc_t *pc)
{
idetape_init_pc(pc);
pc->c[0] = REZERO_UNIT;
@@ -2423,7 +2258,7 @@ static void idetape_create_rewind_cmd (ide_drive_t *drive, idetape_pc_t *pc)
pc->callback = &idetape_pc_callback;
}
-static void idetape_create_erase_cmd (idetape_pc_t *pc)
+static void idetape_create_erase_cmd(idetape_pc_t *pc)
{
idetape_init_pc(pc);
pc->c[0] = ERASE;
@@ -2432,7 +2267,7 @@ static void idetape_create_erase_cmd (idetape_pc_t *pc)
pc->callback = &idetape_pc_callback;
}
-static void idetape_create_space_cmd (idetape_pc_t *pc,int count, u8 cmd)
+static void idetape_create_space_cmd(idetape_pc_t *pc, int count, u8 cmd)
{
idetape_init_pc(pc);
pc->c[0] = SPACE;
@@ -2442,89 +2277,87 @@ static void idetape_create_space_cmd (idetape_pc_t *pc,int count, u8 cmd)
pc->callback = &idetape_pc_callback;
}
-static void idetape_wait_first_stage (ide_drive_t *drive)
+static void idetape_wait_first_stage(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
unsigned long flags;
if (tape->first_stage == NULL)
return;
- spin_lock_irqsave(&tape->spinlock, flags);
+ spin_lock_irqsave(&tape->lock, flags);
if (tape->active_stage == tape->first_stage)
- idetape_wait_for_request(drive, tape->active_data_request);
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ idetape_wait_for_request(drive, tape->active_data_rq);
+ spin_unlock_irqrestore(&tape->lock, flags);
}
/*
- * idetape_add_chrdev_write_request tries to add a character device
- * originated write request to our pipeline. In case we don't succeed,
- * we revert to non-pipelined operation mode for this request.
+ * Try to add a character device originated write request to our pipeline. In
+ * case we don't succeed, we revert to non-pipelined operation mode for this
+ * request. In order to accomplish that, we
*
- * 1. Try to allocate a new pipeline stage.
- * 2. If we can't, wait for more and more requests to be serviced
- * and try again each time.
- * 3. If we still can't allocate a stage, fallback to
- * non-pipelined operation mode for this request.
+ * 1. Try to allocate a new pipeline stage.
+ * 2. If we can't, wait for more and more requests to be serviced and try again
+ * each time.
+ * 3. If we still can't allocate a stage, fall back to non-pipelined operation
+ * mode for this request.
*/
-static int idetape_add_chrdev_write_request (ide_drive_t *drive, int blocks)
+static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
{
idetape_tape_t *tape = drive->driver_data;
idetape_stage_t *new_stage;
unsigned long flags;
struct request *rq;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 3)
- printk(KERN_INFO "ide-tape: Reached idetape_add_chrdev_write_request\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
- /*
- * Attempt to allocate a new stage.
- * Pay special attention to possible race conditions.
- */
+ /* Attempt to allocate a new stage. Beware possible race conditions. */
while ((new_stage = idetape_kmalloc_stage(tape)) == NULL) {
- spin_lock_irqsave(&tape->spinlock, flags);
+ spin_lock_irqsave(&tape->lock, flags);
if (idetape_pipeline_active(tape)) {
- idetape_wait_for_request(drive, tape->active_data_request);
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ idetape_wait_for_request(drive, tape->active_data_rq);
+ spin_unlock_irqrestore(&tape->lock, flags);
} else {
- spin_unlock_irqrestore(&tape->spinlock, flags);
- idetape_insert_pipeline_into_queue(drive);
+ spin_unlock_irqrestore(&tape->lock, flags);
+ idetape_plug_pipeline(drive);
if (idetape_pipeline_active(tape))
continue;
/*
- * Linux is short on memory. Fallback to
- * non-pipelined operation mode for this request.
+ * The machine is short on memory. Fall back to non-
+ * pipelined operation mode for this request.
*/
- return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks, tape->merge_stage->bh);
+ return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
+ blocks, tape->merge_stage->bh);
}
}
rq = &new_stage->rq;
idetape_init_rq(rq, REQ_IDETAPE_WRITE);
/* Doesn't actually matter - We always assume sequential access */
- rq->sector = tape->first_frame_position;
- rq->nr_sectors = rq->current_nr_sectors = blocks;
+ rq->sector = tape->first_frame;
+ rq->current_nr_sectors = blocks;
+ rq->nr_sectors = blocks;
idetape_switch_buffers(tape, new_stage);
idetape_add_stage_tail(drive, new_stage);
tape->pipeline_head++;
- calculate_speeds(drive);
+ idetape_calculate_speeds(drive);
/*
- * Estimate whether the tape has stopped writing by checking
- * if our write pipeline is currently empty. If we are not
- * writing anymore, wait for the pipeline to be full enough
- * (90%) before starting to service requests, so that we will
- * be able to keep up with the higher speeds of the tape.
+ * Estimate whether the tape has stopped writing by checking if our
+ * write pipeline is currently empty. If we are not writing anymore,
+ * wait for the pipeline to be almost completely full (90%) before
+ * starting to service requests, so that we will be able to keep up with
+ * the higher speeds of the tape.
*/
if (!idetape_pipeline_active(tape)) {
if (tape->nr_stages >= tape->max_stages * 9 / 10 ||
- tape->nr_stages >= tape->max_stages - tape->uncontrolled_pipeline_head_speed * 3 * 1024 / tape->tape_block_size) {
+ tape->nr_stages >= tape->max_stages -
+ tape->uncontrolled_pipeline_head_speed * 3 * 1024 /
+ tape->blk_size) {
tape->measure_insert_time = 1;
tape->insert_time = jiffies;
tape->insert_size = 0;
tape->insert_speed = 0;
- idetape_insert_pipeline_into_queue(drive);
+ idetape_plug_pipeline(drive);
}
}
if (test_and_clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags))
@@ -2534,31 +2367,32 @@ static int idetape_add_chrdev_write_request (ide_drive_t *drive, int blocks)
}
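/*
 * Illustrative sketch (not part of this patch) of the "start servicing when
 * the write pipeline is nearly full" test used above: the pipeline is
 * plugged once it holds 90% of max_stages, or once the number of free
 * stages drops below a margin derived from the measured uncontrolled head
 * speed (in kB/s) and the tape block size.  Names are hypothetical.
 */
static inline int write_pipeline_nearly_full(int nr_stages, int max_stages,
					     int head_speed_kbps,
					     unsigned int blk_size)
{
	return nr_stages >= max_stages * 9 / 10 ||
	       nr_stages >= max_stages -
			head_speed_kbps * 3 * 1024 / blk_size;
}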
/*
- * idetape_wait_for_pipeline will wait until all pending pipeline
- * requests are serviced. Typically called on device close.
+ * Wait until all pending pipeline requests are serviced. Typically called on
+ * device close.
*/
-static void idetape_wait_for_pipeline (ide_drive_t *drive)
+static void idetape_wait_for_pipeline(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
unsigned long flags;
while (tape->next_stage || idetape_pipeline_active(tape)) {
- idetape_insert_pipeline_into_queue(drive);
- spin_lock_irqsave(&tape->spinlock, flags);
+ idetape_plug_pipeline(drive);
+ spin_lock_irqsave(&tape->lock, flags);
if (idetape_pipeline_active(tape))
- idetape_wait_for_request(drive, tape->active_data_request);
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ idetape_wait_for_request(drive, tape->active_data_rq);
+ spin_unlock_irqrestore(&tape->lock, flags);
}
}
-static void idetape_empty_write_pipeline (ide_drive_t *drive)
+static void idetape_empty_write_pipeline(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
int blocks, min;
struct idetape_bh *bh;
- if (tape->chrdev_direction != idetape_direction_write) {
- printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline, but we are not writing.\n");
+ if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
+ printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline,"
+ " but we are not writing.\n");
return;
}
if (tape->merge_stage_size > tape->stage_size) {
@@ -2566,12 +2400,13 @@ static void idetape_empty_write_pipeline (ide_drive_t *drive)
tape->merge_stage_size = tape->stage_size;
}
if (tape->merge_stage_size) {
- blocks = tape->merge_stage_size / tape->tape_block_size;
- if (tape->merge_stage_size % tape->tape_block_size) {
+ blocks = tape->merge_stage_size / tape->blk_size;
+ if (tape->merge_stage_size % tape->blk_size) {
unsigned int i;
blocks++;
- i = tape->tape_block_size - tape->merge_stage_size % tape->tape_block_size;
+ i = tape->blk_size - tape->merge_stage_size %
+ tape->blk_size;
bh = tape->bh->b_reqnext;
while (bh) {
atomic_set(&bh->b_count, 0);
@@ -2580,12 +2415,14 @@ static void idetape_empty_write_pipeline (ide_drive_t *drive)
bh = tape->bh;
while (i) {
if (bh == NULL) {
-
- printk(KERN_INFO "ide-tape: bug, bh NULL\n");
+ printk(KERN_INFO "ide-tape: bug,"
+ " bh NULL\n");
break;
}
- min = min(i, (unsigned int)(bh->b_size - atomic_read(&bh->b_count)));
- memset(bh->b_data + atomic_read(&bh->b_count), 0, min);
+ min = min(i, (unsigned int)(bh->b_size -
+ atomic_read(&bh->b_count)));
+ memset(bh->b_data + atomic_read(&bh->b_count),
+ 0, min);
atomic_add(min, &bh->b_count);
i -= min;
bh = bh->b_reqnext;
@@ -2600,13 +2437,13 @@ static void idetape_empty_write_pipeline (ide_drive_t *drive)
tape->merge_stage = NULL;
}
clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
- tape->chrdev_direction = idetape_direction_none;
+ tape->chrdev_dir = IDETAPE_DIR_NONE;
/*
- * On the next backup, perform the feedback loop again.
- * (I don't want to keep sense information between backups,
- * as some systems are constantly on, and the system load
- * can be totally different on the next backup).
+ * On the next backup, perform the feedback loop again. (I don't want to
+ * keep sense information between backups, as some systems are
+ * constantly on, and the system load can be totally different on the
+ * next backup).
*/
tape->max_stages = tape->min_pipeline;
if (tape->first_stage != NULL ||
@@ -2621,21 +2458,25 @@ static void idetape_empty_write_pipeline (ide_drive_t *drive)
}
}
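/*
 * Illustrative sketch (not part of this patch) of the final-block padding
 * math used when the write pipeline is emptied above: a partially filled
 * last block is rounded up to a whole tape block and the remainder is
 * padded with zeros.  The struct and names are hypothetical.
 */
struct tail_pad {
	unsigned int blocks;	/* blocks to write, last one padded */
	unsigned int pad_bytes;	/* zero bytes appended to the data */
};

static inline struct tail_pad tail_pad_calc(unsigned int bytes,
					    unsigned int blk_size)
{
	struct tail_pad p;

	p.blocks = bytes / blk_size;
	p.pad_bytes = 0;
	if (bytes % blk_size) {
		p.blocks++;
		p.pad_bytes = blk_size - bytes % blk_size;
	}
	return p;
}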
-static void idetape_restart_speed_control (ide_drive_t *drive)
+static void idetape_restart_speed_control(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
tape->restart_speed_control_req = 0;
tape->pipeline_head = 0;
- tape->controlled_last_pipeline_head = tape->uncontrolled_last_pipeline_head = 0;
- tape->controlled_previous_pipeline_head = tape->uncontrolled_previous_pipeline_head = 0;
- tape->pipeline_head_speed = tape->controlled_pipeline_head_speed = 5000;
+ tape->controlled_last_pipeline_head = 0;
+ tape->controlled_previous_pipeline_head = 0;
+ tape->uncontrolled_previous_pipeline_head = 0;
+ tape->controlled_pipeline_head_speed = 5000;
+ tape->pipeline_head_speed = 5000;
tape->uncontrolled_pipeline_head_speed = 0;
- tape->controlled_pipeline_head_time = tape->uncontrolled_pipeline_head_time = jiffies;
- tape->controlled_previous_head_time = tape->uncontrolled_previous_head_time = jiffies;
+ tape->controlled_pipeline_head_time =
+ tape->uncontrolled_pipeline_head_time = jiffies;
+ tape->controlled_previous_head_time =
+ tape->uncontrolled_previous_head_time = jiffies;
}
-static int idetape_initiate_read (ide_drive_t *drive, int max_stages)
+static int idetape_init_read(ide_drive_t *drive, int max_stages)
{
idetape_tape_t *tape = drive->driver_data;
idetape_stage_t *new_stage;
@@ -2644,32 +2485,35 @@ static int idetape_initiate_read (ide_drive_t *drive, int max_stages)
u16 blocks = *(u16 *)&tape->caps[12];
/* Initialize read operation */
- if (tape->chrdev_direction != idetape_direction_read) {
- if (tape->chrdev_direction == idetape_direction_write) {
+ if (tape->chrdev_dir != IDETAPE_DIR_READ) {
+ if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
idetape_empty_write_pipeline(drive);
idetape_flush_tape_buffers(drive);
}
if (tape->merge_stage || tape->merge_stage_size) {
- printk (KERN_ERR "ide-tape: merge_stage_size should be 0 now\n");
+ printk(KERN_ERR "ide-tape: merge_stage_size should be"
+ " 0 now\n");
tape->merge_stage_size = 0;
}
- if ((tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0)) == NULL)
+ tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
+ if (!tape->merge_stage)
return -ENOMEM;
- tape->chrdev_direction = idetape_direction_read;
+ tape->chrdev_dir = IDETAPE_DIR_READ;
/*
- * Issue a read 0 command to ensure that DSC handshake
- * is switched from completion mode to buffer available
- * mode.
- * No point in issuing this if DSC overlap isn't supported,
- * some drives (Seagate STT3401A) will return an error.
+ * Issue a read 0 command to ensure that DSC handshake is
+ * switched from completion mode to buffer available mode.
+ * No point in issuing this if DSC overlap isn't supported, some
+ * drives (Seagate STT3401A) will return an error.
*/
if (drive->dsc_overlap) {
- bytes_read = idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, 0, tape->merge_stage->bh);
+ bytes_read = idetape_queue_rw_tail(drive,
+ REQ_IDETAPE_READ, 0,
+ tape->merge_stage->bh);
if (bytes_read < 0) {
__idetape_kfree_stage(tape->merge_stage);
tape->merge_stage = NULL;
- tape->chrdev_direction = idetape_direction_none;
+ tape->chrdev_dir = IDETAPE_DIR_NONE;
return bytes_read;
}
}
@@ -2677,8 +2521,9 @@ static int idetape_initiate_read (ide_drive_t *drive, int max_stages)
if (tape->restart_speed_control_req)
idetape_restart_speed_control(drive);
idetape_init_rq(&rq, REQ_IDETAPE_READ);
- rq.sector = tape->first_frame_position;
- rq.nr_sectors = rq.current_nr_sectors = blocks;
+ rq.sector = tape->first_frame;
+ rq.nr_sectors = blocks;
+ rq.current_nr_sectors = blocks;
if (!test_bit(IDETAPE_PIPELINE_ERROR, &tape->flags) &&
tape->nr_stages < max_stages) {
new_stage = idetape_kmalloc_stage(tape);
@@ -2696,50 +2541,43 @@ static int idetape_initiate_read (ide_drive_t *drive, int max_stages)
tape->insert_time = jiffies;
tape->insert_size = 0;
tape->insert_speed = 0;
- idetape_insert_pipeline_into_queue(drive);
+ idetape_plug_pipeline(drive);
}
}
return 0;
}
/*
- * idetape_add_chrdev_read_request is called from idetape_chrdev_read
- * to service a character device read request and add read-ahead
- * requests to our pipeline.
+ * Called from idetape_chrdev_read() to service a character device read request
+ * and add read-ahead requests to our pipeline.
*/
-static int idetape_add_chrdev_read_request (ide_drive_t *drive,int blocks)
+static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
{
idetape_tape_t *tape = drive->driver_data;
unsigned long flags;
struct request *rq_ptr;
int bytes_read;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_add_chrdev_read_request, %d blocks\n", blocks);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
- /*
- * If we are at a filemark, return a read length of 0
- */
+ /* If we are at a filemark, return a read length of 0 */
if (test_bit(IDETAPE_FILEMARK, &tape->flags))
return 0;
- /*
- * Wait for the next block to be available at the head
- * of the pipeline
- */
- idetape_initiate_read(drive, tape->max_stages);
+ /* Wait for the next block to reach the head of the pipeline. */
+ idetape_init_read(drive, tape->max_stages);
if (tape->first_stage == NULL) {
if (test_bit(IDETAPE_PIPELINE_ERROR, &tape->flags))
return 0;
- return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks, tape->merge_stage->bh);
+ return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
+ tape->merge_stage->bh);
}
idetape_wait_first_stage(drive);
rq_ptr = &tape->first_stage->rq;
- bytes_read = tape->tape_block_size * (rq_ptr->nr_sectors - rq_ptr->current_nr_sectors);
- rq_ptr->nr_sectors = rq_ptr->current_nr_sectors = 0;
-
+ bytes_read = tape->blk_size * (rq_ptr->nr_sectors -
+ rq_ptr->current_nr_sectors);
+ rq_ptr->nr_sectors = 0;
+ rq_ptr->current_nr_sectors = 0;
if (rq_ptr->errors == IDETAPE_ERROR_EOD)
return 0;
@@ -2747,43 +2585,46 @@ static int idetape_add_chrdev_read_request (ide_drive_t *drive,int blocks)
idetape_switch_buffers(tape, tape->first_stage);
if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
set_bit(IDETAPE_FILEMARK, &tape->flags);
- spin_lock_irqsave(&tape->spinlock, flags);
+ spin_lock_irqsave(&tape->lock, flags);
idetape_remove_stage_head(drive);
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ spin_unlock_irqrestore(&tape->lock, flags);
tape->pipeline_head++;
- calculate_speeds(drive);
+ idetape_calculate_speeds(drive);
}
- if (bytes_read > blocks * tape->tape_block_size) {
- printk(KERN_ERR "ide-tape: bug: trying to return more bytes than requested\n");
- bytes_read = blocks * tape->tape_block_size;
+ if (bytes_read > blocks * tape->blk_size) {
+ printk(KERN_ERR "ide-tape: bug: trying to return more bytes"
+ " than requested\n");
+ bytes_read = blocks * tape->blk_size;
}
return (bytes_read);
}
-static void idetape_pad_zeros (ide_drive_t *drive, int bcount)
+static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
{
idetape_tape_t *tape = drive->driver_data;
struct idetape_bh *bh;
int blocks;
-
+
while (bcount) {
unsigned int count;
bh = tape->merge_stage->bh;
count = min(tape->stage_size, bcount);
bcount -= count;
- blocks = count / tape->tape_block_size;
+ blocks = count / tape->blk_size;
while (count) {
- atomic_set(&bh->b_count, min(count, (unsigned int)bh->b_size));
+ atomic_set(&bh->b_count,
+ min(count, (unsigned int)bh->b_size));
memset(bh->b_data, 0, atomic_read(&bh->b_count));
count -= atomic_read(&bh->b_count);
bh = bh->b_reqnext;
}
- idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks, tape->merge_stage->bh);
+ idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
+ tape->merge_stage->bh);
}
}
-static int idetape_pipeline_size (ide_drive_t *drive)
+static int idetape_pipeline_size(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
idetape_stage_t *stage;
@@ -2794,9 +2635,10 @@ static int idetape_pipeline_size (ide_drive_t *drive)
stage = tape->first_stage;
while (stage != NULL) {
rq = &stage->rq;
- size += tape->tape_block_size * (rq->nr_sectors-rq->current_nr_sectors);
+ size += tape->blk_size * (rq->nr_sectors -
+ rq->current_nr_sectors);
if (rq->errors == IDETAPE_ERROR_FILEMARK)
- size += tape->tape_block_size;
+ size += tape->blk_size;
stage = stage->next;
}
size += tape->merge_stage_size;
@@ -2804,20 +2646,18 @@ static int idetape_pipeline_size (ide_drive_t *drive)
}
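/*
 * Illustrative sketch (not part of this patch) of the read-pipeline size
 * accounting above: each queued stage contributes the bytes it has already
 * read (requested blocks minus what is still pending), a crossed filemark
 * counts as one extra block, and whatever sits in the merge stage is added
 * on top.  The struct and names are hypothetical.
 */
struct sketch_stage {
	unsigned long nr_sectors;	/* blocks requested for this stage */
	unsigned long pending;		/* blocks not yet transferred */
	int filemark;			/* stage ended on a filemark */
	struct sketch_stage *next;
};

static unsigned long pipeline_bytes(struct sketch_stage *s,
				    unsigned int blk_size,
				    unsigned long merge_bytes)
{
	unsigned long size = 0;

	for (; s; s = s->next) {
		size += blk_size * (s->nr_sectors - s->pending);
		if (s->filemark)
			size += blk_size;
	}
	return size + merge_bytes;
}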
/*
- * Rewinds the tape to the Beginning Of the current Partition (BOP).
- *
- * We currently support only one partition.
- */
-static int idetape_rewind_tape (ide_drive_t *drive)
+ * Rewinds the tape to the Beginning Of the current Partition (BOP). We
+ * currently support only one partition.
+ */
+static int idetape_rewind_tape(ide_drive_t *drive)
{
int retval;
idetape_pc_t pc;
-#if IDETAPE_DEBUG_LOG
- idetape_tape_t *tape = drive->driver_data;
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: Reached idetape_rewind_tape\n");
-#endif /* IDETAPE_DEBUG_LOG */
-
+ idetape_tape_t *tape;
+ tape = drive->driver_data;
+
+ debug_log(DBG_SENSE, "Enter %s\n", __func__);
+
idetape_create_rewind_cmd(drive, &pc);
retval = idetape_queue_pc_tail(drive, &pc);
if (retval)
@@ -2830,14 +2670,9 @@ static int idetape_rewind_tape (ide_drive_t *drive)
return 0;
}
-/*
- * Our special ide-tape ioctl's.
- *
- * Currently there aren't any ioctl's.
- * mtio.h compatible commands should be issued to the character device
- * interface.
- */
-static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+/* mtio.h compatible commands should be issued to the chrdev interface. */
+static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
+ unsigned long arg)
{
idetape_tape_t *tape = drive->driver_data;
void __user *argp = (void __user *)arg;
@@ -2848,44 +2683,41 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd, unsigned l
int nr_stages;
} config;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_blkdev_ioctl\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
+
switch (cmd) {
- case 0x0340:
- if (copy_from_user(&config, argp, sizeof(config)))
- return -EFAULT;
- tape->best_dsc_rw_frequency = config.dsc_rw_frequency;
- tape->max_stages = config.nr_stages;
- break;
- case 0x0350:
- config.dsc_rw_frequency = (int) tape->best_dsc_rw_frequency;
- config.nr_stages = tape->max_stages;
- if (copy_to_user(argp, &config, sizeof(config)))
- return -EFAULT;
- break;
- default:
- return -EIO;
+ case 0x0340:
+ if (copy_from_user(&config, argp, sizeof(config)))
+ return -EFAULT;
+ tape->best_dsc_rw_freq = config.dsc_rw_frequency;
+ tape->max_stages = config.nr_stages;
+ break;
+ case 0x0350:
+ config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
+ config.nr_stages = tape->max_stages;
+ if (copy_to_user(argp, &config, sizeof(config)))
+ return -EFAULT;
+ break;
+ default:
+ return -EIO;
}
return 0;
}
/*
- * idetape_space_over_filemarks is now a bit more complicated than just
- * passing the command to the tape since we may have crossed some
- * filemarks during our pipelined read-ahead mode.
- *
- * As a minor side effect, the pipeline enables us to support MTFSFM when
- * the filemark is in our internal pipeline even if the tape doesn't
- * support spacing over filemarks in the reverse direction.
+ * The function below is now a bit more complicated than just passing the
+ * command to the tape since we may have crossed some filemarks during our
+ * pipelined read-ahead mode. As a minor side effect, the pipeline enables us to
+ * support MTFSFM when the filemark is in our internal pipeline even if the tape
+ * doesn't support spacing over filemarks in the reverse direction.
*/
-static int idetape_space_over_filemarks (ide_drive_t *drive,short mt_op,int mt_count)
+static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
+ int mt_count)
{
idetape_tape_t *tape = drive->driver_data;
idetape_pc_t pc;
unsigned long flags;
- int retval,count=0;
+ int retval, count = 0;
int sprev = !!(tape->caps[4] & 0x20);
if (mt_count == 0)
@@ -2893,14 +2725,11 @@ static int idetape_space_over_filemarks (ide_drive_t *drive,short mt_op,int mt_c
if (MTBSF == mt_op || MTBSFM == mt_op) {
if (!sprev)
return -EIO;
- mt_count = - mt_count;
+ mt_count = -mt_count;
}
- if (tape->chrdev_direction == idetape_direction_read) {
- /*
- * We have a read-ahead buffer. Scan it for crossed
- * filemarks.
- */
+ if (tape->chrdev_dir == IDETAPE_DIR_READ) {
+ /* it's a read-ahead buffer; scan it for crossed filemarks. */
tape->merge_stage_size = 0;
if (test_and_clear_bit(IDETAPE_FILEMARK, &tape->flags))
++count;
@@ -2910,24 +2739,27 @@ static int idetape_space_over_filemarks (ide_drive_t *drive,short mt_op,int mt_c
set_bit(IDETAPE_FILEMARK, &tape->flags);
return 0;
}
- spin_lock_irqsave(&tape->spinlock, flags);
+ spin_lock_irqsave(&tape->lock, flags);
if (tape->first_stage == tape->active_stage) {
/*
- * We have reached the active stage in the read pipeline.
- * There is no point in allowing the drive to continue
- * reading any farther, so we stop the pipeline.
+ * We have reached the active stage in the read
+ * pipeline. There is no point in allowing the
+ * drive to continue reading any farther, so we
+ * stop the pipeline.
*
- * This section should be moved to a separate subroutine,
- * because a similar function is performed in
- * __idetape_discard_read_pipeline(), for example.
+ * This section should be moved to a separate
+ * subroutine because similar operations are
+ * done in __idetape_discard_read_pipeline(),
+ * for example.
*/
tape->next_stage = NULL;
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ spin_unlock_irqrestore(&tape->lock, flags);
idetape_wait_first_stage(drive);
tape->next_stage = tape->first_stage->next;
} else
- spin_unlock_irqrestore(&tape->spinlock, flags);
- if (tape->first_stage->rq.errors == IDETAPE_ERROR_FILEMARK)
+ spin_unlock_irqrestore(&tape->lock, flags);
+ if (tape->first_stage->rq.errors ==
+ IDETAPE_ERROR_FILEMARK)
++count;
idetape_remove_stage_head(drive);
}
@@ -2935,73 +2767,74 @@ static int idetape_space_over_filemarks (ide_drive_t *drive,short mt_op,int mt_c
}
/*
- * The filemark was not found in our internal pipeline.
- * Now we can issue the space command.
+ * The filemark was not found in our internal pipeline; now we can issue
+ * the space command.
*/
switch (mt_op) {
- case MTFSF:
- case MTBSF:
- idetape_create_space_cmd(&pc,mt_count-count,IDETAPE_SPACE_OVER_FILEMARK);
- return (idetape_queue_pc_tail(drive, &pc));
- case MTFSFM:
- case MTBSFM:
- if (!sprev)
- return (-EIO);
- retval = idetape_space_over_filemarks(drive, MTFSF, mt_count-count);
- if (retval) return (retval);
- count = (MTBSFM == mt_op ? 1 : -1);
- return (idetape_space_over_filemarks(drive, MTFSF, count));
- default:
- printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",mt_op);
- return (-EIO);
+ case MTFSF:
+ case MTBSF:
+ idetape_create_space_cmd(&pc, mt_count - count,
+ IDETAPE_SPACE_OVER_FILEMARK);
+ return idetape_queue_pc_tail(drive, &pc);
+ case MTFSFM:
+ case MTBSFM:
+ if (!sprev)
+ return -EIO;
+ retval = idetape_space_over_filemarks(drive, MTFSF,
+ mt_count - count);
+ if (retval)
+ return retval;
+ count = (MTBSFM == mt_op ? 1 : -1);
+ return idetape_space_over_filemarks(drive, MTFSF, count);
+ default:
+ printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
+ mt_op);
+ return -EIO;
}
}
-
/*
- * Our character device read / write functions.
+ * Our character device read / write functions.
*
- * The tape is optimized to maximize throughput when it is transferring
- * an integral number of the "continuous transfer limit", which is
- * a parameter of the specific tape (26 KB on my particular tape).
- * (32 kB for Onstream)
+ * The tape is optimized to maximize throughput when it is transferring an
+ * integral number of the "continuous transfer limit", which is a parameter of
+ * the specific tape (26kB on my particular tape, 32kB for Onstream).
*
- * As of version 1.3 of the driver, the character device provides an
- * abstract continuous view of the media - any mix of block sizes (even 1
- * byte) on the same backup/restore procedure is supported. The driver
- * will internally convert the requests to the recommended transfer unit,
- * so that an unmatch between the user's block size to the recommended
- * size will only result in a (slightly) increased driver overhead, but
- * will no longer hit performance.
- * This is not applicable to Onstream.
+ * As of version 1.3 of the driver, the character device provides an abstract
+ * continuous view of the media - any mix of block sizes (even 1 byte) on the
+ * same backup/restore procedure is supported. The driver will internally
+ * convert the requests to the recommended transfer unit, so that a mismatch
+ * between the user's block size and the recommended size will only result in
+ * (slightly) increased driver overhead, but will no longer hurt performance.
+ * This is not applicable to Onstream.
*/
-static ssize_t idetape_chrdev_read (struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
struct ide_tape_obj *tape = ide_tape_f(file);
ide_drive_t *drive = tape->drive;
- ssize_t bytes_read,temp, actually_read = 0, rc;
+ ssize_t bytes_read, temp, actually_read = 0, rc;
ssize_t ret = 0;
u16 ctl = *(u16 *)&tape->caps[12];
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 3)
- printk(KERN_INFO "ide-tape: Reached idetape_chrdev_read, count %Zd\n", count);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
- if (tape->chrdev_direction != idetape_direction_read) {
+ if (tape->chrdev_dir != IDETAPE_DIR_READ) {
if (test_bit(IDETAPE_DETECT_BS, &tape->flags))
- if (count > tape->tape_block_size &&
- (count % tape->tape_block_size) == 0)
- tape->user_bs_factor = count / tape->tape_block_size;
+ if (count > tape->blk_size &&
+ (count % tape->blk_size) == 0)
+ tape->user_bs_factor = count / tape->blk_size;
}
- if ((rc = idetape_initiate_read(drive, tape->max_stages)) < 0)
+ rc = idetape_init_read(drive, tape->max_stages);
+ if (rc < 0)
return rc;
if (count == 0)
return (0);
if (tape->merge_stage_size) {
- actually_read = min((unsigned int)(tape->merge_stage_size), (unsigned int)count);
- if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, actually_read))
+ actually_read = min((unsigned int)(tape->merge_stage_size),
+ (unsigned int)count);
+ if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
+ actually_read))
ret = -EFAULT;
buf += actually_read;
tape->merge_stage_size -= actually_read;
@@ -3011,7 +2844,8 @@ static ssize_t idetape_chrdev_read (struct file *file, char __user *buf,
bytes_read = idetape_add_chrdev_read_request(drive, ctl);
if (bytes_read <= 0)
goto finish;
- if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, bytes_read))
+ if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
+ bytes_read))
ret = -EFAULT;
buf += bytes_read;
count -= bytes_read;
@@ -3022,25 +2856,24 @@ static ssize_t idetape_chrdev_read (struct file *file, char __user *buf,
if (bytes_read <= 0)
goto finish;
temp = min((unsigned long)count, (unsigned long)bytes_read);
- if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, temp))
+ if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
+ temp))
ret = -EFAULT;
actually_read += temp;
tape->merge_stage_size = bytes_read-temp;
}
finish:
if (!actually_read && test_bit(IDETAPE_FILEMARK, &tape->flags)) {
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: %s: spacing over filemark\n", tape->name);
-#endif
+ debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
+
idetape_space_over_filemarks(drive, MTFSF, 1);
return 0;
}
- return (ret) ? ret : actually_read;
+ return ret ? ret : actually_read;
}
-static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
+static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct ide_tape_obj *tape = ide_tape_f(file);
@@ -3053,39 +2886,37 @@ static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
if (tape->write_prot)
return -EACCES;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 3)
- printk(KERN_INFO "ide-tape: Reached idetape_chrdev_write, "
- "count %Zd\n", count);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
/* Initialize write operation */
- if (tape->chrdev_direction != idetape_direction_write) {
- if (tape->chrdev_direction == idetape_direction_read)
+ if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
+ if (tape->chrdev_dir == IDETAPE_DIR_READ)
idetape_discard_read_pipeline(drive, 1);
if (tape->merge_stage || tape->merge_stage_size) {
printk(KERN_ERR "ide-tape: merge_stage_size "
"should be 0 now\n");
tape->merge_stage_size = 0;
}
- if ((tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0)) == NULL)
+ tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
+ if (!tape->merge_stage)
return -ENOMEM;
- tape->chrdev_direction = idetape_direction_write;
+ tape->chrdev_dir = IDETAPE_DIR_WRITE;
idetape_init_merge_stage(tape);
/*
- * Issue a write 0 command to ensure that DSC handshake
- * is switched from completion mode to buffer available
- * mode.
- * No point in issuing this if DSC overlap isn't supported,
- * some drives (Seagate STT3401A) will return an error.
+ * Issue a write 0 command to ensure that DSC handshake is
+ * switched from completion mode to buffer available mode. No
+ * point in issuing this if DSC overlap isn't supported, some
+ * drives (Seagate STT3401A) will return an error.
*/
if (drive->dsc_overlap) {
- ssize_t retval = idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, 0, tape->merge_stage->bh);
+ ssize_t retval = idetape_queue_rw_tail(drive,
+ REQ_IDETAPE_WRITE, 0,
+ tape->merge_stage->bh);
if (retval < 0) {
__idetape_kfree_stage(tape->merge_stage);
tape->merge_stage = NULL;
- tape->chrdev_direction = idetape_direction_none;
+ tape->chrdev_dir = IDETAPE_DIR_NONE;
return retval;
}
}
@@ -3096,11 +2927,14 @@ static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
idetape_restart_speed_control(drive);
if (tape->merge_stage_size) {
if (tape->merge_stage_size >= tape->stage_size) {
- printk(KERN_ERR "ide-tape: bug: merge buffer too big\n");
+ printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
tape->merge_stage_size = 0;
}
- actually_written = min((unsigned int)(tape->stage_size - tape->merge_stage_size), (unsigned int)count);
- if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, actually_written))
+ actually_written = min((unsigned int)
+ (tape->stage_size - tape->merge_stage_size),
+ (unsigned int)count);
+ if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
+ actually_written))
ret = -EFAULT;
buf += actually_written;
tape->merge_stage_size += actually_written;
@@ -3116,7 +2950,8 @@ static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
}
while (count >= tape->stage_size) {
ssize_t retval;
- if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, tape->stage_size))
+ if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
+ tape->stage_size))
ret = -EFAULT;
buf += tape->stage_size;
count -= tape->stage_size;
@@ -3127,14 +2962,15 @@ static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
}
if (count) {
actually_written += count;
- if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, count))
+ if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
+ count))
ret = -EFAULT;
tape->merge_stage_size += count;
}
- return (ret) ? ret : actually_written;
+ return ret ? ret : actually_written;
}
-static int idetape_write_filemark (ide_drive_t *drive)
+static int idetape_write_filemark(ide_drive_t *drive)
{
idetape_pc_t pc;
@@ -3165,113 +3001,117 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
{
idetape_tape_t *tape = drive->driver_data;
idetape_pc_t pc;
- int i,retval;
+ int i, retval;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 1)
- printk(KERN_INFO "ide-tape: Handling MTIOCTOP ioctl: "
- "mt_op=%d, mt_count=%d\n", mt_op, mt_count);
-#endif /* IDETAPE_DEBUG_LOG */
- /*
- * Commands which need our pipelined read-ahead stages.
- */
+ debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
+ mt_op, mt_count);
+
+ /* Commands which need our pipelined read-ahead stages. */
switch (mt_op) {
- case MTFSF:
- case MTFSFM:
- case MTBSF:
- case MTBSFM:
- if (!mt_count)
- return (0);
- return (idetape_space_over_filemarks(drive,mt_op,mt_count));
- default:
- break;
+ case MTFSF:
+ case MTFSFM:
+ case MTBSF:
+ case MTBSFM:
+ if (!mt_count)
+ return 0;
+ return idetape_space_over_filemarks(drive, mt_op, mt_count);
+ default:
+ break;
}
+
switch (mt_op) {
- case MTWEOF:
- if (tape->write_prot)
- return -EACCES;
- idetape_discard_read_pipeline(drive, 1);
- for (i = 0; i < mt_count; i++) {
- retval = idetape_write_filemark(drive);
- if (retval)
- return retval;
- }
- return (0);
- case MTREW:
- idetape_discard_read_pipeline(drive, 0);
- if (idetape_rewind_tape(drive))
+ case MTWEOF:
+ if (tape->write_prot)
+ return -EACCES;
+ idetape_discard_read_pipeline(drive, 1);
+ for (i = 0; i < mt_count; i++) {
+ retval = idetape_write_filemark(drive);
+ if (retval)
+ return retval;
+ }
+ return 0;
+ case MTREW:
+ idetape_discard_read_pipeline(drive, 0);
+ if (idetape_rewind_tape(drive))
+ return -EIO;
+ return 0;
+ case MTLOAD:
+ idetape_discard_read_pipeline(drive, 0);
+ idetape_create_load_unload_cmd(drive, &pc,
+ IDETAPE_LU_LOAD_MASK);
+ return idetape_queue_pc_tail(drive, &pc);
+ case MTUNLOAD:
+ case MTOFFL:
+ /*
+ * If door is locked, attempt to unlock before
+ * attempting to eject.
+ */
+ if (tape->door_locked) {
+ if (idetape_create_prevent_cmd(drive, &pc, 0))
+ if (!idetape_queue_pc_tail(drive, &pc))
+ tape->door_locked = DOOR_UNLOCKED;
+ }
+ idetape_discard_read_pipeline(drive, 0);
+ idetape_create_load_unload_cmd(drive, &pc,
+ !IDETAPE_LU_LOAD_MASK);
+ retval = idetape_queue_pc_tail(drive, &pc);
+ if (!retval)
+ clear_bit(IDETAPE_MEDIUM_PRESENT, &tape->flags);
+ return retval;
+ case MTNOP:
+ idetape_discard_read_pipeline(drive, 0);
+ return idetape_flush_tape_buffers(drive);
+ case MTRETEN:
+ idetape_discard_read_pipeline(drive, 0);
+ idetape_create_load_unload_cmd(drive, &pc,
+ IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
+ return idetape_queue_pc_tail(drive, &pc);
+ case MTEOM:
+ idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
+ return idetape_queue_pc_tail(drive, &pc);
+ case MTERASE:
+ (void)idetape_rewind_tape(drive);
+ idetape_create_erase_cmd(&pc);
+ return idetape_queue_pc_tail(drive, &pc);
+ case MTSETBLK:
+ if (mt_count) {
+ if (mt_count < tape->blk_size ||
+ mt_count % tape->blk_size)
return -EIO;
+ tape->user_bs_factor = mt_count / tape->blk_size;
+ clear_bit(IDETAPE_DETECT_BS, &tape->flags);
+ } else
+ set_bit(IDETAPE_DETECT_BS, &tape->flags);
+ return 0;
+ case MTSEEK:
+ idetape_discard_read_pipeline(drive, 0);
+ return idetape_position_tape(drive,
+ mt_count * tape->user_bs_factor, tape->partition, 0);
+ case MTSETPART:
+ idetape_discard_read_pipeline(drive, 0);
+ return idetape_position_tape(drive, 0, mt_count, 0);
+ case MTFSR:
+ case MTBSR:
+ case MTLOCK:
+ if (!idetape_create_prevent_cmd(drive, &pc, 1))
return 0;
- case MTLOAD:
- idetape_discard_read_pipeline(drive, 0);
- idetape_create_load_unload_cmd(drive, &pc, IDETAPE_LU_LOAD_MASK);
- return (idetape_queue_pc_tail(drive, &pc));
- case MTUNLOAD:
- case MTOFFL:
- /*
- * If door is locked, attempt to unlock before
- * attempting to eject.
- */
- if (tape->door_locked) {
- if (idetape_create_prevent_cmd(drive, &pc, 0))
- if (!idetape_queue_pc_tail(drive, &pc))
- tape->door_locked = DOOR_UNLOCKED;
- }
- idetape_discard_read_pipeline(drive, 0);
- idetape_create_load_unload_cmd(drive, &pc,!IDETAPE_LU_LOAD_MASK);
- retval = idetape_queue_pc_tail(drive, &pc);
- if (!retval)
- clear_bit(IDETAPE_MEDIUM_PRESENT, &tape->flags);
+ retval = idetape_queue_pc_tail(drive, &pc);
+ if (retval)
return retval;
- case MTNOP:
- idetape_discard_read_pipeline(drive, 0);
- return (idetape_flush_tape_buffers(drive));
- case MTRETEN:
- idetape_discard_read_pipeline(drive, 0);
- idetape_create_load_unload_cmd(drive, &pc,IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
- return (idetape_queue_pc_tail(drive, &pc));
- case MTEOM:
- idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
- return (idetape_queue_pc_tail(drive, &pc));
- case MTERASE:
- (void) idetape_rewind_tape(drive);
- idetape_create_erase_cmd(&pc);
- return (idetape_queue_pc_tail(drive, &pc));
- case MTSETBLK:
- if (mt_count) {
- if (mt_count < tape->tape_block_size || mt_count % tape->tape_block_size)
- return -EIO;
- tape->user_bs_factor = mt_count / tape->tape_block_size;
- clear_bit(IDETAPE_DETECT_BS, &tape->flags);
- } else
- set_bit(IDETAPE_DETECT_BS, &tape->flags);
- return 0;
- case MTSEEK:
- idetape_discard_read_pipeline(drive, 0);
- return idetape_position_tape(drive, mt_count * tape->user_bs_factor, tape->partition, 0);
- case MTSETPART:
- idetape_discard_read_pipeline(drive, 0);
- return (idetape_position_tape(drive, 0, mt_count, 0));
- case MTFSR:
- case MTBSR:
- case MTLOCK:
- if (!idetape_create_prevent_cmd(drive, &pc, 1))
- return 0;
- retval = idetape_queue_pc_tail(drive, &pc);
- if (retval) return retval;
- tape->door_locked = DOOR_EXPLICITLY_LOCKED;
- return 0;
- case MTUNLOCK:
- if (!idetape_create_prevent_cmd(drive, &pc, 0))
- return 0;
- retval = idetape_queue_pc_tail(drive, &pc);
- if (retval) return retval;
- tape->door_locked = DOOR_UNLOCKED;
+ tape->door_locked = DOOR_EXPLICITLY_LOCKED;
+ return 0;
+ case MTUNLOCK:
+ if (!idetape_create_prevent_cmd(drive, &pc, 0))
return 0;
- default:
- printk(KERN_ERR "ide-tape: MTIO operation %d not "
- "supported\n", mt_op);
- return (-EIO);
+ retval = idetape_queue_pc_tail(drive, &pc);
+ if (retval)
+ return retval;
+ tape->door_locked = DOOR_UNLOCKED;
+ return 0;
+ default:
+ printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
+ mt_op);
+ return -EIO;
}
}
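/*
 * Illustrative sketch (not part of this patch) of the MTSETBLK validation
 * above: a user block size is accepted only if it is a non-zero multiple of
 * the tape's hardware block size, and is then stored as a multiplication
 * factor.  Names are hypothetical; a zero count re-enables block size
 * autodetection in the real code and is not modelled here.
 */
static int user_bs_factor_from_setblk(int mt_count, unsigned int blk_size,
				      int *factor)
{
	if (mt_count < (int)blk_size || mt_count % blk_size)
		return -EIO;
	*factor = mt_count / blk_size;
	return 0;
}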
@@ -3288,50 +3128,51 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
struct mtop mtop;
struct mtget mtget;
struct mtpos mtpos;
- int block_offset = 0, position = tape->first_frame_position;
+ int block_offset = 0, position = tape->first_frame;
void __user *argp = (void __user *)arg;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 3)
- printk(KERN_INFO "ide-tape: Reached idetape_chrdev_ioctl, "
- "cmd=%u\n", cmd);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);
tape->restart_speed_control_req = 1;
- if (tape->chrdev_direction == idetape_direction_write) {
+ if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
idetape_empty_write_pipeline(drive);
idetape_flush_tape_buffers(drive);
}
if (cmd == MTIOCGET || cmd == MTIOCPOS) {
- block_offset = idetape_pipeline_size(drive) / (tape->tape_block_size * tape->user_bs_factor);
- if ((position = idetape_read_position(drive)) < 0)
+ block_offset = idetape_pipeline_size(drive) /
+ (tape->blk_size * tape->user_bs_factor);
+ position = idetape_read_position(drive);
+ if (position < 0)
return -EIO;
}
switch (cmd) {
- case MTIOCTOP:
- if (copy_from_user(&mtop, argp, sizeof (struct mtop)))
- return -EFAULT;
- return (idetape_mtioctop(drive,mtop.mt_op,mtop.mt_count));
- case MTIOCGET:
- memset(&mtget, 0, sizeof (struct mtget));
- mtget.mt_type = MT_ISSCSI2;
- mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
- mtget.mt_dsreg = ((tape->tape_block_size * tape->user_bs_factor) << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;
- if (tape->drv_write_prot) {
- mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
- }
- if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
- return -EFAULT;
- return 0;
- case MTIOCPOS:
- mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
- if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
- return -EFAULT;
- return 0;
- default:
- if (tape->chrdev_direction == idetape_direction_read)
- idetape_discard_read_pipeline(drive, 1);
- return idetape_blkdev_ioctl(drive, cmd, arg);
+ case MTIOCTOP:
+ if (copy_from_user(&mtop, argp, sizeof(struct mtop)))
+ return -EFAULT;
+ return idetape_mtioctop(drive, mtop.mt_op, mtop.mt_count);
+ case MTIOCGET:
+ memset(&mtget, 0, sizeof(struct mtget));
+ mtget.mt_type = MT_ISSCSI2;
+ mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
+ mtget.mt_dsreg =
+ ((tape->blk_size * tape->user_bs_factor)
+ << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;
+
+ if (tape->drv_write_prot)
+ mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
+
+ if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
+ return -EFAULT;
+ return 0;
+ case MTIOCPOS:
+ mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
+ if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
+ return -EFAULT;
+ return 0;
+ default:
+ if (tape->chrdev_dir == IDETAPE_DIR_READ)
+ idetape_discard_read_pipeline(drive, 1);
+ return idetape_blkdev_ioctl(drive, cmd, arg);
}
}
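/*
 * Illustrative sketch (not part of this patch) of the block-number
 * reporting above: the drive's current frame position is converted to user
 * blocks and then corrected for data still buffered in the pipeline, so
 * MTIOCGET/MTIOCPOS reflect what the application has actually consumed.
 * Names are hypothetical.
 */
static inline int user_block_number(int drive_position,
				    unsigned long pipeline_bytes,
				    unsigned int blk_size, int user_bs_factor)
{
	int block_offset = pipeline_bytes / (blk_size * user_bs_factor);

	return drive_position / user_bs_factor - block_offset;
}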
@@ -3347,23 +3188,20 @@ static void ide_tape_get_bsize_from_bdesc(ide_drive_t *drive)
idetape_create_mode_sense_cmd(&pc, IDETAPE_BLOCK_DESCRIPTOR);
if (idetape_queue_pc_tail(drive, &pc)) {
printk(KERN_ERR "ide-tape: Can't get block descriptor\n");
- if (tape->tape_block_size == 0) {
+ if (tape->blk_size == 0) {
printk(KERN_WARNING "ide-tape: Cannot deal with zero "
"block size, assuming 32k\n");
- tape->tape_block_size = 32768;
+ tape->blk_size = 32768;
}
return;
}
- tape->tape_block_size = (pc.buffer[4 + 5] << 16) +
+ tape->blk_size = (pc.buffer[4 + 5] << 16) +
(pc.buffer[4 + 6] << 8) +
pc.buffer[4 + 7];
tape->drv_write_prot = (pc.buffer[2] & 0x80) >> 7;
}
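/*
 * Illustrative sketch (not part of this patch): the block size above comes
 * from the mode sense block descriptor, where the block length is stored as
 * a 24-bit big-endian value in the last three descriptor bytes (offsets
 * 9..11 of the returned buffer, i.e. the 4-byte header plus descriptor
 * bytes 5..7).  The helper name is hypothetical.
 */
static inline unsigned int blk_size_from_bdesc(const unsigned char *buf)
{
	return (buf[4 + 5] << 16) + (buf[4 + 6] << 8) + buf[4 + 7];
}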
-/*
- * Our character device open function.
- */
-static int idetape_chrdev_open (struct inode *inode, struct file *filp)
+static int idetape_chrdev_open(struct inode *inode, struct file *filp)
{
unsigned int minor = iminor(inode), i = minor & ~0xc0;
ide_drive_t *drive;
@@ -3371,6 +3209,15 @@ static int idetape_chrdev_open (struct inode *inode, struct file *filp)
idetape_pc_t pc;
int retval;
+ if (i >= MAX_HWIFS * MAX_DRIVES)
+ return -ENXIO;
+
+ tape = ide_tape_chrdev_get(i);
+ if (!tape)
+ return -ENXIO;
+
+ debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
+
/*
* We really want to do nonseekable_open(inode, filp); here, but some
* versions of tar incorrectly call lseek on tapes and bail out if that
@@ -3378,16 +3225,6 @@ static int idetape_chrdev_open (struct inode *inode, struct file *filp)
*/
filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
-#if IDETAPE_DEBUG_LOG
- printk(KERN_INFO "ide-tape: Reached idetape_chrdev_open\n");
-#endif /* IDETAPE_DEBUG_LOG */
-
- if (i >= MAX_HWIFS * MAX_DRIVES)
- return -ENXIO;
-
- if (!(tape = ide_tape_chrdev_get(i)))
- return -ENXIO;
-
drive = tape->drive;
filp->private_data = tape;
@@ -3408,7 +3245,7 @@ static int idetape_chrdev_open (struct inode *inode, struct file *filp)
if (!test_bit(IDETAPE_ADDRESS_VALID, &tape->flags))
(void)idetape_rewind_tape(drive);
- if (tape->chrdev_direction != idetape_direction_read)
+ if (tape->chrdev_dir != IDETAPE_DIR_READ)
clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
/* Read block size and write protect status from drive. */
@@ -3430,10 +3267,8 @@ static int idetape_chrdev_open (struct inode *inode, struct file *filp)
}
}
- /*
- * Lock the tape drive door so user can't eject.
- */
- if (tape->chrdev_direction == idetape_direction_none) {
+ /* Lock the tape drive door so user can't eject. */
+ if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
if (idetape_create_prevent_cmd(drive, &pc, 1)) {
if (!idetape_queue_pc_tail(drive, &pc)) {
if (tape->door_locked != DOOR_EXPLICITLY_LOCKED)
@@ -3450,14 +3285,15 @@ out_put_tape:
return retval;
}
-static void idetape_write_release (ide_drive_t *drive, unsigned int minor)
+static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
{
idetape_tape_t *tape = drive->driver_data;
idetape_empty_write_pipeline(drive);
tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0);
if (tape->merge_stage != NULL) {
- idetape_pad_zeros(drive, tape->tape_block_size * (tape->user_bs_factor - 1));
+ idetape_pad_zeros(drive, tape->blk_size *
+ (tape->user_bs_factor - 1));
__idetape_kfree_stage(tape->merge_stage);
tape->merge_stage = NULL;
}
@@ -3466,10 +3302,7 @@ static void idetape_write_release (ide_drive_t *drive, unsigned int minor)
idetape_flush_tape_buffers(drive);
}
-/*
- * Our character device release function.
- */
-static int idetape_chrdev_release (struct inode *inode, struct file *filp)
+static int idetape_chrdev_release(struct inode *inode, struct file *filp)
{
struct ide_tape_obj *tape = ide_tape_f(filp);
ide_drive_t *drive = tape->drive;
@@ -3478,14 +3311,12 @@ static int idetape_chrdev_release (struct inode *inode, struct file *filp)
lock_kernel();
tape = drive->driver_data;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 3)
- printk(KERN_INFO "ide-tape: Reached idetape_chrdev_release\n");
-#endif /* IDETAPE_DEBUG_LOG */
- if (tape->chrdev_direction == idetape_direction_write)
+ debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
+
+ if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
idetape_write_release(drive, minor);
- if (tape->chrdev_direction == idetape_direction_read) {
+ if (tape->chrdev_dir == IDETAPE_DIR_READ) {
if (minor < 128)
idetape_discard_read_pipeline(drive, 1);
else
@@ -3497,7 +3328,7 @@ static int idetape_chrdev_release (struct inode *inode, struct file *filp)
}
if (minor < 128 && test_bit(IDETAPE_MEDIUM_PRESENT, &tape->flags))
(void) idetape_rewind_tape(drive);
- if (tape->chrdev_direction == idetape_direction_none) {
+ if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
if (tape->door_locked == DOOR_LOCKED) {
if (idetape_create_prevent_cmd(drive, &pc, 0)) {
if (!idetape_queue_pc_tail(drive, &pc))
@@ -3512,37 +3343,39 @@ static int idetape_chrdev_release (struct inode *inode, struct file *filp)
}
/*
- * idetape_identify_device is called to check the contents of the
- * ATAPI IDENTIFY command results. We return:
+ * Check the contents of the ATAPI IDENTIFY command results. We return:
*
- * 1 If the tape can be supported by us, based on the information
- * we have so far.
+ * 1 - If the tape can be supported by us, based on the information we have so
+ * far.
*
- * 0 If this tape driver is not currently supported by us.
+ * 0 - If this tape drive is not currently supported by us.
*/
-static int idetape_identify_device (ide_drive_t *drive)
+static int idetape_identify_device(ide_drive_t *drive)
{
- struct idetape_id_gcw gcw;
- struct hd_driveid *id = drive->id;
+ u8 gcw[2], protocol, device_type, removable, packet_size;
if (drive->id_read == 0)
return 1;
- *((unsigned short *) &gcw) = id->config;
+ *((unsigned short *) &gcw) = drive->id->config;
+
+ protocol = (gcw[1] & 0xC0) >> 6;
+ device_type = gcw[1] & 0x1F;
+ removable = !!(gcw[0] & 0x80);
+ packet_size = gcw[0] & 0x3;
/* Check that we can support this device */
-
- if (gcw.protocol != 2)
+ if (protocol != 2)
printk(KERN_ERR "ide-tape: Protocol (0x%02x) is not ATAPI\n",
- gcw.protocol);
- else if (gcw.device_type != 1)
+ protocol);
+ else if (device_type != 1)
printk(KERN_ERR "ide-tape: Device type (0x%02x) is not set "
- "to tape\n", gcw.device_type);
- else if (!gcw.removable)
+ "to tape\n", device_type);
+ else if (!removable)
printk(KERN_ERR "ide-tape: The removable flag is not set\n");
- else if (gcw.packet_size != 0) {
- printk(KERN_ERR "ide-tape: Packet size (0x%02x) is not 12 "
- "bytes long\n", gcw.packet_size);
+ else if (packet_size != 0) {
+ printk(KERN_ERR "ide-tape: Packet size (0x%02x) is not 12"
+ " bytes\n", packet_size);
} else
return 1;
return 0;
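/*
 * Illustrative sketch (not part of this patch) of the IDENTIFY general
 * configuration word decoding above.  The byte array used in the patch
 * relies on gcw[1] holding the high byte of id->config; the equivalent
 * extraction straight from the 16-bit word is shown below.  Names are
 * hypothetical.
 */
static inline void decode_gcw(unsigned short config, unsigned char *protocol,
			      unsigned char *device_type,
			      unsigned char *removable,
			      unsigned char *packet_size)
{
	*protocol    = (config >> 14) & 0x3;	/* 2 == ATAPI */
	*device_type = (config >> 8) & 0x1f;	/* 1 == sequential access */
	*removable   = (config >> 7) & 0x1;
	*packet_size = config & 0x3;		/* 0 == 12-byte packets */
}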
@@ -3550,9 +3383,9 @@ static int idetape_identify_device (ide_drive_t *drive)
static void idetape_get_inquiry_results(ide_drive_t *drive)
{
- char *r;
idetape_tape_t *tape = drive->driver_data;
idetape_pc_t pc;
+ char fw_rev[6], vendor_id[10], product_id[18];
idetape_create_inquiry_cmd(&pc);
if (idetape_queue_pc_tail(drive, &pc)) {
@@ -3560,27 +3393,23 @@ static void idetape_get_inquiry_results(ide_drive_t *drive)
tape->name);
return;
}
- memcpy(tape->vendor_id, &pc.buffer[8], 8);
- memcpy(tape->product_id, &pc.buffer[16], 16);
- memcpy(tape->firmware_revision, &pc.buffer[32], 4);
-
- ide_fixstring(tape->vendor_id, 10, 0);
- ide_fixstring(tape->product_id, 18, 0);
- ide_fixstring(tape->firmware_revision, 6, 0);
- r = tape->firmware_revision;
- if (*(r + 1) == '.')
- tape->firmware_revision_num = (*r - '0') * 100 +
- (*(r + 2) - '0') * 10 + *(r + 3) - '0';
+ memcpy(vendor_id, &pc.buffer[8], 8);
+ memcpy(product_id, &pc.buffer[16], 16);
+ memcpy(fw_rev, &pc.buffer[32], 4);
+
+ ide_fixstring(vendor_id, 10, 0);
+ ide_fixstring(product_id, 18, 0);
+ ide_fixstring(fw_rev, 6, 0);
+
printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n",
- drive->name, tape->name, tape->vendor_id,
- tape->product_id, tape->firmware_revision);
+ drive->name, tape->name, vendor_id, product_id, fw_rev);
}
/*
* Ask the tape about its various parameters. In particular, we will adjust our
* data transfer buffer size to the recommended value as returned by the tape.
*/
-static void idetape_get_mode_sense_results (ide_drive_t *drive)
+static void idetape_get_mode_sense_results(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
idetape_pc_t pc;
@@ -3591,7 +3420,7 @@ static void idetape_get_mode_sense_results (ide_drive_t *drive)
if (idetape_queue_pc_tail(drive, &pc)) {
printk(KERN_ERR "ide-tape: Can't get tape parameters - assuming"
" some default values\n");
- tape->tape_block_size = 512;
+ tape->blk_size = 512;
put_unaligned(52, (u16 *)&tape->caps[12]);
put_unaligned(540, (u16 *)&tape->caps[14]);
put_unaligned(6*52, (u16 *)&tape->caps[16]);
@@ -3621,62 +3450,75 @@ static void idetape_get_mode_sense_results (ide_drive_t *drive)
memcpy(&tape->caps, caps, 20);
if (caps[7] & 0x02)
- tape->tape_block_size = 512;
+ tape->blk_size = 512;
else if (caps[7] & 0x04)
- tape->tape_block_size = 1024;
+ tape->blk_size = 1024;
}
#ifdef CONFIG_IDE_PROC_FS
-static void idetape_add_settings (ide_drive_t *drive)
+static void idetape_add_settings(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
-/*
- * drive setting name read/write data type min max mul_factor div_factor data pointer set function
- */
ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
1, 2, (u16 *)&tape->caps[16], NULL);
- ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
- ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_stages, NULL);
- ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
- ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages, NULL);
- ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_pending_stages, NULL);
+ ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
+ tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
+ ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
+ tape->stage_size / 1024, 1, &tape->max_stages, NULL);
+ ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
+ tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
+ ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
+ 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
+ NULL);
+ ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
+ 0xffff, tape->stage_size / 1024, 1,
+ &tape->nr_pending_stages, NULL);
ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
1, 1, (u16 *)&tape->caps[14], NULL);
- ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1024, &tape->stage_size, NULL);
- ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_frequency, NULL);
- ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
- ide_add_setting(drive, "pipeline_head_speed_c",SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed, NULL);
- ide_add_setting(drive, "pipeline_head_speed_u",SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->uncontrolled_pipeline_head_speed,NULL);
- ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->avg_speed, NULL);
- ide_add_setting(drive, "debug_level", SETTING_RW, TYPE_INT, 0, 0xffff, 1, 1, &tape->debug_level, NULL);
+ ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1,
+ 1024, &tape->stage_size, NULL);
+ ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
+ IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
+ NULL);
+ ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
+ 1, &drive->dsc_overlap, NULL);
+ ide_add_setting(drive, "pipeline_head_speed_c", SETTING_READ, TYPE_INT,
+ 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed,
+ NULL);
+ ide_add_setting(drive, "pipeline_head_speed_u", SETTING_READ, TYPE_INT,
+ 0, 0xffff, 1, 1,
+ &tape->uncontrolled_pipeline_head_speed, NULL);
+ ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
+ 1, 1, &tape->avg_speed, NULL);
+ ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
+ 1, &tape->debug_mask, NULL);
}
#else
static inline void idetape_add_settings(ide_drive_t *drive) { ; }
#endif
/*
- * ide_setup is called to:
+ * The function below is called to:
*
- * 1. Initialize our various state variables.
- * 2. Ask the tape for its capabilities.
- * 3. Allocate a buffer which will be used for data
- * transfer. The buffer size is chosen based on
- * the recommendation which we received in step (2).
+ * 1. Initialize our various state variables.
+ * 2. Ask the tape for its capabilities.
+ * 3. Allocate a buffer which will be used for data transfer. The buffer size
+ * is chosen based on the recommendation which we received in step 2.
*
- * Note that at this point ide.c already assigned us an irq, so that
- * we can queue requests here and wait for their completion.
+ * Note that at this point ide.c already assigned us an irq, so that we can
+ * queue requests here and wait for their completion.
*/
-static void idetape_setup (ide_drive_t *drive, idetape_tape_t *tape, int minor)
+static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
{
unsigned long t1, tmid, tn, t;
int speed;
- struct idetape_id_gcw gcw;
int stage_size;
+ u8 gcw[2];
struct sysinfo si;
u16 *ctl = (u16 *)&tape->caps[12];
- spin_lock_init(&tape->spinlock);
+ spin_lock_init(&tape->lock);
drive->dsc_overlap = 1;
if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
printk(KERN_INFO "ide-tape: %s: disabling DSC overlap\n",
@@ -3690,25 +3532,29 @@ static void idetape_setup (ide_drive_t *drive, idetape_tape_t *tape, int minor)
tape->name[0] = 'h';
tape->name[1] = 't';
tape->name[2] = '0' + minor;
- tape->chrdev_direction = idetape_direction_none;
+ tape->chrdev_dir = IDETAPE_DIR_NONE;
tape->pc = tape->pc_stack;
tape->max_insert_speed = 10000;
tape->speed_control = 1;
*((unsigned short *) &gcw) = drive->id->config;
- if (gcw.drq_type == 1)
+
+ /* Command packet DRQ type */
+ if (((gcw[0] & 0x60) >> 5) == 1)
set_bit(IDETAPE_DRQ_INTERRUPT, &tape->flags);
- tape->min_pipeline = tape->max_pipeline = tape->max_stages = 10;
-
+ tape->min_pipeline = 10;
+ tape->max_pipeline = 10;
+ tape->max_stages = 10;
+
idetape_get_inquiry_results(drive);
idetape_get_mode_sense_results(drive);
ide_tape_get_bsize_from_bdesc(drive);
tape->user_bs_factor = 1;
- tape->stage_size = *ctl * tape->tape_block_size;
+ tape->stage_size = *ctl * tape->blk_size;
while (tape->stage_size > 0xffff) {
printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
*ctl /= 2;
- tape->stage_size = *ctl * tape->tape_block_size;
+ tape->stage_size = *ctl * tape->blk_size;
}
stage_size = tape->stage_size;
tape->pages_per_stage = stage_size / PAGE_SIZE;
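/*
 * Illustrative sketch (not part of this patch) of the stage sizing above:
 * the recommended transfer size (ctl blocks from the capabilities page) is
 * halved until one stage fits in 0xffff bytes.  Names are hypothetical.
 */
static inline unsigned int stage_size_fit(unsigned short *ctl,
					  unsigned int blk_size)
{
	unsigned int stage_size = *ctl * blk_size;

	while (stage_size > 0xffff) {
		*ctl /= 2;
		stage_size = *ctl * blk_size;
	}
	return stage_size;
}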
@@ -3722,17 +3568,22 @@ static void idetape_setup (ide_drive_t *drive, idetape_tape_t *tape, int minor)
tape->max_stages = speed * 1000 * 10 / tape->stage_size;
- /*
- * Limit memory use for pipeline to 10% of physical memory
- */
+ /* Limit memory use for pipeline to 10% of physical memory */
si_meminfo(&si);
- if (tape->max_stages * tape->stage_size > si.totalram * si.mem_unit / 10)
- tape->max_stages = si.totalram * si.mem_unit / (10 * tape->stage_size);
+ if (tape->max_stages * tape->stage_size >
+ si.totalram * si.mem_unit / 10)
+ tape->max_stages =
+ si.totalram * si.mem_unit / (10 * tape->stage_size);
+
tape->max_stages = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
- tape->max_pipeline = min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
- if (tape->max_stages == 0)
- tape->max_stages = tape->min_pipeline = tape->max_pipeline = 1;
+ tape->max_pipeline =
+ min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
+ if (tape->max_stages == 0) {
+ tape->max_stages = 1;
+ tape->min_pipeline = 1;
+ tape->max_pipeline = 1;
+ }
t1 = (tape->stage_size * HZ) / (speed * 1000);
tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
@@ -3744,17 +3595,19 @@ static void idetape_setup (ide_drive_t *drive, idetape_tape_t *tape, int minor)
t = t1;
/*
- * Ensure that the number we got makes sense; limit
- * it within IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
+ * Ensure that the number we got makes sense; limit it within
+ * IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
*/
- tape->best_dsc_rw_frequency = max_t(unsigned long, min_t(unsigned long, t, IDETAPE_DSC_RW_MAX), IDETAPE_DSC_RW_MIN);
+ tape->best_dsc_rw_freq = max_t(unsigned long,
+ min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
+ IDETAPE_DSC_RW_MIN);
printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
"%dkB pipeline, %lums tDSC%s\n",
drive->name, tape->name, *(u16 *)&tape->caps[14],
(*(u16 *)&tape->caps[16] * 512) / tape->stage_size,
tape->stage_size / 1024,
tape->max_stages * tape->stage_size / 1024,
- tape->best_dsc_rw_frequency * 1000 / HZ,
+ tape->best_dsc_rw_freq * 1000 / HZ,
drive->using_dma ? ", DMA":"");
idetape_add_settings(drive);
@@ -3782,7 +3635,8 @@ static void ide_tape_release(struct kref *kref)
drive->dsc_overlap = 0;
drive->driver_data = NULL;
device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
- device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor + 128));
+ device_destroy(idetape_sysfs_class,
+ MKDEV(IDETAPE_MAJOR, tape->minor + 128));
idetape_devs[tape->minor] = NULL;
g->private_data = NULL;
put_disk(g);
@@ -3831,9 +3685,7 @@ static ide_driver_t idetape_driver = {
#endif
};
-/*
- * Our character device supporting functions, passed to register_chrdev.
- */
+/* Our character device supporting functions, passed to register_chrdev. */
static const struct file_operations idetape_fops = {
.owner = THIS_MODULE,
.read = idetape_chrdev_read,
@@ -3848,7 +3700,8 @@ static int idetape_open(struct inode *inode, struct file *filp)
struct gendisk *disk = inode->i_bdev->bd_disk;
struct ide_tape_obj *tape;
- if (!(tape = ide_tape_get(disk)))
+ tape = ide_tape_get(disk);
+ if (!tape)
return -ENXIO;
return 0;
@@ -3895,21 +3748,20 @@ static int ide_tape_probe(ide_drive_t *drive)
goto failed;
if (drive->media != ide_tape)
goto failed;
- if (!idetape_identify_device (drive)) {
- printk(KERN_ERR "ide-tape: %s: not supported by this version of ide-tape\n", drive->name);
+ if (!idetape_identify_device(drive)) {
+ printk(KERN_ERR "ide-tape: %s: not supported by this version of"
+ " the driver\n", drive->name);
goto failed;
}
if (drive->scsi) {
- printk("ide-tape: passing drive %s to ide-scsi emulation.\n", drive->name);
+ printk(KERN_INFO "ide-tape: passing drive %s to ide-scsi"
+ " emulation.\n", drive->name);
goto failed;
}
- if (strstr(drive->id->model, "OnStream DI-")) {
- printk(KERN_WARNING "ide-tape: Use drive %s with ide-scsi emulation and osst.\n", drive->name);
- printk(KERN_WARNING "ide-tape: OnStream support will be removed soon from ide-tape!\n");
- }
- tape = kzalloc(sizeof (idetape_tape_t), GFP_KERNEL);
+ tape = kzalloc(sizeof(idetape_tape_t), GFP_KERNEL);
if (tape == NULL) {
- printk(KERN_ERR "ide-tape: %s: Can't allocate a tape structure\n", drive->name);
+ printk(KERN_ERR "ide-tape: %s: Can't allocate a tape struct\n",
+ drive->name);
goto failed;
}
@@ -3955,10 +3807,7 @@ failed:
return -ENODEV;
}
-MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
-MODULE_LICENSE("GPL");
-
-static void __exit idetape_exit (void)
+static void __exit idetape_exit(void)
{
driver_unregister(&idetape_driver.gen_driver);
class_destroy(idetape_sysfs_class);
@@ -3977,7 +3826,8 @@ static int __init idetape_init(void)
}
if (register_chrdev(IDETAPE_MAJOR, "ht", &idetape_fops)) {
- printk(KERN_ERR "ide-tape: Failed to register character device interface\n");
+ printk(KERN_ERR "ide-tape: Failed to register chrdev"
+ " interface\n");
error = -EBUSY;
goto out_free_class;
}
@@ -4000,3 +3850,5 @@ MODULE_ALIAS("ide:*m-tape*");
module_init(idetape_init);
module_exit(idetape_exit);
MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR);
+MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
+MODULE_LICENSE("GPL");
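The DSC polling interval computed in idetape_setup() above is clamped with the nested max_t(min_t()) idiom. As a stand-alone illustration (the bounds below are made-up stand-ins for IDETAPE_DSC_RW_MIN/MAX, not the driver's real values), the same clamp can be exercised in plain C:

#include <stdio.h>

/* clamp t into [lo, hi], i.e. max(min(t, hi), lo) for lo <= hi */
static unsigned long clamp_ulong(unsigned long t, unsigned long lo,
				 unsigned long hi)
{
	return t > hi ? hi : (t < lo ? lo : t);
}

int main(void)
{
	printf("%lu\n", clamp_ulong(5, 50, 1000));	/* too fast  -> 50   */
	printf("%lu\n", clamp_ulong(400, 50, 1000));	/* in range  -> 400  */
	printf("%lu\n", clamp_ulong(9000, 50, 1000));	/* too slow  -> 1000 */
	return 0;
}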
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 4e1da1c78cb5..0518a2e948cf 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -189,12 +189,11 @@ EXPORT_SYMBOL_GPL(do_rw_taskfile);
*/
static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
- u8 stat;
+ u8 stat = ide_read_status(drive);
- if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
+ if (OK_STAT(stat, READY_STAT, BAD_STAT))
drive->mult_count = drive->mult_req;
- } else {
+ else {
drive->mult_req = drive->mult_count = 0;
drive->special.b.recalibrate = 1;
(void) ide_dump_status(drive, "set_multmode", stat);
@@ -207,11 +206,10 @@ static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
*/
static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
int retries = 5;
u8 stat;
- while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
+ while (((stat = ide_read_status(drive)) & BUSY_STAT) && retries--)
udelay(10);
if (OK_STAT(stat, READY_STAT, BAD_STAT))
@@ -230,10 +228,9 @@ static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
*/
static ide_startstop_t recal_intr(ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
- u8 stat;
+ u8 stat = ide_read_status(drive);
- if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT))
+ if (!OK_STAT(stat, READY_STAT, BAD_STAT))
return ide_error(drive, "recal_intr", stat);
return ide_stopped;
}
@@ -244,23 +241,23 @@ static ide_startstop_t recal_intr(ide_drive_t *drive)
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
ide_task_t *args = HWGROUP(drive)->rq->special;
- ide_hwif_t *hwif = HWIF(drive);
u8 stat;
local_irq_enable_in_hardirq();
- if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
+ stat = ide_read_status(drive);
+
+ if (!OK_STAT(stat, READY_STAT, BAD_STAT))
return ide_error(drive, "task_no_data_intr", stat);
/* calls ide_end_drive_cmd */
- }
+
if (args)
- ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
+ ide_end_drive_cmd(drive, stat, ide_read_error(drive));
return ide_stopped;
}
static u8 wait_drive_not_busy(ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
int retries;
u8 stat;
@@ -269,7 +266,9 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
* This can take up to 10 usec, but we will wait max 1 ms.
*/
for (retries = 0; retries < 100; retries++) {
- if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT)
+ stat = ide_read_status(drive);
+
+ if (stat & BUSY_STAT)
udelay(10);
else
break;
@@ -408,7 +407,7 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
{
if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
- u8 err = drive->hwif->INB(IDE_ERROR_REG);
+ u8 err = ide_read_error(drive);
ide_end_drive_cmd(drive, stat, err);
return;
@@ -430,7 +429,7 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
struct request *rq = HWGROUP(drive)->rq;
- u8 stat = hwif->INB(IDE_STATUS_REG);
+ u8 stat = ide_read_status(drive);
/* new way for dealing with premature shared PCI interrupts */
if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
@@ -465,7 +464,7 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
struct request *rq = HWGROUP(drive)->rq;
- u8 stat = hwif->INB(IDE_STATUS_REG);
+ u8 stat = ide_read_status(drive);
if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
return task_error(drive, rq, __FUNCTION__, stat);
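The converted interrupt handlers above all follow the same bounded-poll pattern that wait_drive_not_busy() uses: read the status register, and retry with a short delay while the BUSY bit is set. A self-contained user-space sketch of that loop (read_status() is a fake stand-in for ide_read_status(), and the status values are invented for the demo):

#include <stdio.h>
#include <unistd.h>

#define BUSY_STAT 0x80	/* busy bit, as assumed for this demo */

static unsigned char read_status(void)
{
	static int calls;

	/* pretend the device reports not-busy on the third poll */
	return ++calls < 3 ? BUSY_STAT : 0x50;
}

int main(void)
{
	unsigned char stat = BUSY_STAT;
	int retries;

	/* poll up to 100 times, 10 us apart, as wait_drive_not_busy() does */
	for (retries = 0; retries < 100; retries++) {
		stat = read_status();
		if (stat & BUSY_STAT)
			usleep(10);
		else
			break;
	}
	printf("status 0x%02x after %d polls\n", stat, retries);
	return 0;
}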
diff --git a/drivers/ide/ide-timing.h b/drivers/ide/ide-timing.h
index adeda7626529..3b12ffe77071 100644
--- a/drivers/ide/ide-timing.h
+++ b/drivers/ide/ide-timing.h
@@ -199,7 +199,7 @@ static int ide_timing_compute(ide_drive_t *drive, short speed, struct ide_timing
}
/*
- * Lenghten active & recovery time so that cycle time is correct.
+ * Lengthen active & recovery time so that cycle time is correct.
*/
if (t->act8b + t->rec8b < t->cyc8b) {
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index ac6136001615..ad0e9955f73c 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -618,60 +618,6 @@ abort:
EXPORT_SYMBOL(ide_unregister);
-
-/**
- * ide_setup_ports - set up IDE interface ports
- * @hw: register descriptions
- * @base: base register
- * @offsets: table of register offsets
- * @ctrl: control register
- * @ack_irq: IRQ ack
- * @irq: interrupt lie
- *
- * Setup hw_regs_t structure described by parameters. You
- * may set up the hw structure yourself OR use this routine to
- * do it for you. This is basically a helper
- *
- */
-
-void ide_setup_ports ( hw_regs_t *hw,
- unsigned long base, int *offsets,
- unsigned long ctrl, unsigned long intr,
- ide_ack_intr_t *ack_intr,
-/*
- * ide_io_ops_t *iops,
- */
- int irq)
-{
- int i;
-
- memset(hw, 0, sizeof(hw_regs_t));
- for (i = 0; i < IDE_NR_PORTS; i++) {
- if (offsets[i] == -1) {
- switch(i) {
- case IDE_CONTROL_OFFSET:
- hw->io_ports[i] = ctrl;
- break;
-#if defined(CONFIG_AMIGA) || defined(CONFIG_MAC)
- case IDE_IRQ_OFFSET:
- hw->io_ports[i] = intr;
- break;
-#endif /* (CONFIG_AMIGA) || (CONFIG_MAC) */
- default:
- hw->io_ports[i] = 0;
- break;
- }
- } else {
- hw->io_ports[i] = base + offsets[i];
- }
- }
- hw->irq = irq;
- hw->ack_intr = ack_intr;
-/*
- * hw->iops = iops;
- */
-}
-
void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
{
memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
index 8bdb79da17e8..50ffa871d5e9 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/legacy/buddha.c
@@ -56,31 +56,11 @@ static u_int xsurf_bases[XSURF_NUM_HWIFS] __initdata = {
XSURF_BASE1, XSURF_BASE2
};
-
/*
* Offsets from one of the above bases
*/
-#define BUDDHA_DATA 0x00
-#define BUDDHA_ERROR 0x06 /* see err-bits */
-#define BUDDHA_NSECTOR 0x0a /* nr of sectors to read/write */
-#define BUDDHA_SECTOR 0x0e /* starting sector */
-#define BUDDHA_LCYL 0x12 /* starting cylinder */
-#define BUDDHA_HCYL 0x16 /* high byte of starting cyl */
-#define BUDDHA_SELECT 0x1a /* 101dhhhh , d=drive, hhhh=head */
-#define BUDDHA_STATUS 0x1e /* see status-bits */
#define BUDDHA_CONTROL 0x11a
-#define XSURF_CONTROL -1 /* X-Surf has no CS1* (Control/AltStat) */
-
-static int buddha_offsets[IDE_NR_PORTS] __initdata = {
- BUDDHA_DATA, BUDDHA_ERROR, BUDDHA_NSECTOR, BUDDHA_SECTOR, BUDDHA_LCYL,
- BUDDHA_HCYL, BUDDHA_SELECT, BUDDHA_STATUS, BUDDHA_CONTROL, -1
-};
-
-static int xsurf_offsets[IDE_NR_PORTS] __initdata = {
- BUDDHA_DATA, BUDDHA_ERROR, BUDDHA_NSECTOR, BUDDHA_SECTOR, BUDDHA_LCYL,
- BUDDHA_HCYL, BUDDHA_SELECT, BUDDHA_STATUS, XSURF_CONTROL, -1
-};
/*
* Other registers
@@ -140,6 +120,26 @@ static int xsurf_ack_intr(ide_hwif_t *hwif)
return 1;
}
+static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
+ unsigned long ctl, unsigned long irq_port,
+ ide_ack_intr_t *ack_intr)
+{
+ int i;
+
+ memset(hw, 0, sizeof(*hw));
+
+ hw->io_ports[IDE_DATA_OFFSET] = base;
+
+ for (i = 1; i < 8; i++)
+ hw->io_ports[i] = base + 2 + i * 4;
+
+ hw->io_ports[IDE_CONTROL_OFFSET] = ctl;
+ hw->io_ports[IDE_IRQ_OFFSET] = irq_port;
+
+ hw->irq = IRQ_AMIGA_PORTS;
+ hw->ack_intr = ack_intr;
+}
+
/*
* Probe for a Buddha or Catweasel IDE interface
*/
@@ -202,22 +202,24 @@ fail_base2:
printk(KERN_INFO "ide: %s IDE controller\n",
buddha_board_name[type]);
- for(i=0;i<buddha_num_hwifs;i++) {
- if(type != BOARD_XSURF) {
- ide_setup_ports(&hw, (buddha_board+buddha_bases[i]),
- buddha_offsets, 0,
- (buddha_board+buddha_irqports[i]),
- buddha_ack_intr,
-// budda_iops,
- IRQ_AMIGA_PORTS);
+ for (i = 0; i < buddha_num_hwifs; i++) {
+ unsigned long base, ctl, irq_port;
+ ide_ack_intr_t *ack_intr;
+
+ if (type != BOARD_XSURF) {
+ base = buddha_board + buddha_bases[i];
+ ctl = base + BUDDHA_CONTROL;
+ irq_port = buddha_board + buddha_irqports[i];
+ ack_intr = buddha_ack_intr;
} else {
- ide_setup_ports(&hw, (buddha_board+xsurf_bases[i]),
- xsurf_offsets, 0,
- (buddha_board+xsurf_irqports[i]),
- xsurf_ack_intr,
-// xsurf_iops,
- IRQ_AMIGA_PORTS);
- }
+ base = buddha_board + xsurf_bases[i];
+ /* X-Surf has no CS1* (Control/AltStat) */
+ ctl = 0;
+ irq_port = buddha_board + xsurf_irqports[i];
+ ack_intr = xsurf_ack_intr;
+ }
+
+ buddha_setup_ports(&hw, base, ctl, irq_port, ack_intr);
hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
if (hwif) {
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index 85b69a82825f..f044048903b3 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -33,22 +33,8 @@
* Offsets from the above base
*/
-#define ATA_HD_DATA 0x00
-#define ATA_HD_ERROR 0x05 /* see err-bits */
-#define ATA_HD_NSECTOR 0x09 /* nr of sectors to read/write */
-#define ATA_HD_SECTOR 0x0d /* starting sector */
-#define ATA_HD_LCYL 0x11 /* starting cylinder */
-#define ATA_HD_HCYL 0x15 /* high byte of starting cyl */
-#define ATA_HD_SELECT 0x19 /* 101dhhhh , d=drive, hhhh=head */
-#define ATA_HD_STATUS 0x1d /* see status-bits */
#define ATA_HD_CONTROL 0x39
-static int falconide_offsets[IDE_NR_PORTS] __initdata = {
- ATA_HD_DATA, ATA_HD_ERROR, ATA_HD_NSECTOR, ATA_HD_SECTOR, ATA_HD_LCYL,
- ATA_HD_HCYL, ATA_HD_SELECT, ATA_HD_STATUS, ATA_HD_CONTROL, -1
-};
-
-
/*
* falconide_intr_lock is used to obtain access to the IDE interrupt,
* which is shared between several drivers.
@@ -57,6 +43,22 @@ static int falconide_offsets[IDE_NR_PORTS] __initdata = {
int falconide_intr_lock;
EXPORT_SYMBOL(falconide_intr_lock);
+static void __init falconide_setup_ports(hw_regs_t *hw)
+{
+ int i;
+
+ memset(hw, 0, sizeof(*hw));
+
+ hw->io_ports[IDE_DATA_OFFSET] = ATA_HD_BASE;
+
+ for (i = 1; i < 8; i++)
+ hw->io_ports[i] = ATA_HD_BASE + 1 + i * 4;
+
+ hw->io_ports[IDE_CONTROL_OFFSET] = ATA_HD_BASE + ATA_HD_CONTROL;
+
+ hw->irq = IRQ_MFP_IDE;
+ hw->ack_intr = NULL;
+}
/*
* Probe for a Falcon IDE interface
@@ -64,16 +66,15 @@ EXPORT_SYMBOL(falconide_intr_lock);
static int __init falconide_init(void)
{
- if (MACH_IS_ATARI && ATARIHW_PRESENT(IDE)) {
hw_regs_t hw;
ide_hwif_t *hwif;
+ if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
+ return 0;
+
printk(KERN_INFO "ide: Falcon IDE controller\n");
- ide_setup_ports(&hw, ATA_HD_BASE, falconide_offsets,
- 0, 0, NULL,
-// falconide_iops,
- IRQ_MFP_IDE);
+ falconide_setup_ports(&hw);
hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
if (hwif) {
@@ -85,9 +86,8 @@ static int __init falconide_init(void)
ide_device_add(idx, NULL);
}
- }
- return 0;
+ return 0;
}
module_init(falconide_init);
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
index fc29ce75aff1..9d3851d27677 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/legacy/gayle.c
@@ -34,22 +34,8 @@
* Offsets from one of the above bases
*/
-#define GAYLE_DATA 0x00
-#define GAYLE_ERROR 0x06 /* see err-bits */
-#define GAYLE_NSECTOR 0x0a /* nr of sectors to read/write */
-#define GAYLE_SECTOR 0x0e /* starting sector */
-#define GAYLE_LCYL 0x12 /* starting cylinder */
-#define GAYLE_HCYL 0x16 /* high byte of starting cyl */
-#define GAYLE_SELECT 0x1a /* 101dhhhh , d=drive, hhhh=head */
-#define GAYLE_STATUS 0x1e /* see status-bits */
#define GAYLE_CONTROL 0x101a
-static int gayle_offsets[IDE_NR_PORTS] __initdata = {
- GAYLE_DATA, GAYLE_ERROR, GAYLE_NSECTOR, GAYLE_SECTOR, GAYLE_LCYL,
- GAYLE_HCYL, GAYLE_SELECT, GAYLE_STATUS, -1, -1
-};
-
-
/*
* These are at different offsets from the base
*/
@@ -106,6 +92,26 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif)
return 1;
}
+static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
+ unsigned long ctl, unsigned long irq_port,
+ ide_ack_intr_t *ack_intr)
+{
+ int i;
+
+ memset(hw, 0, sizeof(*hw));
+
+ hw->io_ports[IDE_DATA_OFFSET] = base;
+
+ for (i = 1; i < 8; i++)
+ hw->io_ports[i] = base + 2 + i * 4;
+
+ hw->io_ports[IDE_CONTROL_OFFSET] = ctl;
+ hw->io_ports[IDE_IRQ_OFFSET] = irq_port;
+
+ hw->irq = IRQ_AMIGA_PORTS;
+ hw->ack_intr = ack_intr;
+}
+
/*
* Probe for a Gayle IDE interface (and optionally for an IDE doubler)
*/
@@ -167,10 +173,7 @@ found:
base = (unsigned long)ZTWO_VADDR(phys_base);
ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0;
- ide_setup_ports(&hw, base, gayle_offsets,
- ctrlport, irqport, ack_intr,
-// &gayle_iops,
- IRQ_AMIGA_PORTS);
+ gayle_setup_ports(&hw, base, ctrlport, irqport, ack_intr);
hwif = ide_find_port(base);
if (hwif) {
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index 8e05d88e81ba..0b0d86731927 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -421,11 +421,14 @@ static void bad_rw_intr(void)
static inline int wait_DRQ(void)
{
- int retries = 100000, stat;
+ int retries;
+ int stat;
- while (--retries > 0)
- if ((stat = inb_p(HD_STATUS)) & DRQ_STAT)
+ for (retries = 0; retries < 100000; retries++) {
+ stat = inb_p(HD_STATUS);
+ if (stat & DRQ_STAT)
return 0;
+ }
dump_status("wait_DRQ", stat);
return -1;
}
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
index 26c82ce602de..688fcae17488 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/legacy/ide_platform.c
@@ -17,7 +17,7 @@
#include <linux/ide.h>
#include <linux/ioport.h>
#include <linux/module.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <linux/platform_device.h>
#include <linux/io.h>
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index 06df8df857a3..a61e60737dc7 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -31,14 +31,6 @@
* These match MkLinux so they should be correct.
*/
-#define IDE_DATA 0x00
-#define IDE_ERROR 0x04 /* see err-bits */
-#define IDE_NSECTOR 0x08 /* nr of sectors to read/write */
-#define IDE_SECTOR 0x0c /* starting sector */
-#define IDE_LCYL 0x10 /* starting cylinder */
-#define IDE_HCYL 0x14 /* high byte of starting cyl */
-#define IDE_SELECT 0x18 /* 101dhhhh , d=drive, hhhh=head */
-#define IDE_STATUS 0x1c /* see status-bits */
#define IDE_CONTROL 0x38 /* control/altstatus */
/*
@@ -63,11 +55,6 @@
volatile unsigned char *ide_ifr = (unsigned char *) (IDE_BASE + IDE_IFR);
-static int macide_offsets[IDE_NR_PORTS] = {
- IDE_DATA, IDE_ERROR, IDE_NSECTOR, IDE_SECTOR, IDE_LCYL,
- IDE_HCYL, IDE_SELECT, IDE_STATUS, IDE_CONTROL
-};
-
int macide_ack_intr(ide_hwif_t* hwif)
{
if (*ide_ifr & 0x20) {
@@ -77,6 +64,22 @@ int macide_ack_intr(ide_hwif_t* hwif)
return 0;
}
+static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base,
+ int irq, ide_ack_intr_t *ack_intr)
+{
+ int i;
+
+ memset(hw, 0, sizeof(*hw));
+
+ for (i = 0; i < 8; i++)
+ hw->io_ports[i] = base + i * 4;
+
+ hw->io_ports[IDE_CONTROL_OFFSET] = base + IDE_CONTROL;
+
+ hw->irq = irq;
+ hw->ack_intr = ack_intr;
+}
+
static const char *mac_ide_name[] =
{ "Quadra", "Powerbook", "Powerbook Baboon" };
@@ -86,27 +89,27 @@ static const char *mac_ide_name[] =
static int __init macide_init(void)
{
- hw_regs_t hw;
ide_hwif_t *hwif;
+ ide_ack_intr_t *ack_intr;
+ unsigned long base;
+ int irq;
+ hw_regs_t hw;
switch (macintosh_config->ide_type) {
case MAC_IDE_QUADRA:
- ide_setup_ports(&hw, IDE_BASE, macide_offsets,
- 0, 0, macide_ack_intr,
-// quadra_ide_iops,
- IRQ_NUBUS_F);
+ base = IDE_BASE;
+ ack_intr = macide_ack_intr;
+ irq = IRQ_NUBUS_F;
break;
case MAC_IDE_PB:
- ide_setup_ports(&hw, IDE_BASE, macide_offsets,
- 0, 0, macide_ack_intr,
-// macide_pb_iops,
- IRQ_NUBUS_C);
+ base = IDE_BASE;
+ ack_intr = macide_ack_intr;
+ irq = IRQ_NUBUS_C;
break;
case MAC_IDE_BABOON:
- ide_setup_ports(&hw, BABOON_BASE, macide_offsets,
- 0, 0, NULL,
-// macide_baboon_iops,
- IRQ_BABOON_1);
+ base = BABOON_BASE;
+ ack_intr = NULL;
+ irq = IRQ_BABOON_1;
break;
default:
return -ENODEV;
@@ -115,6 +118,8 @@ static int __init macide_init(void)
printk(KERN_INFO "ide: Macintosh %s IDE controller\n",
mac_ide_name[macintosh_config->ide_type - 1]);
+ macide_setup_ports(&hw, base, irq, ack_intr);
+
hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
if (hwif) {
u8 index = hwif->index;
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index 2f0b34d892a1..1381b91bc316 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -66,16 +66,12 @@ static int q40ide_default_irq(unsigned long base)
/*
- * This is very similar to ide_setup_ports except that addresses
- * are pretranslated for q40 ISA access
+ * Addresses are pretranslated for Q40 ISA access.
*/
void q40_ide_setup_ports ( hw_regs_t *hw,
unsigned long base, int *offsets,
unsigned long ctrl, unsigned long intr,
ide_ack_intr_t *ack_intr,
-/*
- * ide_io_ops_t *iops,
- */
int irq)
{
int i;
@@ -92,9 +88,6 @@ void q40_ide_setup_ports ( hw_regs_t *hw,
hw->irq = irq;
hw->ack_intr = ack_intr;
-/*
- * hw->iops = iops;
- */
}
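The new per-board helpers above (buddha_setup_ports(), falconide_setup_ports(), gayle_setup_ports(), macide_setup_ports()) open-code the register spacing that the removed offset tables used to describe: on Buddha/Gayle the task-file registers sit at base + 2 + i * 4, on the Falcon at base + 1 + i * 4, and on the Mac IDE cell at base + i * 4. A quick stand-alone check that the Buddha arithmetic reproduces the old table (base is arbitrary here):

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x0;
	int i;

	printf("data  : 0x%02lx\n", base);	/* 0x00, the old BUDDHA_DATA */
	for (i = 1; i < 8; i++)
		printf("reg %d : 0x%02lx\n", i, base + 2 + i * 4);
	/* prints 0x06 0x0a 0x0e 0x12 0x16 0x1a 0x1e, matching the old
	   BUDDHA_ERROR ... BUDDHA_STATUS offsets */
	return 0;
}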
diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile
index 94803253e8af..02e6ee7d751d 100644
--- a/drivers/ide/pci/Makefile
+++ b/drivers/ide/pci/Makefile
@@ -34,7 +34,8 @@ obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o
obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o
# Must appear at the end of the block
-obj-$(CONFIG_BLK_DEV_GENERIC) += generic.o
+obj-$(CONFIG_BLK_DEV_GENERIC) += ide-pci-generic.o
+ide-pci-generic-y += generic.o
ifeq ($(CONFIG_BLK_DEV_CMD640), m)
obj-m += cmd640.o
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/pci/generic.c
index 9262a9174b4e..7fd83a9d4dee 100644
--- a/drivers/ide/pci/generic.c
+++ b/drivers/ide/pci/generic.c
@@ -29,19 +29,6 @@
static int ide_generic_all; /* Set to claim all devices */
-/*
- * the module_param_named() was added for the modular case
- * the __setup() is left as compatibility for existing setups
- */
-#ifndef MODULE
-static int __init ide_generic_all_on(char *unused)
-{
- ide_generic_all = 1;
- printk(KERN_INFO "IDE generic will claim all unknown PCI IDE storage controllers.\n");
- return 1;
-}
-const __setup("all-generic-ide", ide_generic_all_on);
-#endif
module_param_named(all_generic_ide, ide_generic_all, bool, 0444);
MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers.");
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index ef5b39fa042b..cc4be9621bc0 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -704,9 +704,6 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
hwif->sata_scr[SATA_STATUS_OFFSET] = base + 0x104;
hwif->sata_scr[SATA_ERROR_OFFSET] = base + 0x108;
hwif->sata_scr[SATA_CONTROL_OFFSET] = base + 0x100;
- hwif->sata_misc[SATA_MISC_OFFSET] = base + 0x140;
- hwif->sata_misc[SATA_PHY_OFFSET] = base + 0x144;
- hwif->sata_misc[SATA_IEN_OFFSET] = base + 0x148;
}
memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index a193dfbf99d2..a5dc78ae62d4 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -44,8 +44,8 @@ source "drivers/infiniband/hw/ipath/Kconfig"
source "drivers/infiniband/hw/ehca/Kconfig"
source "drivers/infiniband/hw/amso1100/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
-
source "drivers/infiniband/hw/mlx4/Kconfig"
+source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 75f325e40b54..ed35e4496241 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
+obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index c0150147d347..638b727d42e0 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -974,6 +974,9 @@ static void cm_format_req(struct cm_req_msg *req_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_req_param *param)
{
+ struct ib_sa_path_rec *pri_path = param->primary_path;
+ struct ib_sa_path_rec *alt_path = param->alternate_path;
+
cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
@@ -997,35 +1000,46 @@ static void cm_format_req(struct cm_req_msg *req_msg,
cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
cm_req_set_srq(req_msg, param->srq);
- req_msg->primary_local_lid = param->primary_path->slid;
- req_msg->primary_remote_lid = param->primary_path->dlid;
- req_msg->primary_local_gid = param->primary_path->sgid;
- req_msg->primary_remote_gid = param->primary_path->dgid;
- cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
- cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
- req_msg->primary_traffic_class = param->primary_path->traffic_class;
- req_msg->primary_hop_limit = param->primary_path->hop_limit;
- cm_req_set_primary_sl(req_msg, param->primary_path->sl);
- cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
+ if (pri_path->hop_limit <= 1) {
+ req_msg->primary_local_lid = pri_path->slid;
+ req_msg->primary_remote_lid = pri_path->dlid;
+ } else {
+ /* Work-around until there's a way to obtain remote LID info */
+ req_msg->primary_local_lid = IB_LID_PERMISSIVE;
+ req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
+ }
+ req_msg->primary_local_gid = pri_path->sgid;
+ req_msg->primary_remote_gid = pri_path->dgid;
+ cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
+ cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
+ req_msg->primary_traffic_class = pri_path->traffic_class;
+ req_msg->primary_hop_limit = pri_path->hop_limit;
+ cm_req_set_primary_sl(req_msg, pri_path->sl);
+ cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
cm_req_set_primary_local_ack_timeout(req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
- param->primary_path->packet_life_time));
+ pri_path->packet_life_time));
- if (param->alternate_path) {
- req_msg->alt_local_lid = param->alternate_path->slid;
- req_msg->alt_remote_lid = param->alternate_path->dlid;
- req_msg->alt_local_gid = param->alternate_path->sgid;
- req_msg->alt_remote_gid = param->alternate_path->dgid;
+ if (alt_path) {
+ if (alt_path->hop_limit <= 1) {
+ req_msg->alt_local_lid = alt_path->slid;
+ req_msg->alt_remote_lid = alt_path->dlid;
+ } else {
+ req_msg->alt_local_lid = IB_LID_PERMISSIVE;
+ req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
+ }
+ req_msg->alt_local_gid = alt_path->sgid;
+ req_msg->alt_remote_gid = alt_path->dgid;
cm_req_set_alt_flow_label(req_msg,
- param->alternate_path->flow_label);
- cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
- req_msg->alt_traffic_class = param->alternate_path->traffic_class;
- req_msg->alt_hop_limit = param->alternate_path->hop_limit;
- cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
- cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
+ alt_path->flow_label);
+ cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
+ req_msg->alt_traffic_class = alt_path->traffic_class;
+ req_msg->alt_hop_limit = alt_path->hop_limit;
+ cm_req_set_alt_sl(req_msg, alt_path->sl);
+ cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
cm_req_set_alt_local_ack_timeout(req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
- param->alternate_path->packet_life_time));
+ alt_path->packet_life_time));
}
if (param->private_data && param->private_data_len)
@@ -1441,6 +1455,34 @@ out:
return listen_cm_id_priv;
}
+/*
+ * Work-around for inter-subnet connections. If the LIDs are permissive,
+ * we need to override the LID/SL data in the REQ with the LID information
+ * in the work completion.
+ */
+static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
+{
+ if (!cm_req_get_primary_subnet_local(req_msg)) {
+ if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
+ req_msg->primary_local_lid = cpu_to_be16(wc->slid);
+ cm_req_set_primary_sl(req_msg, wc->sl);
+ }
+
+ if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
+ req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
+ }
+
+ if (!cm_req_get_alt_subnet_local(req_msg)) {
+ if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
+ req_msg->alt_local_lid = cpu_to_be16(wc->slid);
+ cm_req_set_alt_sl(req_msg, wc->sl);
+ }
+
+ if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
+ req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
+ }
+}
+
static int cm_req_handler(struct cm_work *work)
{
struct ib_cm_id *cm_id;
@@ -1481,6 +1523,7 @@ static int cm_req_handler(struct cm_work *work)
cm_id_priv->id.service_id = req_msg->service_id;
cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+ cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
if (ret) {
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 6c7aa59794d4..7f00347364f7 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -320,10 +320,13 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
.max_maps = pool->max_remaps,
.page_shift = params->page_shift
};
+ int bytes_per_fmr = sizeof *fmr;
+
+ if (pool->cache_bucket)
+ bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64);
for (i = 0; i < params->pool_size; ++i) {
- fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64),
- GFP_KERNEL);
+ fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
if (!fmr) {
printk(KERN_WARNING PFX "failed to allocate fmr "
"struct for FMR %d\n", i);
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index f281d16040f5..92cce8aacbb7 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -101,6 +101,7 @@ struct ehca_sport {
spinlock_t mod_sqp_lock;
enum ib_port_state port_state;
struct ehca_sma_attr saved_attr;
+ u32 pma_qp_nr;
};
#define HCA_CAP_MR_PGSIZE_4K 0x80000000
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 863b34fa9ff9..b5ca94c6b8d9 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -403,6 +403,8 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
sport->port_state = IB_PORT_ACTIVE;
dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
"is active");
+ ehca_query_sma_attr(shca, port,
+ &sport->saved_attr);
} else
notify_port_conf_change(shca, port);
break;
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index c469bfde2708..a8a2ea585d2f 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -187,6 +187,11 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context);
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad,
+ struct ib_mad *out_mad);
+
void ehca_poll_eqs(unsigned long data);
int ehca_calc_ipd(struct ehca_shca *shca, int port,
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 84c9b7b8669b..a86ebcc79a95 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -472,7 +472,7 @@ int ehca_init_device(struct ehca_shca *shca)
shca->ib_device.dealloc_fmr = ehca_dealloc_fmr;
shca->ib_device.attach_mcast = ehca_attach_mcast;
shca->ib_device.detach_mcast = ehca_detach_mcast;
- /* shca->ib_device.process_mad = ehca_process_mad; */
+ shca->ib_device.process_mad = ehca_process_mad;
shca->ib_device.mmap = ehca_mmap;
if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 3aacc8cf1e44..2ce8cffb8664 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -209,6 +209,10 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
return -EINVAL;
}
+ if (unlikely(send_wr->wr.ud.remote_qpn == 0)) {
+ ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
+ return -EINVAL;
+ }
my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
wqe_p->u.ud_av.ud_av = my_av->av;
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 79e72b25b252..706d97ad5555 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -39,12 +39,18 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <rdma/ib_mad.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
+#define IB_MAD_STATUS_REDIRECT __constant_htons(0x0002)
+#define IB_MAD_STATUS_UNSUP_VERSION __constant_htons(0x0004)
+#define IB_MAD_STATUS_UNSUP_METHOD __constant_htons(0x0008)
+
+#define IB_PMA_CLASS_PORT_INFO __constant_htons(0x0001)
/**
* ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue
@@ -83,6 +89,9 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
port, ret);
return ret;
}
+ shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
+ ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
+ port, pma_qp_nr);
break;
default:
ehca_err(&shca->ib_device, "invalid qp_type=%x",
@@ -109,3 +118,85 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
return H_SUCCESS;
}
+
+struct ib_perf {
+ struct ib_mad_hdr mad_hdr;
+ u8 reserved[40];
+ u8 data[192];
+} __attribute__ ((packed));
+
+
+static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ struct ib_perf *in_perf = (struct ib_perf *)in_mad;
+ struct ib_perf *out_perf = (struct ib_perf *)out_mad;
+ struct ib_class_port_info *poi =
+ (struct ib_class_port_info *)out_perf->data;
+ struct ehca_shca *shca =
+ container_of(ibdev, struct ehca_shca, ib_device);
+ struct ehca_sport *sport = &shca->sport[port_num - 1];
+
+ ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);
+
+ *out_mad = *in_mad;
+
+ if (in_perf->mad_hdr.class_version != 1) {
+ ehca_warn(ibdev, "Unsupported class_version=%x",
+ in_perf->mad_hdr.class_version);
+ out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
+ goto perf_reply;
+ }
+
+ switch (in_perf->mad_hdr.method) {
+ case IB_MGMT_METHOD_GET:
+ case IB_MGMT_METHOD_SET:
+ /* set class port info for redirection */
+ out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
+ out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
+ memset(poi, 0, sizeof(*poi));
+ poi->base_version = 1;
+ poi->class_version = 1;
+ poi->resp_time_value = 18;
+ poi->redirect_lid = sport->saved_attr.lid;
+ poi->redirect_qp = sport->pma_qp_nr;
+ poi->redirect_qkey = IB_QP1_QKEY;
+ poi->redirect_pkey = IB_DEFAULT_PKEY_FULL;
+
+ ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
+ sport->saved_attr.lid, sport->pma_qp_nr);
+ break;
+
+ case IB_MGMT_METHOD_GET_RESP:
+ return IB_MAD_RESULT_FAILURE;
+
+ default:
+ out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
+ break;
+ }
+
+perf_reply:
+ out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ int ret;
+
+ if (!port_num || port_num > ibdev->phys_port_cnt)
+ return IB_MAD_RESULT_FAILURE;
+
+ /* accept only pma request */
+ if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
+ return IB_MAD_RESULT_SUCCESS;
+
+ ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
+ ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad);
+
+ return ret;
+}
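The redirect reply built in ehca_process_perf() above advertises resp_time_value = 18 in the ClassPortInfo. Assuming the usual IBA encoding of that field (response time = 4.096 us * 2^N), this corresponds to roughly one second; a quick stand-alone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	int n = 18;
	double resp_us = 4.096 * (double)(1u << n);	/* 4.096 us * 2^18 */

	printf("resp_time_value %d -> about %.0f ms\n", n, resp_us / 1000.0);
	return 0;
}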
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index d8287d9db41e..96a39b5c9254 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -52,7 +52,7 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
-static const char mlx4_ib_version[] __devinitdata =
+static const char mlx4_ib_version[] =
DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -468,6 +468,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
if (err)
goto out;
+ dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
out:
@@ -516,9 +517,16 @@ static struct class_device_attribute *mlx4_class_attributes[] = {
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
+ static int mlx4_ib_version_printed;
struct mlx4_ib_dev *ibdev;
int i;
+
+ if (!mlx4_ib_version_printed) {
+ printk(KERN_INFO "%s", mlx4_ib_version);
+ ++mlx4_ib_version_printed;
+ }
+
ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
if (!ibdev) {
dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 6966f943f440..09a30dd12b14 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1255,9 +1255,14 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
if (err)
goto out;
- MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
- MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
- MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
+ if (!mthca_is_memfree(dev)) {
+ MTHCA_GET(adapter->vendor_id, outbox,
+ QUERY_ADAPTER_VENDOR_ID_OFFSET);
+ MTHCA_GET(adapter->device_id, outbox,
+ QUERY_ADAPTER_DEVICE_ID_OFFSET);
+ MTHCA_GET(adapter->revision_id, outbox,
+ QUERY_ADAPTER_REVISION_ID_OFFSET);
+ }
MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 5cf8250d4e16..cd3d8adbef9f 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -126,7 +126,7 @@ module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
MODULE_PARM_DESC(fmr_reserved_mtts,
"number of memory translation table segments reserved for FMR");
-static const char mthca_version[] __devinitdata =
+static char mthca_version[] __devinitdata =
DRV_NAME ": Mellanox InfiniBand HCA driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -735,7 +735,8 @@ static int mthca_init_hca(struct mthca_dev *mdev)
}
mdev->eq_table.inta_pin = adapter.inta_pin;
- mdev->rev_id = adapter.revision_id;
+ if (!mthca_is_memfree(mdev))
+ mdev->rev_id = adapter.revision_id;
memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);
return 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index aa6c70a6a36f..3b6985557cb2 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -613,8 +613,10 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
sizeof *(mr->mem.tavor.mpt) * idx;
mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
- if (IS_ERR(mr->mtt))
+ if (IS_ERR(mr->mtt)) {
+ err = PTR_ERR(mr->mtt);
goto err_out_table;
+ }
mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
@@ -627,8 +629,10 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
- if (IS_ERR(mailbox))
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
goto err_out_free_mtt;
+ }
mpt_entry = mailbox->buf;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 6bcde1cb9688..9e491df6419c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -923,17 +923,13 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
struct mthca_mr *mr;
u64 *page_list;
u64 total_size;
- u64 mask;
+ unsigned long mask;
int shift;
int npages;
int err;
int i, j, n;
- /* First check that we have enough alignment */
- if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
- return ERR_PTR(-EINVAL);
-
- mask = 0;
+ mask = buffer_list[0].addr ^ *iova_start;
total_size = 0;
for (i = 0; i < num_phys_buf; ++i) {
if (i != 0)
@@ -947,17 +943,7 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
if (mask & ~PAGE_MASK)
return ERR_PTR(-EINVAL);
- /* Find largest page shift we can use to cover buffers */
- for (shift = PAGE_SHIFT; shift < 31; ++shift)
- if (num_phys_buf > 1) {
- if ((1ULL << shift) & mask)
- break;
- } else {
- if (1ULL << shift >=
- buffer_list[0].size +
- (buffer_list[0].addr & ((1ULL << shift) - 1)))
- break;
- }
+ shift = __ffs(mask | 1 << 31);
buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
buffer_list[0].addr &= ~0ull << shift;
@@ -1270,6 +1256,8 @@ static int mthca_init_node_data(struct mthca_dev *dev)
goto out;
}
+ if (mthca_is_memfree(dev))
+ dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
out:
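The mthca_reg_phys_mr() hunk above drops the explicit search for the largest usable page shift and instead takes the lowest set bit of an accumulated address mask (__ffs(mask | 1 << 31)). A simplified stand-alone sketch of the same idea, using __builtin_ctzll() in place of the kernel's __ffs() and made-up buffer addresses (the real code also folds buffer end addresses into the mask):

#include <stdio.h>

int main(void)
{
	unsigned long long addrs[] = { 0x10000000ULL, 0x10200000ULL };
	unsigned long long iova = 0x10000000ULL;
	unsigned long long mask = addrs[0] ^ iova;
	int i, shift;

	for (i = 0; i < 2; i++)
		mask |= addrs[i];

	/* the lowest set bit bounds the page size every buffer can share;
	   the OR with bit 31 caps the shift at 31 as in the driver */
	shift = __builtin_ctzll(mask | (1ULL << 31));
	printf("largest usable page shift: %d (%llu KiB pages)\n",
	       shift, (1ULL << shift) / 1024);
	return 0;
}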
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 0e5461c65731..db5595bbf7f0 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1175,6 +1175,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
{
int ret;
int i;
+ struct mthca_next_seg *next;
qp->refcount = 1;
init_waitqueue_head(&qp->wait);
@@ -1217,7 +1218,6 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
}
if (mthca_is_memfree(dev)) {
- struct mthca_next_seg *next;
struct mthca_data_seg *scatter;
int size = (sizeof (struct mthca_next_seg) +
qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
@@ -1240,6 +1240,13 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
qp->sq.wqe_shift) +
qp->send_wqe_offset);
}
+ } else {
+ for (i = 0; i < qp->rq.max; ++i) {
+ next = get_recv_wqe(qp, i);
+ next->nda_op = htonl((((i + 1) % qp->rq.max) <<
+ qp->rq.wqe_shift) | 1);
+ }
+
}
qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
@@ -1863,7 +1870,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
prev_wqe = qp->rq.last;
qp->rq.last = wqe;
- ((struct mthca_next_seg *) wqe)->nda_op = 0;
((struct mthca_next_seg *) wqe)->ee_nds =
cpu_to_be32(MTHCA_NEXT_DBD);
((struct mthca_next_seg *) wqe)->flags = 0;
@@ -1885,9 +1891,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qp->wrid[ind] = wr->wr_id;
- ((struct mthca_next_seg *) prev_wqe)->nda_op =
- cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
- wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
cpu_to_be32(MTHCA_NEXT_DBD | size);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 553d681f6813..a5ffff6e1026 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -175,9 +175,17 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
* scatter list L_Keys to the sentry value of 0x100.
*/
for (i = 0; i < srq->max; ++i) {
- wqe = get_wqe(srq, i);
+ struct mthca_next_seg *next;
- *wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
+ next = wqe = get_wqe(srq, i);
+
+ if (i < srq->max - 1) {
+ *wqe_to_link(wqe) = i + 1;
+ next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
+ } else {
+ *wqe_to_link(wqe) = -1;
+ next->nda_op = 0;
+ }
for (scatter = wqe + sizeof (struct mthca_next_seg);
(void *) scatter < wqe + (1 << srq->wqe_shift);
@@ -470,16 +478,15 @@ out:
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
int ind;
+ struct mthca_next_seg *last_free;
ind = wqe_addr >> srq->wqe_shift;
spin_lock(&srq->lock);
- if (likely(srq->first_free >= 0))
- *wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
- else
- srq->first_free = ind;
-
+ last_free = get_wqe(srq, srq->last_free);
+ *wqe_to_link(last_free) = ind;
+ last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
*wqe_to_link(get_wqe(srq, ind)) = -1;
srq->last_free = ind;
@@ -506,15 +513,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
first_ind = srq->first_free;
for (nreq = 0; wr; wr = wr->next) {
- ind = srq->first_free;
-
- if (unlikely(ind < 0)) {
- mthca_err(dev, "SRQ %06x full\n", srq->srqn);
- err = -ENOMEM;
- *bad_wr = wr;
- break;
- }
-
+ ind = srq->first_free;
wqe = get_wqe(srq, ind);
next_ind = *wqe_to_link(wqe);
@@ -528,7 +527,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
prev_wqe = srq->last;
srq->last = wqe;
- ((struct mthca_next_seg *) wqe)->nda_op = 0;
((struct mthca_next_seg *) wqe)->ee_nds = 0;
/* flags field will always remain 0 */
@@ -549,9 +547,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
if (i < srq->max_gs)
mthca_set_data_seg_inval(wqe);
- ((struct mthca_next_seg *) prev_wqe)->nda_op =
- cpu_to_be32((ind << srq->wqe_shift) | 1);
- wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
cpu_to_be32(MTHCA_NEXT_DBD);
@@ -614,15 +609,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
spin_lock_irqsave(&srq->lock, flags);
for (nreq = 0; wr; ++nreq, wr = wr->next) {
- ind = srq->first_free;
-
- if (unlikely(ind < 0)) {
- mthca_err(dev, "SRQ %06x full\n", srq->srqn);
- err = -ENOMEM;
- *bad_wr = wr;
- break;
- }
-
+ ind = srq->first_free;
wqe = get_wqe(srq, ind);
next_ind = *wqe_to_link(wqe);
@@ -633,8 +620,6 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
break;
}
- ((struct mthca_next_seg *) wqe)->nda_op =
- cpu_to_be32((next_ind << srq->wqe_shift) | 1);
((struct mthca_next_seg *) wqe)->ee_nds = 0;
/* flags field will always remain 0 */
diff --git a/drivers/infiniband/hw/nes/Kconfig b/drivers/infiniband/hw/nes/Kconfig
new file mode 100644
index 000000000000..2aeb7ac972a9
--- /dev/null
+++ b/drivers/infiniband/hw/nes/Kconfig
@@ -0,0 +1,16 @@
+config INFINIBAND_NES
+ tristate "NetEffect RNIC Driver"
+ depends on PCI && INET && INFINIBAND
+ select LIBCRC32C
+ ---help---
+ This is a low-level driver for NetEffect RDMA enabled
+ Network Interface Cards (RNIC).
+
+config INFINIBAND_NES_DEBUG
+ bool "Verbose debugging output"
+ depends on INFINIBAND_NES
+ default n
+ ---help---
+ This option causes the NetEffect RNIC driver to produce debug
+ messages. Select this if you are developing the driver
+ or trying to diagnose a problem.
diff --git a/drivers/infiniband/hw/nes/Makefile b/drivers/infiniband/hw/nes/Makefile
new file mode 100644
index 000000000000..35148513c47e
--- /dev/null
+++ b/drivers/infiniband/hw/nes/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_INFINIBAND_NES) += iw_nes.o
+
+iw_nes-objs := nes.o nes_hw.o nes_nic.o nes_utils.o nes_verbs.o nes_cm.o
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
new file mode 100644
index 000000000000..7f8853b44ee1
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -0,0 +1,1152 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <linux/highmem.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/iw_cm.h>
+
+#include "nes.h"
+
+#include <net/netevent.h>
+#include <net/neighbour.h>
+#include <linux/route.h>
+#include <net/ip_fib.h>
+
+MODULE_AUTHOR("NetEffect");
+MODULE_DESCRIPTION("NetEffect RNIC Low-level iWARP Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+int max_mtu = 9000;
+int nics_per_function = 1;
+int interrupt_mod_interval = 0;
+
+
+/* Interoperability */
+int mpa_version = 1;
+module_param(mpa_version, int, 0);
+MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp (0 or 1)");
+
+/* Interoperability */
+int disable_mpa_crc = 0;
+module_param(disable_mpa_crc, int, 0);
+MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC");
+
+unsigned int send_first = 0;
+module_param(send_first, int, 0);
+MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection");
+
+
+unsigned int nes_drv_opt = 0;
+module_param(nes_drv_opt, int, 0);
+MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters");
+
+unsigned int nes_debug_level = 0;
+module_param_named(debug_level, nes_debug_level, uint, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug output level");
+
+LIST_HEAD(nes_adapter_list);
+LIST_HEAD(nes_dev_list);
+
+atomic_t qps_destroyed;
+atomic_t cqp_reqs_allocated;
+atomic_t cqp_reqs_freed;
+atomic_t cqp_reqs_dynallocated;
+atomic_t cqp_reqs_dynfreed;
+atomic_t cqp_reqs_queued;
+atomic_t cqp_reqs_redriven;
+
+static void nes_print_macaddr(struct net_device *netdev);
+static irqreturn_t nes_interrupt(int, void *);
+static int __devinit nes_probe(struct pci_dev *, const struct pci_device_id *);
+static void __devexit nes_remove(struct pci_dev *);
+static int __init nes_init_module(void);
+static void __exit nes_exit_module(void);
+static unsigned int ee_flsh_adapter;
+static unsigned int sysfs_nonidx_addr;
+static unsigned int sysfs_idx_addr;
+
+static struct pci_device_id nes_pci_table[] = {
+ {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID},
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, nes_pci_table);
+
+static int nes_inetaddr_event(struct notifier_block *, unsigned long, void *);
+static int nes_net_event(struct notifier_block *, unsigned long, void *);
+static int nes_notifiers_registered;
+
+
+static struct notifier_block nes_inetaddr_notifier = {
+ .notifier_call = nes_inetaddr_event
+};
+
+static struct notifier_block nes_net_notifier = {
+ .notifier_call = nes_net_event
+};
+
+
+
+
+/**
+ * nes_inetaddr_event
+ */
+static int nes_inetaddr_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ struct net_device *event_netdev = ifa->ifa_dev->dev;
+ struct nes_device *nesdev;
+ struct net_device *netdev;
+ struct nes_vnic *nesvnic;
+ unsigned int addr;
+ unsigned int mask;
+
+ addr = ntohl(ifa->ifa_address);
+ mask = ntohl(ifa->ifa_mask);
+ nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %08X, netmask %08X.\n",
+ addr, mask);
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n",
+ nesdev, nesdev->netdev[0]->name);
+ netdev = nesdev->netdev[0];
+ nesvnic = netdev_priv(netdev);
+ if (netdev == event_netdev) {
+ if (nesvnic->rdma_enabled == 0) {
+ nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
+ " RDMA is not enabled.\n",
+ netdev->name);
+ return NOTIFY_OK;
+ }
+ /* we have ifa->ifa_address/mask here if we need it */
+ switch (event) {
+ case NETDEV_DOWN:
+ nes_debug(NES_DBG_NETDEV, "event:DOWN\n");
+ nes_write_indexed(nesdev,
+ NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)), 0);
+
+ nes_manage_arp_cache(netdev, netdev->dev_addr,
+ ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE);
+ nesvnic->local_ipaddr = 0;
+ return NOTIFY_OK;
+ break;
+ case NETDEV_UP:
+ nes_debug(NES_DBG_NETDEV, "event:UP\n");
+
+ if (nesvnic->local_ipaddr != 0) {
+ nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n");
+ return NOTIFY_OK;
+ }
+ /* Add the address to the IP table */
+ nesvnic->local_ipaddr = ifa->ifa_address;
+
+ nes_write_indexed(nesdev,
+ NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)),
+ ntohl(ifa->ifa_address));
+ nes_manage_arp_cache(netdev, netdev->dev_addr,
+ ntohl(nesvnic->local_ipaddr), NES_ARP_ADD);
+ return NOTIFY_OK;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+
+/**
+ * nes_net_event
+ */
+static int nes_net_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct neighbour *neigh = ptr;
+ struct nes_device *nesdev;
+ struct net_device *netdev;
+ struct nes_vnic *nesvnic;
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ /* nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p.\n", nesdev); */
+ netdev = nesdev->netdev[0];
+ nesvnic = netdev_priv(netdev);
+ if (netdev == neigh->dev) {
+ if (nesvnic->rdma_enabled == 0) {
+ nes_debug(NES_DBG_NETDEV, "Skipping device %s since no RDMA\n",
+ netdev->name);
+ } else {
+ if (neigh->nud_state & NUD_VALID) {
+ nes_manage_arp_cache(neigh->dev, neigh->ha,
+ ntohl(*(__be32 *)neigh->primary_key), NES_ARP_ADD);
+ } else {
+ nes_manage_arp_cache(neigh->dev, neigh->ha,
+ ntohl(*(__be32 *)neigh->primary_key), NES_ARP_DELETE);
+ }
+ }
+ return NOTIFY_OK;
+ }
+ }
+ break;
+ default:
+ nes_debug(NES_DBG_NETDEV, "NETEVENT_ %lu undefined\n", event);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+
+/**
+ * nes_add_ref
+ */
+void nes_add_ref(struct ib_qp *ibqp)
+{
+ struct nes_qp *nesqp;
+
+ nesqp = to_nesqp(ibqp);
+ nes_debug(NES_DBG_QP, "Bumping refcount for QP%u. Pre-inc value = %u\n",
+ ibqp->qp_num, atomic_read(&nesqp->refcount));
+ atomic_inc(&nesqp->refcount);
+}
+
+static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
+{
+ unsigned long flags;
+ struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 qp_id;
+
+ atomic_inc(&qps_destroyed);
+
+ /* Free the control structures */
+
+ qp_id = nesqp->hwqp.qp_id;
+ if (nesqp->pbl_vbase) {
+ pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
+ nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ nesadapter->free_256pbl++;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase);
+ nesqp->pbl_vbase = NULL;
+
+ } else {
+ pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
+ nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase);
+ }
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, nesqp->hwqp.qp_id);
+
+ kfree(nesqp->allocated_buffer);
+
+}
+
+/**
+ * nes_rem_ref
+ */
+void nes_rem_ref(struct ib_qp *ibqp)
+{
+ u64 u64temp;
+ struct nes_qp *nesqp;
+ struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_cqp_request *cqp_request;
+ u32 opcode;
+
+ nesqp = to_nesqp(ibqp);
+
+ if (atomic_read(&nesqp->refcount) == 0) {
+ printk(KERN_INFO PFX "%s: Reference count already 0 for QP%d, last aeq = 0x%04X.\n",
+ __FUNCTION__, ibqp->qp_num, nesqp->last_aeq);
+ BUG();
+ }
+
+ if (atomic_dec_and_test(&nesqp->refcount)) {
+ nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = NULL;
+
+ /* Destroy the QP */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n");
+ return;
+ }
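+ /* The actual teardown happens asynchronously: once the adapter completes
+ * the destroy WQE, nes_cqp_rem_ref_callback() frees the queue memory and
+ * PBL, releases the QP number and frees the allocated buffer. */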
+ cqp_request->waiting = 0;
+ cqp_request->callback = 1;
+ cqp_request->cqp_callback = nes_cqp_rem_ref_callback;
+ cqp_request->cqp_callback_pointer = nesqp;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ opcode = NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_IWARP;
+
+ if (nesqp->hte_added) {
+ opcode |= NES_CQP_QP_DEL_HTE;
+ nesqp->hte_added = 0;
+ }
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
+ u64temp = (u64)nesqp->nesqp_context_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+ }
+}
+
+
+/**
+ * nes_get_qp
+ */
+struct ib_qp *nes_get_qp(struct ib_device *device, int qpn)
+{
+ struct nes_vnic *nesvnic = to_nesvnic(device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+
+ if ((qpn < NES_FIRST_QPN) || (qpn >= (NES_FIRST_QPN + nesadapter->max_qp)))
+ return NULL;
+
+ return &nesadapter->qp_table[qpn - NES_FIRST_QPN]->ibqp;
+}
+
+
+/**
+ * nes_print_macaddr
+ */
+static void nes_print_macaddr(struct net_device *netdev)
+{
+ nes_debug(NES_DBG_INIT, "%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, IRQ %u\n",
+ netdev->name,
+ netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
+ netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
+ netdev->irq);
+}
+
+
+/**
+ * nes_interrupt - handle interrupts
+ */
+static irqreturn_t nes_interrupt(int irq, void *dev_id)
+{
+ struct nes_device *nesdev = (struct nes_device *)dev_id;
+ int handled = 0;
+ u32 int_mask;
+ u32 int_req;
+ u32 int_stat;
+ u32 intf_int_stat;
+ u32 timer_stat;
+
+ if (nesdev->msi_enabled) {
+ /* No need to read the interrupt pending register if msi is enabled */
+ handled = 1;
+ } else {
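+ /* The original NE020 revision apparently lacks a usable interrupt-pending
+ * register, so ownership of a shared interrupt is inferred from the enabled
+ * status bits; other revisions read NES_INT_PENDING in the else branch below. */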
+ if (unlikely(nesdev->nesadapter->hw_rev == NE020_REV)) {
+ /* Master interrupt enable provides synchronization for kicking off bottom half
+ when interrupt sharing is going on */
+ int_mask = nes_read32(nesdev->regs + NES_INT_MASK);
+ if (int_mask & 0x80000000) {
+ /* Check interrupt status to see if this might be ours */
+ int_stat = nes_read32(nesdev->regs + NES_INT_STAT);
+ int_req = nesdev->int_req;
+ if (int_stat&int_req) {
+ /* if interesting CEQ or AEQ is pending, claim the interrupt */
+ if ((int_stat&int_req) & (~(NES_INT_TIMER|NES_INT_INTF))) {
+ handled = 1;
+ } else {
+ if (((int_stat & int_req) & NES_INT_TIMER) == NES_INT_TIMER) {
+ /* Timer might be running but might be for another function */
+ timer_stat = nes_read32(nesdev->regs + NES_TIMER_STAT);
+ if ((timer_stat & nesdev->timer_int_req) != 0) {
+ handled = 1;
+ }
+ }
+ if ((((int_stat & int_req) & NES_INT_INTF) == NES_INT_INTF) &&
+ (handled == 0)) {
+ intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT);
+ if ((intf_int_stat & nesdev->intf_int_req) != 0) {
+ handled = 1;
+ }
+ }
+ }
+ if (handled) {
+ nes_write32(nesdev->regs+NES_INT_MASK, int_mask & (~0x80000000));
+ int_mask = nes_read32(nesdev->regs+NES_INT_MASK);
+ /* Save off the status to save an additional read */
+ nesdev->int_stat = int_stat;
+ nesdev->napi_isr_ran = 1;
+ }
+ }
+ }
+ } else {
+ handled = nes_read32(nesdev->regs+NES_INT_PENDING);
+ }
+ }
+
+ if (handled) {
+
+ if (nes_napi_isr(nesdev) == 0) {
+ tasklet_schedule(&nesdev->dpc_tasklet);
+
+ }
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
+}
+
+
+/**
+ * nes_probe - Device initialization
+ */
+static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev = NULL;
+ struct nes_device *nesdev = NULL;
+ int ret = 0;
+ struct nes_vnic *nesvnic = NULL;
+ void __iomem *mmio_regs = NULL;
+ u8 hw_rev;
+
+ assert(pcidev != NULL);
+ assert(ent != NULL);
+
+ printk(KERN_INFO PFX "NetEffect RNIC driver v%s loading. (%s)\n",
+ DRV_VERSION, pci_name(pcidev));
+
+ ret = pci_enable_device(pcidev);
+ if (ret) {
+ printk(KERN_ERR PFX "Unable to enable PCI device. (%s)\n", pci_name(pcidev));
+ goto bail0;
+ }
+
+ nes_debug(NES_DBG_INIT, "BAR0 (@0x%08lX) size = 0x%lX bytes\n",
+ (long unsigned int)pci_resource_start(pcidev, BAR_0),
+ (long unsigned int)pci_resource_len(pcidev, BAR_0));
+ nes_debug(NES_DBG_INIT, "BAR1 (@0x%08lX) size = 0x%lX bytes\n",
+ (long unsigned int)pci_resource_start(pcidev, BAR_1),
+ (long unsigned int)pci_resource_len(pcidev, BAR_1));
+
+ /* Make sure the PCI base addresses are MMIO */
+ if (!(pci_resource_flags(pcidev, BAR_0) & IORESOURCE_MEM) ||
+ !(pci_resource_flags(pcidev, BAR_1) & IORESOURCE_MEM)) {
+ printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
+ ret = -ENODEV;
+ goto bail1;
+ }
+
+ /* Reserve PCI I/O and memory resources */
+ ret = pci_request_regions(pcidev, DRV_NAME);
+ if (ret) {
+ printk(KERN_ERR PFX "Unable to request regions. (%s)\n", pci_name(pcidev));
+ goto bail1;
+ }
+
+ if ((sizeof(dma_addr_t) > 4)) {
+ ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
+ if (ret < 0) {
+ printk(KERN_ERR PFX "64b DMA mask configuration failed\n");
+ goto bail2;
+ }
+ ret = pci_set_consistent_dma_mask(pcidev, DMA_64BIT_MASK);
+ if (ret) {
+ printk(KERN_ERR PFX "64b DMA consistent mask configuration failed\n");
+ goto bail2;
+ }
+ } else {
+ ret = pci_set_dma_mask(pcidev, DMA_32BIT_MASK);
+ if (ret < 0) {
+ printk(KERN_ERR PFX "32b DMA mask configuration failed\n");
+ goto bail2;
+ }
+ ret = pci_set_consistent_dma_mask(pcidev, DMA_32BIT_MASK);
+ if (ret) {
+ printk(KERN_ERR PFX "32b DMA consistent mask configuration failed\n");
+ goto bail2;
+ }
+ }
+
+ pci_set_master(pcidev);
+
+ /* Allocate hardware structure */
+ nesdev = kzalloc(sizeof(struct nes_device), GFP_KERNEL);
+ if (!nesdev) {
+ printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n", pci_name(pcidev));
+ ret = -ENOMEM;
+ goto bail2;
+ }
+
+ nes_debug(NES_DBG_INIT, "Allocated nes device at %p\n", nesdev);
+ nesdev->pcidev = pcidev;
+ pci_set_drvdata(pcidev, nesdev);
+
+ pci_read_config_byte(pcidev, 0x0008, &hw_rev);
+ nes_debug(NES_DBG_INIT, "hw_rev=%u\n", hw_rev);
+
+ spin_lock_init(&nesdev->indexed_regs_lock);
+
+ /* Remap the PCI registers in adapter BAR0 to kernel VA space */
+ mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0), pci_resource_len(pcidev, BAR_0));
+ if (mmio_regs == NULL) {
+ printk(KERN_ERR PFX "Unable to remap BAR0\n");
+ ret = -EIO;
+ goto bail3;
+ }
+ nesdev->regs = mmio_regs;
+ nesdev->index_reg = 0x50 + (PCI_FUNC(pcidev->devfn)*8) + mmio_regs;
+
+ /* Ensure interrupts are disabled */
+ nes_write32(nesdev->regs+NES_INT_MASK, 0x7fffffff);
+
+ if (nes_drv_opt & NES_DRV_OPT_ENABLE_MSI) {
+ if (!pci_enable_msi(nesdev->pcidev)) {
+ nesdev->msi_enabled = 1;
+ nes_debug(NES_DBG_INIT, "MSI is enabled for device %s\n",
+ pci_name(pcidev));
+ } else {
+ nes_debug(NES_DBG_INIT, "MSI is disabled by linux for device %s\n",
+ pci_name(pcidev));
+ }
+ } else {
+ nes_debug(NES_DBG_INIT, "MSI not requested due to driver options for device %s\n",
+ pci_name(pcidev));
+ }
+
+ nesdev->csr_start = pci_resource_start(nesdev->pcidev, BAR_0);
+ nesdev->doorbell_region = pci_resource_start(nesdev->pcidev, BAR_1);
+
+ /* Init the adapter */
+ nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev);
+ if (!nesdev->nesadapter) {
+ printk(KERN_ERR PFX "Unable to initialize adapter.\n");
+ ret = -ENOMEM;
+ goto bail5;
+ }
+ nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
+
+ /* nesdev->base_doorbell_index =
+ nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */
+ nesdev->base_doorbell_index = 1;
+ nesdev->doorbell_start = nesdev->nesadapter->doorbell_start;
+ nesdev->mac_index = PCI_FUNC(nesdev->pcidev->devfn) % nesdev->nesadapter->port_count;
+
+ tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
+
+ /* bring up the Control QP */
+ if (nes_init_cqp(nesdev)) {
+ ret = -ENODEV;
+ goto bail6;
+ }
+
+ /* Arm the CCQ */
+ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
+ PCI_FUNC(nesdev->pcidev->devfn));
+ nes_read32(nesdev->regs+NES_CQE_ALLOC);
+
+ /* Enable the interrupts */
+ nesdev->int_req = (0x101 << PCI_FUNC(nesdev->pcidev->devfn)) |
+ (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16));
+ if (PCI_FUNC(nesdev->pcidev->devfn) < 4) {
+ nesdev->int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+24));
+ }
+
+ /* TODO: This really should be the first driver to load, not function 0 */
+ if (PCI_FUNC(nesdev->pcidev->devfn) == 0) {
+ /* pick up PCI and critical errors if the first driver to load */
+ nesdev->intf_int_req = NES_INTF_INT_PCIERR | NES_INTF_INT_CRITERR;
+ nesdev->int_req |= NES_INT_INTF;
+ } else {
+ nesdev->intf_int_req = 0;
+ }
+ nesdev->intf_int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16));
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS0, 0);
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 0);
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS2, 0x00001265);
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS4, 0x18021804);
+
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS3, 0x17801790);
+
+ /* deal with both periodic and one_shot */
+ nesdev->timer_int_req = 0x101 << PCI_FUNC(nesdev->pcidev->devfn);
+ nesdev->nesadapter->timer_int_req |= nesdev->timer_int_req;
+ nes_debug(NES_DBG_INIT, "setting int_req for function %u, nesdev = 0x%04X, adapter = 0x%04X\n",
+ PCI_FUNC(nesdev->pcidev->devfn),
+ nesdev->timer_int_req, nesdev->nesadapter->timer_int_req);
+
+ nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
+
+ list_add_tail(&nesdev->list, &nes_dev_list);
+
+ /* Request an interrupt line for the driver */
+ ret = request_irq(pcidev->irq, nes_interrupt, IRQF_SHARED, DRV_NAME, nesdev);
+ if (ret) {
+ printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
+ pci_name(pcidev), pcidev->irq);
+ list_del(&nesdev->list);
+ nes_destroy_cqp(nesdev);
+ goto bail65;
+ }
+
+ nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
+
+ if (nes_notifiers_registered == 0) {
+ register_inetaddr_notifier(&nes_inetaddr_notifier);
+ register_netevent_notifier(&nes_net_notifier);
+ }
+ nes_notifiers_registered++;
+
+ /* Initialize network devices */
+ if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) {
+ ret = -ENOMEM;
+ goto bail7;
+ }
+
+ /* Register network device */
+ ret = register_netdev(netdev);
+ if (ret) {
+ printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret);
+ nes_netdev_destroy(netdev);
+ goto bail7;
+ }
+
+ nes_print_macaddr(netdev);
+ /* create a CM core for this netdev */
+ nesvnic = netdev_priv(netdev);
+
+ nesdev->netdev_count++;
+ nesdev->nesadapter->netdev_count++;
+
+
+ printk(KERN_ERR PFX "%s: NetEffect RNIC driver successfully loaded.\n",
+ pci_name(pcidev));
+ return 0;
+
+ bail7:
+ printk(KERN_ERR PFX "bail7\n");
+ while (nesdev->netdev_count > 0) {
+ nesdev->netdev_count--;
+ nesdev->nesadapter->netdev_count--;
+
+ unregister_netdev(nesdev->netdev[nesdev->netdev_count]);
+ nes_netdev_destroy(nesdev->netdev[nesdev->netdev_count]);
+ }
+
+ nes_debug(NES_DBG_INIT, "netdev_count=%d, nesadapter->netdev_count=%d\n",
+ nesdev->netdev_count, nesdev->nesadapter->netdev_count);
+
+ nes_notifiers_registered--;
+ if (nes_notifiers_registered == 0) {
+ unregister_netevent_notifier(&nes_net_notifier);
+ unregister_inetaddr_notifier(&nes_inetaddr_notifier);
+ }
+
+ list_del(&nesdev->list);
+ nes_destroy_cqp(nesdev);
+ free_irq(pcidev->irq, nesdev);
+
+ bail65:
+ printk(KERN_ERR PFX "bail65\n");
+ if (nesdev->msi_enabled) {
+ pci_disable_msi(pcidev);
+ }
+ bail6:
+ printk(KERN_ERR PFX "bail6\n");
+ tasklet_kill(&nesdev->dpc_tasklet);
+ /* Deallocate the Adapter Structure */
+ nes_destroy_adapter(nesdev->nesadapter);
+
+ bail5:
+ printk(KERN_ERR PFX "bail5\n");
+ iounmap(nesdev->regs);
+
+ bail3:
+ printk(KERN_ERR PFX "bail3\n");
+ kfree(nesdev);
+
+ bail2:
+ pci_release_regions(pcidev);
+
+ bail1:
+ pci_disable_device(pcidev);
+
+ bail0:
+ return ret;
+}
+
+
+/**
+ * nes_remove - unload from kernel
+ */
+static void __devexit nes_remove(struct pci_dev *pcidev)
+{
+ struct nes_device *nesdev = pci_get_drvdata(pcidev);
+ struct net_device *netdev;
+ int netdev_index = 0;
+
+ if (nesdev->netdev_count) {
+ netdev = nesdev->netdev[netdev_index];
+ if (netdev) {
+ netif_stop_queue(netdev);
+ unregister_netdev(netdev);
+ nes_netdev_destroy(netdev);
+
+ nesdev->netdev[netdev_index] = NULL;
+ nesdev->netdev_count--;
+ nesdev->nesadapter->netdev_count--;
+ }
+ }
+
+ nes_notifiers_registered--;
+ if (nes_notifiers_registered == 0) {
+ unregister_netevent_notifier(&nes_net_notifier);
+ unregister_inetaddr_notifier(&nes_inetaddr_notifier);
+ }
+
+ list_del(&nesdev->list);
+ nes_destroy_cqp(nesdev);
+ tasklet_kill(&nesdev->dpc_tasklet);
+
+ /* Deallocate the Adapter Structure */
+ nes_destroy_adapter(nesdev->nesadapter);
+
+ free_irq(pcidev->irq, nesdev);
+
+ if (nesdev->msi_enabled) {
+ pci_disable_msi(pcidev);
+ }
+
+ iounmap(nesdev->regs);
+ kfree(nesdev);
+
+ /* nes_debug(NES_DBG_SHUTDOWN, "calling pci_release_regions.\n"); */
+ pci_release_regions(pcidev);
+ pci_disable_device(pcidev);
+ pci_set_drvdata(pcidev, NULL);
+}
+
+
+static struct pci_driver nes_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = nes_pci_table,
+ .probe = nes_probe,
+ .remove = __devexit_p(nes_remove),
+};
+
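+/*
+ * Driver-level sysfs attributes (under /sys/bus/pci/drivers/iw_nes/): the
+ * "adapter" file selects which device in nes_dev_list the remaining files
+ * operate on, and the eeprom, flash, indexed and non-indexed register files
+ * give raw read/write access to that adapter, presumably for diagnostics
+ * and flash/EEPROM tooling.
+ */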
+static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf)
+{
+ unsigned int devfn = 0xffffffff;
+ unsigned char bus_number = 0xff;
+ unsigned int i = 0;
+ struct nes_device *nesdev;
+
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ devfn = nesdev->nesadapter->devfn;
+ bus_number = nesdev->nesadapter->bus_number;
+ break;
+ }
+ i++;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%x:%x", bus_number, devfn);
+}
+
+static ssize_t nes_store_adapter(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+
+ ee_flsh_adapter = simple_strtoul(p, &p, 10);
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_ee_cmd(struct device_driver *ddp, char *buf)
+{
+ u32 eeprom_cmd = 0xdead;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ eeprom_cmd = nes_read32(nesdev->regs + NES_EEPROM_COMMAND);
+ break;
+ }
+ i++;
+ }
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_cmd);
+}
+
+static ssize_t nes_store_ee_cmd(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+ u32 val;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+ val = simple_strtoul(p, &p, 16);
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ nes_write32(nesdev->regs + NES_EEPROM_COMMAND, val);
+ break;
+ }
+ i++;
+ }
+ }
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_ee_data(struct device_driver *ddp, char *buf)
+{
+ u32 eeprom_data = 0xdead;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ eeprom_data = nes_read32(nesdev->regs + NES_EEPROM_DATA);
+ break;
+ }
+ i++;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_data);
+}
+
+static ssize_t nes_store_ee_data(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+ u32 val;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+ val = simple_strtoul(p, &p, 16);
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ nes_write32(nesdev->regs + NES_EEPROM_DATA, val);
+ break;
+ }
+ i++;
+ }
+ }
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_flash_cmd(struct device_driver *ddp, char *buf)
+{
+ u32 flash_cmd = 0xdead;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ flash_cmd = nes_read32(nesdev->regs + NES_FLASH_COMMAND);
+ break;
+ }
+ i++;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_cmd);
+}
+
+static ssize_t nes_store_flash_cmd(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+ u32 val;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+ val = simple_strtoul(p, &p, 16);
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ nes_write32(nesdev->regs + NES_FLASH_COMMAND, val);
+ break;
+ }
+ i++;
+ }
+ }
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_flash_data(struct device_driver *ddp, char *buf)
+{
+ u32 flash_data = 0xdead;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ flash_data = nes_read32(nesdev->regs + NES_FLASH_DATA);
+ break;
+ }
+ i++;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_data);
+}
+
+static ssize_t nes_store_flash_data(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+ u32 val;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+ val = simple_strtoul(p, &p, 16);
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ nes_write32(nesdev->regs + NES_FLASH_DATA, val);
+ break;
+ }
+ i++;
+ }
+ }
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_nonidx_addr(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_nonidx_addr);
+}
+
+static ssize_t nes_store_nonidx_addr(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X')
+ sysfs_nonidx_addr = simple_strtoul(p, &p, 16);
+
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_nonidx_data(struct device_driver *ddp, char *buf)
+{
+ u32 nonidx_data = 0xdead;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ nonidx_data = nes_read32(nesdev->regs + sysfs_nonidx_addr);
+ break;
+ }
+ i++;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", nonidx_data);
+}
+
+static ssize_t nes_store_nonidx_data(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+ u32 val;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+ val = simple_strtoul(p, &p, 16);
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ nes_write32(nesdev->regs + sysfs_nonidx_addr, val);
+ break;
+ }
+ i++;
+ }
+ }
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_idx_addr(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_idx_addr);
+}
+
+static ssize_t nes_store_idx_addr(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X')
+ sysfs_idx_addr = simple_strtoul(p, &p, 16);
+
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_idx_data(struct device_driver *ddp, char *buf)
+{
+ u32 idx_data = 0xdead;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ idx_data = nes_read_indexed(nesdev, sysfs_idx_addr);
+ break;
+ }
+ i++;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", idx_data);
+}
+
+static ssize_t nes_store_idx_data(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+ u32 val;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+ val = simple_strtoul(p, &p, 16);
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ nes_write_indexed(nesdev, sysfs_idx_addr, val);
+ break;
+ }
+ i++;
+ }
+ }
+ return strnlen(buf, count);
+}
+
+static DRIVER_ATTR(adapter, S_IRUSR | S_IWUSR,
+ nes_show_adapter, nes_store_adapter);
+static DRIVER_ATTR(eeprom_cmd, S_IRUSR | S_IWUSR,
+ nes_show_ee_cmd, nes_store_ee_cmd);
+static DRIVER_ATTR(eeprom_data, S_IRUSR | S_IWUSR,
+ nes_show_ee_data, nes_store_ee_data);
+static DRIVER_ATTR(flash_cmd, S_IRUSR | S_IWUSR,
+ nes_show_flash_cmd, nes_store_flash_cmd);
+static DRIVER_ATTR(flash_data, S_IRUSR | S_IWUSR,
+ nes_show_flash_data, nes_store_flash_data);
+static DRIVER_ATTR(nonidx_addr, S_IRUSR | S_IWUSR,
+ nes_show_nonidx_addr, nes_store_nonidx_addr);
+static DRIVER_ATTR(nonidx_data, S_IRUSR | S_IWUSR,
+ nes_show_nonidx_data, nes_store_nonidx_data);
+static DRIVER_ATTR(idx_addr, S_IRUSR | S_IWUSR,
+ nes_show_idx_addr, nes_store_idx_addr);
+static DRIVER_ATTR(idx_data, S_IRUSR | S_IWUSR,
+ nes_show_idx_data, nes_store_idx_data);
+
+static int nes_create_driver_sysfs(struct pci_driver *drv)
+{
+ int error;
+ error = driver_create_file(&drv->driver, &driver_attr_adapter);
+ error |= driver_create_file(&drv->driver, &driver_attr_eeprom_cmd);
+ error |= driver_create_file(&drv->driver, &driver_attr_eeprom_data);
+ error |= driver_create_file(&drv->driver, &driver_attr_flash_cmd);
+ error |= driver_create_file(&drv->driver, &driver_attr_flash_data);
+ error |= driver_create_file(&drv->driver, &driver_attr_nonidx_addr);
+ error |= driver_create_file(&drv->driver, &driver_attr_nonidx_data);
+ error |= driver_create_file(&drv->driver, &driver_attr_idx_addr);
+ error |= driver_create_file(&drv->driver, &driver_attr_idx_data);
+ return error;
+}
+
+static void nes_remove_driver_sysfs(struct pci_driver *drv)
+{
+ driver_remove_file(&drv->driver, &driver_attr_adapter);
+ driver_remove_file(&drv->driver, &driver_attr_eeprom_cmd);
+ driver_remove_file(&drv->driver, &driver_attr_eeprom_data);
+ driver_remove_file(&drv->driver, &driver_attr_flash_cmd);
+ driver_remove_file(&drv->driver, &driver_attr_flash_data);
+ driver_remove_file(&drv->driver, &driver_attr_nonidx_addr);
+ driver_remove_file(&drv->driver, &driver_attr_nonidx_data);
+ driver_remove_file(&drv->driver, &driver_attr_idx_addr);
+ driver_remove_file(&drv->driver, &driver_attr_idx_data);
+}
+
+/**
+ * nes_init_module - module initialization entry point
+ */
+static int __init nes_init_module(void)
+{
+ int retval;
+ int retval1;
+
+ retval = nes_cm_start();
+ if (retval) {
+ printk(KERN_ERR PFX "Unable to start NetEffect iWARP CM.\n");
+ return retval;
+ }
+ retval = pci_register_driver(&nes_pci_driver);
+ if (retval >= 0) {
+ retval1 = nes_create_driver_sysfs(&nes_pci_driver);
+ if (retval1 < 0)
+ printk(KERN_ERR PFX "Unable to create NetEffect sys files.\n");
+ }
+ return retval;
+}
+
+
+/**
+ * nes_exit_module - module unload entry point
+ */
+static void __exit nes_exit_module(void)
+{
+ nes_cm_stop();
+ nes_remove_driver_sysfs(&nes_pci_driver);
+
+ pci_unregister_driver(&nes_pci_driver);
+}
+
+
+module_init(nes_init_module);
+module_exit(nes_exit_module);
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
new file mode 100644
index 000000000000..fd57e8a1582f
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NES_H
+#define __NES_H
+
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <asm/semaphore.h>
+#include <linux/version.h>
+#include <asm/io.h>
+#include <linux/crc32c.h>
+
+#include <rdma/ib_smi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/iw_cm.h>
+
+#define NES_SEND_FIRST_WRITE
+
+#define QUEUE_DISCONNECTS
+
+#define DRV_BUILD "1"
+
+#define DRV_NAME "iw_nes"
+#define DRV_VERSION "1.0 KO Build " DRV_BUILD
+#define PFX DRV_NAME ": "
+
+/*
+ * NetEffect PCI vendor id and NE010 PCI device id.
+ */
+#ifndef PCI_VENDOR_ID_NETEFFECT /* not in pci.ids yet */
+#define PCI_VENDOR_ID_NETEFFECT 0x1678
+#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100
+#endif
+
+#define NE020_REV 4
+#define NE020_REV1 5
+
+#define BAR_0 0
+#define BAR_1 2
+
+#define RX_BUF_SIZE (1536 + 8)
+#define NES_REG0_SIZE (4 * 1024)
+#define NES_TX_TIMEOUT (6*HZ)
+#define NES_FIRST_QPN 64
+#define NES_SW_CONTEXT_ALIGN 1024
+
+#define NES_NIC_MAX_NICS 16
+#define NES_MAX_ARP_TABLE_SIZE 4096
+
+#define NES_NIC_CEQ_SIZE 8
+/* NICs will be on a separate CQ */
+#define NES_CCEQ_SIZE ((nesadapter->max_cq / nesadapter->port_count) - 32)
+
+#define NES_MAX_PORT_COUNT 4
+
+#define MAX_DPC_ITERATIONS 128
+
+#define NES_CQP_REQUEST_NO_DOORBELL_RING 0
+#define NES_CQP_REQUEST_RING_DOORBELL 1
+
+#define NES_DRV_OPT_ENABLE_MPA_VER_0 0x00000001
+#define NES_DRV_OPT_DISABLE_MPA_CRC 0x00000002
+#define NES_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
+#define NES_DRV_OPT_DISABLE_INTF 0x00000008
+#define NES_DRV_OPT_ENABLE_MSI 0x00000010
+#define NES_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
+#define NES_DRV_OPT_SUPRESS_OPTION_BC 0x00000040
+#define NES_DRV_OPT_NO_INLINE_DATA 0x00000080
+#define NES_DRV_OPT_DISABLE_INT_MOD 0x00000100
+#define NES_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
+
+#define NES_AEQ_EVENT_TIMEOUT 2500
+#define NES_DISCONNECT_EVENT_TIMEOUT 2000
+
+/* debug levels */
+/* must match userspace */
+#define NES_DBG_HW 0x00000001
+#define NES_DBG_INIT 0x00000002
+#define NES_DBG_ISR 0x00000004
+#define NES_DBG_PHY 0x00000008
+#define NES_DBG_NETDEV 0x00000010
+#define NES_DBG_CM 0x00000020
+#define NES_DBG_CM1 0x00000040
+#define NES_DBG_NIC_RX 0x00000080
+#define NES_DBG_NIC_TX 0x00000100
+#define NES_DBG_CQP 0x00000200
+#define NES_DBG_MMAP 0x00000400
+#define NES_DBG_MR 0x00000800
+#define NES_DBG_PD 0x00001000
+#define NES_DBG_CQ 0x00002000
+#define NES_DBG_QP 0x00004000
+#define NES_DBG_MOD_QP 0x00008000
+#define NES_DBG_AEQ 0x00010000
+#define NES_DBG_IW_RX 0x00020000
+#define NES_DBG_IW_TX 0x00040000
+#define NES_DBG_SHUTDOWN 0x00080000
+#define NES_DBG_RSVD1 0x10000000
+#define NES_DBG_RSVD2 0x20000000
+#define NES_DBG_RSVD3 0x40000000
+#define NES_DBG_RSVD4 0x80000000
+#define NES_DBG_ALL 0xffffffff
+
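+/*
+ * nes_debug_level is a bitmask of the NES_DBG_* flags above; a message is
+ * printed only when its level bit is set, e.g.
+ * nes_debug(NES_DBG_INIT, "hw_rev=%u\n", hw_rev);
+ */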
+#ifdef CONFIG_INFINIBAND_NES_DEBUG
+#define nes_debug(level, fmt, args...) \
+do { \
+ if (level & nes_debug_level) \
+ printk(KERN_ERR PFX "%s[%u]: " fmt, __FUNCTION__, __LINE__, ##args); \
+} while (0)
+
+#define assert(expr) \
+if (!(expr)) { \
+ printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \
+ #expr, __FILE__, __FUNCTION__, __LINE__); \
+}
+
+#define NES_EVENT_TIMEOUT 1200000
+#else
+#define nes_debug(level, fmt, args...)
+#define assert(expr) do {} while (0)
+
+#define NES_EVENT_TIMEOUT 100000
+#endif
+
+#include "nes_hw.h"
+#include "nes_verbs.h"
+#include "nes_context.h"
+#include "nes_user.h"
+#include "nes_cm.h"
+
+extern int max_mtu;
+extern int nics_per_function;
+#define max_frame_len (max_mtu+ETH_HLEN)
+extern int interrupt_mod_interval;
+extern int nes_if_count;
+extern int mpa_version;
+extern int disable_mpa_crc;
+extern unsigned int send_first;
+extern unsigned int nes_drv_opt;
+extern unsigned int nes_debug_level;
+
+extern struct list_head nes_adapter_list;
+extern struct list_head nes_dev_list;
+
+extern struct nes_cm_core *g_cm_core;
+
+extern atomic_t cm_connects;
+extern atomic_t cm_accepts;
+extern atomic_t cm_disconnects;
+extern atomic_t cm_closes;
+extern atomic_t cm_connecteds;
+extern atomic_t cm_connect_reqs;
+extern atomic_t cm_rejects;
+extern atomic_t mod_qp_timouts;
+extern atomic_t qps_created;
+extern atomic_t qps_destroyed;
+extern atomic_t sw_qps_destroyed;
+extern u32 mh_detected;
+extern u32 mh_pauses_sent;
+extern u32 cm_packets_sent;
+extern u32 cm_packets_bounced;
+extern u32 cm_packets_created;
+extern u32 cm_packets_received;
+extern u32 cm_packets_dropped;
+extern u32 cm_packets_retrans;
+extern u32 cm_listens_created;
+extern u32 cm_listens_destroyed;
+extern u32 cm_backlog_drops;
+extern atomic_t cm_loopbacks;
+extern atomic_t cm_nodes_created;
+extern atomic_t cm_nodes_destroyed;
+extern atomic_t cm_accel_dropped_pkts;
+extern atomic_t cm_resets_recvd;
+
+extern u32 crit_err_count;
+extern u32 int_mod_timer_init;
+extern u32 int_mod_cq_depth_256;
+extern u32 int_mod_cq_depth_128;
+extern u32 int_mod_cq_depth_32;
+extern u32 int_mod_cq_depth_24;
+extern u32 int_mod_cq_depth_16;
+extern u32 int_mod_cq_depth_4;
+extern u32 int_mod_cq_depth_1;
+
+extern atomic_t cqp_reqs_allocated;
+extern atomic_t cqp_reqs_freed;
+extern atomic_t cqp_reqs_dynallocated;
+extern atomic_t cqp_reqs_dynfreed;
+extern atomic_t cqp_reqs_queued;
+extern atomic_t cqp_reqs_redriven;
+
+
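+/*
+ * Per PCI function state: register mappings, control QP (CQP) bookkeeping,
+ * interrupt request masks and the net_devices owned by this function.
+ */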
+struct nes_device {
+ struct nes_adapter *nesadapter;
+ void __iomem *regs;
+ void __iomem *index_reg;
+ struct pci_dev *pcidev;
+ struct net_device *netdev[NES_NIC_MAX_NICS];
+ u64 link_status_interrupts;
+ struct tasklet_struct dpc_tasklet;
+ spinlock_t indexed_regs_lock;
+ unsigned long csr_start;
+ unsigned long doorbell_region;
+ unsigned long doorbell_start;
+ unsigned long mac_tx_errors;
+ unsigned long mac_pause_frames_sent;
+ unsigned long mac_pause_frames_received;
+ unsigned long mac_rx_errors;
+ unsigned long mac_rx_crc_errors;
+ unsigned long mac_rx_symbol_err_frames;
+ unsigned long mac_rx_jabber_frames;
+ unsigned long mac_rx_oversized_frames;
+ unsigned long mac_rx_short_frames;
+ unsigned long port_rx_discards;
+ unsigned long port_tx_discards;
+ unsigned int mac_index;
+ unsigned int nes_stack_start;
+
+ /* Control Structures */
+ void *cqp_vbase;
+ dma_addr_t cqp_pbase;
+ u32 cqp_mem_size;
+ u8 ceq_index;
+ u8 nic_ceq_index;
+ struct nes_hw_cqp cqp;
+ struct nes_hw_cq ccq;
+ struct list_head cqp_avail_reqs;
+ struct list_head cqp_pending_reqs;
+ struct nes_cqp_request *nes_cqp_requests;
+
+ u32 int_req;
+ u32 int_stat;
+ u32 timer_int_req;
+ u32 timer_only_int_count;
+ u32 intf_int_req;
+ u32 last_mac_tx_pauses;
+ u32 last_used_chunks_tx;
+ struct list_head list;
+
+ u16 base_doorbell_index;
+ u16 currcq_count;
+ u16 deepcq_count;
+ u8 msi_enabled;
+ u8 netdev_count;
+ u8 napi_isr_ran;
+ u8 disable_rx_flow_control;
+ u8 disable_tx_flow_control;
+};
+
+
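+/* WQE words are little-endian 32-bit quantities; a 64-bit value is stored
+ * across two consecutive words, low half first. */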
+static inline void
+set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value)
+{
+ wqe_words[index] = cpu_to_le32((u32) value);
+ wqe_words[index + 1] = cpu_to_le32(upper_32_bits(value));
+}
+
+static inline void
+set_wqe_32bit_value(__le32 *wqe_words, u32 index, u32 value)
+{
+ wqe_words[index] = cpu_to_le32(value);
+}
+
+static inline void
+nes_fill_init_cqp_wqe(struct nes_hw_cqp_wqe *cqp_wqe, struct nes_device *nesdev)
+{
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_COMP_CTX_LOW_IDX,
+ (u64)((unsigned long) &nesdev->cqp));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_LEN_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_LOW_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PA_LOW_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PA_HIGH_IDX] = 0;
+}
+
+static inline void
+nes_fill_init_qp_wqe(struct nes_hw_qp_wqe *wqe, struct nes_qp *nesqp, u32 head)
+{
+ u32 value;
+ value = ((u32)((unsigned long) nesqp)) | head;
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX,
+ (u32)(upper_32_bits((unsigned long)(nesqp))));
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, value);
+}
+
+/* Read from memory-mapped device */
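+/*
+ * Indexed registers are reached through a small window: the register index
+ * is written at nesdev->index_reg and the data word is then accessed at
+ * index_reg + 4, with indexed_regs_lock keeping the index/data pair atomic.
+ */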
+static inline u32 nes_read_indexed(struct nes_device *nesdev, u32 reg_index)
+{
+ unsigned long flags;
+ void __iomem *addr = nesdev->index_reg;
+ u32 value;
+
+ spin_lock_irqsave(&nesdev->indexed_regs_lock, flags);
+
+ writel(reg_index, addr);
+ value = readl((void __iomem *)addr + 4);
+
+ spin_unlock_irqrestore(&nesdev->indexed_regs_lock, flags);
+ return value;
+}
+
+static inline u32 nes_read32(const void __iomem *addr)
+{
+ return readl(addr);
+}
+
+static inline u16 nes_read16(const void __iomem *addr)
+{
+ return readw(addr);
+}
+
+static inline u8 nes_read8(const void __iomem *addr)
+{
+ return readb(addr);
+}
+
+/* Write to memory-mapped device */
+static inline void nes_write_indexed(struct nes_device *nesdev, u32 reg_index, u32 val)
+{
+ unsigned long flags;
+ void __iomem *addr = nesdev->index_reg;
+
+ spin_lock_irqsave(&nesdev->indexed_regs_lock, flags);
+
+ writel(reg_index, addr);
+ writel(val, (void __iomem *)addr + 4);
+
+ spin_unlock_irqrestore(&nesdev->indexed_regs_lock, flags);
+}
+
+static inline void nes_write32(void __iomem *addr, u32 val)
+{
+ writel(val, addr);
+}
+
+static inline void nes_write16(void __iomem *addr, u16 val)
+{
+ writew(val, addr);
+}
+
+static inline void nes_write8(void __iomem *addr, u8 val)
+{
+ writeb(val, addr);
+}
+
+
+
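+/*
+ * Adapter resources (for example QP numbers) are tracked in per-type
+ * bitmaps.  Allocation searches forward from the hint in *next, wraps
+ * around once, and returns -EMFILE when the bitmap is exhausted.
+ */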
+static inline int nes_alloc_resource(struct nes_adapter *nesadapter,
+ unsigned long *resource_array, u32 max_resources,
+ u32 *req_resource_num, u32 *next)
+{
+ unsigned long flags;
+ u32 resource_num;
+
+ spin_lock_irqsave(&nesadapter->resource_lock, flags);
+
+ resource_num = find_next_zero_bit(resource_array, max_resources, *next);
+ if (resource_num >= max_resources) {
+ resource_num = find_first_zero_bit(resource_array, max_resources);
+ if (resource_num >= max_resources) {
+ printk(KERN_ERR PFX "%s: No available resourcess.\n", __FUNCTION__);
+ spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
+ return -EMFILE;
+ }
+ }
+ set_bit(resource_num, resource_array);
+ *next = resource_num+1;
+ if (*next == max_resources) {
+ *next = 0;
+ }
+ spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
+ *req_resource_num = resource_num;
+
+ return 0;
+}
+
+static inline int nes_is_resource_allocated(struct nes_adapter *nesadapter,
+ unsigned long *resource_array, u32 resource_num)
+{
+ unsigned long flags;
+ int bit_is_set;
+
+ spin_lock_irqsave(&nesadapter->resource_lock, flags);
+
+ bit_is_set = test_bit(resource_num, resource_array);
+ nes_debug(NES_DBG_HW, "resource_num %u is%s allocated.\n",
+ resource_num, (bit_is_set ? "": " not"));
+ spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
+
+ return bit_is_set;
+}
+
+static inline void nes_free_resource(struct nes_adapter *nesadapter,
+ unsigned long *resource_array, u32 resource_num)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&nesadapter->resource_lock, flags);
+ clear_bit(resource_num, resource_array);
+ spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
+}
+
+static inline struct nes_vnic *to_nesvnic(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct nes_ib_device, ibdev)->nesvnic;
+}
+
+static inline struct nes_pd *to_nespd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct nes_pd, ibpd);
+}
+
+static inline struct nes_ucontext *to_nesucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct nes_ucontext, ibucontext);
+}
+
+static inline struct nes_mr *to_nesmr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct nes_mr, ibmr);
+}
+
+static inline struct nes_mr *to_nesmr_from_ibfmr(struct ib_fmr *ibfmr)
+{
+ return container_of(ibfmr, struct nes_mr, ibfmr);
+}
+
+static inline struct nes_mr *to_nesmw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct nes_mr, ibmw);
+}
+
+static inline struct nes_fmr *to_nesfmr(struct nes_mr *nesmr)
+{
+ return container_of(nesmr, struct nes_fmr, nesmr);
+}
+
+static inline struct nes_cq *to_nescq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct nes_cq, ibcq);
+}
+
+static inline struct nes_qp *to_nesqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct nes_qp, ibqp);
+}
+
+
+
+/* nes.c */
+void nes_add_ref(struct ib_qp *);
+void nes_rem_ref(struct ib_qp *);
+struct ib_qp *nes_get_qp(struct ib_device *, int);
+
+
+/* nes_hw.c */
+struct nes_adapter *nes_init_adapter(struct nes_device *, u8);
+void nes_nic_init_timer_defaults(struct nes_device *, u8);
+unsigned int nes_reset_adapter_ne020(struct nes_device *, u8 *);
+int nes_init_serdes(struct nes_device *, u8, u8, u8);
+void nes_init_csr_ne020(struct nes_device *, u8, u8);
+void nes_destroy_adapter(struct nes_adapter *);
+int nes_init_cqp(struct nes_device *);
+int nes_init_phy(struct nes_device *);
+int nes_init_nic_qp(struct nes_device *, struct net_device *);
+void nes_destroy_nic_qp(struct nes_vnic *);
+int nes_napi_isr(struct nes_device *);
+void nes_dpc(unsigned long);
+void nes_process_ceq(struct nes_device *, struct nes_hw_ceq *);
+void nes_process_aeq(struct nes_device *, struct nes_hw_aeq *);
+void nes_process_mac_intr(struct nes_device *, u32);
+void nes_nic_napi_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
+void nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
+void nes_cqp_ce_handler(struct nes_device *, struct nes_hw_cq *);
+void nes_process_iwarp_aeqe(struct nes_device *, struct nes_hw_aeqe *);
+void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
+int nes_destroy_cqp(struct nes_device *);
+int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
+
+/* nes_nic.c */
+void nes_netdev_set_multicast_list(struct net_device *);
+void nes_netdev_exit(struct nes_vnic *);
+struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
+void nes_netdev_destroy(struct net_device *);
+int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
+
+/* nes_cm.c */
+void *nes_cm_create(struct net_device *);
+int nes_cm_recv(struct sk_buff *, struct net_device *);
+void nes_update_arp(unsigned char *, u32, u32, u16, u16);
+void nes_manage_arp_cache(struct net_device *, unsigned char *, u32, u32);
+void nes_sock_release(struct nes_qp *, unsigned long *);
+struct nes_cm_core *nes_cm_alloc_core(void);
+void flush_wqes(struct nes_device *nesdev, struct nes_qp *, u32, u32);
+int nes_manage_apbvt(struct nes_vnic *, u32, u32, u32);
+int nes_cm_disconn(struct nes_qp *);
+void nes_cm_disconn_worker(void *);
+
+/* nes_verbs.c */
+int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32);
+int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
+struct nes_ib_device *nes_init_ofa_device(struct net_device *);
+void nes_destroy_ofa_device(struct nes_ib_device *);
+int nes_register_ofa_device(struct nes_ib_device *);
+void nes_unregister_ofa_device(struct nes_ib_device *);
+
+/* nes_util.c */
+int nes_read_eeprom_values(struct nes_device *, struct nes_adapter *);
+void nes_write_1G_phy_reg(struct nes_device *, u8, u8, u16);
+void nes_read_1G_phy_reg(struct nes_device *, u8, u8, u16 *);
+void nes_write_10G_phy_reg(struct nes_device *, u16, u8, u16);
+void nes_read_10G_phy_reg(struct nes_device *, u16, u8);
+struct nes_cqp_request *nes_get_cqp_request(struct nes_device *);
+void nes_post_cqp_request(struct nes_device *, struct nes_cqp_request *, int);
+int nes_arp_table(struct nes_device *, u32, u8 *, u32);
+void nes_mh_fix(unsigned long);
+void nes_clc(unsigned long);
+void nes_dump_mem(unsigned int, void *, int);
+u32 nes_crc32(u32, u32, u32, u32, u8 *, u32, u32, u32);
+
+#endif /* __NES_H */
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
new file mode 100644
index 000000000000..bd5cfeaac203
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -0,0 +1,3088 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+
+#define TCPOPT_TIMESTAMP 8
+
+#include <asm/atomic.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <linux/notifier.h>
+#include <linux/net.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/list.h>
+#include <linux/threads.h>
+
+#include <net/neighbour.h>
+#include <net/route.h>
+#include <net/ip_fib.h>
+
+#include "nes.h"
+
+u32 cm_packets_sent;
+u32 cm_packets_bounced;
+u32 cm_packets_dropped;
+u32 cm_packets_retrans;
+u32 cm_packets_created;
+u32 cm_packets_received;
+u32 cm_listens_created;
+u32 cm_listens_destroyed;
+u32 cm_backlog_drops;
+atomic_t cm_loopbacks;
+atomic_t cm_nodes_created;
+atomic_t cm_nodes_destroyed;
+atomic_t cm_accel_dropped_pkts;
+atomic_t cm_resets_recvd;
+
+static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
+static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *,
+ struct nes_vnic *, struct nes_cm_info *);
+static int add_ref_cm_node(struct nes_cm_node *);
+static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *);
+static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *);
+
+
+/* External CM API Interface */
+/* instance of function pointers for client API */
+/* set address of this instance to cm_core->cm_ops at cm_core alloc */
+static struct nes_cm_ops nes_cm_api = {
+ mini_cm_accelerated,
+ mini_cm_listen,
+ mini_cm_del_listen,
+ mini_cm_connect,
+ mini_cm_close,
+ mini_cm_accept,
+ mini_cm_reject,
+ mini_cm_recv_pkt,
+ mini_cm_dealloc_core,
+ mini_cm_get,
+ mini_cm_set
+};
+
+struct nes_cm_core *g_cm_core;
+
+atomic_t cm_connects;
+atomic_t cm_accepts;
+atomic_t cm_disconnects;
+atomic_t cm_closes;
+atomic_t cm_connecteds;
+atomic_t cm_connect_reqs;
+atomic_t cm_rejects;
+
+
+/**
+ * create_event
+ */
+static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
+ enum nes_cm_event_type type)
+{
+ struct nes_cm_event *event;
+
+ if (!cm_node->cm_id)
+ return NULL;
+
+ /* allocate an empty event */
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+
+ if (!event)
+ return NULL;
+
+ event->type = type;
+ event->cm_node = cm_node;
+ event->cm_info.rem_addr = cm_node->rem_addr;
+ event->cm_info.loc_addr = cm_node->loc_addr;
+ event->cm_info.rem_port = cm_node->rem_port;
+ event->cm_info.loc_port = cm_node->loc_port;
+ event->cm_info.cm_id = cm_node->cm_id;
+
+ nes_debug(NES_DBG_CM, "Created event=%p, type=%u, dst_addr=%08x[%x],"
+ " src_addr=%08x[%x]\n",
+ event, type,
+ event->cm_info.loc_addr, event->cm_info.loc_port,
+ event->cm_info.rem_addr, event->cm_info.rem_port);
+
+ nes_cm_post_event(event);
+ return event;
+}
+
+
+/**
+ * send_mpa_request
+ */
+int send_mpa_request(struct nes_cm_node *cm_node)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = get_free_pkt(cm_node);
+ if (!skb) {
+ nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+ return -1;
+ }
+
+ /* send an MPA Request frame */
+ form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame,
+ cm_node->mpa_frame_size, SET_ACK);
+
+ ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+
+/**
+ * parse_mpa - process a received TCP pkt; we expect an
+ * IETF MPA frame
+ */
+static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len)
+{
+ struct ietf_mpa_frame *mpa_frame;
+
+ /* assume req frame is in tcp data payload */
+ if (len < sizeof(struct ietf_mpa_frame)) {
+ nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len);
+ return -1;
+ }
+
+ mpa_frame = (struct ietf_mpa_frame *)buffer;
+ cm_node->mpa_frame_size = ntohs(mpa_frame->priv_data_len);
+
+ if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) {
+ nes_debug(NES_DBG_CM, "The received ietf buffer was not right"
+ " complete (%x + %x != %x)\n",
+ cm_node->mpa_frame_size, (u32)sizeof(struct ietf_mpa_frame), len);
+ return -1;
+ }
+
+ /* copy entire MPA frame to our cm_node's frame */
+ memcpy(cm_node->mpa_frame_buf, buffer + sizeof(struct ietf_mpa_frame),
+ cm_node->mpa_frame_size);
+
+ return 0;
+}
+
+
+/**
+ * handle_exception_pkt - process an exception packet.
+ * We have been in a TSA state, and we have now received SW
+ * TCP/IP traffic should be a FIN request or IP pkt with options
+ */
+static int handle_exception_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb)
+{
+ int ret = 0;
+ struct tcphdr *tcph = tcp_hdr(skb);
+
+ /* first check to see if this a FIN pkt */
+ if (tcph->fin) {
+ /* we need to ACK the FIN request */
+ send_ack(cm_node);
+
+ /* check which side we are (client/server) and set next state accordingly */
+ if (cm_node->tcp_cntxt.client)
+ cm_node->state = NES_CM_STATE_CLOSING;
+ else {
+ /* we are the server side */
+ cm_node->state = NES_CM_STATE_CLOSE_WAIT;
+ /* since this is a self contained CM we don't wait for */
+ /* an APP to close us, just send final FIN immediately */
+ ret = send_fin(cm_node, NULL);
+ cm_node->state = NES_CM_STATE_LAST_ACK;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+
+/**
+ * form_cm_frame - get a free packet and build an empty frame;
+ * use node info to build it.
+ */
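+/*
+ * The software CM hand-builds its own wire frames: Ethernet, IPv4 and TCP
+ * headers (plus optional TCP options and payload) are filled in from the
+ * cm_node's addressing and sequence-number state and transmitted with
+ * nes_nic_cm_xmit() rather than through the host TCP stack.
+ */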
+struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node,
+ void *options, u32 optionsize, void *data, u32 datasize, u8 flags)
+{
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ struct ethhdr *ethh;
+ u8 *buf;
+ u16 packetsize = sizeof(*iph);
+
+ packetsize += sizeof(*tcph);
+ packetsize += optionsize + datasize;
+
+ memset(skb->data, 0x00, ETH_HLEN + sizeof(*iph) + sizeof(*tcph));
+
+ skb->len = 0;
+ buf = skb_put(skb, packetsize + ETH_HLEN);
+
+ ethh = (struct ethhdr *) buf;
+ buf += ETH_HLEN;
+
+ iph = (struct iphdr *)buf;
+ buf += sizeof(*iph);
+ tcph = (struct tcphdr *)buf;
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, ETH_HLEN);
+ skb_set_transport_header(skb, ETH_HLEN+sizeof(*iph));
+ buf += sizeof(*tcph);
+
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->protocol = htons(0x800);
+ skb->data_len = 0;
+ skb->mac_len = ETH_HLEN;
+
+ memcpy(ethh->h_dest, cm_node->rem_mac, ETH_ALEN);
+ memcpy(ethh->h_source, cm_node->loc_mac, ETH_ALEN);
+ ethh->h_proto = htons(0x0800);
+
+ iph->version = IPVERSION;
+ iph->ihl = 5; /* 5 * 4Byte words, IP header len */
+ iph->tos = 0;
+ iph->tot_len = htons(packetsize);
+ iph->id = htons(++cm_node->tcp_cntxt.loc_id);
+
+ iph->frag_off = htons(0x4000);
+ iph->ttl = 0x40;
+ iph->protocol = 0x06; /* IPPROTO_TCP */
+
+ iph->saddr = htonl(cm_node->loc_addr);
+ iph->daddr = htonl(cm_node->rem_addr);
+
+ tcph->source = htons(cm_node->loc_port);
+ tcph->dest = htons(cm_node->rem_port);
+ tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
+
+ if (flags & SET_ACK) {
+ cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
+ tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
+ tcph->ack = 1;
+ } else
+ tcph->ack_seq = 0;
+
+ if (flags & SET_SYN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->syn = 1;
+ } else
+ cm_node->tcp_cntxt.loc_seq_num += datasize; /* data (no headers) */
+
+ if (flags & SET_FIN)
+ tcph->fin = 1;
+
+ if (flags & SET_RST)
+ tcph->rst = 1;
+
+ tcph->doff = (u16)((sizeof(*tcph) + optionsize + 3) >> 2);
+ tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
+ tcph->urg_ptr = 0;
+ if (optionsize)
+ memcpy(buf, options, optionsize);
+ buf += optionsize;
+ if (datasize)
+ memcpy(buf, data, datasize);
+
+ skb_shinfo(skb)->nr_frags = 0;
+ cm_packets_created++;
+
+ return skb;
+}
+
+
+/**
+ * print_core - dump a cm core
+ */
+static void print_core(struct nes_cm_core *core)
+{
+ nes_debug(NES_DBG_CM, "---------------------------------------------\n");
+ nes_debug(NES_DBG_CM, "CM Core -- (core = %p )\n", core);
+ if (!core)
+ return;
+ nes_debug(NES_DBG_CM, "---------------------------------------------\n");
+ nes_debug(NES_DBG_CM, "Session ID : %u \n", atomic_read(&core->session_id));
+
+ nes_debug(NES_DBG_CM, "State : %u \n", core->state);
+
+ nes_debug(NES_DBG_CM, "Tx Free cnt : %u \n", skb_queue_len(&core->tx_free_list));
+ nes_debug(NES_DBG_CM, "Listen Nodes : %u \n", atomic_read(&core->listen_node_cnt));
+ nes_debug(NES_DBG_CM, "Active Nodes : %u \n", atomic_read(&core->node_cnt));
+
+ nes_debug(NES_DBG_CM, "core : %p \n", core);
+
+ nes_debug(NES_DBG_CM, "-------------- end core ---------------\n");
+}
+
+
+/**
+ * schedule_nes_timer
+ * note - cm_node needs to be protected before calling this; enclose the
+ * call in add_ref_cm_node(cm_node); ... rem_ref_cm_node(cm_core, cm_node);
+ */
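+/*
+ * NES_TIMER_TYPE_SEND entries are transmitted immediately and then queued on
+ * the node's retrans_list so nes_cm_timer_tick() can retransmit them;
+ * NES_TIMER_TYPE_CLOSE and NES_TIMER_TYPE_RECV entries are only queued on
+ * the recv_list for deferred handling by the timer.
+ */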
+int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ enum nes_timer_type type, int send_retrans,
+ int close_when_complete)
+{
+ unsigned long flags;
+ struct nes_cm_core *cm_core;
+ struct nes_timer_entry *new_send;
+ int ret = 0;
+ u32 was_timer_set;
+
+ if (!cm_node)
+ return -EINVAL;
+
+ new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
+ if (!new_send)
+ return -1;
+
+ /* new_send->timetosend = currenttime */
+ new_send->retrycount = NES_DEFAULT_RETRYS;
+ new_send->retranscount = NES_DEFAULT_RETRANS;
+ new_send->skb = skb;
+ new_send->timetosend = jiffies;
+ new_send->type = type;
+ new_send->netdev = cm_node->netdev;
+ new_send->send_retrans = send_retrans;
+ new_send->close_when_complete = close_when_complete;
+
+ if (type == NES_TIMER_TYPE_CLOSE) {
+ new_send->timetosend += (HZ/2); /* TODO: decide on the correct value here */
+ spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+ list_add_tail(&new_send->list, &cm_node->recv_list);
+ spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+ }
+
+ if (type == NES_TIMER_TYPE_SEND) {
+ new_send->seq_num = htonl(tcp_hdr(skb)->seq);
+ atomic_inc(&new_send->skb->users);
+
+ ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev);
+ if (ret != NETDEV_TX_OK) {
+ nes_debug(NES_DBG_CM, "Error sending packet %p (jiffies = %lu)\n",
+ new_send, jiffies);
+ atomic_dec(&new_send->skb->users);
+ new_send->timetosend = jiffies;
+ } else {
+ cm_packets_sent++;
+ if (!send_retrans) {
+ if (close_when_complete)
+ rem_ref_cm_node(cm_node->cm_core, cm_node);
+ dev_kfree_skb_any(new_send->skb);
+ kfree(new_send);
+ return ret;
+ }
+ new_send->timetosend = jiffies + NES_RETRY_TIMEOUT;
+ }
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ list_add_tail(&new_send->list, &cm_node->retrans_list);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ }
+ if (type == NES_TIMER_TYPE_RECV) {
+ new_send->seq_num = htonl(tcp_hdr(skb)->seq);
+ new_send->timetosend = jiffies;
+ spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+ list_add_tail(&new_send->list, &cm_node->recv_list);
+ spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+ }
+ cm_core = cm_node->cm_core;
+
+ was_timer_set = timer_pending(&cm_core->tcp_timer);
+
+ if (!was_timer_set) {
+ cm_core->tcp_timer.expires = new_send->timetosend;
+ add_timer(&cm_core->tcp_timer);
+ }
+
+ return ret;
+}
+
+
+/**
+ * nes_cm_timer_tick
+ */
+void nes_cm_timer_tick(unsigned long pass)
+{
+ unsigned long flags, qplockflags;
+ unsigned long nexttimeout = jiffies + NES_LONG_TIME;
+ struct iw_cm_id *cm_id;
+ struct nes_cm_node *cm_node;
+ struct nes_timer_entry *send_entry, *recv_entry;
+ struct list_head *list_core, *list_core_temp;
+ struct list_head *list_node, *list_node_temp;
+ struct nes_cm_core *cm_core = g_cm_core;
+ struct nes_qp *nesqp;
+ struct sk_buff *skb;
+ u32 settimer = 0;
+ int ret = NETDEV_TX_OK;
+ int node_done;
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+
+ list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
+ cm_node = container_of(list_node, struct nes_cm_node, list);
+ add_ref_cm_node(cm_node);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+ list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) {
+ recv_entry = container_of(list_core, struct nes_timer_entry, list);
+ if ((time_after(recv_entry->timetosend, jiffies)) &&
+ (recv_entry->type == NES_TIMER_TYPE_CLOSE)) {
+ if (nexttimeout > recv_entry->timetosend || !settimer) {
+ nexttimeout = recv_entry->timetosend;
+ settimer = 1;
+ }
+ continue;
+ }
+ list_del(&recv_entry->list);
+ cm_id = cm_node->cm_id;
+ spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+ if (recv_entry->type == NES_TIMER_TYPE_CLOSE) {
+ nesqp = (struct nes_qp *)recv_entry->skb;
+ spin_lock_irqsave(&nesqp->lock, qplockflags);
+ if (nesqp->cm_id) {
+ nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, refcount = %d: "
+ "****** HIT A NES_TIMER_TYPE_CLOSE"
+ " with something to do!!! ******\n",
+ nesqp->hwqp.qp_id, cm_id,
+ atomic_read(&nesqp->refcount));
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
+ nesqp->ibqp_state = IB_QPS_ERR;
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_cm_disconn(nesqp);
+ } else {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, refcount = %d:"
+ " ****** HIT A NES_TIMER_TYPE_CLOSE"
+ " with nothing to do!!! ******\n",
+ nesqp->hwqp.qp_id, cm_id,
+ atomic_read(&nesqp->refcount));
+ nes_rem_ref(&nesqp->ibqp);
+ }
+ if (cm_id)
+ cm_id->rem_ref(cm_id);
+ }
+ kfree(recv_entry);
+ spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+ }
+ spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ node_done = 0;
+ list_for_each_safe(list_core, list_node_temp, &cm_node->retrans_list) {
+ if (node_done) {
+ break;
+ }
+ send_entry = container_of(list_core, struct nes_timer_entry, list);
+ if (time_after(send_entry->timetosend, jiffies)) {
+ if (cm_node->state != NES_CM_STATE_TSA) {
+ if ((nexttimeout > send_entry->timetosend) || !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ node_done = 1;
+ continue;
+ } else {
+ list_del(&send_entry->list);
+ skb = send_entry->skb;
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ dev_kfree_skb_any(skb);
+ kfree(send_entry);
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ continue;
+ }
+ }
+ if (send_entry->type == NES_TIMER_NODE_CLEANUP) {
+ list_del(&send_entry->list);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ kfree(send_entry);
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ continue;
+ }
+ if ((send_entry->seq_num < cm_node->tcp_cntxt.rem_ack_num) ||
+ (cm_node->state == NES_CM_STATE_TSA) ||
+ (cm_node->state == NES_CM_STATE_CLOSED)) {
+ skb = send_entry->skb;
+ list_del(&send_entry->list);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ kfree(send_entry);
+ dev_kfree_skb_any(skb);
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ continue;
+ }
+
+ if (!send_entry->retranscount || !send_entry->retrycount) {
+ cm_packets_dropped++;
+ skb = send_entry->skb;
+ list_del(&send_entry->list);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ dev_kfree_skb_any(skb);
+ kfree(send_entry);
+ if (cm_node->state == NES_CM_STATE_SYN_RCVD) {
+ /* this node never even generated an indication up to the cm */
+ rem_ref_cm_node(cm_core, cm_node);
+ } else {
+ cm_node->state = NES_CM_STATE_CLOSED;
+ create_event(cm_node, NES_CM_EVENT_ABORTED);
+ }
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ continue;
+ }
+ /* this seems like the correct place, but leave send entry unprotected */
+ /* spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); */
+ atomic_inc(&send_entry->skb->users);
+ cm_packets_retrans++;
+ nes_debug(NES_DBG_CM, "Retransmitting send_entry %p for node %p,"
+ " jiffies = %lu, time to send = %lu, retranscount = %u, "
+ "send_entry->seq_num = 0x%08X, cm_node->tcp_cntxt.rem_ack_num = 0x%08X\n",
+ send_entry, cm_node, jiffies, send_entry->timetosend, send_entry->retranscount,
+ send_entry->seq_num, cm_node->tcp_cntxt.rem_ack_num);
+
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ ret = nes_nic_cm_xmit(send_entry->skb, cm_node->netdev);
+ if (ret != NETDEV_TX_OK) {
+ cm_packets_bounced++;
+ atomic_dec(&send_entry->skb->users);
+ send_entry->retrycount--;
+ nexttimeout = jiffies + NES_SHORT_TIME;
+ settimer = 1;
+ node_done = 1;
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ continue;
+ } else {
+ cm_packets_sent++;
+ }
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ list_del(&send_entry->list);
+ nes_debug(NES_DBG_CM, "Packet Sent: retrans count = %u, retry count = %u.\n",
+ send_entry->retranscount, send_entry->retrycount);
+ if (send_entry->send_retrans) {
+ send_entry->retranscount--;
+ send_entry->timetosend = jiffies + NES_RETRY_TIMEOUT;
+ if (nexttimeout > send_entry->timetosend || !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ list_add(&send_entry->list, &cm_node->retrans_list);
+ continue;
+ } else {
+ int close_when_complete;
+ skb = send_entry->skb;
+ close_when_complete = send_entry->close_when_complete;
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ if (close_when_complete) {
+ BUG_ON(atomic_read(&cm_node->ref_count) == 1);
+ rem_ref_cm_node(cm_core, cm_node);
+ }
+ dev_kfree_skb_any(skb);
+ kfree(send_entry);
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ continue;
+ }
+ }
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+
+ rem_ref_cm_node(cm_core, cm_node);
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ if (ret != NETDEV_TX_OK)
+ break;
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ if (settimer) {
+ if (!timer_pending(&cm_core->tcp_timer)) {
+ cm_core->tcp_timer.expires = nexttimeout;
+ add_timer(&cm_core->tcp_timer);
+ }
+ }
+}
+
+
+/**
+ * send_syn - build the TCP options and send a SYN (or SYN-ACK) frame
+ */
+int send_syn(struct nes_cm_node *cm_node, u32 sendack)
+{
+ int ret;
+ int flags = SET_SYN;
+ struct sk_buff *skb;
+ char optionsbuffer[sizeof(struct option_mss) +
+ sizeof(struct option_windowscale) +
+ sizeof(struct option_base) + 1];
+
+ int optionssize = 0;
+ /* Sending MSS option */
+ union all_known_options *options;
+
+ if (!cm_node)
+ return -EINVAL;
+
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_mss.optionnum = OPTION_NUMBER_MSS;
+ options->as_mss.length = sizeof(struct option_mss);
+ options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
+ optionssize += sizeof(struct option_mss);
+
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
+ options->as_windowscale.length = sizeof(struct option_windowscale);
+ options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
+ optionssize += sizeof(struct option_windowscale);
+
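+ /* the base (write0) option is only advertised on a SYN-ACK, and only
+ * when the driver option to suppress it is not set */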
+ if (sendack && !(NES_DRV_OPT_SUPRESS_OPTION_BC & nes_drv_opt)) {
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_base.optionnum = OPTION_NUMBER_WRITE0;
+ options->as_base.length = sizeof(struct option_base);
+ optionssize += sizeof(struct option_base);
+ /* we need the size to be a multiple of 4 */
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_end = 1;
+ optionssize += 1;
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_end = 1;
+ optionssize += 1;
+ }
+
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_end = OPTION_NUMBER_END;
+ optionssize += 1;
+
+ skb = get_free_pkt(cm_node);
+ if (!skb) {
+ nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+ return -1;
+ }
+
+ if (sendack)
+ flags |= SET_ACK;
+
+ form_cm_frame(skb, cm_node, optionsbuffer, optionssize, NULL, 0, flags);
+ ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
+
+ return ret;
+}
+
+
+/**
+ * send_reset
+ */
+int send_reset(struct nes_cm_node *cm_node)
+{
+ int ret;
+ struct sk_buff *skb = get_free_pkt(cm_node);
+ int flags = SET_RST | SET_ACK;
+
+ if (!skb) {
+ nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+ return -1;
+ }
+
+ add_ref_cm_node(cm_node);
+ form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, flags);
+ ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 1);
+
+ return ret;
+}
+
+
+/**
+ * send_ack
+ */
+int send_ack(struct nes_cm_node *cm_node)
+{
+ int ret;
+ struct sk_buff *skb = get_free_pkt(cm_node);
+
+ if (!skb) {
+ nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+ return -1;
+ }
+
+ form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_ACK);
+ ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 0);
+
+ return ret;
+}
+
+
+/**
+ * send_fin
+ */
+int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
+{
+ int ret;
+
+ /* if we didn't get a frame get one */
+ if (!skb)
+ skb = get_free_pkt(cm_node);
+
+ if (!skb) {
+ nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+ return -1;
+ }
+
+ form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_ACK | SET_FIN);
+ ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
+
+ return ret;
+}
+
+
+/**
+ * get_free_pkt - take an skb from the preallocated tx free list,
+ * refilling the list first if it has fallen below the low watermark
+ */
+struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node)
+{
+ struct sk_buff *skb, *new_skb;
+
+ /* check to see if we need to repopulate the free tx pkt queue */
+ if (skb_queue_len(&cm_node->cm_core->tx_free_list) < NES_CM_FREE_PKT_LO_WATERMARK) {
+ while (skb_queue_len(&cm_node->cm_core->tx_free_list) <
+ cm_node->cm_core->free_tx_pkt_max) {
+ /* replace the frame we took, we won't get it back */
+ new_skb = dev_alloc_skb(cm_node->cm_core->mtu);
+ BUG_ON(!new_skb);
+ /* add a replacement frame to the free tx list head */
+ skb_queue_head(&cm_node->cm_core->tx_free_list, new_skb);
+ }
+ }
+
+ skb = skb_dequeue(&cm_node->cm_core->tx_free_list);
+
+ return skb;
+}
+
+
+/**
+ * make_hashkey - generate hash key from node tuple
+ */
+static inline int make_hashkey(u16 loc_port, nes_addr_t loc_addr, u16 rem_port,
+ nes_addr_t rem_addr)
+{
+ u32 hashkey = 0;
+
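+ /* simple additive hash of the connection 4-tuple, folded into the table size */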
+ hashkey = loc_addr + rem_addr + loc_port + rem_port;
+ hashkey = (hashkey % NES_CM_HASHTABLE_SIZE);
+
+ return hashkey;
+}
+
+
+/**
+ * find_node - find a cm node that matches the reference cm node
+ */
+static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
+ u16 rem_port, nes_addr_t rem_addr, u16 loc_port, nes_addr_t loc_addr)
+{
+ unsigned long flags;
+ u32 hashkey;
+ struct list_head *list_pos;
+ struct list_head *hte;
+ struct nes_cm_node *cm_node;
+
+ /* make a hash index key for this packet */
+ hashkey = make_hashkey(loc_port, loc_addr, rem_port, rem_addr);
+
+ /* get a handle on the hte */
+ hte = &cm_core->connected_nodes;
+
+ nes_debug(NES_DBG_CM, "Searching for an owner node:%x:%x from core %p->%p\n",
+ loc_addr, loc_port, cm_core, hte);
+
+ /* walk list and find cm_node associated with this session ID */
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ list_for_each(list_pos, hte) {
+ cm_node = container_of(list_pos, struct nes_cm_node, list);
+ /* compare quad, return node handle if a match */
+ nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n",
+ cm_node->loc_addr, cm_node->loc_port,
+ loc_addr, loc_port,
+ cm_node->rem_addr, cm_node->rem_port,
+ rem_addr, rem_port);
+ if ((cm_node->loc_addr == loc_addr) && (cm_node->loc_port == loc_port) &&
+ (cm_node->rem_addr == rem_addr) && (cm_node->rem_port == rem_port)) {
+ add_ref_cm_node(cm_node);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ return cm_node;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ /* no owner node */
+ return NULL;
+}
+
+
+/**
+ * find_listener - find a cm node listening on this addr-port pair
+ */
+static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
+ nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state)
+{
+ unsigned long flags;
+ struct list_head *listen_list;
+ struct nes_cm_listener *listen_node;
+
+ /* walk list and find cm_node associated with this session ID */
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each(listen_list, &cm_core->listen_list.list) {
+ listen_node = container_of(listen_list, struct nes_cm_listener, list);
+ /* compare node pair, return node handle if a match */
+ if (((listen_node->loc_addr == dst_addr) ||
+ listen_node->loc_addr == 0x00000000) &&
+ (listen_node->loc_port == dst_port) &&
+ (listener_state & listen_node->listener_state)) {
+ atomic_inc(&listen_node->ref_count);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ return listen_node;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ nes_debug(NES_DBG_CM, "Unable to find listener- %x:%x\n",
+ dst_addr, dst_port);
+
+ /* no listener */
+ return NULL;
+}
+
+
+/**
+ * add_hte_node - add a cm node to the hash table
+ */
+static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
+{
+ unsigned long flags;
+ u32 hashkey;
+ struct list_head *hte;
+
+ if (!cm_node || !cm_core)
+ return -EINVAL;
+
+ nes_debug(NES_DBG_CM, "Adding Node to Active Connection HT\n");
+
+ /* first, make an index into our hash table */
+ hashkey = make_hashkey(cm_node->loc_port, cm_node->loc_addr,
+ cm_node->rem_port, cm_node->rem_addr);
+ cm_node->hashkey = hashkey;
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+
+ /* get a handle on the hash table element (list head for this slot) */
+ hte = &cm_core->connected_nodes;
+ list_add_tail(&cm_node->list, hte);
+ atomic_inc(&cm_core->ht_node_cnt);
+
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ return 0;
+}
+
+
+/**
+ * mini_cm_dec_refcnt_listen - drop a reference on a listener and destroy
+ * it once the count reaches zero
+ */
+static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
+ struct nes_cm_listener *listener, int free_hanging_nodes)
+{
+ int ret = 1;
+ unsigned long flags;
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ if (!atomic_dec_return(&listener->ref_count)) {
+ list_del(&listener->list);
+
+ /* decrement our listen node count */
+ atomic_dec(&cm_core->listen_node_cnt);
+
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ if (listener->nesvnic) {
+ nes_manage_apbvt(listener->nesvnic, listener->loc_port,
+ PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL);
+ }
+
+ nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener);
+
+ kfree(listener);
+ ret = 0;
+ cm_listens_destroyed++;
+ } else {
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ }
+ if (listener) {
+ if (atomic_read(&listener->pend_accepts_cnt) > 0)
+ nes_debug(NES_DBG_CM, "destroying listener (%p)"
+ " with non-zero pending accepts=%u\n",
+ listener, atomic_read(&listener->pend_accepts_cnt));
+ }
+
+ return ret;
+}
+
+
+/**
+ * mini_cm_del_listen
+ */
+static int mini_cm_del_listen(struct nes_cm_core *cm_core,
+ struct nes_cm_listener *listener)
+{
+ listener->listener_state = NES_CM_LISTENER_PASSIVE_STATE;
+ listener->cm_id = NULL; /* going to be destroyed pretty soon */
+ return mini_cm_dec_refcnt_listen(cm_core, listener, 1);
+}
+
+
+/**
+ * mini_cm_accelerated - mark a node as offloaded to the HW and make sure
+ * the CM timer is running
+ */
+static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
+ struct nes_cm_node *cm_node)
+{
+ u32 was_timer_set;
+ cm_node->accelerated = 1;
+
+ if (cm_node->accept_pend) {
+ BUG_ON(!cm_node->listener);
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
+ }
+
+ was_timer_set = timer_pending(&cm_core->tcp_timer);
+ if (!was_timer_set) {
+ cm_core->tcp_timer.expires = jiffies + NES_SHORT_TIME;
+ add_timer(&cm_core->tcp_timer);
+ }
+
+ return 0;
+}
+
+
+/**
+ * nes_addr_send_arp
+ */
+static void nes_addr_send_arp(u32 dst_ip)
+{
+ struct rtable *rt;
+ struct flowi fl;
+
+ memset(&fl, 0, sizeof fl);
+ fl.nl_u.ip4_u.daddr = htonl(dst_ip);
+ if (ip_route_output_key(&init_net, &rt, &fl)) {
+ printk("%s: ip_route_output_key failed for 0x%08X\n",
+ __FUNCTION__, dst_ip);
+ return;
+ }
+
+ neigh_event_send(rt->u.dst.neighbour, NULL);
+ ip_rt_put(rt);
+}
+
+
+/**
+ * make_cm_node - create a new instance of a cm node
+ */
+static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
+ struct nes_vnic *nesvnic, struct nes_cm_info *cm_info,
+ struct nes_cm_listener *listener)
+{
+ struct nes_cm_node *cm_node;
+ struct timespec ts;
+ int arpindex = 0;
+ struct nes_device *nesdev;
+ struct nes_adapter *nesadapter;
+
+ /* create an hte and cm_node for this instance */
+ cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
+ if (!cm_node)
+ return NULL;
+
+ /* set our node specific transport info */
+ cm_node->loc_addr = cm_info->loc_addr;
+ cm_node->rem_addr = cm_info->rem_addr;
+ cm_node->loc_port = cm_info->loc_port;
+ cm_node->rem_port = cm_info->rem_port;
+ cm_node->send_write0 = send_first;
+ nes_debug(NES_DBG_CM, "Make node addresses : loc = %x:%x, rem = %x:%x\n",
+ cm_node->loc_addr, cm_node->loc_port, cm_node->rem_addr, cm_node->rem_port);
+ cm_node->listener = listener;
+ cm_node->netdev = nesvnic->netdev;
+ cm_node->cm_id = cm_info->cm_id;
+ memcpy(cm_node->loc_mac, nesvnic->netdev->dev_addr, ETH_ALEN);
+
+ nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n",
+ cm_node->listener, cm_node->cm_id);
+
+ INIT_LIST_HEAD(&cm_node->retrans_list);
+ spin_lock_init(&cm_node->retrans_list_lock);
+ INIT_LIST_HEAD(&cm_node->recv_list);
+ spin_lock_init(&cm_node->recv_list_lock);
+
+ cm_node->loopbackpartner = NULL;
+ atomic_set(&cm_node->ref_count, 1);
+ /* associate our parent CM core */
+ cm_node->cm_core = cm_core;
+ cm_node->tcp_cntxt.loc_id = NES_CM_DEF_LOCAL_ID;
+ cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
+ cm_node->tcp_cntxt.rcv_wnd = NES_CM_DEFAULT_RCV_WND_SCALED >>
+ NES_CM_DEFAULT_RCV_WND_SCALE;
+ ts = current_kernel_time();
+ cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec);
+ cm_node->tcp_cntxt.mss = nesvnic->max_frame_size - sizeof(struct iphdr) -
+ sizeof(struct tcphdr) - ETH_HLEN;
+ cm_node->tcp_cntxt.rcv_nxt = 0;
+ /* get a unique session ID; add the thread id to an up-counter to handle races */
+ atomic_inc(&cm_core->node_cnt);
+ atomic_inc(&cm_core->session_id);
+ cm_node->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
+ cm_node->conn_type = cm_info->conn_type;
+ cm_node->apbvt_set = 0;
+ cm_node->accept_pend = 0;
+
+ cm_node->nesvnic = nesvnic;
+ /* get some device handles, for arp lookup */
+ nesdev = nesvnic->nesdev;
+ nesadapter = nesdev->nesadapter;
+
+ cm_node->loopbackpartner = NULL;
+ /* get the mac addr for the remote node */
+ arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
+ if (arpindex < 0) {
+ kfree(cm_node);
+ nes_addr_send_arp(cm_info->rem_addr);
+ return NULL;
+ }
+
+ /* copy the mac addr to node context */
+ memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
+ nes_debug(NES_DBG_CM, "Remote mac addr from arp table:%02x,"
+ " %02x, %02x, %02x, %02x, %02x\n",
+ cm_node->rem_mac[0], cm_node->rem_mac[1],
+ cm_node->rem_mac[2], cm_node->rem_mac[3],
+ cm_node->rem_mac[4], cm_node->rem_mac[5]);
+
+ add_hte_node(cm_core, cm_node);
+ atomic_inc(&cm_nodes_created);
+
+ return cm_node;
+}
+
+
+/**
+ * add_ref_cm_node - take an additional reference on a cm node
+ */
+static int add_ref_cm_node(struct nes_cm_node *cm_node)
+{
+ atomic_inc(&cm_node->ref_count);
+ return 0;
+}
+
+
+/**
+ * rem_ref_cm_node - drop a reference on a cm node and destroy it when the
+ * count reaches zero
+ */
+static int rem_ref_cm_node(struct nes_cm_core *cm_core,
+ struct nes_cm_node *cm_node)
+{
+ unsigned long flags, qplockflags;
+ struct nes_timer_entry *send_entry;
+ struct nes_timer_entry *recv_entry;
+ struct iw_cm_id *cm_id;
+ struct list_head *list_core, *list_node_temp;
+ struct nes_qp *nesqp;
+
+ if (!cm_node)
+ return -EINVAL;
+
+ spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
+ if (atomic_dec_return(&cm_node->ref_count)) {
+ spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
+ return 0;
+ }
+ list_del(&cm_node->list);
+ atomic_dec(&cm_core->ht_node_cnt);
+ spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
+
+ /* if the node is destroyed before connection was accelerated */
+ if (!cm_node->accelerated && cm_node->accept_pend) {
+ BUG_ON(!cm_node->listener);
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
+ }
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ list_for_each_safe(list_core, list_node_temp, &cm_node->retrans_list) {
+ send_entry = container_of(list_core, struct nes_timer_entry, list);
+ list_del(&send_entry->list);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ dev_kfree_skb_any(send_entry->skb);
+ kfree(send_entry);
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ continue;
+ }
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+
+ spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+ list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) {
+ recv_entry = container_of(list_core, struct nes_timer_entry, list);
+ list_del(&recv_entry->list);
+ cm_id = cm_node->cm_id;
+ spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+ if (recv_entry->type == NES_TIMER_TYPE_CLOSE) {
+ nesqp = (struct nes_qp *)recv_entry->skb;
+ spin_lock_irqsave(&nesqp->lock, qplockflags);
+ if (nesqp->cm_id) {
+ nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: ****** HIT A NES_TIMER_TYPE_CLOSE"
+ " with something to do!!! ******\n",
+ nesqp->hwqp.qp_id, cm_id);
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
+ nesqp->ibqp_state = IB_QPS_ERR;
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_cm_disconn(nesqp);
+ } else {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: ****** HIT A NES_TIMER_TYPE_CLOSE"
+ " with nothing to do!!! ******\n",
+ nesqp->hwqp.qp_id, cm_id);
+ nes_rem_ref(&nesqp->ibqp);
+ }
+ cm_id->rem_ref(cm_id);
+ } else if (recv_entry->type == NES_TIMER_TYPE_RECV) {
+ dev_kfree_skb_any(recv_entry->skb);
+ }
+ kfree(recv_entry);
+ spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+ }
+ spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+
+ if (cm_node->listener) {
+ mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0);
+ } else {
+ if (cm_node->apbvt_set && cm_node->nesvnic) {
+ nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port,
+ PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn),
+ NES_MANAGE_APBVT_DEL);
+ }
+ }
+
+ kfree(cm_node);
+ atomic_dec(&cm_core->node_cnt);
+ atomic_inc(&cm_nodes_destroyed);
+
+ return 0;
+}
+
+
+/**
+ * process_options - parse the TCP options of an incoming segment and
+ * update the node's MSS, window scale and write0 settings
+ */
+static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 optionsize, u32 syn_packet)
+{
+ u32 tmp;
+ u32 offset = 0;
+ union all_known_options *all_options;
+ char got_mss_option = 0;
+
+ while (offset < optionsize) {
+ all_options = (union all_known_options *)(optionsloc + offset);
+ switch (all_options->as_base.optionnum) {
+ case OPTION_NUMBER_END:
+ offset = optionsize;
+ break;
+ case OPTION_NUMBER_NONE:
+ offset += 1;
+ continue;
+ case OPTION_NUMBER_MSS:
+ nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d Size: %d\n",
+ __FUNCTION__,
+ all_options->as_mss.length, offset, optionsize);
+ got_mss_option = 1;
+ if (all_options->as_mss.length != 4) {
+ return 1;
+ } else {
+ tmp = ntohs(all_options->as_mss.mss);
+ if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
+ cm_node->tcp_cntxt.mss = tmp;
+ }
+ break;
+ case OPTION_NUMBER_WINDOW_SCALE:
+ cm_node->tcp_cntxt.snd_wscale = all_options->as_windowscale.shiftcount;
+ break;
+ case OPTION_NUMBER_WRITE0:
+ cm_node->send_write0 = 1;
+ break;
+ default:
+ nes_debug(NES_DBG_CM, "TCP Option not understood: %x\n",
+ all_options->as_base.optionnum);
+ break;
+ }
+ offset += all_options->as_base.length;
+ }
+ if ((!got_mss_option) && (syn_packet))
+ cm_node->tcp_cntxt.mss = NES_CM_DEFAULT_MSS;
+ return 0;
+}
+
+
+/**
+ * process_packet - run an incoming TCP segment through the cm node's
+ * software TCP/MPA state machine
+ */
+int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ struct nes_cm_core *cm_core)
+{
+ int optionsize;
+ int datasize;
+ int ret = 0;
+ struct tcphdr *tcph = tcp_hdr(skb);
+ u32 inc_sequence;
+
+ /* validate the TCP header before it is dereferenced */
+ if ((!tcph) || (cm_node->state == NES_CM_STATE_TSA)) {
+ BUG_ON(!tcph);
+ atomic_inc(&cm_accel_dropped_pkts);
+ return -1;
+ }
+
+ if (cm_node->state == NES_CM_STATE_SYN_SENT && tcph->syn) {
+ inc_sequence = ntohl(tcph->seq);
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence;
+ }
+
+ if (tcph->rst) {
+ atomic_inc(&cm_resets_recvd);
+ nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u. refcnt=%d\n",
+ cm_node, cm_node->state, atomic_read(&cm_node->ref_count));
+ switch (cm_node->state) {
+ case NES_CM_STATE_LISTENING:
+ rem_ref_cm_node(cm_core, cm_node);
+ break;
+ case NES_CM_STATE_TSA:
+ case NES_CM_STATE_CLOSED:
+ break;
+ case NES_CM_STATE_SYN_RCVD:
+ nes_debug(NES_DBG_CM, "Received a reset for local 0x%08X:%04X,"
+ " remote 0x%08X:%04X, node state = %u\n",
+ cm_node->loc_addr, cm_node->loc_port,
+ cm_node->rem_addr, cm_node->rem_port,
+ cm_node->state);
+ rem_ref_cm_node(cm_core, cm_node);
+ break;
+ case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
+ case NES_CM_STATE_ESTABLISHED:
+ case NES_CM_STATE_MPAREQ_SENT:
+ default:
+ nes_debug(NES_DBG_CM, "Received a reset for local 0x%08X:%04X,"
+ " remote 0x%08X:%04X, node state = %u refcnt=%d\n",
+ cm_node->loc_addr, cm_node->loc_port,
+ cm_node->rem_addr, cm_node->rem_port,
+ cm_node->state, atomic_read(&cm_node->ref_count));
+ /* create event */
+ cm_node->state = NES_CM_STATE_CLOSED;
+
+ create_event(cm_node, NES_CM_EVENT_ABORTED);
+ break;
+
+ }
+ return -1;
+ }
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+
+ skb_pull(skb, ip_hdr(skb)->ihl << 2);
+ skb_pull(skb, tcph->doff << 2);
+
+ datasize = skb->len;
+ inc_sequence = ntohl(tcph->seq);
+ nes_debug(NES_DBG_CM, "datasize = %u, sequence = 0x%08X, ack_seq = 0x%08X,"
+ " rcv_nxt = 0x%08X Flags: %s %s.\n",
+ datasize, inc_sequence, ntohl(tcph->ack_seq),
+ cm_node->tcp_cntxt.rcv_nxt, (tcph->syn ? "SYN":""),
+ (tcph->ack ? "ACK":""));
+
+ if (!tcph->syn && (inc_sequence != cm_node->tcp_cntxt.rcv_nxt)) {
+ nes_debug(NES_DBG_CM, "dropping packet, datasize = %u, sequence = 0x%08X,"
+ " ack_seq = 0x%08X, rcv_nxt = 0x%08X Flags: %s.\n",
+ datasize, inc_sequence, ntohl(tcph->ack_seq),
+ cm_node->tcp_cntxt.rcv_nxt, (tcph->ack ? "ACK":""));
+ if (cm_node->state == NES_CM_STATE_LISTENING) {
+ rem_ref_cm_node(cm_core, cm_node);
+ }
+ return -1;
+ }
+
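+ /* advance the expected receive sequence number past this segment's payload */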
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+
+
+ if (optionsize) {
+ u8 *optionsloc = (u8 *)&tcph[1];
+ if (process_options(cm_node, optionsloc, optionsize, (u32)tcph->syn)) {
+ nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", __FUNCTION__, cm_node);
+ send_reset(cm_node);
+ if (cm_node->state != NES_CM_STATE_SYN_SENT)
+ rem_ref_cm_node(cm_core, cm_node);
+ return 0;
+ }
+ } else if (tcph->syn)
+ cm_node->tcp_cntxt.mss = NES_CM_DEFAULT_MSS;
+
+ cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
+ cm_node->tcp_cntxt.snd_wscale;
+
+ if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd) {
+ cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
+ }
+
+ if (tcph->ack) {
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ switch (cm_node->state) {
+ case NES_CM_STATE_SYN_RCVD:
+ case NES_CM_STATE_SYN_SENT:
+ /* read and stash current sequence number */
+ if (cm_node->tcp_cntxt.rem_ack_num != cm_node->tcp_cntxt.loc_seq_num) {
+ nes_debug(NES_DBG_CM, "ERROR - cm_node->tcp_cntxt.rem_ack_num !="
+ " cm_node->tcp_cntxt.loc_seq_num\n");
+ send_reset(cm_node);
+ return 0;
+ }
+ if (cm_node->state == NES_CM_STATE_SYN_SENT)
+ cm_node->state = NES_CM_STATE_ONE_SIDE_ESTABLISHED;
+ else {
+ cm_node->state = NES_CM_STATE_ESTABLISHED;
+ }
+ break;
+ case NES_CM_STATE_LAST_ACK:
+ cm_node->state = NES_CM_STATE_CLOSED;
+ break;
+ case NES_CM_STATE_FIN_WAIT1:
+ cm_node->state = NES_CM_STATE_FIN_WAIT2;
+ break;
+ case NES_CM_STATE_CLOSING:
+ cm_node->state = NES_CM_STATE_TIME_WAIT;
+ /* need to schedule this to happen in 2MSL timeouts */
+ cm_node->state = NES_CM_STATE_CLOSED;
+ break;
+ case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
+ case NES_CM_STATE_ESTABLISHED:
+ case NES_CM_STATE_MPAREQ_SENT:
+ case NES_CM_STATE_CLOSE_WAIT:
+ case NES_CM_STATE_TIME_WAIT:
+ case NES_CM_STATE_CLOSED:
+ break;
+ case NES_CM_STATE_LISTENING:
+ nes_debug(NES_DBG_CM, "Received an ACK on a listening port (SYN %d)\n", tcph->syn);
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ send_reset(cm_node);
+ /* send_reset bumps refcount, this should have been a new node */
+ rem_ref_cm_node(cm_core, cm_node);
+ return -1;
+ break;
+ case NES_CM_STATE_TSA:
+ nes_debug(NES_DBG_CM, "Received a packet with the ack bit set while in TSA state\n");
+ break;
+ case NES_CM_STATE_UNKNOWN:
+ case NES_CM_STATE_INITED:
+ case NES_CM_STATE_ACCEPTING:
+ case NES_CM_STATE_FIN_WAIT2:
+ default:
+ nes_debug(NES_DBG_CM, "Received ack from unknown state: %x\n",
+ cm_node->state);
+ send_reset(cm_node);
+ break;
+ }
+ }
+
+ if (tcph->syn) {
+ if (cm_node->state == NES_CM_STATE_LISTENING) {
+ /* do not exceed backlog */
+ atomic_inc(&cm_node->listener->pend_accepts_cnt);
+ if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
+ cm_node->listener->backlog) {
+ nes_debug(NES_DBG_CM, "drop syn due to backlog pressure \n");
+ cm_backlog_drops++;
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ rem_ref_cm_node(cm_core, cm_node);
+ return 0;
+ }
+ cm_node->accept_pend = 1;
+
+ }
+ if (datasize == 0)
+ cm_node->tcp_cntxt.rcv_nxt++;
+
+ if (cm_node->state == NES_CM_STATE_LISTENING) {
+ cm_node->state = NES_CM_STATE_SYN_RCVD;
+ send_syn(cm_node, 1);
+ }
+ if (cm_node->state == NES_CM_STATE_ONE_SIDE_ESTABLISHED) {
+ cm_node->state = NES_CM_STATE_ESTABLISHED;
+ /* send final handshake ACK */
+ ret = send_ack(cm_node);
+ if (ret < 0)
+ return ret;
+
+ cm_node->state = NES_CM_STATE_MPAREQ_SENT;
+ ret = send_mpa_request(cm_node);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (tcph->fin) {
+ cm_node->tcp_cntxt.rcv_nxt++;
+ switch (cm_node->state) {
+ case NES_CM_STATE_SYN_RCVD:
+ case NES_CM_STATE_SYN_SENT:
+ case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
+ case NES_CM_STATE_ESTABLISHED:
+ case NES_CM_STATE_ACCEPTING:
+ case NES_CM_STATE_MPAREQ_SENT:
+ cm_node->state = NES_CM_STATE_CLOSE_WAIT;
+ cm_node->state = NES_CM_STATE_LAST_ACK;
+ ret = send_fin(cm_node, NULL);
+ break;
+ case NES_CM_STATE_FIN_WAIT1:
+ cm_node->state = NES_CM_STATE_CLOSING;
+ ret = send_ack(cm_node);
+ break;
+ case NES_CM_STATE_FIN_WAIT2:
+ cm_node->state = NES_CM_STATE_TIME_WAIT;
+ cm_node->tcp_cntxt.loc_seq_num++;
+ ret = send_ack(cm_node);
+ /* need to schedule this to happen in 2MSL timeouts */
+ cm_node->state = NES_CM_STATE_CLOSED;
+ break;
+ case NES_CM_STATE_CLOSE_WAIT:
+ case NES_CM_STATE_LAST_ACK:
+ case NES_CM_STATE_CLOSING:
+ case NES_CM_STATE_TSA:
+ default:
+ nes_debug(NES_DBG_CM, "Received a fin while in %x state\n",
+ cm_node->state);
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (datasize) {
+ u8 *dataloc = skb->data;
+ /* figure out what state we are in and handle transition to next state */
+ switch (cm_node->state) {
+ case NES_CM_STATE_LISTENING:
+ case NES_CM_STATE_SYN_RCVD:
+ case NES_CM_STATE_SYN_SENT:
+ case NES_CM_STATE_FIN_WAIT1:
+ case NES_CM_STATE_FIN_WAIT2:
+ case NES_CM_STATE_CLOSE_WAIT:
+ case NES_CM_STATE_LAST_ACK:
+ case NES_CM_STATE_CLOSING:
+ break;
+ case NES_CM_STATE_MPAREQ_SENT:
+ /* recv the mpa res frame, ret=frame len (incl priv data) */
+ ret = parse_mpa(cm_node, dataloc, datasize);
+ if (ret < 0)
+ break;
+ /* set the req frame payload len in skb */
+ /* we are done handling this state, set node to a TSA state */
+ cm_node->state = NES_CM_STATE_TSA;
+ send_ack(cm_node);
+ create_event(cm_node, NES_CM_EVENT_CONNECTED);
+ break;
+
+ case NES_CM_STATE_ESTABLISHED:
+ /* we are expecting an MPA req frame */
+ ret = parse_mpa(cm_node, dataloc, datasize);
+ if (ret < 0) {
+ break;
+ }
+ cm_node->state = NES_CM_STATE_TSA;
+ send_ack(cm_node);
+ /* we got a valid MPA request, create an event */
+ create_event(cm_node, NES_CM_EVENT_MPA_REQ);
+ break;
+ case NES_CM_STATE_TSA:
+ handle_exception_pkt(cm_node, skb);
+ break;
+ case NES_CM_STATE_UNKNOWN:
+ case NES_CM_STATE_INITED:
+ default:
+ ret = -1;
+ }
+ }
+
+ return ret;
+}
+
+
+/**
+ * mini_cm_listen - create a listen node with params
+ */
+static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
+ struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)
+{
+ struct nes_cm_listener *listener;
+ unsigned long flags;
+
+ nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n",
+ cm_info->loc_addr, cm_info->loc_port);
+
+ /* cannot have multiple matching listeners */
+ listener = find_listener(cm_core, htonl(cm_info->loc_addr),
+ htons(cm_info->loc_port), NES_CM_LISTENER_EITHER_STATE);
+ if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) {
+ /* find_listener() took a reference; drop it since we are not creating a duplicate */
+ atomic_dec(&listener->ref_count);
+ nes_debug(NES_DBG_CM, "Not creating listener since it already exists\n");
+ return NULL;
+ }
+
+ if (!listener) {
+ /* create a CM listen node (1/2 node to compare incoming traffic to) */
+ listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
+ if (!listener) {
+ nes_debug(NES_DBG_CM, "Not creating listener memory allocation failed\n");
+ return NULL;
+ }
+
+ memset(listener, 0, sizeof(struct nes_cm_listener));
+ listener->loc_addr = htonl(cm_info->loc_addr);
+ listener->loc_port = htons(cm_info->loc_port);
+ listener->reused_node = 0;
+
+ atomic_set(&listener->ref_count, 1);
+ } else {
+ /* passive case: find_listener() already took the reference */
+ listener->reused_node = 1;
+ }
+
+ listener->cm_id = cm_info->cm_id;
+ atomic_set(&listener->pend_accepts_cnt, 0);
+ listener->cm_core = cm_core;
+ listener->nesvnic = nesvnic;
+ atomic_inc(&cm_core->node_cnt);
+ atomic_inc(&cm_core->session_id);
+
+ listener->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
+ listener->conn_type = cm_info->conn_type;
+ listener->backlog = cm_info->backlog;
+ listener->listener_state = NES_CM_LISTENER_ACTIVE_STATE;
+
+ if (!listener->reused_node) {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_add(&listener->list, &cm_core->listen_list.list);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ atomic_inc(&cm_core->listen_node_cnt);
+ }
+
+ nes_debug(NES_DBG_CM, "Api - listen(): addr=0x%08X, port=0x%04x,"
+ " listener = %p, backlog = %d, cm_id = %p.\n",
+ cm_info->loc_addr, cm_info->loc_port,
+ listener, listener->backlog, listener->cm_id);
+
+ return listener;
+}
+
+
+/**
+ * mini_cm_connect - make a connection node with params
+ */
+struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
+ struct nes_vnic *nesvnic, struct ietf_mpa_frame *mpa_frame,
+ struct nes_cm_info *cm_info)
+{
+ int ret = 0;
+ struct nes_cm_node *cm_node;
+ struct nes_cm_listener *loopbackremotelistener;
+ struct nes_cm_node *loopbackremotenode;
+ struct nes_cm_info loopback_cm_info;
+
+ u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) +
+ ntohs(mpa_frame->priv_data_len);
+
+ cm_info->loc_addr = htonl(cm_info->loc_addr);
+ cm_info->rem_addr = htonl(cm_info->rem_addr);
+ cm_info->loc_port = htons(cm_info->loc_port);
+ cm_info->rem_port = htons(cm_info->rem_port);
+
+ /* create a CM connection node */
+ cm_node = make_cm_node(cm_core, nesvnic, cm_info, NULL);
+ if (!cm_node)
+ return NULL;
+
+ /* set our node side to client (active) side */
+ cm_node->tcp_cntxt.client = 1;
+ cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
+
+ if (cm_info->loc_addr == cm_info->rem_addr) {
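+ /* loopback connection: pair this node directly with a node built from
+ * the remote listener instead of exchanging packets on the wire */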
+ loopbackremotelistener = find_listener(cm_core, cm_node->rem_addr,
+ cm_node->rem_port, NES_CM_LISTENER_ACTIVE_STATE);
+ if (loopbackremotelistener == NULL) {
+ create_event(cm_node, NES_CM_EVENT_ABORTED);
+ } else {
+ atomic_inc(&cm_loopbacks);
+ loopback_cm_info = *cm_info;
+ loopback_cm_info.loc_port = cm_info->rem_port;
+ loopback_cm_info.rem_port = cm_info->loc_port;
+ loopback_cm_info.cm_id = loopbackremotelistener->cm_id;
+ loopbackremotenode = make_cm_node(cm_core, nesvnic, &loopback_cm_info,
+ loopbackremotelistener);
+ loopbackremotenode->loopbackpartner = cm_node;
+ loopbackremotenode->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
+ cm_node->loopbackpartner = loopbackremotenode;
+ memcpy(loopbackremotenode->mpa_frame_buf, &mpa_frame->priv_data,
+ mpa_frame_size);
+ loopbackremotenode->mpa_frame_size = mpa_frame_size -
+ sizeof(struct ietf_mpa_frame);
+
+ /* we are done handling this state, set node to a TSA state */
+ cm_node->state = NES_CM_STATE_TSA;
+ cm_node->tcp_cntxt.rcv_nxt = loopbackremotenode->tcp_cntxt.loc_seq_num;
+ loopbackremotenode->tcp_cntxt.rcv_nxt = cm_node->tcp_cntxt.loc_seq_num;
+ cm_node->tcp_cntxt.max_snd_wnd = loopbackremotenode->tcp_cntxt.rcv_wnd;
+ loopbackremotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ cm_node->tcp_cntxt.snd_wnd = loopbackremotenode->tcp_cntxt.rcv_wnd;
+ loopbackremotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ cm_node->tcp_cntxt.snd_wscale = loopbackremotenode->tcp_cntxt.rcv_wscale;
+ loopbackremotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
+
+ create_event(loopbackremotenode, NES_CM_EVENT_MPA_REQ);
+ }
+ return cm_node;
+ }
+
+ /* set our node side to client (active) side */
+ cm_node->tcp_cntxt.client = 1;
+ /* init our MPA frame ptr */
+ memcpy(&cm_node->mpa_frame, mpa_frame, mpa_frame_size);
+ cm_node->mpa_frame_size = mpa_frame_size;
+
+ /* send a syn and goto syn sent state */
+ cm_node->state = NES_CM_STATE_SYN_SENT;
+ ret = send_syn(cm_node, 0);
+
+ nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X, port=0x%04x,"
+ " cm_node=%p, cm_id = %p.\n",
+ cm_node->rem_addr, cm_node->rem_port, cm_node, cm_node->cm_id);
+
+ return cm_node;
+}
+
+
+/**
+ * mini_cm_accept - accept a connection
+ * This function is never called
+ */
+int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame,
+ struct nes_cm_node *cm_node)
+{
+ return 0;
+}
+
+
+/**
+ * mini_cm_reject - reject and teardown a connection
+ */
+int mini_cm_reject(struct nes_cm_core *cm_core,
+ struct ietf_mpa_frame *mpa_frame,
+ struct nes_cm_node *cm_node)
+{
+ int ret = 0;
+ struct sk_buff *skb;
+ u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) +
+ ntohs(mpa_frame->priv_data_len);
+
+ skb = get_free_pkt(cm_node);
+ if (!skb) {
+ nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+ return -1;
+ }
+
+ /* send an MPA Request frame */
+ form_cm_frame(skb, cm_node, NULL, 0, mpa_frame, mpa_frame_size, SET_ACK | SET_FIN);
+ ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
+
+ cm_node->state = NES_CM_STATE_CLOSED;
+ ret = send_fin(cm_node, NULL);
+
+ if (ret < 0) {
+ printk(KERN_INFO PFX "failed to send MPA Reply (reject)\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+
+/**
+ * mini_cm_close - begin tearing down a cm node according to its current
+ * connection state
+ */
+int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
+{
+ int ret = 0;
+
+ if (!cm_core || !cm_node)
+ return -EINVAL;
+
+ switch (cm_node->state) {
+ /* if passed in node is null, create a reference key node for node search */
+ /* check if we found an owner node for this pkt */
+ case NES_CM_STATE_SYN_RCVD:
+ case NES_CM_STATE_SYN_SENT:
+ case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
+ case NES_CM_STATE_ESTABLISHED:
+ case NES_CM_STATE_ACCEPTING:
+ case NES_CM_STATE_MPAREQ_SENT:
+ cm_node->state = NES_CM_STATE_FIN_WAIT1;
+ send_fin(cm_node, NULL);
+ break;
+ case NES_CM_STATE_CLOSE_WAIT:
+ cm_node->state = NES_CM_STATE_LAST_ACK;
+ send_fin(cm_node, NULL);
+ break;
+ case NES_CM_STATE_FIN_WAIT1:
+ case NES_CM_STATE_FIN_WAIT2:
+ case NES_CM_STATE_LAST_ACK:
+ case NES_CM_STATE_TIME_WAIT:
+ case NES_CM_STATE_CLOSING:
+ ret = -1;
+ break;
+ case NES_CM_STATE_LISTENING:
+ case NES_CM_STATE_UNKNOWN:
+ case NES_CM_STATE_INITED:
+ case NES_CM_STATE_CLOSED:
+ case NES_CM_STATE_TSA:
+ ret = rem_ref_cm_node(cm_core, cm_node);
+ break;
+ }
+ cm_node->cm_id = NULL;
+ return ret;
+}
+
+
+/**
+ * mini_cm_recv_pkt - receive an Ethernet packet and process it through
+ * the CM node state machine
+ */
+int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic,
+ struct sk_buff *skb)
+{
+ struct nes_cm_node *cm_node = NULL;
+ struct nes_cm_listener *listener = NULL;
+ struct iphdr *iph;
+ struct tcphdr *tcph;
+ struct nes_cm_info nfo;
+ int ret = 0;
+
+ if (!skb || skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ iph = (struct iphdr *)skb->data;
+ tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr));
+ skb_reset_network_header(skb);
+ skb_set_transport_header(skb, sizeof(*tcph));
+ skb->len = ntohs(iph->tot_len);
+
+ nfo.loc_addr = ntohl(iph->daddr);
+ nfo.loc_port = ntohs(tcph->dest);
+ nfo.rem_addr = ntohl(iph->saddr);
+ nfo.rem_port = ntohs(tcph->source);
+
+ nes_debug(NES_DBG_CM, "Received packet: dest=0x%08X:0x%04X src=0x%08X:0x%04X\n",
+ iph->daddr, tcph->dest, iph->saddr, tcph->source);
+
+ /* note: this call is going to increment cm_node ref count */
+ cm_node = find_node(cm_core,
+ nfo.rem_port, nfo.rem_addr,
+ nfo.loc_port, nfo.loc_addr);
+
+ if (!cm_node) {
+ listener = find_listener(cm_core, nfo.loc_addr, nfo.loc_port,
+ NES_CM_LISTENER_ACTIVE_STATE);
+ if (listener) {
+ nfo.cm_id = listener->cm_id;
+ nfo.conn_type = listener->conn_type;
+ } else {
+ nfo.cm_id = NULL;
+ nfo.conn_type = 0;
+ }
+
+ cm_node = make_cm_node(cm_core, nesvnic, &nfo, listener);
+ if (!cm_node) {
+ nes_debug(NES_DBG_CM, "Unable to allocate node\n");
+ if (listener) {
+ nes_debug(NES_DBG_CM, "unable to allocate node and decrementing listener refcount\n");
+ atomic_dec(&listener->ref_count);
+ }
+ ret = -1;
+ goto out;
+ }
+ if (!listener) {
+ nes_debug(NES_DBG_CM, "Packet found for unknown port %x refcnt=%d\n",
+ nfo.loc_port, atomic_read(&cm_node->ref_count));
+ if (!tcph->rst) {
+ nes_debug(NES_DBG_CM, "Packet found for unknown port=%d"
+ " rem_port=%d refcnt=%d\n",
+ nfo.loc_port, nfo.rem_port, atomic_read(&cm_node->ref_count));
+
+ cm_node->tcp_cntxt.rcv_nxt = ntohl(tcph->seq);
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ send_reset(cm_node);
+ }
+ rem_ref_cm_node(cm_core, cm_node);
+ ret = -1;
+ goto out;
+ }
+ add_ref_cm_node(cm_node);
+ cm_node->state = NES_CM_STATE_LISTENING;
+ }
+
+ nes_debug(NES_DBG_CM, "Processing Packet for node %p, data = (%p):\n",
+ cm_node, skb->data);
+ process_packet(cm_node, skb, cm_core);
+
+ rem_ref_cm_node(cm_core, cm_node);
+ out:
+ if (skb)
+ dev_kfree_skb_any(skb);
+ return ret;
+}
+
+
+/**
+ * nes_cm_alloc_core - allocate a top level instance of a cm core
+ */
+struct nes_cm_core *nes_cm_alloc_core(void)
+{
+ int i;
+
+ struct nes_cm_core *cm_core;
+ struct sk_buff *skb = NULL;
+
+ /* setup the CM core */
+ /* alloc top level core control structure */
+ cm_core = kzalloc(sizeof(*cm_core), GFP_KERNEL);
+ if (!cm_core)
+ return NULL;
+
+ INIT_LIST_HEAD(&cm_core->connected_nodes);
+ init_timer(&cm_core->tcp_timer);
+ cm_core->tcp_timer.function = nes_cm_timer_tick;
+
+ cm_core->mtu = NES_CM_DEFAULT_MTU;
+ cm_core->state = NES_CM_STATE_INITED;
+ cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS;
+
+ atomic_set(&cm_core->session_id, 0);
+ atomic_set(&cm_core->events_posted, 0);
+
+ /* init the packet lists */
+ skb_queue_head_init(&cm_core->tx_free_list);
+
+ for (i = 0; i < NES_CM_DEFAULT_FRAME_CNT; i++) {
+ skb = dev_alloc_skb(cm_core->mtu);
+ if (!skb) {
+ kfree(cm_core);
+ return NULL;
+ }
+ /* add 'raw' skb to free frame list */
+ skb_queue_head(&cm_core->tx_free_list, skb);
+ }
+
+ cm_core->api = &nes_cm_api;
+
+ spin_lock_init(&cm_core->ht_lock);
+ spin_lock_init(&cm_core->listen_list_lock);
+
+ INIT_LIST_HEAD(&cm_core->listen_list.list);
+
+ nes_debug(NES_DBG_CM, "Init CM Core completed -- cm_core=%p\n", cm_core);
+
+ nes_debug(NES_DBG_CM, "Enable QUEUE EVENTS\n");
+ cm_core->event_wq = create_singlethread_workqueue("nesewq");
+ cm_core->post_event = nes_cm_post_event;
+ nes_debug(NES_DBG_CM, "Enable QUEUE DISCONNECTS\n");
+ cm_core->disconn_wq = create_singlethread_workqueue("nesdwq");
+
+ print_core(cm_core);
+ return cm_core;
+}
+
+
+/**
+ * mini_cm_dealloc_core - deallocate a top level instance of a cm core
+ */
+int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
+{
+ nes_debug(NES_DBG_CM, "De-Alloc CM Core (%p)\n", cm_core);
+
+ if (!cm_core)
+ return -EINVAL;
+
+ barrier();
+
+ if (timer_pending(&cm_core->tcp_timer)) {
+ del_timer(&cm_core->tcp_timer);
+ }
+
+ destroy_workqueue(cm_core->event_wq);
+ destroy_workqueue(cm_core->disconn_wq);
+ nes_debug(NES_DBG_CM, "\n");
+ kfree(cm_core);
+
+ return 0;
+}
+
+
+/**
+ * mini_cm_get
+ */
+int mini_cm_get(struct nes_cm_core *cm_core)
+{
+ return cm_core->state;
+}
+
+
+/**
+ * mini_cm_set
+ */
+int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
+{
+ int ret = 0;
+
+ switch (type) {
+ case NES_CM_SET_PKT_SIZE:
+ cm_core->mtu = value;
+ break;
+ case NES_CM_SET_FREE_PKT_Q_SIZE:
+ cm_core->free_tx_pkt_max = value;
+ break;
+ default:
+ /* unknown set option */
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+
+/**
+ * nes_cm_init_tsa_conn - set up the HW QP context; MPA frames must be
+ * successfully exchanged when this is called
+ */
+static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_node)
+{
+ int ret = 0;
+
+ if (!nesqp)
+ return -EINVAL;
+
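+ /* copy the software TCP context (sequence numbers, windows, MSS and
+ * scale factors) into the HW QP context so the adapter can take over
+ * the stream */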
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_IPV4 |
+ NES_QPCONTEXT_MISC_NO_NAGLE | NES_QPCONTEXT_MISC_DO_NOT_FRAG |
+ NES_QPCONTEXT_MISC_DROS);
+
+ if (cm_node->tcp_cntxt.snd_wscale || cm_node->tcp_cntxt.rcv_wscale)
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WSCALE);
+
+ nesqp->nesqp_context->misc2 |= cpu_to_le32(64 << NES_QPCONTEXT_MISC2_TTL_SHIFT);
+
+ nesqp->nesqp_context->mss |= cpu_to_le32(((u32)cm_node->tcp_cntxt.mss) << 16);
+
+ nesqp->nesqp_context->tcp_state_flow_label |= cpu_to_le32(
+ (u32)NES_QPCONTEXT_TCPSTATE_EST << NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT);
+
+ nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32(
+ (cm_node->tcp_cntxt.snd_wscale << NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT) &
+ NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK);
+
+ nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32(
+ (cm_node->tcp_cntxt.rcv_wscale << NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT) &
+ NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK);
+
+ nesqp->nesqp_context->keepalive = cpu_to_le32(0x80);
+ nesqp->nesqp_context->ts_recent = 0;
+ nesqp->nesqp_context->ts_age = 0;
+ nesqp->nesqp_context->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+ nesqp->nesqp_context->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
+ nesqp->nesqp_context->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
+ nesqp->nesqp_context->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
+ cm_node->tcp_cntxt.rcv_wscale);
+ nesqp->nesqp_context->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+ nesqp->nesqp_context->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+ nesqp->nesqp_context->srtt = 0;
+ nesqp->nesqp_context->rttvar = cpu_to_le32(0x6);
+ nesqp->nesqp_context->ssthresh = cpu_to_le32(0x3FFFC000);
+ nesqp->nesqp_context->cwnd = cpu_to_le32(2*cm_node->tcp_cntxt.mss);
+ nesqp->nesqp_context->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
+ nesqp->nesqp_context->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+ nesqp->nesqp_context->max_snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
+
+ nes_debug(NES_DBG_CM, "QP%u: rcv_nxt = 0x%08X, snd_nxt = 0x%08X,"
+ " Setting MSS to %u, PDWscale = 0x%08X, rcv_wnd = %u, context misc = 0x%08X.\n",
+ nesqp->hwqp.qp_id, le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
+ le32_to_cpu(nesqp->nesqp_context->snd_nxt),
+ cm_node->tcp_cntxt.mss, le32_to_cpu(nesqp->nesqp_context->pd_index_wscale),
+ le32_to_cpu(nesqp->nesqp_context->rcv_wnd),
+ le32_to_cpu(nesqp->nesqp_context->misc));
+ nes_debug(NES_DBG_CM, " snd_wnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->snd_wnd));
+ nes_debug(NES_DBG_CM, " snd_cwnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->cwnd));
+ nes_debug(NES_DBG_CM, " max_swnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->max_snd_wnd));
+
+ nes_debug(NES_DBG_CM, "Change cm_node state to TSA\n");
+ cm_node->state = NES_CM_STATE_TSA;
+
+ return ret;
+}
+
+
+/**
+ * nes_cm_disconn - queue disconnect processing for a QP unless a
+ * disconnect is already pending
+ */
+int nes_cm_disconn(struct nes_qp *nesqp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+ if (nesqp->disconn_pending == 0) {
+ nesqp->disconn_pending++;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ /* nes_add_ref(&nesqp->ibqp); */
+ /* init our disconnect work element and queue it */
+ INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker);
+
+ queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work);
+ } else {
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_rem_ref(&nesqp->ibqp);
+ }
+
+ return 0;
+}
+
+
+/**
+ * nes_disconnect_worker - work handler that performs the deferred
+ * disconnect processing for a QP
+ */
+void nes_disconnect_worker(struct work_struct *work)
+{
+ struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work);
+
+ nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
+ nesqp->last_aeq, nesqp->hwqp.qp_id);
+ nes_cm_disconn_true(nesqp);
+}
+
+
+/**
+ * nes_cm_disconn_true - deliver disconnect and close events to the OFA CM
+ * and release the QP's connection resources
+ */
+int nes_cm_disconn_true(struct nes_qp *nesqp)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct iw_cm_id *cm_id;
+ struct iw_cm_event cm_event;
+ struct nes_vnic *nesvnic;
+ u16 last_ae;
+ u8 original_hw_tcp_state;
+ u8 original_ibqp_state;
+ u8 issued_disconnect_reset = 0;
+
+ if (!nesqp) {
+ nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n");
+ return -1;
+ }
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+ cm_id = nesqp->cm_id;
+ /* make sure we haven't already closed this connection */
+ if (!cm_id) {
+ nes_debug(NES_DBG_CM, "QP%u disconnect_worker cmid is NULL\n",
+ nesqp->hwqp.qp_id);
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -1;
+ }
+
+ nesvnic = to_nesvnic(nesqp->ibqp.device);
+ nes_debug(NES_DBG_CM, "Disconnecting QP%u\n", nesqp->hwqp.qp_id);
+
+ original_hw_tcp_state = nesqp->hw_tcp_state;
+ original_ibqp_state = nesqp->ibqp_state;
+ last_ae = nesqp->last_aeq;
+
+
+ nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state);
+
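+ /* deliver a DISCONNECT event if TCP reached close-wait or the connection
+ * was reset while in RTS, then re-check the state and deliver the final
+ * CLOSE event once the connection is fully down */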
+ if ((nesqp->cm_id) && (cm_id->event_handler)) {
+ if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
+ ((original_ibqp_state == IB_QPS_RTS) &&
+ (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ atomic_inc(&cm_disconnects);
+ cm_event.event = IW_CM_EVENT_DISCONNECT;
+ if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) {
+ issued_disconnect_reset = 1;
+ cm_event.status = IW_CM_EVENT_STATUS_RESET;
+ nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event (status reset) for "
+ " QP%u, cm_id = %p. \n",
+ nesqp->hwqp.qp_id, cm_id);
+ } else {
+ cm_event.status = IW_CM_EVENT_STATUS_OK;
+ }
+
+ cm_event.local_addr = cm_id->local_addr;
+ cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.private_data = NULL;
+ cm_event.private_data_len = 0;
+
+ nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event for "
+ " QP%u, SQ Head = %u, SQ Tail = %u. cm_id = %p, refcount = %u.\n",
+ nesqp->hwqp.qp_id,
+ nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail, cm_id,
+ atomic_read(&nesqp->refcount));
+
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ if (ret)
+ nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ }
+
+ nesqp->disconn_pending = 0;
+ /* There might have been another AE while the lock was released */
+ original_hw_tcp_state = nesqp->hw_tcp_state;
+ original_ibqp_state = nesqp->ibqp_state;
+ last_ae = nesqp->last_aeq;
+
+ if ((issued_disconnect_reset == 0) && (nesqp->cm_id) &&
+ ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
+ (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
+ (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
+ (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ atomic_inc(&cm_closes);
+ nesqp->cm_id = NULL;
+ nesqp->in_disconnect = 0;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_disconnect(nesqp, 1);
+
+ cm_id->provider_data = nesqp;
+ /* Send up the close complete event */
+ cm_event.event = IW_CM_EVENT_CLOSE;
+ cm_event.status = IW_CM_EVENT_STATUS_OK;
+ cm_event.provider_data = cm_id->provider_data;
+ cm_event.local_addr = cm_id->local_addr;
+ cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.private_data = NULL;
+ cm_event.private_data_len = 0;
+
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ if (ret) {
+ nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
+ }
+
+ cm_id->rem_ref(cm_id);
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+ if (nesqp->flush_issued == 0) {
+ nesqp->flush_issued = 1;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ flush_wqes(nesvnic->nesdev, nesqp, NES_CQP_FLUSH_RQ, 1);
+ } else {
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ }
+
+ /* This reference is from either ModifyQP or the AE processing,
+ there is still a race here with modifyqp */
+ nes_rem_ref(&nesqp->ibqp);
+
+ } else {
+ cm_id = nesqp->cm_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ /* check to see if the inbound reset beat the outbound reset */
+ if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) {
+ nes_debug(NES_DBG_CM, "QP%u: Decing refcount due to inbound reset"
+ " beating the outbound reset.\n",
+ nesqp->hwqp.qp_id);
+ nes_rem_ref(&nesqp->ibqp);
+ }
+ }
+ } else {
+ nesqp->disconn_pending = 0;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ }
+ nes_rem_ref(&nesqp->ibqp);
+
+ return 0;
+}
+
+
+/**
+ * nes_disconnect - release connection resources and close the CM node
+ * associated with a QP
+ */
+int nes_disconnect(struct nes_qp *nesqp, int abrupt)
+{
+ int ret = 0;
+ struct nes_vnic *nesvnic;
+ struct nes_device *nesdev;
+
+ nesvnic = to_nesvnic(nesqp->ibqp.device);
+ if (!nesvnic)
+ return -EINVAL;
+
+ nesdev = nesvnic->nesdev;
+
+ nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
+ atomic_read(&nesvnic->netdev->refcnt));
+
+ if (nesqp->active_conn) {
+
+ /* indicate this connection is NOT active */
+ nesqp->active_conn = 0;
+ } else {
+ /* Need to free the Last Streaming Mode Message */
+ if (nesqp->ietf_frame) {
+ pci_free_consistent(nesdev->pcidev,
+ nesqp->private_data_len+sizeof(struct ietf_mpa_frame),
+ nesqp->ietf_frame, nesqp->ietf_frame_pbase);
+ }
+ }
+
+ /* close the CM node down if it is still active */
+ if (nesqp->cm_node) {
+ nes_debug(NES_DBG_CM, "Call close API\n");
+
+ g_cm_core->api->close(g_cm_core, nesqp->cm_node);
+ nesqp->cm_node = NULL;
+ }
+
+ return ret;
+}
+
+
+/**
+ * nes_accept - accept an incoming connection: build the MPA reply LSMM,
+ * program the QP's TCP context and transition the QP to RTS
+ */
+int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ u64 u64temp;
+ struct ib_qp *ibqp;
+ struct nes_qp *nesqp;
+ struct nes_vnic *nesvnic;
+ struct nes_device *nesdev;
+ struct nes_cm_node *cm_node;
+ struct nes_adapter *adapter;
+ struct ib_qp_attr attr;
+ struct iw_cm_event cm_event;
+ struct nes_hw_qp_wqe *wqe;
+ struct nes_v4_quad nes_quad;
+ int ret;
+
+ ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+
+ /* get all our handles */
+ nesqp = to_nesqp(ibqp);
+ nesvnic = to_nesvnic(nesqp->ibqp.device);
+ nesdev = nesvnic->nesdev;
+ adapter = nesdev->nesadapter;
+
+ nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n",
+ nesvnic, nesvnic->netdev, nesvnic->netdev->name);
+
+ /* since this is from a listen, we were able to put node handle into cm_id */
+ cm_node = (struct nes_cm_node *)cm_id->provider_data;
+
+ /* associate the node with the QP */
+ nesqp->cm_node = (void *)cm_node;
+
+ nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu\n",
+ nesqp->hwqp.qp_id, cm_node, jiffies);
+ atomic_inc(&cm_accepts);
+
+ nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
+ atomic_read(&nesvnic->netdev->refcnt));
+
+ /* allocate the ietf frame and space for private data */
+ nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev,
+ sizeof(struct ietf_mpa_frame) + conn_param->private_data_len,
+ &nesqp->ietf_frame_pbase);
+
+ if (!nesqp->ietf_frame) {
+ nes_debug(NES_DBG_CM, "Unable to allocate memory for private data\n");
+ return -ENOMEM;
+ }
+
+
+ /* setup the MPA frame */
+ nesqp->private_data_len = conn_param->private_data_len;
+ memcpy(nesqp->ietf_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
+
+ memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data,
+ conn_param->private_data_len);
+
+ nesqp->ietf_frame->priv_data_len = cpu_to_be16(conn_param->private_data_len);
+ nesqp->ietf_frame->rev = mpa_version;
+ nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;
+
+ /* setup our first outgoing iWarp send WQE (the IETF frame response) */
+ wqe = &nesqp->hwqp.sq_vbase[0];
+
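+ /* build the LSMM WQE only for a non-loopback connection; the loopback
+ * case just sets the LSMM bits in the QP context */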
+ if (cm_id->remote_addr.sin_addr.s_addr != cm_id->local_addr.sin_addr.s_addr) {
+ u64temp = (unsigned long)nesqp;
+ u64temp |= NES_SW_CONTEXT_ALIGN>>1;
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
+ u64temp);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
+ cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING | NES_IWARP_SQ_WQE_WRPDU);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
+ cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame));
+ wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] =
+ cpu_to_le32((u32)nesqp->ietf_frame_pbase);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] =
+ cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32));
+ wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
+ cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame));
+ wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
+
+ nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(
+ NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | NES_QPCONTEXT_ORDIRD_WRPDU);
+ } else {
+ nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
+ NES_QPCONTEXT_ORDIRD_WRPDU | NES_QPCONTEXT_ORDIRD_ALSMM));
+ }
+ nesqp->skip_lsmm = 1;
+
+
+ /* Cache the cm_id in the qp */
+ nesqp->cm_id = cm_id;
+ cm_node->cm_id = cm_id;
+
+ /* nesqp->cm_node = (void *)cm_id->provider_data; */
+ cm_id->provider_data = nesqp;
+ nesqp->active_conn = 0;
+
+ nes_cm_init_tsa_conn(nesqp, cm_node);
+
+ nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
+ nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
+ nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
+
+ nesqp->nesqp_context->misc2 |= cpu_to_le32(
+ (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
+
+ nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32(
+ nes_arp_table(nesdev, le32_to_cpu(nesqp->nesqp_context->ip0), NULL,
+ NES_ARP_RESOLVE) << 16);
+
+ nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
+ jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
+
+ nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id);
+
+ nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(
+ ((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT));
+ nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
+
+ memset(&nes_quad, 0, sizeof(nes_quad));
+ nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
+ nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
+ nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
+ nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
+
+ /* Produce hash key */
+ nesqp->hte_index = cpu_to_be32(
+ crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff);
+ nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n",
+ nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask);
+
+ nesqp->hte_index &= adapter->hte_index_mask;
+ nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
+
+ cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
+
+ nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = 0x%08X:0x%04X,"
+ " rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + private data length=%zu.\n",
+ nesqp->hwqp.qp_id,
+ ntohl(cm_id->remote_addr.sin_addr.s_addr),
+ ntohs(cm_id->remote_addr.sin_port),
+ ntohl(cm_id->local_addr.sin_addr.s_addr),
+ ntohs(cm_id->local_addr.sin_port),
+ le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
+ le32_to_cpu(nesqp->nesqp_context->snd_nxt),
+ conn_param->private_data_len+sizeof(struct ietf_mpa_frame));
+
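+	/* transition the QP to RTS before reporting the established connection */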
+ attr.qp_state = IB_QPS_RTS;
+ nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
+
+	/* notify OF layer that accept event was successful */
+ cm_id->add_ref(cm_id);
+
+ cm_event.event = IW_CM_EVENT_ESTABLISHED;
+ cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED;
+ cm_event.provider_data = (void *)nesqp;
+ cm_event.local_addr = cm_id->local_addr;
+ cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.private_data = NULL;
+ cm_event.private_data_len = 0;
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ if (cm_node->loopbackpartner) {
+ cm_node->loopbackpartner->mpa_frame_size = nesqp->private_data_len;
+ /* copy entire MPA frame to our cm_node's frame */
+ memcpy(cm_node->loopbackpartner->mpa_frame_buf, nesqp->ietf_frame->priv_data,
+ nesqp->private_data_len);
+ create_event(cm_node->loopbackpartner, NES_CM_EVENT_CONNECTED);
+ }
+ if (ret)
+ printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
+ __FUNCTION__, __LINE__, ret);
+
+ return 0;
+}
+
+
+/**
+ * nes_reject
+ */
+int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ struct nes_cm_node *cm_node;
+ struct nes_cm_core *cm_core;
+
+ atomic_inc(&cm_rejects);
+ cm_node = (struct nes_cm_node *) cm_id->provider_data;
+ cm_core = cm_node->cm_core;
+ cm_node->mpa_frame_size = sizeof(struct ietf_mpa_frame) + pdata_len;
+
+ strcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP);
+ memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len);
+
+ cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len);
+ cm_node->mpa_frame.rev = mpa_version;
+ cm_node->mpa_frame.flags = IETF_MPA_FLAGS_CRC | IETF_MPA_FLAGS_REJECT;
+
+ cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node);
+
+ return 0;
+}
+
+
+/**
+ * nes_connect
+ * setup and launch cm connect node
+ */
+int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct ib_qp *ibqp;
+ struct nes_qp *nesqp;
+ struct nes_vnic *nesvnic;
+ struct nes_device *nesdev;
+ struct nes_cm_node *cm_node;
+ struct nes_cm_info cm_info;
+
+ ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+ nesqp = to_nesqp(ibqp);
+ if (!nesqp)
+ return -EINVAL;
+ nesvnic = to_nesvnic(nesqp->ibqp.device);
+ if (!nesvnic)
+ return -EINVAL;
+ nesdev = nesvnic->nesdev;
+ if (!nesdev)
+ return -EINVAL;
+
+ atomic_inc(&cm_connects);
+
+ nesqp->ietf_frame = kzalloc(sizeof(struct ietf_mpa_frame) +
+ conn_param->private_data_len, GFP_KERNEL);
+ if (!nesqp->ietf_frame)
+ return -ENOMEM;
+
+ /* set qp as having an active connection */
+ nesqp->active_conn = 1;
+
+ nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = 0x%08X:0x%04X.\n",
+ nesqp->hwqp.qp_id,
+ ntohl(cm_id->remote_addr.sin_addr.s_addr),
+ ntohs(cm_id->remote_addr.sin_port),
+ ntohl(cm_id->local_addr.sin_addr.s_addr),
+ ntohs(cm_id->local_addr.sin_port));
+
+ /* cache the cm_id in the qp */
+ nesqp->cm_id = cm_id;
+
+ cm_id->provider_data = nesqp;
+
+ /* copy the private data */
+ if (conn_param->private_data_len) {
+ memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data,
+ conn_param->private_data_len);
+ }
+
+ nesqp->private_data_len = conn_param->private_data_len;
+ nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
+ nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord);
+ nes_debug(NES_DBG_CM, "mpa private data len =%u\n", conn_param->private_data_len);
+
+ strcpy(&nesqp->ietf_frame->key[0], IEFT_MPA_KEY_REQ);
+ nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;
+ nesqp->ietf_frame->rev = IETF_MPA_VERSION;
+ nesqp->ietf_frame->priv_data_len = htons(conn_param->private_data_len);
+
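+	/* add an APBVT entry for the local port unless this is a loopback connection */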
+ if (cm_id->local_addr.sin_addr.s_addr != cm_id->remote_addr.sin_addr.s_addr)
+ nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
+ PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
+
+ /* set up the connection params for the node */
+ cm_info.loc_addr = (cm_id->local_addr.sin_addr.s_addr);
+ cm_info.loc_port = (cm_id->local_addr.sin_port);
+ cm_info.rem_addr = (cm_id->remote_addr.sin_addr.s_addr);
+ cm_info.rem_port = (cm_id->remote_addr.sin_port);
+ cm_info.cm_id = cm_id;
+ cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
+
+ cm_id->add_ref(cm_id);
+ nes_add_ref(&nesqp->ibqp);
+
+ /* create a connect CM node connection */
+ cm_node = g_cm_core->api->connect(g_cm_core, nesvnic, nesqp->ietf_frame, &cm_info);
+ if (!cm_node) {
+ if (cm_id->local_addr.sin_addr.s_addr != cm_id->remote_addr.sin_addr.s_addr)
+ nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
+ PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL);
+ nes_rem_ref(&nesqp->ibqp);
+ kfree(nesqp->ietf_frame);
+ nesqp->ietf_frame = NULL;
+ cm_id->rem_ref(cm_id);
+ return -ENOMEM;
+ }
+
+ cm_node->apbvt_set = 1;
+ nesqp->cm_node = cm_node;
+
+ return 0;
+}
+
+
+/**
+ * nes_create_listen
+ */
+int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ struct nes_vnic *nesvnic;
+ struct nes_cm_listener *cm_node;
+ struct nes_cm_info cm_info;
+ struct nes_adapter *adapter;
+ int err;
+
+ nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n",
+ cm_id, ntohs(cm_id->local_addr.sin_port));
+
+ nesvnic = to_nesvnic(cm_id->device);
+ if (!nesvnic)
+ return -EINVAL;
+ adapter = nesvnic->nesdev->nesadapter;
+ nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n",
+ nesvnic, nesvnic->netdev, nesvnic->netdev->name);
+
+ nes_debug(NES_DBG_CM, "nesvnic->local_ipaddr=0x%08x, sin_addr.s_addr=0x%08x\n",
+ nesvnic->local_ipaddr, cm_id->local_addr.sin_addr.s_addr);
+
+ /* setup listen params in our api call struct */
+ cm_info.loc_addr = nesvnic->local_ipaddr;
+ cm_info.loc_port = cm_id->local_addr.sin_port;
+ cm_info.backlog = backlog;
+ cm_info.cm_id = cm_id;
+
+ cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
+
+ cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
+ if (!cm_node) {
+ printk("%s[%u] Error returned from listen API call\n",
+ __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+
+ cm_id->provider_data = cm_node;
+
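+	/* program the APBVT entry only for a brand new listener; a reused node already has one */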
+ if (!cm_node->reused_node) {
+ err = nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
+ PCI_FUNC(nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
+ if (err) {
+ printk("nes_manage_apbvt call returned %d.\n", err);
+ g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
+ return err;
+ }
+ cm_listens_created++;
+ }
+
+ cm_id->add_ref(cm_id);
+ cm_id->provider_data = (void *)cm_node;
+
+ return 0;
+}
+
+
+/**
+ * nes_destroy_listen
+ */
+int nes_destroy_listen(struct iw_cm_id *cm_id)
+{
+ if (cm_id->provider_data)
+ g_cm_core->api->stop_listener(g_cm_core, cm_id->provider_data);
+ else
+ nes_debug(NES_DBG_CM, "cm_id->provider_data was NULL\n");
+
+ cm_id->rem_ref(cm_id);
+
+ return 0;
+}
+
+
+/**
+ * nes_cm_recv
+ */
+int nes_cm_recv(struct sk_buff *skb, struct net_device *netdevice)
+{
+ cm_packets_received++;
+ if ((g_cm_core) && (g_cm_core->api)) {
+ g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb);
+ } else {
+ nes_debug(NES_DBG_CM, "Unable to process packet for CM,"
+ " cm is not setup properly.\n");
+ }
+
+ return 0;
+}
+
+
+/**
+ * nes_cm_start
+ * Start and init a cm core module
+ */
+int nes_cm_start(void)
+{
+ nes_debug(NES_DBG_CM, "\n");
+ /* create the primary CM core, pass this handle to subsequent core inits */
+ g_cm_core = nes_cm_alloc_core();
+ if (g_cm_core) {
+ return 0;
+ } else {
+ return -ENOMEM;
+ }
+}
+
+
+/**
+ * nes_cm_stop
+ * stop and dealloc all cm core instances
+ */
+int nes_cm_stop(void)
+{
+ g_cm_core->api->destroy_cm_core(g_cm_core);
+ return 0;
+}
+
+
+/**
+ * cm_event_connected
+ * handle a connected event, setup QPs and HW
+ */
+void cm_event_connected(struct nes_cm_event *event)
+{
+ u64 u64temp;
+ struct nes_qp *nesqp;
+ struct nes_vnic *nesvnic;
+ struct nes_device *nesdev;
+ struct nes_cm_node *cm_node;
+ struct nes_adapter *nesadapter;
+ struct ib_qp_attr attr;
+ struct iw_cm_id *cm_id;
+ struct iw_cm_event cm_event;
+ struct nes_hw_qp_wqe *wqe;
+ struct nes_v4_quad nes_quad;
+ int ret;
+
+ /* get all our handles */
+ cm_node = event->cm_node;
+ cm_id = cm_node->cm_id;
+ nes_debug(NES_DBG_CM, "cm_event_connected - %p - cm_id = %p\n", cm_node, cm_id);
+ nesqp = (struct nes_qp *)cm_id->provider_data;
+ nesvnic = to_nesvnic(nesqp->ibqp.device);
+ nesdev = nesvnic->nesdev;
+ nesadapter = nesdev->nesadapter;
+
+ if (nesqp->destroyed) {
+ return;
+ }
+ atomic_inc(&cm_connecteds);
+ nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
+ " local port 0x%04X. jiffies = %lu.\n",
+ nesqp->hwqp.qp_id,
+ ntohl(cm_id->remote_addr.sin_addr.s_addr),
+ ntohs(cm_id->remote_addr.sin_port),
+ ntohs(cm_id->local_addr.sin_port),
+ jiffies);
+
+ nes_cm_init_tsa_conn(nesqp, cm_node);
+
+ /* set the QP tsa context */
+ nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
+ nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
+ nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
+
+ nesqp->nesqp_context->misc2 |= cpu_to_le32(
+ (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
+ nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32(
+ nes_arp_table(nesdev, le32_to_cpu(nesqp->nesqp_context->ip0),
+ NULL, NES_ARP_RESOLVE) << 16);
+ nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
+ jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
+ nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id);
+ nesqp->nesqp_context->ird_ord_sizes |=
+ cpu_to_le32((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT);
+
+ /* Adjust tail for not having a LSMM */
+ nesqp->hwqp.sq_tail = 1;
+
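+	/* optionally post a zero length RDMA write as the first WQE in place of an LSMM */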
+#if defined(NES_SEND_FIRST_WRITE)
+ if (cm_node->send_write0) {
+ nes_debug(NES_DBG_CM, "Sending first write.\n");
+ wqe = &nesqp->hwqp.sq_vbase[0];
+ u64temp = (unsigned long)nesqp;
+ u64temp |= NES_SW_CONTEXT_ALIGN>>1;
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
+ u64temp);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(NES_IWARP_SQ_OP_RDMAW);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
+
+ /* use the reserved spot on the WQ for the extra first WQE */
+ nesqp->nesqp_context->ird_ord_sizes &= cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
+ NES_QPCONTEXT_ORDIRD_WRPDU | NES_QPCONTEXT_ORDIRD_ALSMM));
+ nesqp->skip_lsmm = 1;
+ nesqp->hwqp.sq_tail = 0;
+ nes_write32(nesdev->regs + NES_WQE_ALLOC,
+ (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
+ }
+#endif
+
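+	/* build the IP/port quad used to derive the hash table index */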
+ memset(&nes_quad, 0, sizeof(nes_quad));
+
+ nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
+ nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
+ nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
+ nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
+
+ /* Produce hash key */
+ nesqp->hte_index = cpu_to_be32(
+ crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff);
+ nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n",
+ nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask);
+
+ nesqp->hte_index &= nesadapter->hte_index_mask;
+ nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
+
+ nesqp->ietf_frame = &cm_node->mpa_frame;
+ nesqp->private_data_len = (u8) cm_node->mpa_frame_size;
+ cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
+
+ /* modify QP state to rts */
+ attr.qp_state = IB_QPS_RTS;
+ nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
+
+ /* notify OF layer we successfully created the requested connection */
+ cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
+ cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED;
+ cm_event.provider_data = cm_id->provider_data;
+ cm_event.local_addr.sin_family = AF_INET;
+ cm_event.local_addr.sin_port = cm_id->local_addr.sin_port;
+ cm_event.remote_addr = cm_id->remote_addr;
+
+ cm_event.private_data = (void *)event->cm_node->mpa_frame_buf;
+ cm_event.private_data_len = (u8) event->cm_node->mpa_frame_size;
+
+ cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr;
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
+
+ if (ret)
+ printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
+ __FUNCTION__, __LINE__, ret);
+ nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = %lu\n",
+			nesqp->hwqp.qp_id, jiffies);
+
+ nes_rem_ref(&nesqp->ibqp);
+
+ return;
+}
+
+
+/**
+ * cm_event_connect_error
+ */
+void cm_event_connect_error(struct nes_cm_event *event)
+{
+ struct nes_qp *nesqp;
+ struct iw_cm_id *cm_id;
+ struct iw_cm_event cm_event;
+ /* struct nes_cm_info cm_info; */
+ int ret;
+
+ if (!event->cm_node)
+ return;
+
+ cm_id = event->cm_node->cm_id;
+ if (!cm_id) {
+ return;
+ }
+
+ nes_debug(NES_DBG_CM, "cm_node=%p, cm_id=%p\n", event->cm_node, cm_id);
+ nesqp = cm_id->provider_data;
+
+ if (!nesqp) {
+ return;
+ }
+
+ /* notify OF layer about this connection error event */
+ /* cm_id->rem_ref(cm_id); */
+ nesqp->cm_id = NULL;
+ cm_id->provider_data = NULL;
+ cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
+ cm_event.status = IW_CM_EVENT_STATUS_REJECTED;
+ cm_event.provider_data = cm_id->provider_data;
+ cm_event.local_addr = cm_id->local_addr;
+ cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.private_data = NULL;
+ cm_event.private_data_len = 0;
+
+ nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, remove_addr=%08x\n",
+ cm_event.local_addr.sin_addr.s_addr, cm_event.remote_addr.sin_addr.s_addr);
+
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
+ if (ret)
+ printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
+ __FUNCTION__, __LINE__, ret);
+ nes_rem_ref(&nesqp->ibqp);
+ cm_id->rem_ref(cm_id);
+
+ return;
+}
+
+
+/**
+ * cm_event_reset
+ */
+void cm_event_reset(struct nes_cm_event *event)
+{
+ struct nes_qp *nesqp;
+ struct iw_cm_id *cm_id;
+ struct iw_cm_event cm_event;
+ /* struct nes_cm_info cm_info; */
+ int ret;
+
+ if (!event->cm_node)
+ return;
+
+ if (!event->cm_node->cm_id)
+ return;
+
+ cm_id = event->cm_node->cm_id;
+
+ nes_debug(NES_DBG_CM, "%p - cm_id = %p\n", event->cm_node, cm_id);
+ nesqp = cm_id->provider_data;
+
+ nesqp->cm_id = NULL;
+ /* cm_id->provider_data = NULL; */
+ cm_event.event = IW_CM_EVENT_DISCONNECT;
+ cm_event.status = IW_CM_EVENT_STATUS_RESET;
+ cm_event.provider_data = cm_id->provider_data;
+ cm_event.local_addr = cm_id->local_addr;
+ cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.private_data = NULL;
+ cm_event.private_data_len = 0;
+
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
+
+	/* drop the OF layer reference now that the reset has been reported */
+ cm_id->rem_ref(cm_id);
+
+ return;
+}
+
+
+/**
+ * cm_event_mpa_req
+ */
+void cm_event_mpa_req(struct nes_cm_event *event)
+{
+ struct iw_cm_id *cm_id;
+ struct iw_cm_event cm_event;
+ int ret;
+ struct nes_cm_node *cm_node;
+
+ cm_node = event->cm_node;
+ if (!cm_node)
+ return;
+ cm_id = cm_node->cm_id;
+
+ atomic_inc(&cm_connect_reqs);
+ nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
+ cm_node, cm_id, jiffies);
+
+ cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
+ cm_event.status = IW_CM_EVENT_STATUS_OK;
+ cm_event.provider_data = (void *)cm_node;
+
+ cm_event.local_addr.sin_family = AF_INET;
+ cm_event.local_addr.sin_port = htons(event->cm_info.loc_port);
+ cm_event.local_addr.sin_addr.s_addr = htonl(event->cm_info.loc_addr);
+
+ cm_event.remote_addr.sin_family = AF_INET;
+ cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port);
+ cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
+
+ cm_event.private_data = cm_node->mpa_frame_buf;
+ cm_event.private_data_len = (u8) cm_node->mpa_frame_size;
+
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ if (ret)
+ printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
+ __FUNCTION__, __LINE__, ret);
+
+ return;
+}
+
+
+static void nes_cm_event_handler(struct work_struct *);
+
+/**
+ * nes_cm_post_event
+ * post an event to the cm event handler
+ */
+int nes_cm_post_event(struct nes_cm_event *event)
+{
+ atomic_inc(&event->cm_node->cm_core->events_posted);
+ add_ref_cm_node(event->cm_node);
+ event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
+ INIT_WORK(&event->event_work, nes_cm_event_handler);
+ nes_debug(NES_DBG_CM, "queue_work, event=%p\n", event);
+
+ queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
+
+ nes_debug(NES_DBG_CM, "Exit\n");
+ return 0;
+}
+
+
+/**
+ * nes_cm_event_handler
+ * worker function to handle cm events
+ * frees the nes_cm_event instance when done
+ */
+static void nes_cm_event_handler(struct work_struct *work)
+{
+ struct nes_cm_event *event = container_of(work, struct nes_cm_event, event_work);
+ struct nes_cm_core *cm_core;
+
+ if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core)) {
+ return;
+ }
+ cm_core = event->cm_node->cm_core;
+ nes_debug(NES_DBG_CM, "event=%p, event->type=%u, events posted=%u\n",
+ event, event->type, atomic_read(&cm_core->events_posted));
+
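+	/* dispatch to the per-event handler; CONNECTED and ABORTED are only handled when the node state allows it */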
+ switch (event->type) {
+ case NES_CM_EVENT_MPA_REQ:
+ cm_event_mpa_req(event);
+ nes_debug(NES_DBG_CM, "CM Event: MPA REQUEST\n");
+ break;
+ case NES_CM_EVENT_RESET:
+ nes_debug(NES_DBG_CM, "CM Event: RESET\n");
+ cm_event_reset(event);
+ break;
+ case NES_CM_EVENT_CONNECTED:
+ if ((!event->cm_node->cm_id) ||
+ (event->cm_node->state != NES_CM_STATE_TSA)) {
+ break;
+ }
+ cm_event_connected(event);
+ nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n");
+ break;
+ case NES_CM_EVENT_ABORTED:
+ if ((!event->cm_node->cm_id) || (event->cm_node->state == NES_CM_STATE_TSA)) {
+ break;
+ }
+ cm_event_connect_error(event);
+ nes_debug(NES_DBG_CM, "CM Event: ABORTED\n");
+ break;
+ case NES_CM_EVENT_DROPPED_PKT:
+ nes_debug(NES_DBG_CM, "CM Event: DROPPED PKT\n");
+ break;
+ default:
+ nes_debug(NES_DBG_CM, "CM Event: UNKNOWN EVENT TYPE\n");
+ break;
+ }
+
+ atomic_dec(&cm_core->events_posted);
+ event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
+ rem_ref_cm_node(cm_core, event->cm_node);
+ kfree(event);
+
+ return;
+}
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
new file mode 100644
index 000000000000..a59f0a7fb278
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef NES_CM_H
+#define NES_CM_H
+
+#define QUEUE_EVENTS
+
+#define NES_MANAGE_APBVT_DEL 0
+#define NES_MANAGE_APBVT_ADD 1
+
+/* IETF MPA -- defines, enums, structs */
+#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
+#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
+#define IETF_MPA_KEY_SIZE 16
+#define IETF_MPA_VERSION 1
+
+enum ietf_mpa_flags {
+ IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
+	IETF_MPA_FLAGS_CRC      = 0x40, /* CRC in use */
+ IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */
+};
+
+struct ietf_mpa_frame {
+ u8 key[IETF_MPA_KEY_SIZE];
+ u8 flags;
+ u8 rev;
+ __be16 priv_data_len;
+ u8 priv_data[0];
+};
+
+#define ietf_mpa_req_resp_frame ietf_mpa_frame
+
+struct nes_v4_quad {
+ u32 rsvd0;
+ __le32 DstIpAdrIndex; /* Only most significant 5 bits are valid */
+ __be32 SrcIpadr;
+ __be16 TcpPorts[2]; /* src is low, dest is high */
+};
+
+struct nes_cm_node;
+enum nes_timer_type {
+ NES_TIMER_TYPE_SEND,
+ NES_TIMER_TYPE_RECV,
+ NES_TIMER_NODE_CLEANUP,
+ NES_TIMER_TYPE_CLOSE,
+};
+
+#define MAX_NES_IFS 4
+
+#define SET_ACK 1
+#define SET_SYN 2
+#define SET_FIN 4
+#define SET_RST 8
+
+struct option_base {
+ u8 optionnum;
+ u8 length;
+};
+
+enum option_numbers {
+ OPTION_NUMBER_END,
+ OPTION_NUMBER_NONE,
+ OPTION_NUMBER_MSS,
+ OPTION_NUMBER_WINDOW_SCALE,
+ OPTION_NUMBER_SACK_PERM,
+ OPTION_NUMBER_SACK,
+ OPTION_NUMBER_WRITE0 = 0xbc
+};
+
+struct option_mss {
+ u8 optionnum;
+ u8 length;
+ __be16 mss;
+};
+
+struct option_windowscale {
+ u8 optionnum;
+ u8 length;
+ u8 shiftcount;
+};
+
+union all_known_options {
+ char as_end;
+ struct option_base as_base;
+ struct option_mss as_mss;
+ struct option_windowscale as_windowscale;
+};
+
+struct nes_timer_entry {
+ struct list_head list;
+ unsigned long timetosend; /* jiffies */
+ struct sk_buff *skb;
+ u32 type;
+ u32 retrycount;
+ u32 retranscount;
+ u32 context;
+ u32 seq_num;
+ u32 send_retrans;
+ int close_when_complete;
+ struct net_device *netdev;
+};
+
+#define NES_DEFAULT_RETRYS 64
+#define NES_DEFAULT_RETRANS 8
+#ifdef CONFIG_INFINIBAND_NES_DEBUG
+#define NES_RETRY_TIMEOUT (1000*HZ/1000)
+#else
+#define NES_RETRY_TIMEOUT (3000*HZ/1000)
+#endif
+#define NES_SHORT_TIME (10)
+#define NES_LONG_TIME (2000*HZ/1000)
+
+#define NES_CM_HASHTABLE_SIZE 1024
+#define NES_CM_TCP_TIMER_INTERVAL 3000
+#define NES_CM_DEFAULT_MTU 1540
+#define NES_CM_DEFAULT_FRAME_CNT 10
+#define NES_CM_THREAD_STACK_SIZE 256
+#define NES_CM_DEFAULT_RCV_WND 64240	/* before we know that window scaling is allowed */
+#define NES_CM_DEFAULT_RCV_WND_SCALED 256960 /* after we know that window scaling is allowed */
+#define NES_CM_DEFAULT_RCV_WND_SCALE 2
+#define NES_CM_DEFAULT_FREE_PKTS 0x000A
+#define NES_CM_FREE_PKT_LO_WATERMARK 2
+
+#define NES_CM_DEFAULT_MSS 536
+
+#define NES_CM_DEF_SEQ 0x159bf75f
+#define NES_CM_DEF_LOCAL_ID 0x3b47
+
+#define NES_CM_DEF_SEQ2 0x18ed5740
+#define NES_CM_DEF_LOCAL_ID2 0xb807
+
+typedef u32 nes_addr_t;
+
+#define nes_cm_tsa_context nes_qp_context
+
+struct nes_qp;
+
+/* cm node transition states */
+enum nes_cm_node_state {
+ NES_CM_STATE_UNKNOWN,
+ NES_CM_STATE_INITED,
+ NES_CM_STATE_LISTENING,
+ NES_CM_STATE_SYN_RCVD,
+ NES_CM_STATE_SYN_SENT,
+ NES_CM_STATE_ONE_SIDE_ESTABLISHED,
+ NES_CM_STATE_ESTABLISHED,
+ NES_CM_STATE_ACCEPTING,
+ NES_CM_STATE_MPAREQ_SENT,
+ NES_CM_STATE_TSA,
+ NES_CM_STATE_FIN_WAIT1,
+ NES_CM_STATE_FIN_WAIT2,
+ NES_CM_STATE_CLOSE_WAIT,
+ NES_CM_STATE_TIME_WAIT,
+ NES_CM_STATE_LAST_ACK,
+ NES_CM_STATE_CLOSING,
+ NES_CM_STATE_CLOSED
+};
+
+/* type of nes connection */
+enum nes_cm_conn_type {
+ NES_CM_IWARP_CONN_TYPE,
+};
+
+/* CM context params */
+struct nes_cm_tcp_context {
+ u8 client;
+
+ u32 loc_seq_num;
+ u32 loc_ack_num;
+ u32 rem_ack_num;
+ u32 rcv_nxt;
+
+ u32 loc_id;
+ u32 rem_id;
+
+ u32 snd_wnd;
+ u32 max_snd_wnd;
+
+ u32 rcv_wnd;
+ u32 mss;
+ u8 snd_wscale;
+ u8 rcv_wscale;
+
+ struct nes_cm_tsa_context tsa_cntxt;
+ struct timeval sent_ts;
+};
+
+
+enum nes_cm_listener_state {
+ NES_CM_LISTENER_PASSIVE_STATE=1,
+ NES_CM_LISTENER_ACTIVE_STATE=2,
+ NES_CM_LISTENER_EITHER_STATE=3
+};
+
+struct nes_cm_listener {
+ struct list_head list;
+ u64 session_id;
+ struct nes_cm_core *cm_core;
+ u8 loc_mac[ETH_ALEN];
+ nes_addr_t loc_addr;
+ u16 loc_port;
+ struct iw_cm_id *cm_id;
+ enum nes_cm_conn_type conn_type;
+ atomic_t ref_count;
+ struct nes_vnic *nesvnic;
+ atomic_t pend_accepts_cnt;
+ int backlog;
+ enum nes_cm_listener_state listener_state;
+ u32 reused_node;
+};
+
+/* per connection node and node state information */
+struct nes_cm_node {
+ u64 session_id;
+ u32 hashkey;
+
+ nes_addr_t loc_addr, rem_addr;
+ u16 loc_port, rem_port;
+
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+
+ enum nes_cm_node_state state;
+ struct nes_cm_tcp_context tcp_cntxt;
+ struct nes_cm_core *cm_core;
+ struct sk_buff_head resend_list;
+ atomic_t ref_count;
+ struct net_device *netdev;
+
+ struct nes_cm_node *loopbackpartner;
+ struct list_head retrans_list;
+ spinlock_t retrans_list_lock;
+ struct list_head recv_list;
+ spinlock_t recv_list_lock;
+
+ int send_write0;
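+	/* MPA frame for this connection; the byte-array view allows up to a default-MTU sized frame */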
+ union {
+ struct ietf_mpa_frame mpa_frame;
+ u8 mpa_frame_buf[NES_CM_DEFAULT_MTU];
+ };
+ u16 mpa_frame_size;
+ struct iw_cm_id *cm_id;
+ struct list_head list;
+ int accelerated;
+ struct nes_cm_listener *listener;
+ enum nes_cm_conn_type conn_type;
+ struct nes_vnic *nesvnic;
+ int apbvt_set;
+ int accept_pend;
+};
+
+/* structure for client or CM to fill when making CM api calls. */
+/* - only need to set relevant data, based on op. */
+struct nes_cm_info {
+ union {
+ struct iw_cm_id *cm_id;
+ struct net_device *netdev;
+ };
+
+ u16 loc_port;
+ u16 rem_port;
+ nes_addr_t loc_addr;
+ nes_addr_t rem_addr;
+
+ enum nes_cm_conn_type conn_type;
+ int backlog;
+};
+
+/* CM event codes */
+enum nes_cm_event_type {
+ NES_CM_EVENT_UNKNOWN,
+ NES_CM_EVENT_ESTABLISHED,
+ NES_CM_EVENT_MPA_REQ,
+ NES_CM_EVENT_MPA_CONNECT,
+ NES_CM_EVENT_MPA_ACCEPT,
+ NES_CM_EVENT_MPA_ESTABLISHED,
+ NES_CM_EVENT_CONNECTED,
+ NES_CM_EVENT_CLOSED,
+ NES_CM_EVENT_RESET,
+ NES_CM_EVENT_DROPPED_PKT,
+ NES_CM_EVENT_CLOSE_IMMED,
+ NES_CM_EVENT_CLOSE_HARD,
+ NES_CM_EVENT_CLOSE_CLEAN,
+ NES_CM_EVENT_ABORTED,
+ NES_CM_EVENT_SEND_FIRST
+};
+
+/* event to post to CM event handler */
+struct nes_cm_event {
+ enum nes_cm_event_type type;
+
+ struct nes_cm_info cm_info;
+ struct work_struct event_work;
+ struct nes_cm_node *cm_node;
+};
+
+struct nes_cm_core {
+ enum nes_cm_node_state state;
+ atomic_t session_id;
+
+ atomic_t listen_node_cnt;
+ struct nes_cm_node listen_list;
+ spinlock_t listen_list_lock;
+
+ u32 mtu;
+ u32 free_tx_pkt_max;
+ u32 rx_pkt_posted;
+ struct sk_buff_head tx_free_list;
+ atomic_t ht_node_cnt;
+ struct list_head connected_nodes;
+ /* struct list_head hashtable[NES_CM_HASHTABLE_SIZE]; */
+ spinlock_t ht_lock;
+
+ struct timer_list tcp_timer;
+
+ struct nes_cm_ops *api;
+
+ int (*post_event)(struct nes_cm_event *event);
+ atomic_t events_posted;
+ struct workqueue_struct *event_wq;
+ struct workqueue_struct *disconn_wq;
+
+ atomic_t node_cnt;
+ u64 aborted_connects;
+ u32 options;
+
+ struct nes_cm_node *current_listen_node;
+};
+
+
+#define NES_CM_SET_PKT_SIZE (1 << 1)
+#define NES_CM_SET_FREE_PKT_Q_SIZE (1 << 2)
+
+/* CM ops/API for client interface */
+struct nes_cm_ops {
+ int (*accelerated)(struct nes_cm_core *, struct nes_cm_node *);
+ struct nes_cm_listener * (*listen)(struct nes_cm_core *, struct nes_vnic *,
+ struct nes_cm_info *);
+ int (*stop_listener)(struct nes_cm_core *, struct nes_cm_listener *);
+ struct nes_cm_node * (*connect)(struct nes_cm_core *,
+ struct nes_vnic *, struct ietf_mpa_frame *,
+ struct nes_cm_info *);
+ int (*close)(struct nes_cm_core *, struct nes_cm_node *);
+ int (*accept)(struct nes_cm_core *, struct ietf_mpa_frame *,
+ struct nes_cm_node *);
+ int (*reject)(struct nes_cm_core *, struct ietf_mpa_frame *,
+ struct nes_cm_node *);
+ int (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *,
+ struct sk_buff *);
+ int (*destroy_cm_core)(struct nes_cm_core *);
+ int (*get)(struct nes_cm_core *);
+ int (*set)(struct nes_cm_core *, u32, u32);
+};
+
+
+int send_mpa_request(struct nes_cm_node *);
+struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
+ void *, u32, void *, u32, u8);
+int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
+ enum nes_timer_type, int, int);
+void nes_cm_timer_tick(unsigned long);
+int send_syn(struct nes_cm_node *, u32);
+int send_reset(struct nes_cm_node *);
+int send_ack(struct nes_cm_node *);
+int send_fin(struct nes_cm_node *, struct sk_buff *);
+struct sk_buff *get_free_pkt(struct nes_cm_node *);
+int process_packet(struct nes_cm_node *, struct sk_buff *, struct nes_cm_core *);
+
+struct nes_cm_node * mini_cm_connect(struct nes_cm_core *,
+ struct nes_vnic *, struct ietf_mpa_frame *, struct nes_cm_info *);
+int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
+int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
+int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
+int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *);
+struct nes_cm_core *mini_cm_alloc_core(struct nes_cm_info *);
+int mini_cm_dealloc_core(struct nes_cm_core *);
+int mini_cm_get(struct nes_cm_core *);
+int mini_cm_set(struct nes_cm_core *, u32, u32);
+
+int nes_cm_disconn(struct nes_qp *);
+void nes_disconnect_worker(struct work_struct *);
+int nes_cm_disconn_true(struct nes_qp *);
+int nes_disconnect(struct nes_qp *, int);
+
+int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
+int nes_reject(struct iw_cm_id *, const void *, u8);
+int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
+int nes_create_listen(struct iw_cm_id *, int);
+int nes_destroy_listen(struct iw_cm_id *);
+
+int nes_cm_recv(struct sk_buff *, struct net_device *);
+int nes_cm_start(void);
+int nes_cm_stop(void);
+
+/* CM event handler functions */
+void cm_event_connected(struct nes_cm_event *);
+void cm_event_connect_error(struct nes_cm_event *);
+void cm_event_reset(struct nes_cm_event *);
+void cm_event_mpa_req(struct nes_cm_event *);
+int nes_cm_post_event(struct nes_cm_event *);
+
+#endif /* NES_CM_H */
diff --git a/drivers/infiniband/hw/nes/nes_context.h b/drivers/infiniband/hw/nes/nes_context.h
new file mode 100644
index 000000000000..da9daba8e668
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_context.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NES_CONTEXT_H
+#define NES_CONTEXT_H
+
+struct nes_qp_context {
+ __le32 misc;
+ __le32 cqs;
+ __le32 sq_addr_low;
+ __le32 sq_addr_high;
+ __le32 rq_addr_low;
+ __le32 rq_addr_high;
+ __le32 misc2;
+ __le16 tcpPorts[2];
+ __le32 ip0;
+ __le32 ip1;
+ __le32 ip2;
+ __le32 ip3;
+ __le32 mss;
+ __le32 arp_index_vlan;
+ __le32 tcp_state_flow_label;
+ __le32 pd_index_wscale;
+ __le32 keepalive;
+ u32 ts_recent;
+ u32 ts_age;
+ __le32 snd_nxt;
+ __le32 snd_wnd;
+ __le32 rcv_nxt;
+ __le32 rcv_wnd;
+ __le32 snd_max;
+ __le32 snd_una;
+ u32 srtt;
+ __le32 rttvar;
+ __le32 ssthresh;
+ __le32 cwnd;
+ __le32 snd_wl1;
+ __le32 snd_wl2;
+ __le32 max_snd_wnd;
+ __le32 ts_val_delta;
+ u32 retransmit;
+ u32 probe_cnt;
+ u32 hte_index;
+ __le32 q2_addr_low;
+ __le32 q2_addr_high;
+ __le32 ird_index;
+ u32 Rsvd3;
+ __le32 ird_ord_sizes;
+ u32 mrkr_offset;
+ __le32 aeq_token_low;
+ __le32 aeq_token_high;
+};
+
+/* QP Context Misc Field */
+
+#define NES_QPCONTEXT_MISC_IWARP_VER_MASK 0x00000003
+#define NES_QPCONTEXT_MISC_IWARP_VER_SHIFT 0
+#define NES_QPCONTEXT_MISC_EFB_SIZE_MASK 0x000000C0
+#define NES_QPCONTEXT_MISC_EFB_SIZE_SHIFT 6
+#define NES_QPCONTEXT_MISC_RQ_SIZE_MASK 0x00000300
+#define NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT 8
+#define NES_QPCONTEXT_MISC_SQ_SIZE_MASK 0x00000c00
+#define NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT 10
+#define NES_QPCONTEXT_MISC_PCI_FCN_MASK 0x00007000
+#define NES_QPCONTEXT_MISC_PCI_FCN_SHIFT 12
+#define NES_QPCONTEXT_MISC_DUP_ACKS_MASK 0x00070000
+#define NES_QPCONTEXT_MISC_DUP_ACKS_SHIFT 16
+
+enum nes_qp_context_misc_bits {
+ NES_QPCONTEXT_MISC_RX_WQE_SIZE = 0x00000004,
+ NES_QPCONTEXT_MISC_IPV4 = 0x00000008,
+ NES_QPCONTEXT_MISC_DO_NOT_FRAG = 0x00000010,
+ NES_QPCONTEXT_MISC_INSERT_VLAN = 0x00000020,
+ NES_QPCONTEXT_MISC_DROS = 0x00008000,
+ NES_QPCONTEXT_MISC_WSCALE = 0x00080000,
+ NES_QPCONTEXT_MISC_KEEPALIVE = 0x00100000,
+ NES_QPCONTEXT_MISC_TIMESTAMP = 0x00200000,
+ NES_QPCONTEXT_MISC_SACK = 0x00400000,
+ NES_QPCONTEXT_MISC_RDMA_WRITE_EN = 0x00800000,
+ NES_QPCONTEXT_MISC_RDMA_READ_EN = 0x01000000,
+ NES_QPCONTEXT_MISC_WBIND_EN = 0x10000000,
+ NES_QPCONTEXT_MISC_FAST_REGISTER_EN = 0x20000000,
+ NES_QPCONTEXT_MISC_PRIV_EN = 0x40000000,
+ NES_QPCONTEXT_MISC_NO_NAGLE = 0x80000000
+};
+
+enum nes_qp_acc_wq_sizes {
+ HCONTEXT_TSA_WQ_SIZE_4 = 0,
+ HCONTEXT_TSA_WQ_SIZE_32 = 1,
+ HCONTEXT_TSA_WQ_SIZE_128 = 2,
+ HCONTEXT_TSA_WQ_SIZE_512 = 3
+};
+
+/* QP Context Misc2 Fields */
+#define NES_QPCONTEXT_MISC2_TTL_MASK 0x000000ff
+#define NES_QPCONTEXT_MISC2_TTL_SHIFT 0
+#define NES_QPCONTEXT_MISC2_HOP_LIMIT_MASK 0x000000ff
+#define NES_QPCONTEXT_MISC2_HOP_LIMIT_SHIFT 0
+#define NES_QPCONTEXT_MISC2_LIMIT_MASK 0x00000300
+#define NES_QPCONTEXT_MISC2_LIMIT_SHIFT 8
+#define NES_QPCONTEXT_MISC2_NIC_INDEX_MASK 0x0000fc00
+#define NES_QPCONTEXT_MISC2_NIC_INDEX_SHIFT 10
+#define NES_QPCONTEXT_MISC2_SRC_IP_MASK 0x001f0000
+#define NES_QPCONTEXT_MISC2_SRC_IP_SHIFT 16
+#define NES_QPCONTEXT_MISC2_TOS_MASK 0xff000000
+#define NES_QPCONTEXT_MISC2_TOS_SHIFT 24
+#define NES_QPCONTEXT_MISC2_TRAFFIC_CLASS_MASK 0xff000000
+#define NES_QPCONTEXT_MISC2_TRAFFIC_CLASS_SHIFT 24
+
+/* QP Context Tcp State/Flow Label Fields */
+#define NES_QPCONTEXT_TCPFLOW_FLOW_LABEL_MASK 0x000fffff
+#define NES_QPCONTEXT_TCPFLOW_FLOW_LABEL_SHIFT 0
+#define NES_QPCONTEXT_TCPFLOW_TCP_STATE_MASK 0xf0000000
+#define NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT 28
+
+enum nes_qp_tcp_state {
+ NES_QPCONTEXT_TCPSTATE_CLOSED = 1,
+ NES_QPCONTEXT_TCPSTATE_EST = 5,
+ NES_QPCONTEXT_TCPSTATE_TIME_WAIT = 11,
+};
+
+/* QP Context PD Index/wscale Fields */
+#define NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK 0x0000000f
+#define NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT 0
+#define NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK 0x00000f00
+#define NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT 8
+#define NES_QPCONTEXT_PDWSCALE_PDINDEX_MASK 0xffff0000
+#define NES_QPCONTEXT_PDWSCALE_PDINDEX_SHIFT 16
+
+/* QP Context Keepalive Fields */
+#define NES_QPCONTEXT_KEEPALIVE_DELTA_MASK 0x0000ffff
+#define NES_QPCONTEXT_KEEPALIVE_DELTA_SHIFT 0
+#define NES_QPCONTEXT_KEEPALIVE_PROBE_CNT_MASK 0x00ff0000
+#define NES_QPCONTEXT_KEEPALIVE_PROBE_CNT_SHIFT 16
+#define NES_QPCONTEXT_KEEPALIVE_INTV_MASK 0xff000000
+#define NES_QPCONTEXT_KEEPALIVE_INTV_SHIFT 24
+
+/* QP Context ORD/IRD Fields */
+#define NES_QPCONTEXT_ORDIRD_ORDSIZE_MASK 0x0000007f
+#define NES_QPCONTEXT_ORDIRD_ORDSIZE_SHIFT 0
+#define NES_QPCONTEXT_ORDIRD_IRDSIZE_MASK 0x00030000
+#define NES_QPCONTEXT_ORDIRD_IRDSIZE_SHIFT 16
+#define NES_QPCONTEXT_ORDIRD_IWARP_MODE_MASK 0x30000000
+#define NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT 28
+
+enum nes_ord_ird_bits {
+ NES_QPCONTEXT_ORDIRD_WRPDU = 0x02000000,
+ NES_QPCONTEXT_ORDIRD_LSMM_PRESENT = 0x04000000,
+ NES_QPCONTEXT_ORDIRD_ALSMM = 0x08000000,
+ NES_QPCONTEXT_ORDIRD_AAH = 0x40000000,
+ NES_QPCONTEXT_ORDIRD_RNMC = 0x80000000
+};
+
+enum nes_iwarp_qp_state {
+ NES_QPCONTEXT_IWARP_STATE_NONEXIST = 0,
+ NES_QPCONTEXT_IWARP_STATE_IDLE = 1,
+ NES_QPCONTEXT_IWARP_STATE_RTS = 2,
+ NES_QPCONTEXT_IWARP_STATE_CLOSING = 3,
+ NES_QPCONTEXT_IWARP_STATE_TERMINATE = 5,
+ NES_QPCONTEXT_IWARP_STATE_ERROR = 6
+};
+
+
+#endif /* NES_CONTEXT_H */
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
new file mode 100644
index 000000000000..7c4c0fbf0abd
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -0,0 +1,3080 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+
+#include "nes.h"
+
+u32 crit_err_count = 0;
+u32 int_mod_timer_init;
+u32 int_mod_cq_depth_256;
+u32 int_mod_cq_depth_128;
+u32 int_mod_cq_depth_32;
+u32 int_mod_cq_depth_24;
+u32 int_mod_cq_depth_16;
+u32 int_mod_cq_depth_4;
+u32 int_mod_cq_depth_1;
+
+#include "nes_cm.h"
+
+
+#ifdef CONFIG_INFINIBAND_NES_DEBUG
+static unsigned char *nes_iwarp_state_str[] = {
+ "Non-Existant",
+ "Idle",
+ "RTS",
+ "Closing",
+ "RSVD1",
+ "Terminate",
+ "Error",
+ "RSVD2",
+};
+
+static unsigned char *nes_tcp_state_str[] = {
+ "Non-Existant",
+ "Closed",
+ "Listen",
+ "SYN Sent",
+ "SYN Rcvd",
+ "Established",
+ "Close Wait",
+ "FIN Wait 1",
+ "Closing",
+ "Last Ack",
+ "FIN Wait 2",
+ "Time Wait",
+ "RSVD1",
+ "RSVD2",
+ "RSVD3",
+ "RSVD4",
+};
+#endif
+
+
+/**
+ * nes_nic_init_timer_defaults
+ */
+void nes_nic_init_timer_defaults(struct nes_device *nesdev, u8 jumbomode)
+{
+ unsigned long flags;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
+
+ spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
+
+ shared_timer->timer_in_use_min = NES_NIC_FAST_TIMER_LOW;
+ shared_timer->timer_in_use_max = NES_NIC_FAST_TIMER_HIGH;
+ if (jumbomode) {
+ shared_timer->threshold_low = DEFAULT_JUMBO_NES_QL_LOW;
+ shared_timer->threshold_target = DEFAULT_JUMBO_NES_QL_TARGET;
+ shared_timer->threshold_high = DEFAULT_JUMBO_NES_QL_HIGH;
+ } else {
+ shared_timer->threshold_low = DEFAULT_NES_QL_LOW;
+ shared_timer->threshold_target = DEFAULT_NES_QL_TARGET;
+ shared_timer->threshold_high = DEFAULT_NES_QL_HIGH;
+ }
+
+ /* todo use netdev->mtu to set thresholds */
+ spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
+}
+
+
+/**
+ * nes_nic_init_timer
+ */
+static void nes_nic_init_timer(struct nes_device *nesdev)
+{
+ unsigned long flags;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
+
+ spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
+
+ if (shared_timer->timer_in_use_old == 0) {
+ nesdev->deepcq_count = 0;
+ shared_timer->timer_direction_upward = 0;
+ shared_timer->timer_direction_downward = 0;
+ shared_timer->timer_in_use = NES_NIC_FAST_TIMER;
+ shared_timer->timer_in_use_old = 0;
+
+ }
+ if (shared_timer->timer_in_use != shared_timer->timer_in_use_old) {
+ shared_timer->timer_in_use_old = shared_timer->timer_in_use;
+ nes_write32(nesdev->regs+NES_PERIODIC_CONTROL,
+ 0x80000000 | ((u32)(shared_timer->timer_in_use*8)));
+ }
+ /* todo use netdev->mtu to set thresholds */
+ spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
+}
+
+
+/**
+ * nes_nic_tune_timer
+ */
+static void nes_nic_tune_timer(struct nes_device *nesdev)
+{
+ unsigned long flags;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
+ u16 cq_count = nesdev->currcq_count;
+
+ spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
+
+ if (shared_timer->cq_count_old < cq_count) {
+ if (cq_count > shared_timer->threshold_low)
+ shared_timer->cq_direction_downward=0;
+ }
+ if (shared_timer->cq_count_old >= cq_count)
+ shared_timer->cq_direction_downward++;
+ shared_timer->cq_count_old = cq_count;
+ if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) {
+ if (cq_count <= shared_timer->threshold_low) {
+ shared_timer->threshold_low = shared_timer->threshold_low/2;
+ shared_timer->cq_direction_downward=0;
+ nesdev->currcq_count = 0;
+ spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
+ return;
+ }
+ }
+
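+	/* nudge the interrupt moderation timer up or down based on recent CQ depth */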
+ if (cq_count > 1) {
+ nesdev->deepcq_count += cq_count;
+ if (cq_count <= shared_timer->threshold_low) { /* increase timer gently */
+ shared_timer->timer_direction_upward++;
+ shared_timer->timer_direction_downward = 0;
+ } else if (cq_count <= shared_timer->threshold_target) { /* balanced */
+ shared_timer->timer_direction_upward = 0;
+ shared_timer->timer_direction_downward = 0;
+ } else if (cq_count <= shared_timer->threshold_high) { /* decrease timer gently */
+ shared_timer->timer_direction_downward++;
+ shared_timer->timer_direction_upward = 0;
+ } else if (cq_count <= (shared_timer->threshold_high) * 2) {
+ shared_timer->timer_in_use -= 2;
+ shared_timer->timer_direction_upward = 0;
+ shared_timer->timer_direction_downward++;
+ } else {
+ shared_timer->timer_in_use -= 4;
+ shared_timer->timer_direction_upward = 0;
+ shared_timer->timer_direction_downward++;
+ }
+
+ if (shared_timer->timer_direction_upward > 3 ) { /* using history */
+ shared_timer->timer_in_use += 3;
+ shared_timer->timer_direction_upward = 0;
+ shared_timer->timer_direction_downward = 0;
+ }
+ if (shared_timer->timer_direction_downward > 5) { /* using history */
+			shared_timer->timer_in_use -= 4;
+ shared_timer->timer_direction_downward = 0;
+ shared_timer->timer_direction_upward = 0;
+ }
+ }
+
+ /* boundary checking */
+ if (shared_timer->timer_in_use > NES_NIC_FAST_TIMER_HIGH)
+ shared_timer->timer_in_use = NES_NIC_FAST_TIMER_HIGH;
+ else if (shared_timer->timer_in_use < NES_NIC_FAST_TIMER_LOW) {
+ shared_timer->timer_in_use = NES_NIC_FAST_TIMER_LOW;
+ }
+
+ nesdev->currcq_count = 0;
+
+ spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
+}
+
+
+/**
+ * nes_init_adapter - initialize adapter
+ */
+struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
+ struct nes_adapter *nesadapter = NULL;
+ unsigned long num_pds;
+ u32 u32temp;
+ u32 port_count;
+ u16 max_rq_wrs;
+ u16 max_sq_wrs;
+ u32 max_mr;
+ u32 max_256pbl;
+ u32 max_4kpbl;
+ u32 max_qp;
+ u32 max_irrq;
+ u32 max_cq;
+ u32 hte_index_mask;
+ u32 adapter_size;
+ u32 arp_table_size;
+ u16 vendor_id;
+ u8 OneG_Mode;
+ u8 func_index;
+
+ /* search the list of existing adapters */
+ list_for_each_entry(nesadapter, &nes_adapter_list, list) {
+ nes_debug(NES_DBG_INIT, "Searching Adapter list for PCI devfn = 0x%X,"
+ " adapter PCI slot/bus = %u/%u, pci devices PCI slot/bus = %u/%u, .\n",
+ nesdev->pcidev->devfn,
+ PCI_SLOT(nesadapter->devfn),
+ nesadapter->bus_number,
+ PCI_SLOT(nesdev->pcidev->devfn),
+ nesdev->pcidev->bus->number );
+ if ((PCI_SLOT(nesadapter->devfn) == PCI_SLOT(nesdev->pcidev->devfn)) &&
+ (nesadapter->bus_number == nesdev->pcidev->bus->number)) {
+ nesadapter->ref_count++;
+ return nesadapter;
+ }
+ }
+
+ /* no adapter found */
+ num_pds = pci_resource_len(nesdev->pcidev, BAR_1) >> PAGE_SHIFT;
+ if ((hw_rev != NE020_REV) && (hw_rev != NE020_REV1)) {
+ nes_debug(NES_DBG_INIT, "NE020 driver detected unknown hardware revision 0x%x\n",
+ hw_rev);
+ return NULL;
+ }
+
+ nes_debug(NES_DBG_INIT, "Determine Soft Reset, QP_control=0x%x, CPU0=0x%x, CPU1=0x%x, CPU2=0x%x\n",
+ nes_read_indexed(nesdev, NES_IDX_QP_CONTROL + PCI_FUNC(nesdev->pcidev->devfn) * 8),
+ nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS),
+ nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS + 4),
+ nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS + 8));
+
+ nes_debug(NES_DBG_INIT, "Reset and init NE020\n");
+
+
+ if ((port_count = nes_reset_adapter_ne020(nesdev, &OneG_Mode)) == 0)
+ return NULL;
+ if (nes_init_serdes(nesdev, hw_rev, port_count, OneG_Mode))
+ return NULL;
+ nes_init_csr_ne020(nesdev, hw_rev, port_count);
+
+ max_qp = nes_read_indexed(nesdev, NES_IDX_QP_CTX_SIZE);
+ nes_debug(NES_DBG_INIT, "QP_CTX_SIZE=%u\n", max_qp);
+
+ u32temp = nes_read_indexed(nesdev, NES_IDX_QUAD_HASH_TABLE_SIZE);
+ if (max_qp > ((u32)1 << (u32temp & 0x001f))) {
+ nes_debug(NES_DBG_INIT, "Reducing Max QPs to %u due to hash table size = 0x%08X\n",
+ max_qp, u32temp);
+ max_qp = (u32)1 << (u32temp & 0x001f);
+ }
+
+ hte_index_mask = ((u32)1 << ((u32temp & 0x001f)+1))-1;
+ nes_debug(NES_DBG_INIT, "Max QP = %u, hte_index_mask = 0x%08X.\n",
+ max_qp, hte_index_mask);
+
+ u32temp = nes_read_indexed(nesdev, NES_IDX_IRRQ_COUNT);
+
+ max_irrq = 1 << (u32temp & 0x001f);
+
+ if (max_qp > max_irrq) {
+ max_qp = max_irrq;
+ nes_debug(NES_DBG_INIT, "Reducing Max QPs to %u due to Available Q1s.\n",
+ max_qp);
+ }
+
+ /* there should be no reason to allocate more pds than qps */
+ if (num_pds > max_qp)
+ num_pds = max_qp;
+
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MRT_SIZE);
+ max_mr = (u32)8192 << (u32temp & 0x7);
+
+ u32temp = nes_read_indexed(nesdev, NES_IDX_PBL_REGION_SIZE);
+ max_256pbl = (u32)1 << (u32temp & 0x0000001f);
+ max_4kpbl = (u32)1 << ((u32temp >> 16) & 0x0000001f);
+ max_cq = nes_read_indexed(nesdev, NES_IDX_CQ_CTX_SIZE);
+
+ u32temp = nes_read_indexed(nesdev, NES_IDX_ARP_CACHE_SIZE);
+ arp_table_size = 1 << u32temp;
+
+ adapter_size = (sizeof(struct nes_adapter) +
+ (sizeof(unsigned long)-1)) & (~(sizeof(unsigned long)-1));
+ adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
+ adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
+ adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);
+ adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);
+ adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);
+ adapter_size += sizeof(struct nes_qp **) * max_qp;
+
+ /* allocate a new adapter struct */
+ nesadapter = kzalloc(adapter_size, GFP_KERNEL);
+ if (nesadapter == NULL) {
+ return NULL;
+ }
+
+ nes_debug(NES_DBG_INIT, "Allocating new nesadapter @ %p, size = %u (actual size = %u).\n",
+ nesadapter, (u32)sizeof(struct nes_adapter), adapter_size);
+
+ /* populate the new nesadapter */
+ nesadapter->devfn = nesdev->pcidev->devfn;
+ nesadapter->bus_number = nesdev->pcidev->bus->number;
+ nesadapter->ref_count = 1;
+ nesadapter->timer_int_req = 0xffff0000;
+ nesadapter->OneG_Mode = OneG_Mode;
+ nesadapter->doorbell_start = nesdev->doorbell_region;
+
+ /* nesadapter->tick_delta = clk_divisor; */
+ nesadapter->hw_rev = hw_rev;
+ nesadapter->port_count = port_count;
+
+ nesadapter->max_qp = max_qp;
+ nesadapter->hte_index_mask = hte_index_mask;
+ nesadapter->max_irrq = max_irrq;
+ nesadapter->max_mr = max_mr;
+ nesadapter->max_256pbl = max_256pbl - 1;
+ nesadapter->max_4kpbl = max_4kpbl - 1;
+ nesadapter->max_cq = max_cq;
+ nesadapter->free_256pbl = max_256pbl - 1;
+ nesadapter->free_4kpbl = max_4kpbl - 1;
+ nesadapter->max_pd = num_pds;
+ nesadapter->arp_table_size = arp_table_size;
+
+ nesadapter->et_pkt_rate_low = NES_TIMER_ENABLE_LIMIT;
+ if (nes_drv_opt & NES_DRV_OPT_DISABLE_INT_MOD) {
+ nesadapter->et_use_adaptive_rx_coalesce = 0;
+ nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT;
+ nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
+ } else {
+ nesadapter->et_use_adaptive_rx_coalesce = 1;
+ nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
+ nesadapter->et_rx_coalesce_usecs_irq = 0;
+ printk(PFX "%s: Using Adaptive Interrupt Moderation\n", __FUNCTION__);
+ }
+ /* Setup and enable the periodic timer */
+ if (nesadapter->et_rx_coalesce_usecs_irq)
+ nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 0x80000000 |
+ ((u32)(nesadapter->et_rx_coalesce_usecs_irq * 8)));
+ else
+ nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 0x00000000);
+
+ nesadapter->base_pd = 1;
+
+ nesadapter->device_cap_flags =
+ IB_DEVICE_ZERO_STAG | IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW;
+
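+	/* the resource bitmaps and QP table live in the tail of the single allocation sized above */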
+ nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter)
+ [(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]);
+ nesadapter->allocated_cqs = &nesadapter->allocated_qps[BITS_TO_LONGS(max_qp)];
+ nesadapter->allocated_mrs = &nesadapter->allocated_cqs[BITS_TO_LONGS(max_cq)];
+ nesadapter->allocated_pds = &nesadapter->allocated_mrs[BITS_TO_LONGS(max_mr)];
+ nesadapter->allocated_arps = &nesadapter->allocated_pds[BITS_TO_LONGS(num_pds)];
+ nesadapter->qp_table = (struct nes_qp **)(&nesadapter->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
+
+
+ /* mark the usual suspect QPs and CQs as in use */
+ for (u32temp = 0; u32temp < NES_FIRST_QPN; u32temp++) {
+ set_bit(u32temp, nesadapter->allocated_qps);
+ set_bit(u32temp, nesadapter->allocated_cqs);
+ }
+
+ for (u32temp = 0; u32temp < 20; u32temp++)
+ set_bit(u32temp, nesadapter->allocated_pds);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_QP_MAX_CFG_SIZES);
+
+ max_rq_wrs = ((u32temp >> 8) & 3);
+ switch (max_rq_wrs) {
+ case 0:
+ max_rq_wrs = 4;
+ break;
+ case 1:
+ max_rq_wrs = 16;
+ break;
+ case 2:
+ max_rq_wrs = 32;
+ break;
+ case 3:
+ max_rq_wrs = 512;
+ break;
+ }
+
+ max_sq_wrs = (u32temp & 3);
+ switch (max_sq_wrs) {
+ case 0:
+ max_sq_wrs = 4;
+ break;
+ case 1:
+ max_sq_wrs = 16;
+ break;
+ case 2:
+ max_sq_wrs = 32;
+ break;
+ case 3:
+ max_sq_wrs = 512;
+ break;
+ }
+ nesadapter->max_qp_wr = min(max_rq_wrs, max_sq_wrs);
+ nesadapter->max_irrq_wr = (u32temp >> 16) & 3;
+
+ nesadapter->max_sge = 4;
+ nesadapter->max_cqe = 32767;
+
+ if (nes_read_eeprom_values(nesdev, nesadapter)) {
+ printk(KERN_ERR PFX "Unable to read EEPROM data.\n");
+ kfree(nesadapter);
+ return NULL;
+ }
+
+ u32temp = nes_read_indexed(nesdev, NES_IDX_TCP_TIMER_CONFIG);
+ nes_write_indexed(nesdev, NES_IDX_TCP_TIMER_CONFIG,
+ (u32temp & 0xff000000) | (nesadapter->tcp_timer_core_clk_divisor & 0x00ffffff));
+
+ /* setup port configuration */
+ if (nesadapter->port_count == 1) {
+ u32temp = 0x00000000;
+ if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT)
+ nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000002);
+ else
+ nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003);
+ } else {
+ if (nesadapter->port_count == 2)
+ u32temp = 0x00000044;
+ else
+ u32temp = 0x000000e4;
+ nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003);
+ }
+
+ nes_write_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT, u32temp);
+ nes_debug(NES_DBG_INIT, "Probe time, LOG2PHY=%u\n",
+ nes_read_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT));
+
+ spin_lock_init(&nesadapter->resource_lock);
+ spin_lock_init(&nesadapter->phy_lock);
+ spin_lock_init(&nesadapter->pbl_lock);
+ spin_lock_init(&nesadapter->periodic_timer_lock);
+
+ INIT_LIST_HEAD(&nesadapter->nesvnic_list[0]);
+ INIT_LIST_HEAD(&nesadapter->nesvnic_list[1]);
+ INIT_LIST_HEAD(&nesadapter->nesvnic_list[2]);
+ INIT_LIST_HEAD(&nesadapter->nesvnic_list[3]);
+
+ if ((!nesadapter->OneG_Mode) && (nesadapter->port_count == 2)) {
+ u32 pcs_control_status0, pcs_control_status1;
+ u32 reset_value;
+ u32 i = 0;
+ u32 int_cnt = 0;
+ u32 ext_cnt = 0;
+ unsigned long flags;
+ u32 j = 0;
+
+ pcs_control_status0 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0);
+ pcs_control_status1 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
+
+ for (i = 0; i < NES_MAX_LINK_CHECK; i++) {
+ pcs_control_status0 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0);
+ pcs_control_status1 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
+ if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
+ || (0x0F000100 == (pcs_control_status1 & 0x0F000100)))
+ int_cnt++;
+ msleep(1);
+ }
+ if (int_cnt > 1) {
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088);
+ mh_detected++;
+ reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
+ reset_value |= 0x0000003d;
+ nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
+
+ while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
+ & 0x00000040) != 0x00000040) && (j++ < 5000));
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+
+ pcs_control_status0 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0);
+ pcs_control_status1 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
+
+ for (i = 0; i < NES_MAX_LINK_CHECK; i++) {
+ pcs_control_status0 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0);
+ pcs_control_status1 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
+ if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
+ || (0x0F000100 == (pcs_control_status1 & 0x0F000100))) {
+ if (++ext_cnt > int_cnt) {
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1,
+ 0x0000F0C8);
+ mh_detected++;
+ reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
+ reset_value |= 0x0000003d;
+ nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
+
+ while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
+ & 0x00000040) != 0x00000040) && (j++ < 5000));
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ break;
+ }
+ }
+ msleep(1);
+ }
+ }
+ }
+
+ if (nesadapter->hw_rev == NE020_REV) {
+ init_timer(&nesadapter->mh_timer);
+ nesadapter->mh_timer.function = nes_mh_fix;
+		nesadapter->mh_timer.expires = jiffies + (HZ/5);	/* 200 ms */
+ nesadapter->mh_timer.data = (unsigned long)nesdev;
+ add_timer(&nesadapter->mh_timer);
+ } else {
+ nes_write32(nesdev->regs+NES_INTF_INT_STAT, 0x0f000000);
+ }
+
+ init_timer(&nesadapter->lc_timer);
+ nesadapter->lc_timer.function = nes_clc;
+ nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */
+ nesadapter->lc_timer.data = (unsigned long)nesdev;
+ add_timer(&nesadapter->lc_timer);
+
+ list_add_tail(&nesadapter->list, &nes_adapter_list);
+
+ for (func_index = 0; func_index < 8; func_index++) {
+ pci_bus_read_config_word(nesdev->pcidev->bus,
+ PCI_DEVFN(PCI_SLOT(nesdev->pcidev->devfn),
+ func_index), 0, &vendor_id);
+ if (vendor_id == 0xffff)
+ break;
+ }
+ nes_debug(NES_DBG_INIT, "%s %d functions found for %s.\n", __FUNCTION__,
+ func_index, pci_name(nesdev->pcidev));
+ nesadapter->adapter_fcn_count = func_index;
+
+ return nesadapter;
+}
+
+
+/**
+ * nes_reset_adapter_ne020
+ */
+unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode)
+{
+ u32 port_count;
+ u32 u32temp;
+ u32 i;
+
+ u32temp = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
+ port_count = ((u32temp & 0x00000300) >> 8) + 1;
+ /* TODO: assuming that both SERDES are set the same for now */
+ *OneG_Mode = (u32temp & 0x00003c00) ? 0 : 1;
+ nes_debug(NES_DBG_INIT, "Initial Software Reset = 0x%08X, port_count=%u\n",
+ u32temp, port_count);
+ if (*OneG_Mode)
+ nes_debug(NES_DBG_INIT, "Running in 1G mode.\n");
+ u32temp &= 0xff00ffc0;
+ switch (port_count) {
+ case 1:
+ u32temp |= 0x00ee0000;
+ break;
+ case 2:
+ u32temp |= 0x00cc0000;
+ break;
+ case 4:
+ u32temp |= 0x00000000;
+ break;
+ default:
+ return 0;
+ }
+
+ /* check and do full reset if needed */
+ if (nes_read_indexed(nesdev, NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8))) {
+ nes_debug(NES_DBG_INIT, "Issuing Full Soft reset = 0x%08X\n", u32temp | 0xd);
+ nes_write32(nesdev->regs+NES_SOFTWARE_RESET, u32temp | 0xd);
+
+ i = 0;
+ while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
+ mdelay(1);
+ if (i >= 10000) {
+ nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
+ return 0;
+ }
+ }
+
+ /* port reset */
+ switch (port_count) {
+ case 1:
+ u32temp |= 0x00ee0010;
+ break;
+ case 2:
+ u32temp |= 0x00cc0030;
+ break;
+ case 4:
+ u32temp |= 0x00000030;
+ break;
+ }
+
+ nes_debug(NES_DBG_INIT, "Issuing Port Soft reset = 0x%08X\n", u32temp | 0xd);
+ nes_write32(nesdev->regs+NES_SOFTWARE_RESET, u32temp | 0xd);
+
+ i = 0;
+ while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
+ mdelay(1);
+ if (i >= 10000) {
+ nes_debug(NES_DBG_INIT, "Did not see port soft reset done.\n");
+ return 0;
+ }
+
+ /* serdes 0 */
+ i = 0;
+ while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
+ & 0x0000000f)) != 0x0000000f) && i++ < 5000)
+ mdelay(1);
+ if (i >= 5000) {
+ nes_debug(NES_DBG_INIT, "Serdes 0 not ready, status=%x\n", u32temp);
+ return 0;
+ }
+
+ /* serdes 1 */
+ if (port_count > 1) {
+ i = 0;
+ while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
+ & 0x0000000f)) != 0x0000000f) && i++ < 5000)
+ mdelay(1);
+ if (i >= 5000) {
+ nes_debug(NES_DBG_INIT, "Serdes 1 not ready, status=%x\n", u32temp);
+ return 0;
+ }
+ }
+
+ i = 0;
+ while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
+ mdelay(1);
+ if (i >= 10000) {
+ printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
+ nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
+ return 0;
+ }
+
+ return port_count;
+}
+
+
+/**
+ * nes_init_serdes - bring up the Ethernet SERDES lanes for the detected
+ * hardware revision, port count, and link mode
+ */
+int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, u8 OneG_Mode)
+{
+ int i;
+ u32 u32temp;
+
+ if (hw_rev != NE020_REV) {
+ /* init serdes 0 */
+
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
+ if (!OneG_Mode)
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
+ if (port_count > 1) {
+ /* init serdes 1 */
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF);
+ if (!OneG_Mode)
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000);
+ }
+ } else {
+ /* init serdes 0 */
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
+ i = 0;
+ while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
+ & 0x0000000f)) != 0x0000000f) && i++ < 5000)
+ mdelay(1);
+ if (i >= 5000) {
+ nes_debug(NES_DBG_PHY, "Init: serdes 0 not ready, status=%x\n", u32temp);
+ return 1;
+ }
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x000bdef7);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE0, 0x9ce73000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE0, 0x0ff00000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET0, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS0, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0, 0x00000000);
+ if (OneG_Mode)
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0182222);
+ else
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0042222);
+
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000ff);
+ if (port_count > 1) {
+ /* init serdes 1 */
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x00000048);
+ i = 0;
+ while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
+ & 0x0000000f)) != 0x0000000f) && (i++ < 5000))
+ mdelay(1);
+ if (i >= 5000) {
+ printk("%s: Init: serdes 1 not ready, status=%x\n", __FUNCTION__, u32temp);
+ /* return 1; */
+ }
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x000bdef7);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE1, 0x9ce73000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE1, 0x0ff00000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET1, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS1, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL1, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL1, 0xf0002222);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000ff);
+ }
+ }
+ return 0;
+}
+
+
+/**
+ * nes_init_csr_ne020
+ * Initialize registers for ne020 hardware
+ */
+void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count)
+{
+ u32 u32temp;
+
+ nes_debug(NES_DBG_INIT, "port_count=%d\n", port_count);
+
+ nes_write_indexed(nesdev, 0x000001E4, 0x00000007);
+ /* nes_write_indexed(nesdev, 0x000001E8, 0x000208C4); */
+ nes_write_indexed(nesdev, 0x000001E8, 0x00020874);
+ nes_write_indexed(nesdev, 0x000001D8, 0x00048002);
+ /* nes_write_indexed(nesdev, 0x000001D8, 0x0004B002); */
+ nes_write_indexed(nesdev, 0x000001FC, 0x00050005);
+ nes_write_indexed(nesdev, 0x00000600, 0x55555555);
+ nes_write_indexed(nesdev, 0x00000604, 0x55555555);
+
+ /* TODO: move these MAC register settings to NIC bringup */
+ nes_write_indexed(nesdev, 0x00002000, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002004, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002008, 0x0000FFFF);
+ nes_write_indexed(nesdev, 0x0000200C, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002010, 0x000003c1);
+ nes_write_indexed(nesdev, 0x0000201C, 0x75345678);
+ if (port_count > 1) {
+ nes_write_indexed(nesdev, 0x00002200, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002204, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002208, 0x0000FFFF);
+ nes_write_indexed(nesdev, 0x0000220C, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002210, 0x000003c1);
+ nes_write_indexed(nesdev, 0x0000221C, 0x75345678);
+ nes_write_indexed(nesdev, 0x00000908, 0x20000001);
+ }
+ if (port_count > 2) {
+ nes_write_indexed(nesdev, 0x00002400, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002404, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002408, 0x0000FFFF);
+ nes_write_indexed(nesdev, 0x0000240C, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002410, 0x000003c1);
+ nes_write_indexed(nesdev, 0x0000241C, 0x75345678);
+ nes_write_indexed(nesdev, 0x00000910, 0x20000001);
+
+ nes_write_indexed(nesdev, 0x00002600, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002604, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002608, 0x0000FFFF);
+ nes_write_indexed(nesdev, 0x0000260C, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002610, 0x000003c1);
+ nes_write_indexed(nesdev, 0x0000261C, 0x75345678);
+ nes_write_indexed(nesdev, 0x00000918, 0x20000001);
+ }
+
+ nes_write_indexed(nesdev, 0x00005000, 0x00018000);
+ /* nes_write_indexed(nesdev, 0x00005000, 0x00010000); */
+ nes_write_indexed(nesdev, 0x00005004, 0x00020001);
+ nes_write_indexed(nesdev, 0x00005008, 0x1F1F1F1F);
+ nes_write_indexed(nesdev, 0x00005010, 0x1F1F1F1F);
+ nes_write_indexed(nesdev, 0x00005018, 0x1F1F1F1F);
+ nes_write_indexed(nesdev, 0x00005020, 0x1F1F1F1F);
+ nes_write_indexed(nesdev, 0x00006090, 0xFFFFFFFF);
+
+ /* TODO: move this to code, get from EEPROM */
+ nes_write_indexed(nesdev, 0x00000900, 0x20000001);
+ nes_write_indexed(nesdev, 0x000060C0, 0x0000028e);
+ nes_write_indexed(nesdev, 0x000060C8, 0x00000020);
+ nes_write_indexed(nesdev, 0x000001EC, 0x7b2625a0);
+ /* nes_write_indexed(nesdev, 0x000001EC, 0x5f2625a0); */
+
+ if (hw_rev != NE020_REV) {
+ u32temp = nes_read_indexed(nesdev, 0x000008e8);
+ u32temp |= 0x80000000;
+ nes_write_indexed(nesdev, 0x000008e8, u32temp);
+ u32temp = nes_read_indexed(nesdev, 0x000021f8);
+ u32temp &= 0x7fffffff;
+ u32temp |= 0x7fff0010;
+ nes_write_indexed(nesdev, 0x000021f8, u32temp);
+ }
+}
+
+
+/**
+ * nes_destroy_adapter - destroy the adapter structure
+ */
+void nes_destroy_adapter(struct nes_adapter *nesadapter)
+{
+ struct nes_adapter *tmp_adapter;
+
+ list_for_each_entry(tmp_adapter, &nes_adapter_list, list) {
+ nes_debug(NES_DBG_SHUTDOWN, "Nes Adapter list entry = 0x%p.\n",
+ tmp_adapter);
+ }
+
+ nesadapter->ref_count--;
+ if (!nesadapter->ref_count) {
+ if (nesadapter->hw_rev == NE020_REV) {
+ del_timer(&nesadapter->mh_timer);
+ }
+ del_timer(&nesadapter->lc_timer);
+
+ list_del(&nesadapter->list);
+ kfree(nesadapter);
+ }
+}
+
+
+/**
+ * nes_init_cqp - allocate and create the Control QP (CQP) together with its
+ * CCQ, CEQs, and AEQ
+ */
+int nes_init_cqp(struct nes_device *nesdev)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_cqp_qp_context *cqp_qp_context;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_hw_ceq *ceq;
+ struct nes_hw_ceq *nic_ceq;
+ struct nes_hw_aeq *aeq;
+ void *vmem;
+ dma_addr_t pmem;
+ u32 count=0;
+ u32 cqp_head;
+ u64 u64temp;
+ u32 u32temp;
+
+ /* allocate CQP memory */
+ /* Need to add max_cq to the aeq size once cq overflow checking is added back */
+ /* SQ is 512 byte aligned, others are 256 byte aligned */
+ nesdev->cqp_mem_size = 512 +
+ (sizeof(struct nes_hw_cqp_wqe) * NES_CQP_SQ_SIZE) +
+ (sizeof(struct nes_hw_cqe) * NES_CCQ_SIZE) +
+ max(((u32)sizeof(struct nes_hw_ceqe) * NES_CCEQ_SIZE), (u32)256) +
+ max(((u32)sizeof(struct nes_hw_ceqe) * NES_NIC_CEQ_SIZE), (u32)256) +
+ (sizeof(struct nes_hw_aeqe) * nesadapter->max_qp) +
+ sizeof(struct nes_hw_cqp_qp_context);
+
+ nesdev->cqp_vbase = pci_alloc_consistent(nesdev->pcidev, nesdev->cqp_mem_size,
+ &nesdev->cqp_pbase);
+ if (!nesdev->cqp_vbase) {
+ nes_debug(NES_DBG_INIT, "Unable to allocate memory for host descriptor rings\n");
+ return -ENOMEM;
+ }
+ memset(nesdev->cqp_vbase, 0, nesdev->cqp_mem_size);
+
+ /* Allocate twice as many CQP requests as the SQ size */
+ nesdev->nes_cqp_requests = kzalloc(sizeof(struct nes_cqp_request) *
+ 2 * NES_CQP_SQ_SIZE, GFP_KERNEL);
+ if (nesdev->nes_cqp_requests == NULL) {
+ nes_debug(NES_DBG_INIT, "Unable to allocate memory CQP request entries.\n");
+ pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp_vbase,
+ nesdev->cqp_pbase);
+ return -ENOMEM;
+ }
+
+ nes_debug(NES_DBG_INIT, "Allocated CQP structures at %p (phys = %016lX), size = %u.\n",
+ nesdev->cqp_vbase, (unsigned long)nesdev->cqp_pbase, nesdev->cqp_mem_size);
+
+ spin_lock_init(&nesdev->cqp.lock);
+ init_waitqueue_head(&nesdev->cqp.waitq);
+
+ /* Setup Various Structures */
+ vmem = (void *)(((unsigned long)nesdev->cqp_vbase + (512 - 1)) &
+ ~(unsigned long)(512 - 1));
+ pmem = (dma_addr_t)(((unsigned long long)nesdev->cqp_pbase + (512 - 1)) &
+ ~(unsigned long long)(512 - 1));
+
+ nesdev->cqp.sq_vbase = vmem;
+ nesdev->cqp.sq_pbase = pmem;
+ nesdev->cqp.sq_size = NES_CQP_SQ_SIZE;
+ nesdev->cqp.sq_head = 0;
+ nesdev->cqp.sq_tail = 0;
+ nesdev->cqp.qp_id = PCI_FUNC(nesdev->pcidev->devfn);
+
+ vmem += (sizeof(struct nes_hw_cqp_wqe) * nesdev->cqp.sq_size);
+ pmem += (sizeof(struct nes_hw_cqp_wqe) * nesdev->cqp.sq_size);
+
+ nesdev->ccq.cq_vbase = vmem;
+ nesdev->ccq.cq_pbase = pmem;
+ nesdev->ccq.cq_size = NES_CCQ_SIZE;
+ nesdev->ccq.cq_head = 0;
+ nesdev->ccq.ce_handler = nes_cqp_ce_handler;
+ nesdev->ccq.cq_number = PCI_FUNC(nesdev->pcidev->devfn);
+
+ vmem += (sizeof(struct nes_hw_cqe) * nesdev->ccq.cq_size);
+ pmem += (sizeof(struct nes_hw_cqe) * nesdev->ccq.cq_size);
+
+ nesdev->ceq_index = PCI_FUNC(nesdev->pcidev->devfn);
+ ceq = &nesadapter->ceq[nesdev->ceq_index];
+ ceq->ceq_vbase = vmem;
+ ceq->ceq_pbase = pmem;
+ ceq->ceq_size = NES_CCEQ_SIZE;
+ ceq->ceq_head = 0;
+
+ vmem += max(((u32)sizeof(struct nes_hw_ceqe) * ceq->ceq_size), (u32)256);
+ pmem += max(((u32)sizeof(struct nes_hw_ceqe) * ceq->ceq_size), (u32)256);
+
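+ /* NIC CEQs are offset by 8 from the protocol CEQ for the same PCI function */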
+ nesdev->nic_ceq_index = PCI_FUNC(nesdev->pcidev->devfn) + 8;
+ nic_ceq = &nesadapter->ceq[nesdev->nic_ceq_index];
+ nic_ceq->ceq_vbase = vmem;
+ nic_ceq->ceq_pbase = pmem;
+ nic_ceq->ceq_size = NES_NIC_CEQ_SIZE;
+ nic_ceq->ceq_head = 0;
+
+ vmem += max(((u32)sizeof(struct nes_hw_ceqe) * nic_ceq->ceq_size), (u32)256);
+ pmem += max(((u32)sizeof(struct nes_hw_ceqe) * nic_ceq->ceq_size), (u32)256);
+
+ aeq = &nesadapter->aeq[PCI_FUNC(nesdev->pcidev->devfn)];
+ aeq->aeq_vbase = vmem;
+ aeq->aeq_pbase = pmem;
+ aeq->aeq_size = nesadapter->max_qp;
+ aeq->aeq_head = 0;
+
+ /* Setup QP Context */
+ vmem += (sizeof(struct nes_hw_aeqe) * aeq->aeq_size);
+ pmem += (sizeof(struct nes_hw_aeqe) * aeq->aeq_size);
+
+ cqp_qp_context = vmem;
+ cqp_qp_context->context_words[0] =
+ cpu_to_le32((PCI_FUNC(nesdev->pcidev->devfn) << 12) + (2 << 10));
+ cqp_qp_context->context_words[1] = 0;
+ cqp_qp_context->context_words[2] = cpu_to_le32((u32)nesdev->cqp.sq_pbase);
+ cqp_qp_context->context_words[3] = cpu_to_le32(((u64)nesdev->cqp.sq_pbase) >> 32);
+
+
+ /* Write the address to Create CQP */
+ if (sizeof(dma_addr_t) > 4) {
+ nes_write_indexed(nesdev,
+ NES_IDX_CREATE_CQP_HIGH + (PCI_FUNC(nesdev->pcidev->devfn) * 8),
+ ((u64)pmem) >> 32);
+ } else {
+ nes_write_indexed(nesdev,
+ NES_IDX_CREATE_CQP_HIGH + (PCI_FUNC(nesdev->pcidev->devfn) * 8), 0);
+ }
+ nes_write_indexed(nesdev,
+ NES_IDX_CREATE_CQP_LOW + (PCI_FUNC(nesdev->pcidev->devfn) * 8),
+ (u32)pmem);
+
+ INIT_LIST_HEAD(&nesdev->cqp_avail_reqs);
+ INIT_LIST_HEAD(&nesdev->cqp_pending_reqs);
+
+ for (count = 0; count < 2*NES_CQP_SQ_SIZE; count++) {
+ init_waitqueue_head(&nesdev->nes_cqp_requests[count].waitq);
+ list_add_tail(&nesdev->nes_cqp_requests[count].list, &nesdev->cqp_avail_reqs);
+ }
+
+ /* Write Create CCQ WQE */
+ cqp_head = nesdev->cqp.sq_head++;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ (NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
+ NES_CQP_CQ_CHK_OVERFLOW | ((u32)nesdev->ccq.cq_size << 16)));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
+ (nesdev->ccq.cq_number |
+ ((u32)nesdev->ceq_index << 16)));
+ u64temp = (u64)nesdev->ccq.cq_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
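+ /* The CCQ's kernel address is stored in the CQ context words shifted right
+ * by one bit; nes_process_ceq shifts it back to recover the nes_hw_cq pointer. */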
+ u64temp = (unsigned long)&nesdev->ccq;
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] =
+ cpu_to_le32((u32)(u64temp >> 1));
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
+ cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;
+
+ /* Write Create CEQ WQE */
+ cqp_head = nesdev->cqp.sq_head++;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ (NES_CQP_CREATE_CEQ + ((u32)nesdev->ceq_index << 8)));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX, ceq->ceq_size);
+ u64temp = (u64)ceq->ceq_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
+
+ /* Write Create AEQ WQE */
+ cqp_head = nesdev->cqp.sq_head++;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ (NES_CQP_CREATE_AEQ + ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 8)));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_AEQ_WQE_ELEMENT_COUNT_IDX, aeq->aeq_size);
+ u64temp = (u64)aeq->aeq_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
+
+ /* Write Create NIC CEQ WQE */
+ cqp_head = nesdev->cqp.sq_head++;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ (NES_CQP_CREATE_CEQ + ((u32)nesdev->nic_ceq_index << 8)));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX, nic_ceq->ceq_size);
+ u64temp = (u64)nic_ceq->ceq_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
+
+ /* Poll until CCQP done */
+ count = 0;
+ do {
+ if (count++ > 1000) {
+ printk(KERN_ERR PFX "Error creating CQP\n");
+ pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size,
+ nesdev->cqp_vbase, nesdev->cqp_pbase);
+ return -1;
+ }
+ udelay(10);
+ } while (!(nes_read_indexed(nesdev,
+ NES_IDX_QP_CONTROL + (PCI_FUNC(nesdev->pcidev->devfn) * 8)) & (1 << 8)));
+
+ nes_debug(NES_DBG_INIT, "CQP Status = 0x%08X\n", nes_read_indexed(nesdev,
+ NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)));
+
+ u32temp = 0x04800000;
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, u32temp | nesdev->cqp.qp_id);
+
+ /* wait for the CCQ, CEQ, and AEQ to get created */
+ count = 0;
+ do {
+ if (count++ > 1000) {
+ printk(KERN_ERR PFX "Error creating CCQ, CEQ, and AEQ\n");
+ pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size,
+ nesdev->cqp_vbase, nesdev->cqp_pbase);
+ return -1;
+ }
+ udelay(10);
+ } while (((nes_read_indexed(nesdev,
+ NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)) & (15<<8)) != (15<<8)));
+
+ /* dump the QP status value */
+ nes_debug(NES_DBG_INIT, "QP Status = 0x%08X\n", nes_read_indexed(nesdev,
+ NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)));
+
+ nesdev->cqp.sq_tail++;
+
+ return 0;
+}
+
+
+/**
+ * nes_destroy_cqp - post destroy WQEs for the AEQ, CEQs, CCQ, and the CQP
+ * itself, then free the control structures
+ */
+int nes_destroy_cqp(struct nes_device *nesdev)
+{
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ u32 count = 0;
+ u32 cqp_head;
+ unsigned long flags;
+
+ do {
+ if (count++ > 1000)
+ break;
+ udelay(10);
+ } while (!(nesdev->cqp.sq_head == nesdev->cqp.sq_tail));
+
+ /* Reset CCQ */
+ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_RESET |
+ nesdev->ccq.cq_number);
+
+ /* Disable device interrupts */
+ nes_write32(nesdev->regs+NES_INT_MASK, 0x7fffffff);
+
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+
+ /* Destroy the AEQ */
+ cqp_head = nesdev->cqp.sq_head++;
+ nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_AEQ |
+ ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 8));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] = 0;
+
+ /* Destroy the NIC CEQ */
+ cqp_head = nesdev->cqp.sq_head++;
+ nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CEQ |
+ ((u32)nesdev->nic_ceq_index << 8));
+
+ /* Destroy the CEQ */
+ cqp_head = nesdev->cqp.sq_head++;
+ nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CEQ |
+ (nesdev->ceq_index << 8));
+
+ /* Destroy the CCQ */
+ cqp_head = nesdev->cqp.sq_head++;
+ nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CQ);
+ cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesdev->ccq.cq_number |
+ ((u32)nesdev->ceq_index << 16));
+
+ /* Destroy CQP */
+ cqp_head = nesdev->cqp.sq_head++;
+ nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_QP |
+ NES_CQP_QP_TYPE_CQP);
+ cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesdev->cqp.qp_id);
+
+ barrier();
+ /* Ring doorbell (5 WQEs) */
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x05800000 | nesdev->cqp.qp_id);
+
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+
+ /* wait for the CCQ, CEQ, and AEQ to get destroyed */
+ count = 0;
+ do {
+ if (count++ > 1000) {
+ printk(KERN_ERR PFX "Function%d: Error destroying CCQ, CEQ, and AEQ\n",
+ PCI_FUNC(nesdev->pcidev->devfn));
+ break;
+ }
+ udelay(10);
+ } while (((nes_read_indexed(nesdev,
+ NES_IDX_QP_CONTROL + (PCI_FUNC(nesdev->pcidev->devfn)*8)) & (15 << 8)) != 0));
+
+ /* dump the QP status value */
+ nes_debug(NES_DBG_SHUTDOWN, "Function%d: QP Status = 0x%08X\n",
+ PCI_FUNC(nesdev->pcidev->devfn),
+ nes_read_indexed(nesdev,
+ NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)));
+
+ kfree(nesdev->nes_cqp_requests);
+
+ /* Free the control structures */
+ pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp.sq_vbase,
+ nesdev->cqp.sq_pbase);
+
+ return 0;
+}
+
+
+/**
+ * nes_init_phy - initialize the 1G or 10G PHY attached to this MAC
+ */
+int nes_init_phy(struct nes_device *nesdev)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 counter = 0;
+ u32 mac_index = nesdev->mac_index;
+ u32 tx_config;
+ u16 phy_data;
+
+ if (nesadapter->OneG_Mode) {
+ nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
+ if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) {
+ printk(PFX "%s: Programming mdc config for 1G\n", __FUNCTION__);
+ tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
+ tx_config |= 0x04;
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
+ }
+
+ nes_read_1G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 1 phy address %u = 0x%X.\n",
+ nesadapter->phy_index[mac_index], phy_data);
+ nes_write_1G_phy_reg(nesdev, 23, nesadapter->phy_index[mac_index], 0xb000);
+
+ /* Reset the PHY */
+ nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], 0x8000);
+ udelay(100);
+ counter = 0;
+ do {
+ nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data);
+ if (counter++ > 100) break;
+ } while (phy_data & 0x8000);
+
+ /* Setting no phy loopback */
+ phy_data &= 0xbfff;
+ phy_data |= 0x1140;
+ nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data);
+ nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data);
+
+ nes_read_1G_phy_reg(nesdev, 0x17, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x17 = 0x%X.\n", phy_data);
+
+ nes_read_1G_phy_reg(nesdev, 0x1e, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x1e = 0x%X.\n", phy_data);
+
+ /* Setting the interrupt mask */
+ nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data);
+ nes_write_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], 0xffee);
+
+ nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data);
+
+ /* turning on flow control */
+ nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data);
+ nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index],
+ (phy_data & ~(0x03E0)) | 0xc00);
+ /* nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index],
+ phy_data | 0xc00); */
+ nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data);
+
+ nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data);
+ /* Clear Half duplex */
+ nes_write_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index],
+ phy_data & ~(0x0100));
+ nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data);
+
+ nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data);
+ nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data | 0x0300);
+ } else {
+ if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) {
+ /* setup 10G MDIO operation */
+ tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
+ tx_config |= 0x14;
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
+ }
+ }
+ return 0;
+}
+
+
+/**
+ * nes_replenish_nic_rq - refill the NIC receive queue with newly allocated
+ * skbs, ringing the doorbell as WQEs are posted; arms a retry timer when
+ * allocation fails
+ */
+static void nes_replenish_nic_rq(struct nes_vnic *nesvnic)
+{
+ unsigned long flags;
+ dma_addr_t bus_address;
+ struct sk_buff *skb;
+ struct nes_hw_nic_rq_wqe *nic_rqe;
+ struct nes_hw_nic *nesnic;
+ struct nes_device *nesdev;
+ u32 rx_wqes_posted = 0;
+
+ nesnic = &nesvnic->nic;
+ nesdev = nesvnic->nesdev;
+ spin_lock_irqsave(&nesnic->rq_lock, flags);
+ if (nesnic->replenishing_rq != 0) {
+ if (((nesnic->rq_size-1) == atomic_read(&nesvnic->rx_skbs_needed)) &&
+ (atomic_read(&nesvnic->rx_skb_timer_running) == 0)) {
+ atomic_set(&nesvnic->rx_skb_timer_running, 1);
+ spin_unlock_irqrestore(&nesnic->rq_lock, flags);
+ nesvnic->rq_wqes_timer.expires = jiffies + (HZ/2); /* 1/2 second */
+ add_timer(&nesvnic->rq_wqes_timer);
+ } else
+ spin_unlock_irqrestore(&nesnic->rq_lock, flags);
+ return;
+ }
+ nesnic->replenishing_rq = 1;
+ spin_unlock_irqrestore(&nesnic->rq_lock, flags);
+ do {
+ skb = dev_alloc_skb(nesvnic->max_frame_size);
+ if (skb) {
+ skb->dev = nesvnic->netdev;
+
+ bus_address = pci_map_single(nesdev->pcidev,
+ skb->data, nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
+
+ nic_rqe = &nesnic->rq_vbase[nesvnic->nic.rq_head];
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] =
+ cpu_to_le32(nesvnic->max_frame_size);
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] =
+ cpu_to_le32((u32)bus_address);
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] =
+ cpu_to_le32((u32)((u64)bus_address >> 32));
+ nesnic->rx_skb[nesnic->rq_head] = skb;
+ nesnic->rq_head++;
+ nesnic->rq_head &= nesnic->rq_size - 1;
+ atomic_dec(&nesvnic->rx_skbs_needed);
+ barrier();
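+ /* the doorbell WQE count field (bits 31:24) holds at most 255, so ring
+ * it in batches while replenishing */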
+ if (++rx_wqes_posted == 255) {
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesnic->qp_id);
+ rx_wqes_posted = 0;
+ }
+ } else {
+ spin_lock_irqsave(&nesnic->rq_lock, flags);
+ if (((nesnic->rq_size-1) == atomic_read(&nesvnic->rx_skbs_needed)) &&
+ (atomic_read(&nesvnic->rx_skb_timer_running) == 0)) {
+ atomic_set(&nesvnic->rx_skb_timer_running, 1);
+ spin_unlock_irqrestore(&nesnic->rq_lock, flags);
+ nesvnic->rq_wqes_timer.expires = jiffies + (HZ/2); /* 1/2 second */
+ add_timer(&nesvnic->rq_wqes_timer);
+ } else
+ spin_unlock_irqrestore(&nesnic->rq_lock, flags);
+ break;
+ }
+ } while (atomic_read(&nesvnic->rx_skbs_needed));
+ barrier();
+ if (rx_wqes_posted)
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesnic->qp_id);
+ nesnic->replenishing_rq = 0;
+}
+
+
+/**
+ * nes_rq_wqes_timeout - timer handler that retries RQ replenishment after an
+ * earlier skb allocation failure
+ */
+static void nes_rq_wqes_timeout(unsigned long parm)
+{
+ struct nes_vnic *nesvnic = (struct nes_vnic *)parm;
+ printk("%s: Timer fired.\n", __FUNCTION__);
+ atomic_set(&nesvnic->rx_skb_timer_running, 0);
+ if (atomic_read(&nesvnic->rx_skbs_needed))
+ nes_replenish_nic_rq(nesvnic);
+}
+
+
+/**
+ * nes_init_nic_qp - allocate and create the NIC QP and CQ for the given
+ * netdev, then populate its receive queue
+ */
+int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
+{
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_hw_nic_sq_wqe *nic_sqe;
+ struct nes_hw_nic_qp_context *nic_context;
+ struct sk_buff *skb;
+ struct nes_hw_nic_rq_wqe *nic_rqe;
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ unsigned long flags;
+ void *vmem;
+ dma_addr_t pmem;
+ u64 u64temp;
+ int ret;
+ u32 cqp_head;
+ u32 counter;
+ u32 wqe_count;
+ u8 jumbomode=0;
+
+ /* Allocate fragment, SQ, RQ, and CQ; Reuse CEQ based on the PCI function */
+ nesvnic->nic_mem_size = 256 +
+ (NES_NIC_WQ_SIZE * sizeof(struct nes_first_frag)) +
+ (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe)) +
+ (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe)) +
+ (NES_NIC_WQ_SIZE * 2 * sizeof(struct nes_hw_nic_cqe)) +
+ sizeof(struct nes_hw_nic_qp_context);
+
+ nesvnic->nic_vbase = pci_alloc_consistent(nesdev->pcidev, nesvnic->nic_mem_size,
+ &nesvnic->nic_pbase);
+ if (!nesvnic->nic_vbase) {
+ nes_debug(NES_DBG_INIT, "Unable to allocate memory for NIC host descriptor rings\n");
+ return -ENOMEM;
+ }
+ memset(nesvnic->nic_vbase, 0, nesvnic->nic_mem_size);
+ nes_debug(NES_DBG_INIT, "Allocated NIC QP structures at %p (phys = %016lX), size = %u.\n",
+ nesvnic->nic_vbase, (unsigned long)nesvnic->nic_pbase, nesvnic->nic_mem_size);
+
+ vmem = (void *)(((unsigned long)nesvnic->nic_vbase + (256 - 1)) &
+ ~(unsigned long)(256 - 1));
+ pmem = (dma_addr_t)(((unsigned long long)nesvnic->nic_pbase + (256 - 1)) &
+ ~(unsigned long long)(256 - 1));
+
+ /* Setup the first Fragment buffers */
+ nesvnic->nic.first_frag_vbase = vmem;
+
+ for (counter = 0; counter < NES_NIC_WQ_SIZE; counter++) {
+ nesvnic->nic.frag_paddr[counter] = pmem;
+ pmem += sizeof(struct nes_first_frag);
+ }
+
+ /* setup the SQ */
+ vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_first_frag));
+
+ nesvnic->nic.sq_vbase = (void *)vmem;
+ nesvnic->nic.sq_pbase = pmem;
+ nesvnic->nic.sq_head = 0;
+ nesvnic->nic.sq_tail = 0;
+ nesvnic->nic.sq_size = NES_NIC_WQ_SIZE;
+ for (counter = 0; counter < NES_NIC_WQ_SIZE; counter++) {
+ nic_sqe = &nesvnic->nic.sq_vbase[counter];
+ nic_sqe->wqe_words[NES_NIC_SQ_WQE_MISC_IDX] =
+ cpu_to_le32(NES_NIC_SQ_WQE_DISABLE_CHKSUM |
+ NES_NIC_SQ_WQE_COMPLETION);
+ nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX] =
+ cpu_to_le32((u32)NES_FIRST_FRAG_SIZE << 16);
+ nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX] =
+ cpu_to_le32((u32)nesvnic->nic.frag_paddr[counter]);
+ nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX] =
+ cpu_to_le32((u32)((u64)nesvnic->nic.frag_paddr[counter] >> 32));
+ }
+
+ nesvnic->get_cqp_request = nes_get_cqp_request;
+ nesvnic->post_cqp_request = nes_post_cqp_request;
+ nesvnic->mcrq_mcast_filter = NULL;
+
+ spin_lock_init(&nesvnic->nic.sq_lock);
+ spin_lock_init(&nesvnic->nic.rq_lock);
+
+ /* setup the RQ */
+ vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe));
+ pmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe));
+
+
+ nesvnic->nic.rq_vbase = vmem;
+ nesvnic->nic.rq_pbase = pmem;
+ nesvnic->nic.rq_head = 0;
+ nesvnic->nic.rq_tail = 0;
+ nesvnic->nic.rq_size = NES_NIC_WQ_SIZE;
+
+ /* setup the CQ */
+ vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe));
+ pmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe));
+
+ if (nesdev->nesadapter->netdev_count > 2)
+ nesvnic->mcrq_qp_id = nesvnic->nic_index + 32;
+ else
+ nesvnic->mcrq_qp_id = nesvnic->nic.qp_id + 4;
+
+ nesvnic->nic_cq.cq_vbase = vmem;
+ nesvnic->nic_cq.cq_pbase = pmem;
+ nesvnic->nic_cq.cq_head = 0;
+ nesvnic->nic_cq.cq_size = NES_NIC_WQ_SIZE * 2;
+
+ nesvnic->nic_cq.ce_handler = nes_nic_napi_ce_handler;
+
+ /* Send CreateCQ request to CQP */
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ cqp_head = nesdev->cqp.sq_head;
+
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
+ NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
+ ((u32)nesvnic->nic_cq.cq_size << 16));
+ cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(
+ nesvnic->nic_cq.cq_number | ((u32)nesdev->nic_ceq_index << 16));
+ u64temp = (u64)nesvnic->nic_cq.cq_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
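+ /* the CQ context pointer is stored shifted right by one, as for the CCQ above */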
+ u64temp = (unsigned long)&nesvnic->nic_cq;
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp >> 1));
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
+ cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;
+ if (++cqp_head >= nesdev->cqp.sq_size)
+ cqp_head = 0;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+
+ /* Send CreateQP request to CQP */
+ nic_context = (void *)(&nesvnic->nic_cq.cq_vbase[nesvnic->nic_cq.cq_size]);
+ nic_context->context_words[NES_NIC_CTX_MISC_IDX] =
+ cpu_to_le32((u32)NES_NIC_CTX_SIZE |
+ ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 12));
+ nes_debug(NES_DBG_INIT, "RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x%08X, RX_WINDOW_BUFFER_SIZE = 0x%08X\n",
+ nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE),
+ nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE));
+ if (nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE) != 0) {
+ nic_context->context_words[NES_NIC_CTX_MISC_IDX] |= cpu_to_le32(NES_NIC_BACK_STORE);
+ }
+
+ u64temp = (u64)nesvnic->nic.sq_pbase;
+ nic_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
+ nic_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
+ u64temp = (u64)nesvnic->nic.rq_pbase;
+ nic_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
+ nic_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
+
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP |
+ NES_CQP_QP_TYPE_NIC);
+ cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesvnic->nic.qp_id);
+ u64temp = (u64)nesvnic->nic_cq.cq_pbase +
+ (nesvnic->nic_cq.cq_size * sizeof(struct nes_hw_nic_cqe));
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
+
+ if (++cqp_head >= nesdev->cqp.sq_size)
+ cqp_head = 0;
+ nesdev->cqp.sq_head = cqp_head;
+
+ barrier();
+
+ /* Ring doorbell (2 WQEs) */
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);
+
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ nes_debug(NES_DBG_INIT, "Waiting for create NIC QP%u to complete.\n",
+ nesvnic->nic.qp_id);
+
+ ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_INIT, "Create NIC QP%u completed, wait_event_timeout ret = %u.\n",
+ nesvnic->nic.qp_id, ret);
+ if (!ret) {
+ nes_debug(NES_DBG_INIT, "NIC QP%u create timeout expired\n", nesvnic->nic.qp_id);
+ pci_free_consistent(nesdev->pcidev, nesvnic->nic_mem_size, nesvnic->nic_vbase,
+ nesvnic->nic_pbase);
+ return -EIO;
+ }
+
+ /* Populate the RQ */
+ for (counter = 0; counter < (NES_NIC_WQ_SIZE - 1); counter++) {
+ skb = dev_alloc_skb(nesvnic->max_frame_size);
+ if (!skb) {
+ nes_debug(NES_DBG_INIT, "%s: out of memory for receive skb\n", netdev->name);
+
+ nes_destroy_nic_qp(nesvnic);
+ return -ENOMEM;
+ }
+
+ skb->dev = netdev;
+
+ pmem = pci_map_single(nesdev->pcidev, skb->data,
+ nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
+
+ nic_rqe = &nesvnic->nic.rq_vbase[counter];
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32(nesvnic->max_frame_size);
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem);
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32));
+ nesvnic->nic.rx_skb[counter] = skb;
+ }
+
+ wqe_count = NES_NIC_WQ_SIZE - 1;
+ nesvnic->nic.rq_head = wqe_count;
+ barrier();
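+ /* ring the doorbell in batches of at most 255 WQEs (the count field limit) */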
+ do {
+ counter = min(wqe_count, ((u32)255));
+ wqe_count -= counter;
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter << 24) | nesvnic->nic.qp_id);
+ } while (wqe_count);
+ init_timer(&nesvnic->rq_wqes_timer);
+ nesvnic->rq_wqes_timer.function = nes_rq_wqes_timeout;
+ nesvnic->rq_wqes_timer.data = (unsigned long)nesvnic;
+ nes_debug(NES_DBG_INIT, "NAPI support Enabled\n");
+
+ if (nesdev->nesadapter->et_use_adaptive_rx_coalesce) {
+ nes_nic_init_timer(nesdev);
+ if (netdev->mtu > 1500)
+ jumbomode = 1;
+ nes_nic_init_timer_defaults(nesdev, jumbomode);
+ }
+
+ return 0;
+}
+
+
+/**
+ * nes_destroy_nic_qp - free outstanding receive buffers, then destroy the
+ * NIC QP and CQ
+ */
+void nes_destroy_nic_qp(struct nes_vnic *nesvnic)
+{
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_hw_nic_rq_wqe *nic_rqe;
+ u64 wqe_frag;
+ u32 cqp_head;
+ unsigned long flags;
+ int ret;
+
+ /* Free remaining NIC receive buffers */
+ while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) {
+ nic_rqe = &nesvnic->nic.rq_vbase[nesvnic->nic.rq_tail];
+ wqe_frag = (u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]);
+ wqe_frag |= ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32;
+ pci_unmap_single(nesdev->pcidev, (dma_addr_t)wqe_frag,
+ nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(nesvnic->nic.rx_skb[nesvnic->nic.rq_tail++]);
+ nesvnic->nic.rq_tail &= (nesvnic->nic.rq_size - 1);
+ }
+
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+
+ /* Destroy NIC QP */
+ cqp_head = nesdev->cqp.sq_head;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ (NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_NIC));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
+ nesvnic->nic.qp_id);
+
+ if (++cqp_head >= nesdev->cqp.sq_size)
+ cqp_head = 0;
+
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+
+ /* Destroy NIC CQ */
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ (NES_CQP_DESTROY_CQ | ((u32)nesvnic->nic_cq.cq_size << 16)));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
+ (nesvnic->nic_cq.cq_number | ((u32)nesdev->nic_ceq_index << 16)));
+
+ if (++cqp_head >= nesdev->cqp.sq_size)
+ cqp_head = 0;
+
+ nesdev->cqp.sq_head = cqp_head;
+ barrier();
+
+ /* Ring doorbell (2 WQEs) */
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);
+
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ nes_debug(NES_DBG_SHUTDOWN, "Waiting for CQP, cqp_head=%u, cqp.sq_head=%u,"
+ " cqp.sq_tail=%u, cqp.sq_size=%u\n",
+ cqp_head, nesdev->cqp.sq_head,
+ nesdev->cqp.sq_tail, nesdev->cqp.sq_size);
+
+ ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
+ NES_EVENT_TIMEOUT);
+
+ nes_debug(NES_DBG_SHUTDOWN, "Destroy NIC QP returned, wait_event_timeout ret = %u, cqp_head=%u,"
+ " cqp.sq_head=%u, cqp.sq_tail=%u\n",
+ ret, cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail);
+ if (!ret) {
+ nes_debug(NES_DBG_SHUTDOWN, "NIC QP%u destroy timeout expired\n",
+ nesvnic->nic.qp_id);
+ }
+
+ pci_free_consistent(nesdev->pcidev, nesvnic->nic_mem_size, nesvnic->nic_vbase,
+ nesvnic->nic_pbase);
+}
+
+/**
+ * nes_napi_isr - handle NIC-only completion interrupts directly; returns 1
+ * if processed here, 0 if the DPC should run
+ */
+int nes_napi_isr(struct nes_device *nesdev)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 int_stat;
+
+ if (nesdev->napi_isr_ran) {
+ /* interrupt status has already been read in ISR */
+ int_stat = nesdev->int_stat;
+ } else {
+ int_stat = nes_read32(nesdev->regs + NES_INT_STAT);
+ nesdev->int_stat = int_stat;
+ nesdev->napi_isr_ran = 1;
+ }
+
+ int_stat &= nesdev->int_req;
+ /* if only NIC completion interrupts are pending, handle them here; otherwise wait for the DPC */
+ if ((int_stat) && ((int_stat & 0x0000ff00) == int_stat)) {
+ nesdev->napi_isr_ran = 0;
+ nes_write32(nesdev->regs+NES_INT_STAT,
+ (int_stat &
+ ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3)));
+
+ /* Process the CEQs */
+ nes_process_ceq(nesdev, &nesdev->nesadapter->ceq[nesdev->nic_ceq_index]);
+
+ if (unlikely((((nesadapter->et_rx_coalesce_usecs_irq) &&
+ (!nesadapter->et_use_adaptive_rx_coalesce)) ||
+ ((nesadapter->et_use_adaptive_rx_coalesce) &&
+ (nesdev->deepcq_count > nesadapter->et_pkt_rate_low)))) ) {
+ if ((nesdev->int_req & NES_INT_TIMER) == 0) {
+ /* Enable Periodic timer interrupts */
+ nesdev->int_req |= NES_INT_TIMER;
+ /* ack any pending periodic timer interrupts so we don't get an immediate interrupt */
+ /* TODO: need to also ack other unused periodic timer values, get from nesadapter */
+ nes_write32(nesdev->regs+NES_TIMER_STAT,
+ nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req));
+ nes_write32(nesdev->regs+NES_INTF_INT_MASK,
+ ~(nesdev->intf_int_req | NES_INTF_PERIODIC_TIMER));
+ }
+
+ if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) {
+ nes_nic_init_timer(nesdev);
+ }
+ /* Enable interrupts, except CEQs */
+ nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req));
+ } else {
+ /* Enable interrupts, make sure timer is off */
+ nesdev->int_req &= ~NES_INT_TIMER;
+ nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
+ nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
+ nesadapter->tune_timer.timer_in_use_old = 0;
+ }
+ nesdev->deepcq_count = 0;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+
+/**
+ * nes_dpc - deferred interrupt handler that processes CEQ, AEQ, MAC, timer,
+ * and interface interrupts
+ */
+void nes_dpc(unsigned long param)
+{
+ struct nes_device *nesdev = (struct nes_device *)param;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 counter;
+ u32 loop_counter = 0;
+ u32 int_status_bit;
+ u32 int_stat;
+ u32 timer_stat;
+ u32 temp_int_stat;
+ u32 intf_int_stat;
+ u32 debug_error;
+ u32 processed_intf_int = 0;
+ u16 processed_timer_int = 0;
+ u16 completion_ints = 0;
+ u16 timer_ints = 0;
+
+ /* nes_debug(NES_DBG_ISR, "\n"); */
+
+ do {
+ timer_stat = 0;
+ if (nesdev->napi_isr_ran) {
+ nesdev->napi_isr_ran = 0;
+ int_stat = nesdev->int_stat;
+ } else
+ int_stat = nes_read32(nesdev->regs+NES_INT_STAT);
+ if (processed_intf_int != 0)
+ int_stat &= nesdev->int_req & ~NES_INT_INTF;
+ else
+ int_stat &= nesdev->int_req;
+ if (processed_timer_int == 0) {
+ processed_timer_int = 1;
+ if (int_stat & NES_INT_TIMER) {
+ timer_stat = nes_read32(nesdev->regs + NES_TIMER_STAT);
+ if ((timer_stat & nesdev->timer_int_req) == 0) {
+ int_stat &= ~NES_INT_TIMER;
+ }
+ }
+ } else {
+ int_stat &= ~NES_INT_TIMER;
+ }
+
+ if (int_stat) {
+ if (int_stat & ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|
+ NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3)) {
+ /* Ack the interrupts */
+ nes_write32(nesdev->regs+NES_INT_STAT,
+ (int_stat & ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|
+ NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3)));
+ }
+
+ temp_int_stat = int_stat;
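+ /* status bits 0-15 map to the 16 CEQs; dispatch each pending one */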
+ for (counter = 0, int_status_bit = 1; counter < 16; counter++) {
+ if (int_stat & int_status_bit) {
+ nes_process_ceq(nesdev, &nesadapter->ceq[counter]);
+ temp_int_stat &= ~int_status_bit;
+ completion_ints = 1;
+ }
+ if (!(temp_int_stat & 0x0000ffff))
+ break;
+ int_status_bit <<= 1;
+ }
+
+ /* Process the AEQ for this pci function */
+ int_status_bit = 1 << (16 + PCI_FUNC(nesdev->pcidev->devfn));
+ if (int_stat & int_status_bit) {
+ nes_process_aeq(nesdev, &nesadapter->aeq[PCI_FUNC(nesdev->pcidev->devfn)]);
+ }
+
+ /* Process the MAC interrupt for this pci function */
+ int_status_bit = 1 << (24 + nesdev->mac_index);
+ if (int_stat & int_status_bit) {
+ nes_process_mac_intr(nesdev, nesdev->mac_index);
+ }
+
+ if (int_stat & NES_INT_TIMER) {
+ if (timer_stat & nesdev->timer_int_req) {
+ nes_write32(nesdev->regs + NES_TIMER_STAT,
+ (timer_stat & nesdev->timer_int_req) |
+ ~(nesdev->nesadapter->timer_int_req));
+ timer_ints = 1;
+ }
+ }
+
+ if (int_stat & NES_INT_INTF) {
+ processed_intf_int = 1;
+ intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT);
+ intf_int_stat &= nesdev->intf_int_req;
+ if (NES_INTF_INT_CRITERR & intf_int_stat) {
+ debug_error = nes_read_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS);
+ printk(KERN_ERR PFX "Critical Error reported by device!!! 0x%02X\n",
+ (u16)debug_error);
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS,
+ 0x01010000 | (debug_error & 0x0000ffff));
+ /* BUG(); */
+ if (crit_err_count++ > 10)
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 1 << 0x17);
+ }
+ if (NES_INTF_INT_PCIERR & intf_int_stat) {
+ printk(KERN_ERR PFX "PCI Error reported by device!!!\n");
+ BUG();
+ }
+ if (NES_INTF_INT_AEQ_OFLOW & intf_int_stat) {
+ printk(KERN_ERR PFX "AEQ Overflow reported by device!!!\n");
+ BUG();
+ }
+ nes_write32(nesdev->regs+NES_INTF_INT_STAT, intf_int_stat);
+ }
+
+ if (int_stat & NES_INT_TSW) {
+ }
+ }
+ /* Clear the already-handled interface, timer, and MAC bits so they don't keep us in the loop */
+ int_stat &= ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|
+ NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3);
+ } while ((int_stat != 0) && (loop_counter++ < MAX_DPC_ITERATIONS));
+
+ if (timer_ints == 1) {
+ if ((nesadapter->et_rx_coalesce_usecs_irq) || (nesadapter->et_use_adaptive_rx_coalesce)) {
+ if (completion_ints == 0) {
+ nesdev->timer_only_int_count++;
+ if (nesdev->timer_only_int_count>=nesadapter->timer_int_limit) {
+ nesdev->timer_only_int_count = 0;
+ nesdev->int_req &= ~NES_INT_TIMER;
+ nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
+ nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
+ nesdev->nesadapter->tune_timer.timer_in_use_old = 0;
+ } else {
+ nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req));
+ }
+ } else {
+ if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) {
+ nes_nic_init_timer(nesdev);
+ }
+ nesdev->timer_only_int_count = 0;
+ nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req));
+ }
+ } else {
+ nesdev->timer_only_int_count = 0;
+ nesdev->int_req &= ~NES_INT_TIMER;
+ nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
+ nes_write32(nesdev->regs+NES_TIMER_STAT,
+ nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req));
+ nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
+ }
+ } else {
+ if ( (completion_ints == 1) &&
+ (((nesadapter->et_rx_coalesce_usecs_irq) &&
+ (!nesadapter->et_use_adaptive_rx_coalesce)) ||
+ ((nesdev->deepcq_count > nesadapter->et_pkt_rate_low) &&
+ (nesadapter->et_use_adaptive_rx_coalesce) )) ) {
+ /* nes_debug(NES_DBG_ISR, "Enabling periodic timer interrupt.\n" ); */
+ nesdev->timer_only_int_count = 0;
+ nesdev->int_req |= NES_INT_TIMER;
+ nes_write32(nesdev->regs+NES_TIMER_STAT,
+ nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req));
+ nes_write32(nesdev->regs+NES_INTF_INT_MASK,
+ ~(nesdev->intf_int_req | NES_INTF_PERIODIC_TIMER));
+ nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req));
+ } else {
+ nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
+ }
+ }
+ nesdev->deepcq_count = 0;
+}
+
+
+/**
+ * nes_process_ceq - walk the completion event queue and invoke each
+ * referenced CQ's ce_handler
+ */
+void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
+{
+ u64 u64temp;
+ struct nes_hw_cq *cq;
+ u32 head;
+ u32 ceq_size;
+
+ /* nes_debug(NES_DBG_CQ, "\n"); */
+ head = ceq->ceq_head;
+ ceq_size = ceq->ceq_size;
+
+ do {
+ if (le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX]) &
+ NES_CEQE_VALID) {
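+ /* the CEQE carries the CQ's kernel address shifted right by one bit;
+ * shift it back to recover the struct nes_hw_cq pointer */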
+ u64temp = (((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX])))<<32) |
+ ((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_LOW_IDX])));
+ u64temp <<= 1;
+ cq = *((struct nes_hw_cq **)&u64temp);
+ /* nes_debug(NES_DBG_CQ, "pCQ = %p\n", cq); */
+ barrier();
+ ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX] = 0;
+
+ /* call the event handler */
+ cq->ce_handler(nesdev, cq);
+
+ if (++head >= ceq_size)
+ head = 0;
+ } else {
+ break;
+ }
+
+ } while (1);
+
+ ceq->ceq_head = head;
+}
+
+
+/**
+ * nes_process_aeq - walk the asynchronous event queue, dispatching
+ * accelerated QP events to nes_process_iwarp_aeqe
+ */
+void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
+{
+// u64 u64temp;
+ u32 head;
+ u32 aeq_size;
+ u32 aeqe_misc;
+ u32 aeqe_cq_id;
+ struct nes_hw_aeqe volatile *aeqe;
+
+ head = aeq->aeq_head;
+ aeq_size = aeq->aeq_size;
+
+ do {
+ aeqe = &aeq->aeq_vbase[head];
+ if ((le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]) & NES_AEQE_VALID) == 0)
+ break;
+ aeqe_misc = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
+ aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
+ if (aeqe_misc & (NES_AEQE_QP|NES_AEQE_CQ)) {
+ if (aeqe_cq_id >= NES_FIRST_QPN) {
+ /* dealing with an accelerated QP related AE */
+// u64temp = (((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])))<<32) |
+// ((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX])));
+ nes_process_iwarp_aeqe(nesdev, (struct nes_hw_aeqe *)aeqe);
+ } else {
+ /* TODO: dealing with a CQP related AE */
+ nes_debug(NES_DBG_AEQ, "Processing CQP related AE, misc = 0x%04X\n",
+ (u16)(aeqe_misc >> 16));
+ }
+ }
+
+ aeqe->aeqe_words[NES_AEQE_MISC_IDX] = 0;
+
+ if (++head >= aeq_size)
+ head = 0;
+ } while (1);
+ aeq->aeq_head = head;
+}
+
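+/**
+ * nes_reset_link - reset the SERDES link logic for a MAC after repeated
+ * link-status interrupts (no-op on NE020 revision hardware)
+ */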
+static void nes_reset_link(struct nes_device *nesdev, u32 mac_index)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 reset_value;
+ u32 i=0;
+ u32 u32temp;
+
+ if (nesadapter->hw_rev == NE020_REV) {
+ return;
+ }
+ mh_detected++;
+
+ reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
+
+ if ((mac_index == 0) || ((mac_index == 1) && (nesadapter->OneG_Mode)))
+ reset_value |= 0x0000001d;
+ else
+ reset_value |= 0x0000002d;
+
+ if (4 <= (nesadapter->link_interrupt_count[mac_index] / ((u16)NES_MAX_LINK_INTERRUPTS))) {
+ if ((!nesadapter->OneG_Mode) && (nesadapter->port_count == 2)) {
+ nesadapter->link_interrupt_count[0] = 0;
+ nesadapter->link_interrupt_count[1] = 0;
+ u32temp = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
+ if (0x00000040 & u32temp)
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088);
+ else
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F0C8);
+
+ reset_value |= 0x0000003d;
+ }
+ nesadapter->link_interrupt_count[mac_index] = 0;
+ }
+
+ nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
+
+ while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
+ & 0x00000040) != 0x00000040) && (i++ < 5000));
+
+ if (0x0000003d == (reset_value & 0x0000003d)) {
+ u32 pcs_control_status0, pcs_control_status1;
+
+ for (i = 0; i < 10; i++) {
+ pcs_control_status0 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0);
+ pcs_control_status1 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
+ if (((0x0F000000 == (pcs_control_status0 & 0x0F000000))
+ && (pcs_control_status0 & 0x00100000))
+ || ((0x0F000000 == (pcs_control_status1 & 0x0F000000))
+ && (pcs_control_status1 & 0x00100000)))
+ continue;
+ else
+ break;
+ }
+ if (10 == i) {
+ u32temp = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
+ if (0x00000040 & u32temp)
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088);
+ else
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F0C8);
+
+ nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
+
+ while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET)
+ & 0x00000040) != 0x00000040) && (i++ < 5000));
+ }
+ }
+}
+
+/**
+ * nes_process_mac_intr - handle a MAC interrupt: determine PHY/PCS link
+ * state and update carrier and queue state on the affected netdevs
+ */
+void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
+{
+ unsigned long flags;
+ u32 pcs_control_status;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_vnic *nesvnic;
+ u32 mac_status;
+ u32 mac_index = nesdev->mac_index;
+ u32 u32temp;
+ u16 phy_data;
+ u16 temp_phy_data;
+
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ if (nesadapter->mac_sw_state[mac_number] != NES_MAC_SW_IDLE) {
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ return;
+ }
+ nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT;
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+
+ /* ack the MAC interrupt */
+ mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200));
+ /* Clear the interrupt */
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200), mac_status);
+
+ nes_debug(NES_DBG_PHY, "MAC%u interrupt status = 0x%X.\n", mac_number, mac_status);
+
+ if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) {
+ nesdev->link_status_interrupts++;
+ if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS))) {
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ nes_reset_link(nesdev, mac_index);
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ /* read the PHY interrupt status register */
+ if (nesadapter->OneG_Mode) {
+ do {
+ nes_read_1G_phy_reg(nesdev, 0x1a,
+ nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy%d data from register 0x1a = 0x%X.\n",
+ nesadapter->phy_index[mac_index], phy_data);
+ } while (phy_data&0x8000);
+
+ temp_phy_data = 0;
+ do {
+ nes_read_1G_phy_reg(nesdev, 0x11,
+ nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy%d data from register 0x11 = 0x%X.\n",
+ nesadapter->phy_index[mac_index], phy_data);
+ if (temp_phy_data == phy_data)
+ break;
+ temp_phy_data = phy_data;
+ } while (1);
+
+ nes_read_1G_phy_reg(nesdev, 0x1e,
+ nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy%d data from register 0x1e = 0x%X.\n",
+ nesadapter->phy_index[mac_index], phy_data);
+
+ nes_read_1G_phy_reg(nesdev, 1,
+ nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "1G phy%u data from register 1 = 0x%X\n",
+ nesadapter->phy_index[mac_index], phy_data);
+
+ if (temp_phy_data & 0x1000) {
+ nes_debug(NES_DBG_PHY, "The Link is up according to the PHY\n");
+ phy_data = 4;
+ } else {
+ nes_debug(NES_DBG_PHY, "The Link is down according to the PHY\n");
+ }
+ }
+ nes_debug(NES_DBG_PHY, "Eth SERDES Common Status: 0=0x%08X, 1=0x%08X\n",
+ nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0),
+ nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0+0x200));
+ pcs_control_status = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index&1)*0x200));
+ pcs_control_status = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index&1)*0x200));
+ nes_debug(NES_DBG_PHY, "PCS PHY Control/Status%u: 0x%08X\n",
+ mac_index, pcs_control_status);
+ if (nesadapter->OneG_Mode) {
+ u32temp = 0x01010000;
+ if (nesadapter->port_count > 2) {
+ u32temp |= 0x02020000;
+ }
+ if ((pcs_control_status & u32temp)!= u32temp) {
+ phy_data = 0;
+ nes_debug(NES_DBG_PHY, "PCS says the link is down\n");
+ }
+ } else if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) {
+ nes_read_10G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index]);
+ temp_phy_data = (u16)nes_read_indexed(nesdev,
+ NES_IDX_MAC_MDIO_CONTROL);
+ u32temp = 20;
+ do {
+ nes_read_10G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index]);
+ phy_data = (u16)nes_read_indexed(nesdev,
+ NES_IDX_MAC_MDIO_CONTROL);
+ if ((phy_data == temp_phy_data) || (!(--u32temp)))
+ break;
+ temp_phy_data = phy_data;
+ } while (1);
+ nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
+ __FUNCTION__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP");
+
+ } else {
+ phy_data = (0x0f0f0000 == (pcs_control_status & 0x0f1f0000)) ? 4 : 0;
+ }
+
+ if (phy_data & 0x0004) {
+ nesadapter->mac_link_down[mac_index] = 0;
+ list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+ nes_debug(NES_DBG_PHY, "The Link is UP!!. linkup was %d\n",
+ nesvnic->linkup);
+ if (nesvnic->linkup == 0) {
+ printk(PFX "The Link is now up for port %u, netdev %p.\n",
+ mac_index, nesvnic->netdev);
+ if (netif_queue_stopped(nesvnic->netdev))
+ netif_start_queue(nesvnic->netdev);
+ nesvnic->linkup = 1;
+ netif_carrier_on(nesvnic->netdev);
+ }
+ }
+ } else {
+ nesadapter->mac_link_down[mac_index] = 1;
+ list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+ nes_debug(NES_DBG_PHY, "The Link is Down!!. linkup was %d\n",
+ nesvnic->linkup);
+ if (nesvnic->linkup == 1) {
+ printk(PFX "The Link is now down for port %u, netdev %p.\n",
+ mac_index, nesvnic->netdev);
+ if (!(netif_queue_stopped(nesvnic->netdev)))
+ netif_stop_queue(nesvnic->netdev);
+ nesvnic->linkup = 0;
+ netif_carrier_off(nesvnic->netdev);
+ }
+ }
+ }
+ }
+
+ nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
+}
+
+
+
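+/**
+ * nes_nic_napi_ce_handler - schedule NAPI polling for the NIC CQ
+ */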
+void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
+{
+ struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
+
+ netif_rx_schedule(nesdev->netdev[nesvnic->netdev_index], &nesvnic->napi);
+}
+
+
+/* MAX_RQES_TO_PROCESS defines the maximum number of receive queue entries to
+ * complete before leaving nes_nic_ce_handler
+ */
+#define MAX_RQES_TO_PROCESS 384
+
+/**
+ * nes_nic_ce_handler - process NIC CQ entries, completing transmit skbs and
+ * delivering received packets to the stack
+ */
+void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
+{
+ u64 u64temp;
+ dma_addr_t bus_address;
+ struct nes_hw_nic *nesnic;
+ struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_nic_rq_wqe *nic_rqe;
+ struct nes_hw_nic_sq_wqe *nic_sqe;
+ struct sk_buff *skb;
+ struct sk_buff *rx_skb;
+ __le16 *wqe_fragment_length;
+ u32 head;
+ u32 cq_size;
+ u32 rx_pkt_size;
+ u32 cqe_count=0;
+ u32 cqe_errv;
+ u32 cqe_misc;
+ u16 wqe_fragment_index = 1; /* first fragment (0) is used by copy buffer */
+ u16 vlan_tag;
+ u16 pkt_type;
+ u16 rqes_processed = 0;
+ u8 sq_cqes = 0;
+
+ head = cq->cq_head;
+ cq_size = cq->cq_size;
+ cq->cqes_pending = 1;
+ do {
+ if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
+ NES_NIC_CQE_VALID) {
+ nesnic = &nesvnic->nic;
+ cqe_misc = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]);
+ if (cqe_misc & NES_NIC_CQE_SQ) {
+ sq_cqes++;
+ wqe_fragment_index = 1;
+ nic_sqe = &nesnic->sq_vbase[nesnic->sq_tail];
+ skb = nesnic->tx_skb[nesnic->sq_tail];
+ wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
+ /* bump past the vlan tag */
+ wqe_fragment_length++;
+ if (le16_to_cpu(wqe_fragment_length[wqe_fragment_index]) != 0) {
+ u64temp = (u64) le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+wqe_fragment_index*2]);
+ u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+wqe_fragment_index*2]))<<32;
+ bus_address = (dma_addr_t)u64temp;
+ if (test_and_clear_bit(nesnic->sq_tail, nesnic->first_frag_overflow)) {
+ pci_unmap_single(nesdev->pcidev,
+ bus_address,
+ le16_to_cpu(wqe_fragment_length[wqe_fragment_index++]),
+ PCI_DMA_TODEVICE);
+ }
+ for (; wqe_fragment_index < 5; wqe_fragment_index++) {
+ if (wqe_fragment_length[wqe_fragment_index]) {
+ u64temp = le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+wqe_fragment_index*2]);
+ u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+wqe_fragment_index*2]))<<32;
+ bus_address = (dma_addr_t)u64temp;
+ pci_unmap_page(nesdev->pcidev,
+ bus_address,
+ le16_to_cpu(wqe_fragment_length[wqe_fragment_index]),
+ PCI_DMA_TODEVICE);
+ } else
+ break;
+ }
+ if (skb)
+ dev_kfree_skb_any(skb);
+ }
+ nesnic->sq_tail++;
+ nesnic->sq_tail &= nesnic->sq_size-1;
+ if (sq_cqes > 128) {
+ barrier();
+ /* restart the queue if it had been stopped */
+ if (netif_queue_stopped(nesvnic->netdev))
+ netif_wake_queue(nesvnic->netdev);
+ sq_cqes = 0;
+ }
+ } else {
+ rqes_processed++;
+
+ cq->rx_cqes_completed++;
+ cq->rx_pkts_indicated++;
+ rx_pkt_size = cqe_misc & 0x0000ffff;
+ nic_rqe = &nesnic->rq_vbase[nesnic->rq_tail];
+ /* Get the skb */
+ rx_skb = nesnic->rx_skb[nesnic->rq_tail];
+ nic_rqe = &nesnic->rq_vbase[nesvnic->nic.rq_tail];
+ bus_address = (dma_addr_t)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]);
+ bus_address += ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32;
+ pci_unmap_single(nesdev->pcidev, bus_address,
+ nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
+ /* rx_skb->tail = rx_skb->data + rx_pkt_size; */
+ /* rx_skb->len = rx_pkt_size; */
+ rx_skb->len = 0; /* TODO: see if this is necessary */
+ skb_put(rx_skb, rx_pkt_size);
+ rx_skb->protocol = eth_type_trans(rx_skb, nesvnic->netdev);
+ nesnic->rq_tail++;
+ nesnic->rq_tail &= nesnic->rq_size - 1;
+
+ atomic_inc(&nesvnic->rx_skbs_needed);
+ if (atomic_read(&nesvnic->rx_skbs_needed) > (nesvnic->nic.rq_size>>1)) {
+ nes_write32(nesdev->regs+NES_CQE_ALLOC,
+ cq->cq_number | (cqe_count << 16));
+// nesadapter->tune_timer.cq_count += cqe_count;
+ nesdev->currcq_count += cqe_count;
+ cqe_count = 0;
+ nes_replenish_nic_rq(nesvnic);
+ }
+ pkt_type = (u16)(le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX]));
+ cqe_errv = (cqe_misc & NES_NIC_CQE_ERRV_MASK) >> NES_NIC_CQE_ERRV_SHIFT;
+ rx_skb->ip_summed = CHECKSUM_NONE;
+
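+ /* Apply the hardware checksum verification results: mark the skb
+ * CHECKSUM_UNNECESSARY only when no IP header, IPv4 checksum or TCP/UDP
+ * checksum errors were flagged and rx checksum offload is enabled. */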
+ if ((NES_PKT_TYPE_TCPV4_BITS == (pkt_type & NES_PKT_TYPE_TCPV4_MASK)) ||
+ (NES_PKT_TYPE_UDPV4_BITS == (pkt_type & NES_PKT_TYPE_UDPV4_MASK))) {
+ if ((cqe_errv &
+ (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_TCPUDP_CSUM_ERR |
+ NES_NIC_ERRV_BITS_IPH_ERR | NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) {
+ if (nesvnic->rx_checksum_disabled == 0) {
+ rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ } else
+ nes_debug(NES_DBG_CQ, "%s: unsuccessfully checksummed TCP or UDP packet."
+ " errv = 0x%X, pkt_type = 0x%X.\n",
+ nesvnic->netdev->name, cqe_errv, pkt_type);
+
+ } else if ((pkt_type & NES_PKT_TYPE_IPV4_MASK) == NES_PKT_TYPE_IPV4_BITS) {
+ if ((cqe_errv &
+ (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_IPH_ERR |
+ NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) {
+ if (nesvnic->rx_checksum_disabled == 0) {
+ rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* nes_debug(NES_DBG_CQ, "%s: Reporting successfully checksummed IPv4 packet.\n",
+ nesvnic->netdev->name); */
+ }
+ } else
+ nes_debug(NES_DBG_CQ, "%s: unsuccessfully checksummed TCP or UDP packet."
+ " errv = 0x%X, pkt_type = 0x%X.\n",
+ nesvnic->netdev->name, cqe_errv, pkt_type);
+ }
+ /* nes_debug(NES_DBG_CQ, "pkt_type=%x, APBVT_MASK=%x\n",
+ pkt_type, (pkt_type & NES_PKT_TYPE_APBVT_MASK)); */
+
+ if ((pkt_type & NES_PKT_TYPE_APBVT_MASK) == NES_PKT_TYPE_APBVT_BITS) {
+ nes_cm_recv(rx_skb, nesvnic->netdev);
+ } else {
+ if ((cqe_misc & NES_NIC_CQE_TAG_VALID) && (nesvnic->vlan_grp != NULL)) {
+ vlan_tag = (u16)(le32_to_cpu(
+ cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX])
+ >> 16);
+ nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
+ nesvnic->netdev->name, vlan_tag);
+ nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag);
+ } else {
+ nes_netif_rx(rx_skb);
+ }
+ }
+
+ nesvnic->netdev->last_rx = jiffies;
+ /* nesvnic->netstats.rx_packets++; */
+ /* nesvnic->netstats.rx_bytes += rx_pkt_size; */
+ }
+
+ cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX] = 0;
+ /* Accounting... */
+ cqe_count++;
+ if (++head >= cq_size)
+ head = 0;
+ if (cqe_count == 255) {
+ /* Replenish Nic CQ */
+ nes_write32(nesdev->regs+NES_CQE_ALLOC,
+ cq->cq_number | (cqe_count << 16));
+// nesdev->nesadapter->tune_timer.cq_count += cqe_count;
+ nesdev->currcq_count += cqe_count;
+ cqe_count = 0;
+ }
+
+ if (cq->rx_cqes_completed >= nesvnic->budget)
+ break;
+ } else {
+ cq->cqes_pending = 0;
+ break;
+ }
+
+ } while (1);
+
+ if (sq_cqes) {
+ barrier();
+ /* restart the queue if it had been stopped */
+ if (netif_queue_stopped(nesvnic->netdev))
+ netif_wake_queue(nesvnic->netdev);
+ }
+
+ cq->cq_head = head;
+ /* nes_debug(NES_DBG_CQ, "CQ%u Processed = %u cqes, new head = %u.\n",
+ cq->cq_number, cqe_count, cq->cq_head); */
+ cq->cqe_allocs_pending = cqe_count;
+ if (unlikely(nesadapter->et_use_adaptive_rx_coalesce))
+ {
+// nesdev->nesadapter->tune_timer.cq_count += cqe_count;
+ nesdev->currcq_count += cqe_count;
+ nes_nic_tune_timer(nesdev);
+ }
+ if (atomic_read(&nesvnic->rx_skbs_needed))
+ nes_replenish_nic_rq(nesvnic);
+ }
+
+
+/**
+ * nes_cqp_ce_handler - process CQP completions and post any pending CQP requests
+ */
+void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
+{
+ u64 u64temp;
+ unsigned long flags;
+ struct nes_hw_cqp *cqp = NULL;
+ struct nes_cqp_request *cqp_request;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ u32 head;
+ u32 cq_size;
+ u32 cqe_count=0;
+ u32 error_code;
+ /* u32 counter; */
+
+ head = cq->cq_head;
+ cq_size = cq->cq_size;
+
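+ /* Each valid CQE identifies the CQP and, through the WQE scratch words, the
+ * originating request: waiters are woken with the major/minor completion
+ * codes, callback-style requests have their callback invoked here. */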
+ do {
+ /* process the CQE */
+ /* nes_debug(NES_DBG_CQP, "head=%u cqe_words=%08X\n", head,
+ le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])); */
+
+ if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
+ u64temp = (((u64)(le32_to_cpu(cq->cq_vbase[head].
+ cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
+ ((u64)(le32_to_cpu(cq->cq_vbase[head].
+ cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX])));
+ cqp = *((struct nes_hw_cqp **)&u64temp);
+
+ error_code = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX]);
+ if (error_code) {
+ nes_debug(NES_DBG_CQP, "Bad Completion code for opcode 0x%02X from CQP,"
+ " Major/Minor codes = 0x%04X:%04X.\n",
+ le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])&0x3f,
+ (u16)(error_code >> 16),
+ (u16)error_code);
+ nes_debug(NES_DBG_CQP, "cqp: qp_id=%u, sq_head=%u, sq_tail=%u\n",
+ cqp->qp_id, cqp->sq_head, cqp->sq_tail);
+ }
+
+ u64temp = (((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail].
+ wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX])))<<32) |
+ ((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail].
+ wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX])));
+ cqp_request = *((struct nes_cqp_request **)&u64temp);
+ if (cqp_request) {
+ if (cqp_request->waiting) {
+ /* nes_debug(NES_DBG_CQP, "%s: Waking up requestor\n"); */
+ cqp_request->major_code = (u16)(error_code >> 16);
+ cqp_request->minor_code = (u16)error_code;
+ barrier();
+ cqp_request->request_done = 1;
+ wake_up(&cqp_request->waitq);
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n",
+ cqp_request,
+ le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f);
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ } else if (cqp_request->callback) {
+ /* Invoke the callback routine */
+ cqp_request->cqp_callback(nesdev, cqp_request);
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ } else {
+ nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n",
+ cqp_request,
+ le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f);
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ } else {
+ wake_up(&nesdev->cqp.waitq);
+ }
+
+ cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
+ nes_write32(nesdev->regs+NES_CQE_ALLOC, cq->cq_number | (1 << 16));
+ if (++cqp->sq_tail >= cqp->sq_size)
+ cqp->sq_tail = 0;
+
+ /* Accounting... */
+ cqe_count++;
+ if (++head >= cq_size)
+ head = 0;
+ } else {
+ break;
+ }
+ } while (1);
+ cq->cq_head = head;
+
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
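+ /* With the CQP lock held, move queued requests from the pending list onto
+ * the CQP SQ while ring space remains, ringing the doorbell for each one. */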
+ while ((!list_empty(&nesdev->cqp_pending_reqs)) &&
+ ((((nesdev->cqp.sq_tail+nesdev->cqp.sq_size)-nesdev->cqp.sq_head) &
+ (nesdev->cqp.sq_size - 1)) != 1)) {
+ cqp_request = list_entry(nesdev->cqp_pending_reqs.next,
+ struct nes_cqp_request, list);
+ list_del_init(&cqp_request->list);
+ head = nesdev->cqp.sq_head++;
+ nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
+ cqp_wqe = &nesdev->cqp.sq_vbase[head];
+ memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe));
+ barrier();
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] =
+ cpu_to_le32((u32)((unsigned long)cqp_request));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] =
+ cpu_to_le32((u32)(upper_32_bits((unsigned long)cqp_request)));
+ nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) put on CQPs SQ wqe%u.\n",
+ cqp_request, le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f, head);
+ /* Ring doorbell (1 WQE) */
+ barrier();
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x01800000 | nesdev->cqp.qp_id);
+ }
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+
+ /* Arm the CCQ */
+ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
+ cq->cq_number);
+ nes_read32(nesdev->regs+NES_CQE_ALLOC);
+}
+
+
+/**
+ * nes_process_iwarp_aeqe - process an entry from the asynchronous event queue
+ */
+void nes_process_iwarp_aeqe(struct nes_device *nesdev, struct nes_hw_aeqe *aeqe)
+{
+ u64 context;
+ u64 aeqe_context = 0;
+ unsigned long flags;
+ struct nes_qp *nesqp;
+ int resource_allocated;
+ /* struct iw_cm_id *cm_id; */
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct ib_event ibevent;
+ /* struct iw_cm_event cm_event; */
+ u32 aeq_info;
+ u32 next_iwarp_state = 0;
+ u16 async_event_id;
+ u8 tcp_state;
+ u8 iwarp_state;
+
+ nes_debug(NES_DBG_AEQ, "\n");
+ aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
+ if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) {
+ context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
+ context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
+ } else {
+ aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
+ aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
+ context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
+ aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
+ BUG_ON(!context);
+ }
+
+ async_event_id = (u16)aeq_info;
+ tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
+ iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
+ nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p,"
+ " Tcp state = %s, iWARP state = %s\n",
+ async_event_id,
+ le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
+ nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
+
+
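+ /* Dispatch on the asynchronous event id: most events update the QP's cached
+ * hardware state and, where needed, report an IB event to the consumer
+ * and/or kick the connection manager to start a disconnect. */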
+ switch (async_event_id) {
+ case NES_AEQE_AEID_LLP_FIN_RECEIVED:
+ nesqp = *((struct nes_qp **)&context);
+ if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
+ nesqp->cm_id->add_ref(nesqp->cm_id);
+ nes_add_ref(&nesqp->ibqp);
+ schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
+ NES_TIMER_TYPE_CLOSE, 1, 0);
+ nes_debug(NES_DBG_AEQ, "QP%u Not decrementing QP refcount (%d),"
+ " need ae to finish up, original_last_aeq = 0x%04X."
+ " last_aeq = 0x%04X, scheduling timer. TCP state = %d\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
+ async_event_id, nesqp->last_aeq, tcp_state);
+ }
+ if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
+ (nesqp->ibqp_state != IB_QPS_RTS)) {
+ /* FIN Received but tcp state or IB state moved on,
+ should expect a close complete */
+ return;
+ }
+ case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
+ case NES_AEQE_AEID_LLP_CONNECTION_RESET:
+ case NES_AEQE_AEID_TERMINATE_SENT:
+ case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
+ case NES_AEQE_AEID_RESET_SENT:
+ nesqp = *((struct nes_qp **)&context);
+ if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
+ tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ }
+ nes_add_ref(&nesqp->ibqp);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+
+ if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
+ (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
+ nesqp->hte_added = 0;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u to remove hte\n",
+ nesqp->hwqp.qp_id);
+ nes_hw_modify_qp(nesdev, nesqp,
+ NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE, 0);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ }
+
+ if ((nesqp->ibqp_state == IB_QPS_RTS) &&
+ ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
+ (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ switch (nesqp->hw_iwarp_state) {
+ case NES_AEQE_IWARP_STATE_RTS:
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+ break;
+ case NES_AEQE_IWARP_STATE_TERMINATE:
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE;
+ if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
+ next_iwarp_state |= 0x02000000;
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ }
+ break;
+ default:
+ next_iwarp_state = 0;
+ }
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ if (next_iwarp_state) {
+ nes_add_ref(&nesqp->ibqp);
+ nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
+ " also added another reference\n",
+ nesqp->hwqp.qp_id, next_iwarp_state);
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
+ }
+ nes_cm_disconn(nesqp);
+ } else {
+ if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) {
+ /* FIN Received but ib state not RTS,
+ close complete will be on its way */
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_rem_ref(&nesqp->ibqp);
+ return;
+ }
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000;
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
+ " also added another reference\n",
+ nesqp->hwqp.qp_id, next_iwarp_state);
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
+ }
+ nes_cm_disconn(nesqp);
+ }
+ break;
+ case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
+ nesqp = *((struct nes_qp **)&context);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TERMINATE_RECEIVED"
+ " event on QP%u \n Q2 Data:\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_FATAL;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
+ ((nesqp->ibqp_state == IB_QPS_RTS)&&
+ (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ nes_add_ref(&nesqp->ibqp);
+ nes_cm_disconn(nesqp);
+ } else {
+ nesqp->in_disconnect = 0;
+ wake_up(&nesqp->kick_waitq);
+ }
+ break;
+ case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
+ nesqp = *((struct nes_qp **)&context);
+ nes_add_ref(&nesqp->ibqp);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR;
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ nesqp->last_aeq = async_event_id;
+ if (nesqp->cm_id) {
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
+ " event on QP%u, remote IP = 0x%08X \n",
+ nesqp->hwqp.qp_id,
+ ntohl(nesqp->cm_id->remote_addr.sin_addr.s_addr));
+ } else {
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
+ " event on QP%u \n",
+ nesqp->hwqp.qp_id);
+ }
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_RESET;
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_FATAL;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ break;
+ case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
+ if (NES_AEQE_INBOUND_RDMA&aeq_info) {
+ nesqp = nesadapter->qp_table[le32_to_cpu(
+ aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
+ } else {
+ /* TODO: get the actual WQE and mask off wqe index */
+ context &= ~((u64)511);
+ nesqp = *((struct nes_qp **)&context);
+ }
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_BAD_STAG_INDEX event on QP%u\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_ACCESS_ERR;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ break;
+ case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
+ nesqp = *((struct nes_qp **)&context);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_UNALLOCATED_STAG event on QP%u\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_ACCESS_ERR;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ break;
+ case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
+ nesqp = nesadapter->qp_table[le32_to_cpu(aeqe->aeqe_words
+ [NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_PRIV_OPERATION_DENIED event on QP%u,"
+ " nesqp = %p, AE reported %p\n",
+ nesqp->hwqp.qp_id, nesqp, *((struct nes_qp **)&context));
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_ACCESS_ERR;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ break;
+ case NES_AEQE_AEID_CQ_OPERATION_ERROR:
+ context <<= 1;
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n",
+ le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), (void *)(unsigned long)context);
+ resource_allocated = nes_is_resource_allocated(nesadapter, nesadapter->allocated_cqs,
+ le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
+ if (resource_allocated) {
+ printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n",
+ __FUNCTION__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
+ }
+ break;
+ case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ nesqp = nesadapter->qp_table[le32_to_cpu(
+ aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG"
+ "_FOR_AVAILABLE_BUFFER event on QP%u\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_ACCESS_ERR;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ /* tell cm to disconnect, cm will queue work to thread */
+ nes_add_ref(&nesqp->ibqp);
+ nes_cm_disconn(nesqp);
+ break;
+ case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+ nesqp = *((struct nes_qp **)&context);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_INVALID_MSN"
+ "_NO_BUFFER_AVAILABLE event on QP%u\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_FATAL;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ /* tell cm to disconnect, cm will queue work to thread */
+ nes_add_ref(&nesqp->ibqp);
+ nes_cm_disconn(nesqp);
+ break;
+ case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
+ nesqp = *((struct nes_qp **)&context);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR"
+ " event on QP%u \n Q2 Data:\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_FATAL;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ /* tell cm to disconnect, cm will queue work to thread */
+ nes_add_ref(&nesqp->ibqp);
+ nes_cm_disconn(nesqp);
+ break;
+ /* TODO: additional AEs need to be here */
+ default:
+ nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n",
+ async_event_id);
+ break;
+ }
+
+}
+
+
+/**
+ * nes_iwarp_ce_handler - acknowledge an iWARP CQ event and run its completion handler
+ */
+void nes_iwarp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *hw_cq)
+{
+ struct nes_cq *nescq = container_of(hw_cq, struct nes_cq, hw_cq);
+
+ /* nes_debug(NES_DBG_CQ, "Processing completion event for iWARP CQ%u.\n",
+ nescq->hw_cq.cq_number); */
+ nes_write32(nesdev->regs+NES_CQ_ACK, nescq->hw_cq.cq_number);
+
+ if (nescq->ibcq.comp_handler)
+ nescq->ibcq.comp_handler(&nescq->ibcq, nescq->ibcq.cq_context);
+
+ return;
+}
+
+
+/**
+ * nes_manage_apbvt - add or delete an APBVT entry for a local port via the CQP
+ */
+int nes_manage_apbvt(struct nes_vnic *nesvnic, u32 accel_local_port,
+ u32 nic_index, u32 add_port)
+{
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ unsigned long flags;
+ struct nes_cqp_request *cqp_request;
+ int ret = 0;
+ u16 major_code;
+
+ /* Send manage APBVT request to CQP */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n");
+ return -ENOMEM;
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ nes_debug(NES_DBG_QP, "%s APBV for local port=%u(0x%04x), nic_index=%u\n",
+ (add_port == NES_MANAGE_APBVT_ADD) ? "ADD" : "DEL",
+ accel_local_port, accel_local_port, nic_index);
+
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, (NES_CQP_MANAGE_APBVT |
+ ((add_port == NES_MANAGE_APBVT_ADD) ? NES_CQP_APBVT_ADD : 0)));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
+ ((nic_index << NES_CQP_APBVT_NIC_SHIFT) | accel_local_port));
+
+ nes_debug(NES_DBG_QP, "Waiting for CQP completion for APBVT.\n");
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ if (add_port == NES_MANAGE_APBVT_ADD)
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_QP, "Completed, ret=%u, CQP Major:Minor codes = 0x%04X:0x%04X\n",
+ ret, cqp_request->major_code, cqp_request->minor_code);
+ major_code = cqp_request->major_code;
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ if (!ret)
+ return -ETIME;
+ else if (major_code)
+ return -EIO;
+ else
+ return 0;
+}
+
+
+/**
+ * nes_manage_arp_cache - update the ARP table and the adapter's ARP cache via the CQP
+ */
+void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
+ u32 ip_addr, u32 action)
+{
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev;
+ struct nes_cqp_request *cqp_request;
+ int arp_index;
+
+ nesdev = nesvnic->nesdev;
+ arp_index = nes_arp_table(nesdev, ip_addr, mac_addr, action);
+ if (arp_index == -1) {
+ return;
+ }
+
+ /* update the ARP entry */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_NETDEV, "Failed to get a cqp_request.\n");
+ return;
+ }
+ cqp_request->waiting = 0;
+ cqp_wqe = &cqp_request->cqp_wqe;
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
+ NES_CQP_MANAGE_ARP_CACHE | NES_CQP_ARP_PERM);
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(
+ (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_CQP_ARP_AEQ_INDEX_SHIFT);
+ cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(arp_index);
+
+ if (action == NES_ARP_ADD) {
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_ARP_VALID);
+ cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = cpu_to_le32(
+ (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) |
+ (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]);
+ cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32(
+ (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]);
+ } else {
+ cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0;
+ }
+
+ nes_debug(NES_DBG_NETDEV, "Not waiting for CQP, cqp.sq_head=%u, cqp.sq_tail=%u\n",
+ nesdev->cqp.sq_head, nesdev->cqp.sq_tail);
+
+ atomic_set(&cqp_request->refcount, 1);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+}
+
+
+/**
+ * flush_wqes - post a CQP request to flush a QP's SQ and/or RQ WQEs
+ */
+void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
+ u32 which_wq, u32 wait_completion)
+{
+ unsigned long flags;
+ struct nes_cqp_request *cqp_request;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ int ret;
+
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n");
+ return;
+ }
+ if (wait_completion) {
+ cqp_request->waiting = 1;
+ atomic_set(&cqp_request->refcount, 2);
+ } else {
+ cqp_request->waiting = 0;
+ }
+ cqp_wqe = &cqp_request->cqp_wqe;
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] =
+ cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq);
+ cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id);
+
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ if (wait_completion) {
+ /* Wait for CQP */
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_QP, "Flush SQ QP WQEs completed, ret=%u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X\n",
+ ret, cqp_request->major_code, cqp_request->minor_code);
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ }
+}
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
new file mode 100644
index 000000000000..1e10df550c9e
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -0,0 +1,1206 @@
+/*
+* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenIB.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*/
+
+#ifndef __NES_HW_H
+#define __NES_HW_H
+
+#define NES_PHY_TYPE_1G 2
+#define NES_PHY_TYPE_IRIS 3
+#define NES_PHY_TYPE_PUMA_10G 6
+
+#define NES_MULTICAST_PF_MAX 8
+
+enum pci_regs {
+ NES_INT_STAT = 0x0000,
+ NES_INT_MASK = 0x0004,
+ NES_INT_PENDING = 0x0008,
+ NES_INTF_INT_STAT = 0x000C,
+ NES_INTF_INT_MASK = 0x0010,
+ NES_TIMER_STAT = 0x0014,
+ NES_PERIODIC_CONTROL = 0x0018,
+ NES_ONE_SHOT_CONTROL = 0x001C,
+ NES_EEPROM_COMMAND = 0x0020,
+ NES_EEPROM_DATA = 0x0024,
+ NES_FLASH_COMMAND = 0x0028,
+ NES_FLASH_DATA = 0x002C,
+ NES_SOFTWARE_RESET = 0x0030,
+ NES_CQ_ACK = 0x0034,
+ NES_WQE_ALLOC = 0x0040,
+ NES_CQE_ALLOC = 0x0044,
+};
+
+enum indexed_regs {
+ NES_IDX_CREATE_CQP_LOW = 0x0000,
+ NES_IDX_CREATE_CQP_HIGH = 0x0004,
+ NES_IDX_QP_CONTROL = 0x0040,
+ NES_IDX_FLM_CONTROL = 0x0080,
+ NES_IDX_INT_CPU_STATUS = 0x00a0,
+ NES_IDX_GPIO_CONTROL = 0x00f0,
+ NES_IDX_GPIO_DATA = 0x00f4,
+ NES_IDX_TCP_CONFIG0 = 0x01e4,
+ NES_IDX_TCP_TIMER_CONFIG = 0x01ec,
+ NES_IDX_TCP_NOW = 0x01f0,
+ NES_IDX_QP_MAX_CFG_SIZES = 0x0200,
+ NES_IDX_QP_CTX_SIZE = 0x0218,
+ NES_IDX_TCP_TIMER_SIZE0 = 0x0238,
+ NES_IDX_TCP_TIMER_SIZE1 = 0x0240,
+ NES_IDX_ARP_CACHE_SIZE = 0x0258,
+ NES_IDX_CQ_CTX_SIZE = 0x0260,
+ NES_IDX_MRT_SIZE = 0x0278,
+ NES_IDX_PBL_REGION_SIZE = 0x0280,
+ NES_IDX_IRRQ_COUNT = 0x02b0,
+ NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x02f0,
+ NES_IDX_RX_WINDOW_BUFFER_SIZE = 0x0300,
+ NES_IDX_DST_IP_ADDR = 0x0400,
+ NES_IDX_PCIX_DIAG = 0x08e8,
+ NES_IDX_MPP_DEBUG = 0x0a00,
+ NES_IDX_PORT_RX_DISCARDS = 0x0a30,
+ NES_IDX_PORT_TX_DISCARDS = 0x0a34,
+ NES_IDX_MPP_LB_DEBUG = 0x0b00,
+ NES_IDX_DENALI_CTL_22 = 0x1058,
+ NES_IDX_MAC_TX_CONTROL = 0x2000,
+ NES_IDX_MAC_TX_CONFIG = 0x2004,
+ NES_IDX_MAC_TX_PAUSE_QUANTA = 0x2008,
+ NES_IDX_MAC_RX_CONTROL = 0x200c,
+ NES_IDX_MAC_RX_CONFIG = 0x2010,
+ NES_IDX_MAC_EXACT_MATCH_BOTTOM = 0x201c,
+ NES_IDX_MAC_MDIO_CONTROL = 0x2084,
+ NES_IDX_MAC_TX_OCTETS_LOW = 0x2100,
+ NES_IDX_MAC_TX_OCTETS_HIGH = 0x2104,
+ NES_IDX_MAC_TX_FRAMES_LOW = 0x2108,
+ NES_IDX_MAC_TX_FRAMES_HIGH = 0x210c,
+ NES_IDX_MAC_TX_PAUSE_FRAMES = 0x2118,
+ NES_IDX_MAC_TX_ERRORS = 0x2138,
+ NES_IDX_MAC_RX_OCTETS_LOW = 0x213c,
+ NES_IDX_MAC_RX_OCTETS_HIGH = 0x2140,
+ NES_IDX_MAC_RX_FRAMES_LOW = 0x2144,
+ NES_IDX_MAC_RX_FRAMES_HIGH = 0x2148,
+ NES_IDX_MAC_RX_BC_FRAMES_LOW = 0x214c,
+ NES_IDX_MAC_RX_MC_FRAMES_HIGH = 0x2150,
+ NES_IDX_MAC_RX_PAUSE_FRAMES = 0x2154,
+ NES_IDX_MAC_RX_SHORT_FRAMES = 0x2174,
+ NES_IDX_MAC_RX_OVERSIZED_FRAMES = 0x2178,
+ NES_IDX_MAC_RX_JABBER_FRAMES = 0x217c,
+ NES_IDX_MAC_RX_CRC_ERR_FRAMES = 0x2180,
+ NES_IDX_MAC_RX_LENGTH_ERR_FRAMES = 0x2184,
+ NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES = 0x2188,
+ NES_IDX_MAC_INT_STATUS = 0x21f0,
+ NES_IDX_MAC_INT_MASK = 0x21f4,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0 = 0x2800,
+ NES_IDX_PHY_PCS_CONTROL_STATUS1 = 0x2a00,
+ NES_IDX_ETH_SERDES_COMMON_CONTROL0 = 0x2808,
+ NES_IDX_ETH_SERDES_COMMON_CONTROL1 = 0x2a08,
+ NES_IDX_ETH_SERDES_COMMON_STATUS0 = 0x280c,
+ NES_IDX_ETH_SERDES_COMMON_STATUS1 = 0x2a0c,
+ NES_IDX_ETH_SERDES_TX_EMP0 = 0x2810,
+ NES_IDX_ETH_SERDES_TX_EMP1 = 0x2a10,
+ NES_IDX_ETH_SERDES_TX_DRIVE0 = 0x2814,
+ NES_IDX_ETH_SERDES_TX_DRIVE1 = 0x2a14,
+ NES_IDX_ETH_SERDES_RX_MODE0 = 0x2818,
+ NES_IDX_ETH_SERDES_RX_MODE1 = 0x2a18,
+ NES_IDX_ETH_SERDES_RX_SIGDET0 = 0x281c,
+ NES_IDX_ETH_SERDES_RX_SIGDET1 = 0x2a1c,
+ NES_IDX_ETH_SERDES_BYPASS0 = 0x2820,
+ NES_IDX_ETH_SERDES_BYPASS1 = 0x2a20,
+ NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0 = 0x2824,
+ NES_IDX_ETH_SERDES_LOOPBACK_CONTROL1 = 0x2a24,
+ NES_IDX_ETH_SERDES_RX_EQ_CONTROL0 = 0x2828,
+ NES_IDX_ETH_SERDES_RX_EQ_CONTROL1 = 0x2a28,
+ NES_IDX_ETH_SERDES_RX_EQ_STATUS0 = 0x282c,
+ NES_IDX_ETH_SERDES_RX_EQ_STATUS1 = 0x2a2c,
+ NES_IDX_ETH_SERDES_CDR_RESET0 = 0x2830,
+ NES_IDX_ETH_SERDES_CDR_RESET1 = 0x2a30,
+ NES_IDX_ETH_SERDES_CDR_CONTROL0 = 0x2834,
+ NES_IDX_ETH_SERDES_CDR_CONTROL1 = 0x2a34,
+ NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0 = 0x2838,
+ NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1 = 0x2a38,
+ NES_IDX_ENDNODE0_NSTAT_RX_DISCARD = 0x3080,
+ NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO = 0x3000,
+ NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI = 0x3004,
+ NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO = 0x3008,
+ NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI = 0x300c,
+ NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO = 0x7000,
+ NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI = 0x7004,
+ NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO = 0x7008,
+ NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI = 0x700c,
+ NES_IDX_CM_CONFIG = 0x5100,
+ NES_IDX_NIC_LOGPORT_TO_PHYPORT = 0x6000,
+ NES_IDX_NIC_PHYPORT_TO_USW = 0x6008,
+ NES_IDX_NIC_ACTIVE = 0x6010,
+ NES_IDX_NIC_UNICAST_ALL = 0x6018,
+ NES_IDX_NIC_MULTICAST_ALL = 0x6020,
+ NES_IDX_NIC_MULTICAST_ENABLE = 0x6028,
+ NES_IDX_NIC_BROADCAST_ON = 0x6030,
+ NES_IDX_USED_CHUNKS_TX = 0x60b0,
+ NES_IDX_TX_POOL_SIZE = 0x60b8,
+ NES_IDX_QUAD_HASH_TABLE_SIZE = 0x6148,
+ NES_IDX_PERFECT_FILTER_LOW = 0x6200,
+ NES_IDX_PERFECT_FILTER_HIGH = 0x6204,
+ NES_IDX_IPV4_TCP_REXMITS = 0x7080,
+ NES_IDX_DEBUG_ERROR_CONTROL_STATUS = 0x913c,
+ NES_IDX_DEBUG_ERROR_MASKS0 = 0x9140,
+ NES_IDX_DEBUG_ERROR_MASKS1 = 0x9144,
+ NES_IDX_DEBUG_ERROR_MASKS2 = 0x9148,
+ NES_IDX_DEBUG_ERROR_MASKS3 = 0x914c,
+ NES_IDX_DEBUG_ERROR_MASKS4 = 0x9150,
+ NES_IDX_DEBUG_ERROR_MASKS5 = 0x9154,
+};
+
+#define NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE 1
+#define NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE (1 << 17)
+
+enum nes_cqp_opcodes {
+ NES_CQP_CREATE_QP = 0x00,
+ NES_CQP_MODIFY_QP = 0x01,
+ NES_CQP_DESTROY_QP = 0x02,
+ NES_CQP_CREATE_CQ = 0x03,
+ NES_CQP_MODIFY_CQ = 0x04,
+ NES_CQP_DESTROY_CQ = 0x05,
+ NES_CQP_ALLOCATE_STAG = 0x09,
+ NES_CQP_REGISTER_STAG = 0x0a,
+ NES_CQP_QUERY_STAG = 0x0b,
+ NES_CQP_REGISTER_SHARED_STAG = 0x0c,
+ NES_CQP_DEALLOCATE_STAG = 0x0d,
+ NES_CQP_MANAGE_ARP_CACHE = 0x0f,
+ NES_CQP_SUSPEND_QPS = 0x11,
+ NES_CQP_UPLOAD_CONTEXT = 0x13,
+ NES_CQP_CREATE_CEQ = 0x16,
+ NES_CQP_DESTROY_CEQ = 0x18,
+ NES_CQP_CREATE_AEQ = 0x19,
+ NES_CQP_DESTROY_AEQ = 0x1b,
+ NES_CQP_LMI_ACCESS = 0x20,
+ NES_CQP_FLUSH_WQES = 0x22,
+ NES_CQP_MANAGE_APBVT = 0x23
+};
+
+enum nes_cqp_wqe_word_idx {
+ NES_CQP_WQE_OPCODE_IDX = 0,
+ NES_CQP_WQE_ID_IDX = 1,
+ NES_CQP_WQE_COMP_CTX_LOW_IDX = 2,
+ NES_CQP_WQE_COMP_CTX_HIGH_IDX = 3,
+ NES_CQP_WQE_COMP_SCRATCH_LOW_IDX = 4,
+ NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX = 5,
+};
+
+enum nes_cqp_cq_wqeword_idx {
+ NES_CQP_CQ_WQE_PBL_LOW_IDX = 6,
+ NES_CQP_CQ_WQE_PBL_HIGH_IDX = 7,
+ NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX = 8,
+ NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX = 9,
+ NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX = 10,
+};
+
+enum nes_cqp_stag_wqeword_idx {
+ NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX = 1,
+ NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX = 6,
+ NES_CQP_STAG_WQE_LEN_LOW_IDX = 7,
+ NES_CQP_STAG_WQE_STAG_IDX = 8,
+ NES_CQP_STAG_WQE_VA_LOW_IDX = 10,
+ NES_CQP_STAG_WQE_VA_HIGH_IDX = 11,
+ NES_CQP_STAG_WQE_PA_LOW_IDX = 12,
+ NES_CQP_STAG_WQE_PA_HIGH_IDX = 13,
+ NES_CQP_STAG_WQE_PBL_LEN_IDX = 14
+};
+
+#define NES_CQP_OP_IWARP_STATE_SHIFT 28
+
+enum nes_cqp_qp_bits {
+ NES_CQP_QP_ARP_VALID = (1<<8),
+ NES_CQP_QP_WINBUF_VALID = (1<<9),
+ NES_CQP_QP_CONTEXT_VALID = (1<<10),
+ NES_CQP_QP_ORD_VALID = (1<<11),
+ NES_CQP_QP_WINBUF_DATAIND_EN = (1<<12),
+ NES_CQP_QP_VIRT_WQS = (1<<13),
+ NES_CQP_QP_DEL_HTE = (1<<14),
+ NES_CQP_QP_CQS_VALID = (1<<15),
+ NES_CQP_QP_TYPE_TSA = 0,
+ NES_CQP_QP_TYPE_IWARP = (1<<16),
+ NES_CQP_QP_TYPE_CQP = (4<<16),
+ NES_CQP_QP_TYPE_NIC = (5<<16),
+ NES_CQP_QP_MSS_CHG = (1<<20),
+ NES_CQP_QP_STATIC_RESOURCES = (1<<21),
+ NES_CQP_QP_IGNORE_MW_BOUND = (1<<22),
+ NES_CQP_QP_VWQ_USE_LMI = (1<<23),
+ NES_CQP_QP_IWARP_STATE_IDLE = (1<<NES_CQP_OP_IWARP_STATE_SHIFT),
+ NES_CQP_QP_IWARP_STATE_RTS = (2<<NES_CQP_OP_IWARP_STATE_SHIFT),
+ NES_CQP_QP_IWARP_STATE_CLOSING = (3<<NES_CQP_OP_IWARP_STATE_SHIFT),
+ NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT),
+ NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT),
+ NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT),
+ NES_CQP_QP_RESET = (1<<31),
+};
+
+enum nes_cqp_qp_wqe_word_idx {
+ NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6,
+ NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7,
+ NES_CQP_QP_WQE_NEW_MSS_IDX = 15,
+};
+
+enum nes_nic_ctx_bits {
+ NES_NIC_CTX_RQ_SIZE_32 = (3<<8),
+ NES_NIC_CTX_RQ_SIZE_512 = (3<<8),
+ NES_NIC_CTX_SQ_SIZE_32 = (1<<10),
+ NES_NIC_CTX_SQ_SIZE_512 = (3<<10),
+};
+
+enum nes_nic_qp_ctx_word_idx {
+ NES_NIC_CTX_MISC_IDX = 0,
+ NES_NIC_CTX_SQ_LOW_IDX = 2,
+ NES_NIC_CTX_SQ_HIGH_IDX = 3,
+ NES_NIC_CTX_RQ_LOW_IDX = 4,
+ NES_NIC_CTX_RQ_HIGH_IDX = 5,
+};
+
+enum nes_cqp_cq_bits {
+ NES_CQP_CQ_CEQE_MASK = (1<<9),
+ NES_CQP_CQ_CEQ_VALID = (1<<10),
+ NES_CQP_CQ_RESIZE = (1<<11),
+ NES_CQP_CQ_CHK_OVERFLOW = (1<<12),
+ NES_CQP_CQ_4KB_CHUNK = (1<<14),
+ NES_CQP_CQ_VIRT = (1<<15),
+};
+
+enum nes_cqp_stag_bits {
+ NES_CQP_STAG_VA_TO = (1<<9),
+ NES_CQP_STAG_DEALLOC_PBLS = (1<<10),
+ NES_CQP_STAG_PBL_BLK_SIZE = (1<<11),
+ NES_CQP_STAG_MR = (1<<13),
+ NES_CQP_STAG_RIGHTS_LOCAL_READ = (1<<16),
+ NES_CQP_STAG_RIGHTS_LOCAL_WRITE = (1<<17),
+ NES_CQP_STAG_RIGHTS_REMOTE_READ = (1<<18),
+ NES_CQP_STAG_RIGHTS_REMOTE_WRITE = (1<<19),
+ NES_CQP_STAG_RIGHTS_WINDOW_BIND = (1<<20),
+ NES_CQP_STAG_REM_ACC_EN = (1<<21),
+ NES_CQP_STAG_LEAVE_PENDING = (1<<31),
+};
+
+enum nes_cqp_ceq_wqeword_idx {
+ NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX = 1,
+ NES_CQP_CEQ_WQE_PBL_LOW_IDX = 6,
+ NES_CQP_CEQ_WQE_PBL_HIGH_IDX = 7,
+};
+
+enum nes_cqp_ceq_bits {
+ NES_CQP_CEQ_4KB_CHUNK = (1<<14),
+ NES_CQP_CEQ_VIRT = (1<<15),
+};
+
+enum nes_cqp_aeq_wqeword_idx {
+ NES_CQP_AEQ_WQE_ELEMENT_COUNT_IDX = 1,
+ NES_CQP_AEQ_WQE_PBL_LOW_IDX = 6,
+ NES_CQP_AEQ_WQE_PBL_HIGH_IDX = 7,
+};
+
+enum nes_cqp_aeq_bits {
+ NES_CQP_AEQ_4KB_CHUNK = (1<<14),
+ NES_CQP_AEQ_VIRT = (1<<15),
+};
+
+enum nes_cqp_lmi_wqeword_idx {
+ NES_CQP_LMI_WQE_LMI_OFFSET_IDX = 1,
+ NES_CQP_LMI_WQE_FRAG_LOW_IDX = 8,
+ NES_CQP_LMI_WQE_FRAG_HIGH_IDX = 9,
+ NES_CQP_LMI_WQE_FRAG_LEN_IDX = 10,
+};
+
+enum nes_cqp_arp_wqeword_idx {
+ NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX = 6,
+ NES_CQP_ARP_WQE_MAC_HIGH_IDX = 7,
+ NES_CQP_ARP_WQE_REACHABILITY_MAX_IDX = 1,
+};
+
+enum nes_cqp_upload_wqeword_idx {
+ NES_CQP_UPLOAD_WQE_CTXT_LOW_IDX = 6,
+ NES_CQP_UPLOAD_WQE_CTXT_HIGH_IDX = 7,
+ NES_CQP_UPLOAD_WQE_HTE_IDX = 8,
+};
+
+enum nes_cqp_arp_bits {
+ NES_CQP_ARP_VALID = (1<<8),
+ NES_CQP_ARP_PERM = (1<<9),
+};
+
+enum nes_cqp_flush_bits {
+ NES_CQP_FLUSH_SQ = (1<<30),
+ NES_CQP_FLUSH_RQ = (1<<31),
+};
+
+enum nes_cqe_opcode_bits {
+ NES_CQE_STAG_VALID = (1<<6),
+ NES_CQE_ERROR = (1<<7),
+ NES_CQE_SQ = (1<<8),
+ NES_CQE_SE = (1<<9),
+ NES_CQE_PSH = (1<<29),
+ NES_CQE_FIN = (1<<30),
+ NES_CQE_VALID = (1<<31),
+};
+
+
+enum nes_cqe_word_idx {
+ NES_CQE_PAYLOAD_LENGTH_IDX = 0,
+ NES_CQE_COMP_COMP_CTX_LOW_IDX = 2,
+ NES_CQE_COMP_COMP_CTX_HIGH_IDX = 3,
+ NES_CQE_INV_STAG_IDX = 4,
+ NES_CQE_QP_ID_IDX = 5,
+ NES_CQE_ERROR_CODE_IDX = 6,
+ NES_CQE_OPCODE_IDX = 7,
+};
+
+enum nes_ceqe_word_idx {
+ NES_CEQE_CQ_CTX_LOW_IDX = 0,
+ NES_CEQE_CQ_CTX_HIGH_IDX = 1,
+};
+
+enum nes_ceqe_status_bit {
+ NES_CEQE_VALID = (1<<31),
+};
+
+enum nes_int_bits {
+ NES_INT_CEQ0 = (1<<0),
+ NES_INT_CEQ1 = (1<<1),
+ NES_INT_CEQ2 = (1<<2),
+ NES_INT_CEQ3 = (1<<3),
+ NES_INT_CEQ4 = (1<<4),
+ NES_INT_CEQ5 = (1<<5),
+ NES_INT_CEQ6 = (1<<6),
+ NES_INT_CEQ7 = (1<<7),
+ NES_INT_CEQ8 = (1<<8),
+ NES_INT_CEQ9 = (1<<9),
+ NES_INT_CEQ10 = (1<<10),
+ NES_INT_CEQ11 = (1<<11),
+ NES_INT_CEQ12 = (1<<12),
+ NES_INT_CEQ13 = (1<<13),
+ NES_INT_CEQ14 = (1<<14),
+ NES_INT_CEQ15 = (1<<15),
+ NES_INT_AEQ0 = (1<<16),
+ NES_INT_AEQ1 = (1<<17),
+ NES_INT_AEQ2 = (1<<18),
+ NES_INT_AEQ3 = (1<<19),
+ NES_INT_AEQ4 = (1<<20),
+ NES_INT_AEQ5 = (1<<21),
+ NES_INT_AEQ6 = (1<<22),
+ NES_INT_AEQ7 = (1<<23),
+ NES_INT_MAC0 = (1<<24),
+ NES_INT_MAC1 = (1<<25),
+ NES_INT_MAC2 = (1<<26),
+ NES_INT_MAC3 = (1<<27),
+ NES_INT_TSW = (1<<28),
+ NES_INT_TIMER = (1<<29),
+ NES_INT_INTF = (1<<30),
+};
+
+enum nes_intf_int_bits {
+ NES_INTF_INT_PCIERR = (1<<0),
+ NES_INTF_PERIODIC_TIMER = (1<<2),
+ NES_INTF_ONE_SHOT_TIMER = (1<<3),
+ NES_INTF_INT_CRITERR = (1<<14),
+ NES_INTF_INT_AEQ0_OFLOW = (1<<16),
+ NES_INTF_INT_AEQ1_OFLOW = (1<<17),
+ NES_INTF_INT_AEQ2_OFLOW = (1<<18),
+ NES_INTF_INT_AEQ3_OFLOW = (1<<19),
+ NES_INTF_INT_AEQ4_OFLOW = (1<<20),
+ NES_INTF_INT_AEQ5_OFLOW = (1<<21),
+ NES_INTF_INT_AEQ6_OFLOW = (1<<22),
+ NES_INTF_INT_AEQ7_OFLOW = (1<<23),
+ NES_INTF_INT_AEQ_OFLOW = (0xff<<16),
+};
+
+enum nes_mac_int_bits {
+ NES_MAC_INT_LINK_STAT_CHG = (1<<1),
+ NES_MAC_INT_XGMII_EXT = (1<<2),
+ NES_MAC_INT_TX_UNDERFLOW = (1<<6),
+ NES_MAC_INT_TX_ERROR = (1<<7),
+};
+
+enum nes_cqe_allocate_bits {
+ NES_CQE_ALLOC_INC_SELECT = (1<<28),
+ NES_CQE_ALLOC_NOTIFY_NEXT = (1<<29),
+ NES_CQE_ALLOC_NOTIFY_SE = (1<<30),
+ NES_CQE_ALLOC_RESET = (1<<31),
+};
+
+enum nes_nic_rq_wqe_word_idx {
+ NES_NIC_RQ_WQE_LENGTH_1_0_IDX = 0,
+ NES_NIC_RQ_WQE_LENGTH_3_2_IDX = 1,
+ NES_NIC_RQ_WQE_FRAG0_LOW_IDX = 2,
+ NES_NIC_RQ_WQE_FRAG0_HIGH_IDX = 3,
+ NES_NIC_RQ_WQE_FRAG1_LOW_IDX = 4,
+ NES_NIC_RQ_WQE_FRAG1_HIGH_IDX = 5,
+ NES_NIC_RQ_WQE_FRAG2_LOW_IDX = 6,
+ NES_NIC_RQ_WQE_FRAG2_HIGH_IDX = 7,
+ NES_NIC_RQ_WQE_FRAG3_LOW_IDX = 8,
+ NES_NIC_RQ_WQE_FRAG3_HIGH_IDX = 9,
+};
+
+enum nes_nic_sq_wqe_word_idx {
+ NES_NIC_SQ_WQE_MISC_IDX = 0,
+ NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX = 1,
+ NES_NIC_SQ_WQE_LSO_INFO_IDX = 2,
+ NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX = 3,
+ NES_NIC_SQ_WQE_LENGTH_2_1_IDX = 4,
+ NES_NIC_SQ_WQE_LENGTH_4_3_IDX = 5,
+ NES_NIC_SQ_WQE_FRAG0_LOW_IDX = 6,
+ NES_NIC_SQ_WQE_FRAG0_HIGH_IDX = 7,
+ NES_NIC_SQ_WQE_FRAG1_LOW_IDX = 8,
+ NES_NIC_SQ_WQE_FRAG1_HIGH_IDX = 9,
+ NES_NIC_SQ_WQE_FRAG2_LOW_IDX = 10,
+ NES_NIC_SQ_WQE_FRAG2_HIGH_IDX = 11,
+ NES_NIC_SQ_WQE_FRAG3_LOW_IDX = 12,
+ NES_NIC_SQ_WQE_FRAG3_HIGH_IDX = 13,
+ NES_NIC_SQ_WQE_FRAG4_LOW_IDX = 14,
+ NES_NIC_SQ_WQE_FRAG4_HIGH_IDX = 15,
+};
+
+enum nes_iwarp_sq_wqe_word_idx {
+ NES_IWARP_SQ_WQE_MISC_IDX = 0,
+ NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX = 1,
+ NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX = 2,
+ NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX = 3,
+ NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX = 4,
+ NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX = 5,
+ NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX = 7,
+ NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX = 8,
+ NES_IWARP_SQ_WQE_RDMA_TO_HIGH_IDX = 9,
+ NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX = 10,
+ NES_IWARP_SQ_WQE_RDMA_STAG_IDX = 11,
+ NES_IWARP_SQ_WQE_IMM_DATA_START_IDX = 12,
+ NES_IWARP_SQ_WQE_FRAG0_LOW_IDX = 16,
+ NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX = 17,
+ NES_IWARP_SQ_WQE_LENGTH0_IDX = 18,
+ NES_IWARP_SQ_WQE_STAG0_IDX = 19,
+ NES_IWARP_SQ_WQE_FRAG1_LOW_IDX = 20,
+ NES_IWARP_SQ_WQE_FRAG1_HIGH_IDX = 21,
+ NES_IWARP_SQ_WQE_LENGTH1_IDX = 22,
+ NES_IWARP_SQ_WQE_STAG1_IDX = 23,
+ NES_IWARP_SQ_WQE_FRAG2_LOW_IDX = 24,
+ NES_IWARP_SQ_WQE_FRAG2_HIGH_IDX = 25,
+ NES_IWARP_SQ_WQE_LENGTH2_IDX = 26,
+ NES_IWARP_SQ_WQE_STAG2_IDX = 27,
+ NES_IWARP_SQ_WQE_FRAG3_LOW_IDX = 28,
+ NES_IWARP_SQ_WQE_FRAG3_HIGH_IDX = 29,
+ NES_IWARP_SQ_WQE_LENGTH3_IDX = 30,
+ NES_IWARP_SQ_WQE_STAG3_IDX = 31,
+};
+
+enum nes_iwarp_sq_bind_wqe_word_idx {
+ NES_IWARP_SQ_BIND_WQE_MR_IDX = 6,
+ NES_IWARP_SQ_BIND_WQE_MW_IDX = 7,
+ NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX = 8,
+ NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX = 9,
+ NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX = 10,
+ NES_IWARP_SQ_BIND_WQE_VA_FBO_HIGH_IDX = 11,
+};
+
+enum nes_iwarp_sq_fmr_wqe_word_idx {
+ NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX = 7,
+ NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX = 8,
+ NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX = 9,
+ NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX = 10,
+ NES_IWARP_SQ_FMR_WQE_VA_FBO_HIGH_IDX = 11,
+ NES_IWARP_SQ_FMR_WQE_PBL_ADDR_LOW_IDX = 12,
+ NES_IWARP_SQ_FMR_WQE_PBL_ADDR_HIGH_IDX = 13,
+ NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX = 14,
+};
+
+enum nes_iwarp_sq_locinv_wqe_word_idx {
+ NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX = 6,
+};
+
+
+enum nes_iwarp_rq_wqe_word_idx {
+ NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX = 1,
+ NES_IWARP_RQ_WQE_COMP_CTX_LOW_IDX = 2,
+ NES_IWARP_RQ_WQE_COMP_CTX_HIGH_IDX = 3,
+ NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX = 4,
+ NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX = 5,
+ NES_IWARP_RQ_WQE_FRAG0_LOW_IDX = 8,
+ NES_IWARP_RQ_WQE_FRAG0_HIGH_IDX = 9,
+ NES_IWARP_RQ_WQE_LENGTH0_IDX = 10,
+ NES_IWARP_RQ_WQE_STAG0_IDX = 11,
+ NES_IWARP_RQ_WQE_FRAG1_LOW_IDX = 12,
+ NES_IWARP_RQ_WQE_FRAG1_HIGH_IDX = 13,
+ NES_IWARP_RQ_WQE_LENGTH1_IDX = 14,
+ NES_IWARP_RQ_WQE_STAG1_IDX = 15,
+ NES_IWARP_RQ_WQE_FRAG2_LOW_IDX = 16,
+ NES_IWARP_RQ_WQE_FRAG2_HIGH_IDX = 17,
+ NES_IWARP_RQ_WQE_LENGTH2_IDX = 18,
+ NES_IWARP_RQ_WQE_STAG2_IDX = 19,
+ NES_IWARP_RQ_WQE_FRAG3_LOW_IDX = 20,
+ NES_IWARP_RQ_WQE_FRAG3_HIGH_IDX = 21,
+ NES_IWARP_RQ_WQE_LENGTH3_IDX = 22,
+ NES_IWARP_RQ_WQE_STAG3_IDX = 23,
+};
+
+enum nes_nic_sq_wqe_bits {
+ NES_NIC_SQ_WQE_PHDR_CS_READY = (1<<21),
+ NES_NIC_SQ_WQE_LSO_ENABLE = (1<<22),
+ NES_NIC_SQ_WQE_TAGVALUE_ENABLE = (1<<23),
+ NES_NIC_SQ_WQE_DISABLE_CHKSUM = (1<<30),
+ NES_NIC_SQ_WQE_COMPLETION = (1<<31),
+};
+
+enum nes_nic_cqe_word_idx {
+ NES_NIC_CQE_ACCQP_ID_IDX = 0,
+ NES_NIC_CQE_TAG_PKT_TYPE_IDX = 2,
+ NES_NIC_CQE_MISC_IDX = 3,
+};
+
+#define NES_PKT_TYPE_APBVT_BITS 0xC112
+#define NES_PKT_TYPE_APBVT_MASK 0xff3e
+
+#define NES_PKT_TYPE_PVALID_BITS 0x10000000
+#define NES_PKT_TYPE_PVALID_MASK 0x30000000
+
+#define NES_PKT_TYPE_TCPV4_BITS 0x0110
+#define NES_PKT_TYPE_TCPV4_MASK 0x3f30
+
+#define NES_PKT_TYPE_UDPV4_BITS 0x0210
+#define NES_PKT_TYPE_UDPV4_MASK 0x3f30
+
+#define NES_PKT_TYPE_IPV4_BITS 0x0010
+#define NES_PKT_TYPE_IPV4_MASK 0x3f30
+
+#define NES_PKT_TYPE_OTHER_BITS 0x0000
+#define NES_PKT_TYPE_OTHER_MASK 0x0030
+
+#define NES_NIC_CQE_ERRV_SHIFT 16
+enum nes_nic_ev_bits {
+ NES_NIC_ERRV_BITS_MODE = (1<<0),
+ NES_NIC_ERRV_BITS_IPV4_CSUM_ERR = (1<<1),
+ NES_NIC_ERRV_BITS_TCPUDP_CSUM_ERR = (1<<2),
+ NES_NIC_ERRV_BITS_WQE_OVERRUN = (1<<3),
+ NES_NIC_ERRV_BITS_IPH_ERR = (1<<4),
+};
+
+enum nes_nic_cqe_bits {
+ NES_NIC_CQE_ERRV_MASK = (0xff<<NES_NIC_CQE_ERRV_SHIFT),
+ NES_NIC_CQE_SQ = (1<<24),
+ NES_NIC_CQE_ACCQP_PORT = (1<<28),
+ NES_NIC_CQE_ACCQP_VALID = (1<<29),
+ NES_NIC_CQE_TAG_VALID = (1<<30),
+ NES_NIC_CQE_VALID = (1<<31),
+};
+
+enum nes_aeqe_word_idx {
+ NES_AEQE_COMP_CTXT_LOW_IDX = 0,
+ NES_AEQE_COMP_CTXT_HIGH_IDX = 1,
+ NES_AEQE_COMP_QP_CQ_ID_IDX = 2,
+ NES_AEQE_MISC_IDX = 3,
+};
+
+enum nes_aeqe_bits {
+ NES_AEQE_QP = (1<<16),
+ NES_AEQE_CQ = (1<<17),
+ NES_AEQE_SQ = (1<<18),
+ NES_AEQE_INBOUND_RDMA = (1<<19),
+ NES_AEQE_IWARP_STATE_MASK = (7<<20),
+ NES_AEQE_TCP_STATE_MASK = (0xf<<24),
+ NES_AEQE_VALID = (1<<31),
+};
+
+#define NES_AEQE_IWARP_STATE_SHIFT 20
+#define NES_AEQE_TCP_STATE_SHIFT 24
+
+enum nes_aeqe_iwarp_state {
+ NES_AEQE_IWARP_STATE_NON_EXISTANT = 0,
+ NES_AEQE_IWARP_STATE_IDLE = 1,
+ NES_AEQE_IWARP_STATE_RTS = 2,
+ NES_AEQE_IWARP_STATE_CLOSING = 3,
+ NES_AEQE_IWARP_STATE_TERMINATE = 5,
+ NES_AEQE_IWARP_STATE_ERROR = 6
+};
+
+enum nes_aeqe_tcp_state {
+ NES_AEQE_TCP_STATE_NON_EXISTANT = 0,
+ NES_AEQE_TCP_STATE_CLOSED = 1,
+ NES_AEQE_TCP_STATE_LISTEN = 2,
+ NES_AEQE_TCP_STATE_SYN_SENT = 3,
+ NES_AEQE_TCP_STATE_SYN_RCVD = 4,
+ NES_AEQE_TCP_STATE_ESTABLISHED = 5,
+ NES_AEQE_TCP_STATE_CLOSE_WAIT = 6,
+ NES_AEQE_TCP_STATE_FIN_WAIT_1 = 7,
+ NES_AEQE_TCP_STATE_CLOSING = 8,
+ NES_AEQE_TCP_STATE_LAST_ACK = 9,
+ NES_AEQE_TCP_STATE_FIN_WAIT_2 = 10,
+ NES_AEQE_TCP_STATE_TIME_WAIT = 11
+};
+
+enum nes_aeqe_aeid {
+ NES_AEQE_AEID_AMP_UNALLOCATED_STAG = 0x0102,
+ NES_AEQE_AEID_AMP_INVALID_STAG = 0x0103,
+ NES_AEQE_AEID_AMP_BAD_QP = 0x0104,
+ NES_AEQE_AEID_AMP_BAD_PD = 0x0105,
+ NES_AEQE_AEID_AMP_BAD_STAG_KEY = 0x0106,
+ NES_AEQE_AEID_AMP_BAD_STAG_INDEX = 0x0107,
+ NES_AEQE_AEID_AMP_BOUNDS_VIOLATION = 0x0108,
+ NES_AEQE_AEID_AMP_RIGHTS_VIOLATION = 0x0109,
+ NES_AEQE_AEID_AMP_TO_WRAP = 0x010a,
+ NES_AEQE_AEID_AMP_FASTREG_SHARED = 0x010b,
+ NES_AEQE_AEID_AMP_FASTREG_VALID_STAG = 0x010c,
+ NES_AEQE_AEID_AMP_FASTREG_MW_STAG = 0x010d,
+ NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS = 0x010e,
+ NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW = 0x010f,
+ NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH = 0x0110,
+ NES_AEQE_AEID_AMP_INVALIDATE_SHARED = 0x0111,
+ NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS = 0x0112,
+ NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS = 0x0113,
+ NES_AEQE_AEID_AMP_MWBIND_VALID_STAG = 0x0114,
+ NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG = 0x0115,
+ NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG = 0x0116,
+ NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG = 0x0117,
+ NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS = 0x0118,
+ NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS = 0x0119,
+ NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT = 0x011a,
+ NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED = 0x011b,
+ NES_AEQE_AEID_BAD_CLOSE = 0x0201,
+ NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE = 0x0202,
+ NES_AEQE_AEID_CQ_OPERATION_ERROR = 0x0203,
+ NES_AEQE_AEID_PRIV_OPERATION_DENIED = 0x0204,
+ NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO = 0x0205,
+ NES_AEQE_AEID_STAG_ZERO_INVALID = 0x0206,
+ NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN = 0x0301,
+ NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID = 0x0302,
+ NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER = 0x0303,
+ NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION = 0x0304,
+ NES_AEQE_AEID_DDP_UBE_INVALID_MO = 0x0305,
+ NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE = 0x0306,
+ NES_AEQE_AEID_DDP_UBE_INVALID_QN = 0x0307,
+ NES_AEQE_AEID_DDP_NO_L_BIT = 0x0308,
+ NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION = 0x0311,
+ NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE = 0x0312,
+ NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST = 0x0313,
+ NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP = 0x0314,
+ NES_AEQE_AEID_INVALID_ARP_ENTRY = 0x0401,
+ NES_AEQE_AEID_INVALID_TCP_OPTION_RCVD = 0x0402,
+ NES_AEQE_AEID_STALE_ARP_ENTRY = 0x0403,
+ NES_AEQE_AEID_LLP_CLOSE_COMPLETE = 0x0501,
+ NES_AEQE_AEID_LLP_CONNECTION_RESET = 0x0502,
+ NES_AEQE_AEID_LLP_FIN_RECEIVED = 0x0503,
+ NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH = 0x0504,
+ NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR = 0x0505,
+ NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE = 0x0506,
+ NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL = 0x0507,
+ NES_AEQE_AEID_LLP_SYN_RECEIVED = 0x0508,
+ NES_AEQE_AEID_LLP_TERMINATE_RECEIVED = 0x0509,
+ NES_AEQE_AEID_LLP_TOO_MANY_RETRIES = 0x050a,
+ NES_AEQE_AEID_LLP_TOO_MANY_KEEPALIVE_RETRIES = 0x050b,
+ NES_AEQE_AEID_RESET_SENT = 0x0601,
+ NES_AEQE_AEID_TERMINATE_SENT = 0x0602,
+ NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC = 0x0700
+};
+
+enum nes_iwarp_sq_opcodes {
+ NES_IWARP_SQ_WQE_WRPDU = (1<<15),
+ NES_IWARP_SQ_WQE_PSH = (1<<21),
+ NES_IWARP_SQ_WQE_STREAMING = (1<<23),
+ NES_IWARP_SQ_WQE_IMM_DATA = (1<<28),
+ NES_IWARP_SQ_WQE_READ_FENCE = (1<<29),
+ NES_IWARP_SQ_WQE_LOCAL_FENCE = (1<<30),
+ NES_IWARP_SQ_WQE_SIGNALED_COMPL = (1<<31),
+};
+
+enum nes_iwarp_sq_wqe_bits {
+ NES_IWARP_SQ_OP_RDMAW = 0,
+ NES_IWARP_SQ_OP_RDMAR = 1,
+ NES_IWARP_SQ_OP_SEND = 3,
+ NES_IWARP_SQ_OP_SENDINV = 4,
+ NES_IWARP_SQ_OP_SENDSE = 5,
+ NES_IWARP_SQ_OP_SENDSEINV = 6,
+ NES_IWARP_SQ_OP_BIND = 8,
+ NES_IWARP_SQ_OP_FAST_REG = 9,
+ NES_IWARP_SQ_OP_LOCINV = 10,
+ NES_IWARP_SQ_OP_RDMAR_LOCINV = 11,
+ NES_IWARP_SQ_OP_NOP = 12,
+};
+
+#define NES_EEPROM_READ_REQUEST (1<<16)
+#define NES_MAC_ADDR_VALID (1<<20)
+
+/*
+ * NES index registers init values.
+ */
+struct nes_init_values {
+ u32 index;
+ u32 data;
+ u8 wrt;
+};
+
+/*
+ * NES registers in BAR0.
+ */
+struct nes_pci_regs {
+ u32 int_status;
+ u32 int_mask;
+ u32 int_pending;
+ u32 intf_int_status;
+ u32 intf_int_mask;
+ u32 other_regs[59]; /* pad out to 256 bytes for now */
+};
+
+#define NES_CQP_SQ_SIZE 128
+#define NES_CCQ_SIZE 128
+#define NES_NIC_WQ_SIZE 512
+#define NES_NIC_CTX_SIZE ((NES_NIC_CTX_RQ_SIZE_512) | (NES_NIC_CTX_SQ_SIZE_512))
+#define NES_NIC_BACK_STORE 0x00038000
+
+struct nes_device;
+
+struct nes_hw_nic_qp_context {
+ __le32 context_words[6];
+};
+
+struct nes_hw_nic_sq_wqe {
+ __le32 wqe_words[16];
+};
+
+struct nes_hw_nic_rq_wqe {
+ __le32 wqe_words[16];
+};
+
+struct nes_hw_nic_cqe {
+ __le32 cqe_words[4];
+};
+
+struct nes_hw_cqp_qp_context {
+ __le32 context_words[4];
+};
+
+struct nes_hw_cqp_wqe {
+ __le32 wqe_words[16];
+};
+
+struct nes_hw_qp_wqe {
+ __le32 wqe_words[32];
+};
+
+struct nes_hw_cqe {
+ __le32 cqe_words[8];
+};
+
+struct nes_hw_ceqe {
+ __le32 ceqe_words[2];
+};
+
+struct nes_hw_aeqe {
+ __le32 aeqe_words[4];
+};
+
+struct nes_cqp_request {
+ union {
+ u64 cqp_callback_context;
+ void *cqp_callback_pointer;
+ };
+ wait_queue_head_t waitq;
+ struct nes_hw_cqp_wqe cqp_wqe;
+ struct list_head list;
+ atomic_t refcount;
+ void (*cqp_callback)(struct nes_device *nesdev, struct nes_cqp_request *cqp_request);
+ u16 major_code;
+ u16 minor_code;
+ u8 waiting;
+ u8 request_done;
+ u8 dynamic;
+ u8 callback;
+};
+
+struct nes_hw_cqp {
+ struct nes_hw_cqp_wqe *sq_vbase;
+ dma_addr_t sq_pbase;
+ spinlock_t lock;
+ wait_queue_head_t waitq;
+ u16 qp_id;
+ u16 sq_head;
+ u16 sq_tail;
+ u16 sq_size;
+};
+
+#define NES_FIRST_FRAG_SIZE 128
+struct nes_first_frag {
+ u8 buffer[NES_FIRST_FRAG_SIZE];
+};
+
+struct nes_hw_nic {
+ struct nes_first_frag *first_frag_vbase; /* virtual address of first frags */
+ struct nes_hw_nic_sq_wqe *sq_vbase; /* virtual address of sq */
+ struct nes_hw_nic_rq_wqe *rq_vbase; /* virtual address of rq */
+ struct sk_buff *tx_skb[NES_NIC_WQ_SIZE];
+ struct sk_buff *rx_skb[NES_NIC_WQ_SIZE];
+ dma_addr_t frag_paddr[NES_NIC_WQ_SIZE];
+ unsigned long first_frag_overflow[BITS_TO_LONGS(NES_NIC_WQ_SIZE)];
+ dma_addr_t sq_pbase; /* PCI memory for host rings */
+ dma_addr_t rq_pbase; /* PCI memory for host rings */
+
+ u16 qp_id;
+ u16 sq_head;
+ u16 sq_tail;
+ u16 sq_size;
+ u16 rq_head;
+ u16 rq_tail;
+ u16 rq_size;
+ u8 replenishing_rq;
+ u8 reserved;
+
+ spinlock_t sq_lock;
+ spinlock_t rq_lock;
+};
+
+struct nes_hw_nic_cq {
+ struct nes_hw_nic_cqe volatile *cq_vbase; /* PCI memory for host rings */
+ void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_nic_cq *cq);
+ dma_addr_t cq_pbase; /* PCI memory for host rings */
+ int rx_cqes_completed;
+ int cqe_allocs_pending;
+ int rx_pkts_indicated;
+ u16 cq_head;
+ u16 cq_size;
+ u16 cq_number;
+ u8 cqes_pending;
+};
+
+struct nes_hw_qp {
+ struct nes_hw_qp_wqe *sq_vbase; /* PCI memory for host rings */
+ struct nes_hw_qp_wqe *rq_vbase; /* PCI memory for host rings */
+ void *q2_vbase; /* PCI memory for host rings */
+ dma_addr_t sq_pbase; /* PCI memory for host rings */
+ dma_addr_t rq_pbase; /* PCI memory for host rings */
+ dma_addr_t q2_pbase; /* PCI memory for host rings */
+ u32 qp_id;
+ u16 sq_head;
+ u16 sq_tail;
+ u16 sq_size;
+ u16 rq_head;
+ u16 rq_tail;
+ u16 rq_size;
+ u8 rq_encoded_size;
+ u8 sq_encoded_size;
+};
+
+struct nes_hw_cq {
+ struct nes_hw_cqe volatile *cq_vbase; /* PCI memory for host rings */
+ void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq);
+ dma_addr_t cq_pbase; /* PCI memory for host rings */
+ u16 cq_head;
+ u16 cq_size;
+ u16 cq_number;
+};
+
+struct nes_hw_ceq {
+ struct nes_hw_ceqe volatile *ceq_vbase; /* PCI memory for host rings */
+ dma_addr_t ceq_pbase; /* PCI memory for host rings */
+ u16 ceq_head;
+ u16 ceq_size;
+};
+
+struct nes_hw_aeq {
+ struct nes_hw_aeqe volatile *aeq_vbase; /* PCI memory for host rings */
+ dma_addr_t aeq_pbase; /* PCI memory for host rings */
+ u16 aeq_head;
+ u16 aeq_size;
+};
+
+struct nic_qp_map {
+ u8 qpid;
+ u8 nic_index;
+ u8 logical_port;
+ u8 is_hnic;
+};
+
+#define NES_CQP_ARP_AEQ_INDEX_MASK 0x000f0000
+#define NES_CQP_ARP_AEQ_INDEX_SHIFT 16
+
+#define NES_CQP_APBVT_ADD 0x00008000
+#define NES_CQP_APBVT_NIC_SHIFT 16
+
+#define NES_ARP_ADD 1
+#define NES_ARP_DELETE 2
+#define NES_ARP_RESOLVE 3
+
+#define NES_MAC_SW_IDLE 0
+#define NES_MAC_SW_INTERRUPT 1
+#define NES_MAC_SW_MH 2
+
+struct nes_arp_entry {
+ u32 ip_addr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+#define NES_NIC_FAST_TIMER 96
+#define NES_NIC_FAST_TIMER_LOW 40
+#define NES_NIC_FAST_TIMER_HIGH 1000
+#define DEFAULT_NES_QL_HIGH 256
+#define DEFAULT_NES_QL_LOW 16
+#define DEFAULT_NES_QL_TARGET 64
+#define DEFAULT_JUMBO_NES_QL_LOW 12
+#define DEFAULT_JUMBO_NES_QL_TARGET 40
+#define DEFAULT_JUMBO_NES_QL_HIGH 128
+#define NES_NIC_CQ_DOWNWARD_TREND 8
+
+struct nes_hw_tune_timer {
+ //u16 cq_count;
+ u16 threshold_low;
+ u16 threshold_target;
+ u16 threshold_high;
+ u16 timer_in_use;
+ u16 timer_in_use_old;
+ u16 timer_in_use_min;
+ u16 timer_in_use_max;
+ u8 timer_direction_upward;
+ u8 timer_direction_downward;
+ u16 cq_count_old;
+ u8 cq_direction_downward;
+};
+
+#define NES_TIMER_INT_LIMIT 2
+#define NES_TIMER_INT_LIMIT_DYNAMIC 10
+#define NES_TIMER_ENABLE_LIMIT 4
+#define NES_MAX_LINK_INTERRUPTS 128
+#define NES_MAX_LINK_CHECK 200
+
+struct nes_adapter {
+ u64 fw_ver;
+ unsigned long *allocated_qps;
+ unsigned long *allocated_cqs;
+ unsigned long *allocated_mrs;
+ unsigned long *allocated_pds;
+ unsigned long *allocated_arps;
+ struct nes_qp **qp_table;
+ struct workqueue_struct *work_q;
+
+ struct list_head list;
+ struct list_head active_listeners;
+ /* list of the netdevs associated with each logical port */
+ struct list_head nesvnic_list[4];
+
+ struct timer_list mh_timer;
+ struct timer_list lc_timer;
+ struct work_struct work;
+ spinlock_t resource_lock;
+ spinlock_t phy_lock;
+ spinlock_t pbl_lock;
+ spinlock_t periodic_timer_lock;
+
+ struct nes_arp_entry arp_table[NES_MAX_ARP_TABLE_SIZE];
+
+ /* Adapter CEQ and AEQs */
+ struct nes_hw_ceq ceq[16];
+ struct nes_hw_aeq aeq[8];
+
+ struct nes_hw_tune_timer tune_timer;
+
+ unsigned long doorbell_start;
+
+ u32 hw_rev;
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 device_cap_flags;
+ u32 tick_delta;
+ u32 timer_int_req;
+ u32 arp_table_size;
+ u32 next_arp_index;
+
+ u32 max_mr;
+ u32 max_256pbl;
+ u32 max_4kpbl;
+ u32 free_256pbl;
+ u32 free_4kpbl;
+ u32 max_mr_size;
+ u32 max_qp;
+ u32 next_qp;
+ u32 max_irrq;
+ u32 max_qp_wr;
+ u32 max_sge;
+ u32 max_cq;
+ u32 next_cq;
+ u32 max_cqe;
+ u32 max_pd;
+ u32 base_pd;
+ u32 next_pd;
+ u32 hte_index_mask;
+
+ /* EEPROM information */
+ u32 rx_pool_size;
+ u32 tx_pool_size;
+ u32 rx_threshold;
+ u32 tcp_timer_core_clk_divisor;
+ u32 iwarp_config;
+ u32 cm_config;
+ u32 sws_timer_config;
+ u32 tcp_config1;
+ u32 wqm_wat;
+ u32 core_clock;
+ u32 firmware_version;
+
+ u32 nic_rx_eth_route_err;
+
+ u32 et_rx_coalesce_usecs;
+ u32 et_rx_max_coalesced_frames;
+ u32 et_rx_coalesce_usecs_irq;
+ u32 et_rx_max_coalesced_frames_irq;
+ u32 et_pkt_rate_low;
+ u32 et_rx_coalesce_usecs_low;
+ u32 et_rx_max_coalesced_frames_low;
+ u32 et_pkt_rate_high;
+ u32 et_rx_coalesce_usecs_high;
+ u32 et_rx_max_coalesced_frames_high;
+ u32 et_rate_sample_interval;
+ u32 timer_int_limit;
+
+ /* Adapter base MAC address */
+ u32 mac_addr_low;
+ u16 mac_addr_high;
+
+ u16 firmware_eeprom_offset;
+ u16 software_eeprom_offset;
+
+ u16 max_irrq_wr;
+
+ /* pd config for each port */
+ u16 pd_config_size[4];
+ u16 pd_config_base[4];
+
+ u16 link_interrupt_count[4];
+
+ /* the phy index for each port */
+ u8 phy_index[4];
+ u8 mac_sw_state[4];
+ u8 mac_link_down[4];
+ u8 phy_type[4];
+
+ /* PCI information */
+ unsigned int devfn;
+ unsigned char bus_number;
+ unsigned char OneG_Mode;
+
+ unsigned char ref_count;
+ u8 netdev_count;
+ u8 netdev_max; /* from host nic address count in EEPROM */
+ u8 port_count;
+ u8 virtwq;
+ u8 et_use_adaptive_rx_coalesce;
+ u8 adapter_fcn_count;
+};
+
+struct nes_pbl {
+ u64 *pbl_vbase;
+ dma_addr_t pbl_pbase;
+ struct page *page;
+ unsigned long user_base;
+ u32 pbl_size;
+ struct list_head list;
+ /* TODO: need to add list for two level tables */
+};
+
+struct nes_listener {
+ struct work_struct work;
+ struct workqueue_struct *wq;
+ struct nes_vnic *nesvnic;
+ struct iw_cm_id *cm_id;
+ struct list_head list;
+ unsigned long socket;
+ u8 accept_failed;
+};
+
+struct nes_ib_device;
+
+struct nes_vnic {
+ struct nes_ib_device *nesibdev;
+ u64 sq_full;
+ u64 sq_locked;
+ u64 tso_requests;
+ u64 segmented_tso_requests;
+ u64 linearized_skbs;
+ u64 tx_sw_dropped;
+ u64 endnode_nstat_rx_discard;
+ u64 endnode_nstat_rx_octets;
+ u64 endnode_nstat_rx_frames;
+ u64 endnode_nstat_tx_octets;
+ u64 endnode_nstat_tx_frames;
+ u64 endnode_ipv4_tcp_retransmits;
+ /* void *mem; */
+ struct nes_device *nesdev;
+ struct net_device *netdev;
+ struct vlan_group *vlan_grp;
+ atomic_t rx_skbs_needed;
+ atomic_t rx_skb_timer_running;
+ int budget;
+ u32 msg_enable;
+ /* u32 tx_avail; */
+ __be32 local_ipaddr;
+ struct napi_struct napi;
+ spinlock_t tx_lock; /* could use netdev tx lock? */
+ struct timer_list rq_wqes_timer;
+ u32 nic_mem_size;
+ void *nic_vbase;
+ dma_addr_t nic_pbase;
+ struct nes_hw_nic nic;
+ struct nes_hw_nic_cq nic_cq;
+ u32 mcrq_qp_id;
+ struct nes_ucontext *mcrq_ucontext;
+ struct nes_cqp_request* (*get_cqp_request)(struct nes_device *nesdev);
+ void (*post_cqp_request)(struct nes_device*, struct nes_cqp_request *, int);
+ int (*mcrq_mcast_filter)( struct nes_vnic* nesvnic, __u8* dmi_addr );
+ struct net_device_stats netstats;
+ /* used to put the netdev on the adapters logical port list */
+ struct list_head list;
+ u16 max_frame_size;
+ u8 netdev_open;
+ u8 linkup;
+ u8 logical_port;
+ u8 netdev_index; /* might not be needed, indexes nesdev->netdev */
+ u8 perfect_filter_index;
+ u8 nic_index;
+ u8 qp_nic_index[4];
+ u8 next_qp_nic_index;
+ u8 of_device_registered;
+ u8 rdma_enabled;
+ u8 rx_checksum_disabled;
+};
+
+struct nes_ib_device {
+ struct ib_device ibdev;
+ struct nes_vnic *nesvnic;
+
+ /* Virtual RNIC Limits */
+ u32 max_mr;
+ u32 max_qp;
+ u32 max_cq;
+ u32 max_pd;
+ u32 num_mr;
+ u32 num_qp;
+ u32 num_cq;
+ u32 num_pd;
+};
+
+#define nes_vlan_rx vlan_hwaccel_receive_skb
+#define nes_netif_rx netif_receive_skb
+
+#endif /* __NES_HW_H */
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
new file mode 100644
index 000000000000..b6cc265aa9a4
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -0,0 +1,1703 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+#include <net/tcp.h>
+
+#include <net/inet_common.h>
+#include <linux/inet.h>
+
+#include "nes.h"
+
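+/* { QP id, NIC index, logical port, is_hnic } tuples (see struct nic_qp_map);
+ * the table for this PCI function is selected in nes_netdev_init() below. */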
+static struct nic_qp_map nic_qp_mapping_0[] = {
+ {16,0,0,1},{24,4,0,0},{28,8,0,0},{32,12,0,0},
+ {20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0},
+ {18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0},
+ {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
+};
+
+static struct nic_qp_map nic_qp_mapping_1[] = {
+ {18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0},
+ {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
+};
+
+static struct nic_qp_map nic_qp_mapping_2[] = {
+ {20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0}
+};
+
+static struct nic_qp_map nic_qp_mapping_3[] = {
+ {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
+};
+
+static struct nic_qp_map nic_qp_mapping_4[] = {
+ {28,8,0,0},{32,12,0,0}
+};
+
+static struct nic_qp_map nic_qp_mapping_5[] = {
+ {29,9,1,0},{33,13,1,0}
+};
+
+static struct nic_qp_map nic_qp_mapping_6[] = {
+ {30,10,2,0},{34,14,2,0}
+};
+
+static struct nic_qp_map nic_qp_mapping_7[] = {
+ {31,11,3,0},{35,15,3,0}
+};
+
+static struct nic_qp_map *nic_qp_mapping_per_function[] = {
+ nic_qp_mapping_0, nic_qp_mapping_1, nic_qp_mapping_2, nic_qp_mapping_3,
+ nic_qp_mapping_4, nic_qp_mapping_5, nic_qp_mapping_6, nic_qp_mapping_7
+};
+
+static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+ | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+static int debug = -1;
+
+
+static int nes_netdev_open(struct net_device *);
+static int nes_netdev_stop(struct net_device *);
+static int nes_netdev_start_xmit(struct sk_buff *, struct net_device *);
+static struct net_device_stats *nes_netdev_get_stats(struct net_device *);
+static void nes_netdev_tx_timeout(struct net_device *);
+static int nes_netdev_set_mac_address(struct net_device *, void *);
+static int nes_netdev_change_mtu(struct net_device *, int);
+
+/**
+ * nes_netdev_poll - NAPI poll handler; drain the NIC CQ and re-arm it when no work remains.
+ */
+static int nes_netdev_poll(struct napi_struct *napi, int budget)
+{
+ struct nes_vnic *nesvnic = container_of(napi, struct nes_vnic, napi);
+ struct net_device *netdev = nesvnic->netdev;
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_hw_nic_cq *nescq = &nesvnic->nic_cq;
+
+ nesvnic->budget = budget;
+ nescq->cqes_pending = 0;
+ nescq->rx_cqes_completed = 0;
+ nescq->cqe_allocs_pending = 0;
+ nescq->rx_pkts_indicated = 0;
+
+ nes_nic_ce_handler(nesdev, nescq);
+
+ if (nescq->cqes_pending == 0) {
+ netif_rx_complete(netdev, napi);
+ /* clear out completed cqes and arm */
+ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
+ nescq->cq_number | (nescq->cqe_allocs_pending << 16));
+ nes_read32(nesdev->regs+NES_CQE_ALLOC);
+ } else {
+ /* clear out completed cqes but don't arm */
+ nes_write32(nesdev->regs+NES_CQE_ALLOC,
+ nescq->cq_number | (nescq->cqe_allocs_pending << 16));
+ nes_debug(NES_DBG_NETDEV, "%s: exiting with work pending\n",
+ nesvnic->netdev->name);
+ }
+ return nescq->rx_pkts_indicated;
+}
+
+
+/**
+ * nes_netdev_open - Activate the network interface; ifconfig
+ * ethx up.
+ */
+static int nes_netdev_open(struct net_device *netdev)
+{
+ u32 macaddr_low;
+ u16 macaddr_high;
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ int ret;
+ int i;
+ struct nes_vnic *first_nesvnic;
+ u32 nic_active_bit;
+ u32 nic_active;
+
+ assert(nesdev != NULL);
+
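+ /* the first nesvnic on this MAC's list holds the current link state; it is checked below before enabling the queue */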
+ first_nesvnic = list_entry(nesdev->nesadapter->nesvnic_list[nesdev->mac_index].next,
+ struct nes_vnic, list);
+
+ if (netif_msg_ifup(nesvnic))
+ printk(KERN_INFO PFX "%s: enabling interface\n", netdev->name);
+
+ ret = nes_init_nic_qp(nesdev, netdev);
+ if (ret) {
+ return ret;
+ }
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ if ((!nesvnic->of_device_registered) && (nesvnic->rdma_enabled)) {
+ nesvnic->nesibdev = nes_init_ofa_device(netdev);
+ if (nesvnic->nesibdev == NULL) {
+ printk(KERN_ERR PFX "%s: nesvnic->nesibdev alloc failed", netdev->name);
+ } else {
+ nesvnic->nesibdev->nesvnic = nesvnic;
+ ret = nes_register_ofa_device(nesvnic->nesibdev);
+ if (ret) {
+ printk(KERN_ERR PFX "%s: Unable to register RDMA device, ret = %d\n",
+ netdev->name, ret);
+ }
+ }
+ }
+ /* Set packet filters */
+ nic_active_bit = 1 << nesvnic->nic_index;
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE);
+ nic_active |= nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE);
+ nic_active |= nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
+ nic_active |= nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
+
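+ /* pack the 6-byte station address into the 16-bit high / 32-bit low halves used by the perfect filter registers */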
+ macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
+ macaddr_high += (u16)netdev->dev_addr[1];
+ macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
+ macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
+ macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
+ macaddr_low += (u32)netdev->dev_addr[5];
+
+ /* Program the various MAC regs */
+ for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
+ if (nesvnic->qp_nic_index[i] == 0xf) {
+ break;
+ }
+ nes_debug(NES_DBG_NETDEV, "i=%d, perfect filter table index= %d, PERF FILTER LOW"
+ " (Addr:%08X) = %08X, HIGH = %08X.\n",
+ i, nesvnic->qp_nic_index[i],
+ NES_IDX_PERFECT_FILTER_LOW+((nesvnic->perfect_filter_index + i) * 8),
+ macaddr_low,
+ (u32)macaddr_high | NES_MAC_ADDR_VALID |
+ ((((u32)nesvnic->nic_index) << 16)));
+ nes_write_indexed(nesdev,
+ NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
+ macaddr_low);
+ nes_write_indexed(nesdev,
+ NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
+ (u32)macaddr_high | NES_MAC_ADDR_VALID |
+ ((((u32)nesvnic->nic_index) << 16)));
+ }
+
+
+ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
+ nesvnic->nic_cq.cq_number);
+ nes_read32(nesdev->regs+NES_CQE_ALLOC);
+
+ if (first_nesvnic->linkup) {
+ /* Enable network packets */
+ nesvnic->linkup = 1;
+ netif_start_queue(netdev);
+ netif_carrier_on(netdev);
+ }
+ napi_enable(&nesvnic->napi);
+ nesvnic->netdev_open = 1;
+
+ return 0;
+}
+
+
+/**
+ * nes_netdev_stop - Deactivate the network interface; ifconfig
+ * ethx down.
+ */
+static int nes_netdev_stop(struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ u32 nic_active_mask;
+ u32 nic_active;
+
+ nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n",
+ nesvnic, nesdev, netdev, netdev->name);
+ if (nesvnic->netdev_open == 0)
+ return 0;
+
+ if (netif_msg_ifdown(nesvnic))
+ printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name);
+
+ /* Disable network packets */
+ napi_disable(&nesvnic->napi);
+ netif_stop_queue(netdev);
+ if ((nesdev->netdev[0] == netdev) && (nesvnic->logical_port == nesdev->mac_index)) {
+ nes_write_indexed(nesdev,
+ NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff);
+ }
+
+ nic_active_mask = ~((u32)(1 << nesvnic->nic_index));
+ nes_write_indexed(nesdev, NES_IDX_PERFECT_FILTER_HIGH+
+ (nesvnic->perfect_filter_index*8), 0);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE);
+ nic_active &= nic_active_mask;
+ nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
+ nic_active &= nic_active_mask;
+ nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE);
+ nic_active &= nic_active_mask;
+ nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
+ nic_active &= nic_active_mask;
+ nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
+ nic_active &= nic_active_mask;
+ nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
+
+
+ if (nesvnic->of_device_registered) {
+ nes_destroy_ofa_device(nesvnic->nesibdev);
+ nesvnic->nesibdev = NULL;
+ nesvnic->of_device_registered = 0;
+ }
+ nes_destroy_nic_qp(nesvnic);
+
+ nesvnic->netdev_open = 0;
+
+ return 0;
+}
+
+
+/**
+ * nes_nic_send - Build and fill a single NIC SQ WQE for the given skb.
+ */
+static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_hw_nic *nesnic = &nesvnic->nic;
+ struct nes_hw_nic_sq_wqe *nic_sqe;
+ struct tcphdr *tcph;
+ __le16 *wqe_fragment_length;
+ u32 wqe_misc;
+ u16 wqe_fragment_index = 1; /* first fragment (0) is used by copy buffer */
+ u16 skb_fragment_index;
+ dma_addr_t bus_address;
+
+ nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
+ wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
+
+ /* setup the VLAN tag if present */
+ if (vlan_tx_tag_present(skb)) {
+ nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
+ netdev->name, vlan_tx_tag_get(skb));
+ wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
+ wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
+ } else
+ wqe_misc = 0;
+
+ /* bump past the vlan tag */
+ wqe_fragment_length++;
+ /* wqe_fragment_address = (u64 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX]; */
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ tcph = tcp_hdr(skb);
+ if (skb_is_gso(skb)) {
+ /* nes_debug(NES_DBG_NIC_TX, "%s: TSO request... seg size = %u\n",
+ netdev->name, skb_is_gso(skb)); */
+ wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE |
+ NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
+ set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
+ ((u32)tcph->doff) |
+ (((u32)(((unsigned char *)tcph) - skb->data)) << 4));
+ } else {
+ wqe_misc |= NES_NIC_SQ_WQE_COMPLETION;
+ }
+ } else { /* CHECKSUM_HW */
+ wqe_misc |= NES_NIC_SQ_WQE_DISABLE_CHKSUM | NES_NIC_SQ_WQE_COMPLETION;
+ }
+
+ set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
+ skb->len);
+ memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
+ skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE), skb_headlen(skb)));
+ wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
+ skb_headlen(skb)));
+ wqe_fragment_length[1] = 0;
+ if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
+ if ((skb_shinfo(skb)->nr_frags + 1) > 4) {
+ nes_debug(NES_DBG_NIC_TX, "%s: Packet with %u fragments not sent, skb_headlen=%u\n",
+ netdev->name, skb_shinfo(skb)->nr_frags + 2, skb_headlen(skb));
+ kfree_skb(skb);
+ nesvnic->tx_sw_dropped++;
+ return NETDEV_TX_LOCKED;
+ }
+ set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
+ bus_address = pci_map_single(nesdev->pcidev, skb->data + NES_FIRST_FRAG_SIZE,
+ skb_headlen(skb) - NES_FIRST_FRAG_SIZE, PCI_DMA_TODEVICE);
+ wqe_fragment_length[wqe_fragment_index++] =
+ cpu_to_le16(skb_headlen(skb) - NES_FIRST_FRAG_SIZE);
+ wqe_fragment_length[wqe_fragment_index] = 0;
+ set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
+ ((u64)(bus_address)));
+ nesnic->tx_skb[nesnic->sq_head] = skb;
+ }
+
+ if (skb_headlen(skb) == skb->len) {
+ if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) {
+ nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0;
+ nesnic->tx_skb[nesnic->sq_head] = NULL;
+ dev_kfree_skb(skb);
+ }
+ } else {
+ /* Deal with Fragments */
+ nesnic->tx_skb[nesnic->sq_head] = skb;
+ for (skb_fragment_index = 0; skb_fragment_index < skb_shinfo(skb)->nr_frags;
+ skb_fragment_index++) {
+ bus_address = pci_map_page( nesdev->pcidev,
+ skb_shinfo(skb)->frags[skb_fragment_index].page,
+ skb_shinfo(skb)->frags[skb_fragment_index].page_offset,
+ skb_shinfo(skb)->frags[skb_fragment_index].size,
+ PCI_DMA_TODEVICE);
+ wqe_fragment_length[wqe_fragment_index] =
+ cpu_to_le16(skb_shinfo(skb)->frags[skb_fragment_index].size);
+ set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
+ bus_address);
+ wqe_fragment_index++;
+ if (wqe_fragment_index < 5)
+ wqe_fragment_length[wqe_fragment_index] = 0;
+ }
+ }
+
+ set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc);
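+ /* advance the SQ producer index; sq_size is a power of two, so the mask wraps it */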
+ nesnic->sq_head++;
+ nesnic->sq_head &= nesnic->sq_size - 1;
+
+ return NETDEV_TX_OK;
+}
+
+
+/**
+ * nes_netdev_start_xmit - Queue an skb on the NIC SQ, segmenting large TSO requests across multiple WQEs.
+ */
+static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_hw_nic *nesnic = &nesvnic->nic;
+ struct nes_hw_nic_sq_wqe *nic_sqe;
+ struct tcphdr *tcph;
+ /* struct udphdr *udph; */
+#define NES_MAX_TSO_FRAGS 18
+ /* 64K segment plus overflow on each side */
+ dma_addr_t tso_bus_address[NES_MAX_TSO_FRAGS];
+ dma_addr_t bus_address;
+ u32 tso_frag_index;
+ u32 tso_frag_count;
+ u32 tso_wqe_length;
+ u32 curr_tcp_seq;
+ u32 wqe_count=1;
+ u32 send_rc;
+ struct iphdr *iph;
+ unsigned long flags;
+ __le16 *wqe_fragment_length;
+ u32 nr_frags;
+ u32 original_first_length;
+// u64 *wqe_fragment_address;
+ /* first fragment (0) is used by copy buffer */
+ u16 wqe_fragment_index=1;
+ u16 hoffset;
+ u16 nhoffset;
+ u16 wqes_needed;
+ u16 wqes_available;
+ u32 old_head;
+ u32 wqe_misc;
+
+ /* nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
+ " (%u frags), tso_size=%u\n",
+ netdev->name, skb->len, skb_headlen(skb),
+ skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
+ */
+
+ if (!netif_carrier_ok(netdev))
+ return NETDEV_TX_OK;
+
+ if (netif_queue_stopped(netdev))
+ return NETDEV_TX_BUSY;
+
+ local_irq_save(flags);
+ if (!spin_trylock(&nesnic->sq_lock)) {
+ local_irq_restore(flags);
+ nesvnic->sq_locked++;
+ return NETDEV_TX_LOCKED;
+ }
+
+ /* Check if SQ is full */
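+ /* the ring keeps one slot empty: (tail - head) mod sq_size == 1 means the SQ is full */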
+ if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) {
+ if (!netif_queue_stopped(netdev)) {
+ netif_stop_queue(netdev);
+ barrier();
+ if ((((((volatile u16)nesnic->sq_tail)+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) != 1) {
+ netif_start_queue(netdev);
+ goto sq_no_longer_full;
+ }
+ }
+ nesvnic->sq_full++;
+ spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+sq_no_longer_full:
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
+ nr_frags++;
+ }
+ /* Check if too many fragments */
+ if (unlikely((nr_frags > 4))) {
+ if (skb_is_gso(skb)) {
+ nesvnic->segmented_tso_requests++;
+ nesvnic->tso_requests++;
+ old_head = nesnic->sq_head;
+ /* Basically 4 fragments available per WQE with extended fragments */
+ wqes_needed = nr_frags >> 2;
+ wqes_needed += (nr_frags&3)?1:0;
+ wqes_available = (((nesnic->sq_tail+nesnic->sq_size)-nesnic->sq_head) - 1) &
+ (nesnic->sq_size - 1);
+
+ if (unlikely(wqes_needed > wqes_available)) {
+ if (!netif_queue_stopped(netdev)) {
+ netif_stop_queue(netdev);
+ barrier();
+ wqes_available = (((((volatile u16)nesnic->sq_tail)+nesnic->sq_size)-nesnic->sq_head) - 1) &
+ (nesnic->sq_size - 1);
+ if (wqes_needed <= wqes_available) {
+ netif_start_queue(netdev);
+ goto tso_sq_no_longer_full;
+ }
+ }
+ nesvnic->sq_full++;
+ spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+ nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n",
+ netdev->name);
+ return NETDEV_TX_BUSY;
+ }
+tso_sq_no_longer_full:
+ /* Map all the buffers */
+ for (tso_frag_count=0; tso_frag_count < skb_shinfo(skb)->nr_frags;
+ tso_frag_count++) {
+ tso_bus_address[tso_frag_count] = pci_map_page( nesdev->pcidev,
+ skb_shinfo(skb)->frags[tso_frag_count].page,
+ skb_shinfo(skb)->frags[tso_frag_count].page_offset,
+ skb_shinfo(skb)->frags[tso_frag_count].size,
+ PCI_DMA_TODEVICE);
+ }
+
+ tso_frag_index = 0;
+ curr_tcp_seq = ntohl(tcp_hdr(skb)->seq);
+ hoffset = skb_transport_header(skb) - skb->data;
+ nhoffset = skb_network_header(skb) - skb->data;
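+ /* total Ethernet/IP/TCP header length; these headers are copied in front of every generated segment */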
+ original_first_length = hoffset + ((((struct tcphdr *)skb_transport_header(skb))->doff)<<2);
+
+ for (wqe_count=0; wqe_count<((u32)wqes_needed); wqe_count++) {
+ tso_wqe_length = 0;
+ nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
+ wqe_fragment_length =
+ (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
+ /* setup the VLAN tag if present */
+ if (vlan_tx_tag_present(skb)) {
+ nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
+ netdev->name, vlan_tx_tag_get(skb) );
+ wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
+ wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
+ } else
+ wqe_misc = 0;
+
+ /* bump past the vlan tag */
+ wqe_fragment_length++;
+
+ /* Assumes header totally fits in allocated buffer and is in first fragment */
+ if (original_first_length > NES_FIRST_FRAG_SIZE) {
+ nes_debug(NES_DBG_NIC_TX, "ERROR: SKB header too big, headlen=%u, FIRST_FRAG_SIZE=%u\n",
+ original_first_length, NES_FIRST_FRAG_SIZE);
+ nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
+ " (%u frags), tso_size=%u\n",
+ netdev->name,
+ skb->len, skb_headlen(skb),
+ skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
+ }
+ memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
+ skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE),
+ original_first_length));
+ iph = (struct iphdr *)
+ (&nesnic->first_frag_vbase[nesnic->sq_head].buffer[nhoffset]);
+ tcph = (struct tcphdr *)
+ (&nesnic->first_frag_vbase[nesnic->sq_head].buffer[hoffset]);
+ if ((wqe_count+1)!=(u32)wqes_needed) {
+ tcph->fin = 0;
+ tcph->psh = 0;
+ tcph->rst = 0;
+ tcph->urg = 0;
+ }
+ if (wqe_count) {
+ tcph->syn = 0;
+ }
+ tcph->seq = htonl(curr_tcp_seq);
+ wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
+ original_first_length));
+
+ wqe_fragment_index = 1;
+ if ((wqe_count==0) && (skb_headlen(skb) > original_first_length)) {
+ set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
+ bus_address = pci_map_single(nesdev->pcidev, skb->data + original_first_length,
+ skb_headlen(skb) - original_first_length, PCI_DMA_TODEVICE);
+ wqe_fragment_length[wqe_fragment_index++] =
+ cpu_to_le16(skb_headlen(skb) - original_first_length);
+ wqe_fragment_length[wqe_fragment_index] = 0;
+ set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
+ bus_address);
+ }
+ while (wqe_fragment_index < 5) {
+ wqe_fragment_length[wqe_fragment_index] =
+ cpu_to_le16(skb_shinfo(skb)->frags[tso_frag_index].size);
+ set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
+ (u64)tso_bus_address[tso_frag_index]);
+ wqe_fragment_index++;
+ tso_wqe_length += skb_shinfo(skb)->frags[tso_frag_index++].size;
+ if (wqe_fragment_index < 5)
+ wqe_fragment_length[wqe_fragment_index] = 0;
+ if (tso_frag_index == tso_frag_count)
+ break;
+ }
+ if ((wqe_count+1) == (u32)wqes_needed) {
+ nesnic->tx_skb[nesnic->sq_head] = skb;
+ } else {
+ nesnic->tx_skb[nesnic->sq_head] = NULL;
+ }
+ wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
+ if ((tso_wqe_length + original_first_length) > skb_is_gso(skb)) {
+ wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE;
+ } else {
+ iph->tot_len = htons(tso_wqe_length + original_first_length - nhoffset);
+ }
+
+ set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX,
+ wqe_misc);
+ set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
+ ((u32)tcph->doff) | (((u32)hoffset) << 4));
+
+ set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
+ tso_wqe_length + original_first_length);
+ curr_tcp_seq += tso_wqe_length;
+ nesnic->sq_head++;
+ nesnic->sq_head &= nesnic->sq_size-1;
+ }
+ } else {
+ nesvnic->linearized_skbs++;
+ hoffset = skb_transport_header(skb) - skb->data;
+ nhoffset = skb_network_header(skb) - skb->data;
+ skb_linearize(skb);
+ skb_set_transport_header(skb, hoffset);
+ skb_set_network_header(skb, nhoffset);
+ send_rc = nes_nic_send(skb, netdev);
+ if (send_rc != NETDEV_TX_OK) {
+ spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+ return NETDEV_TX_OK;
+ }
+ }
+ } else {
+ send_rc = nes_nic_send(skb, netdev);
+ if (send_rc != NETDEV_TX_OK) {
+ spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ barrier();
+
+ if (wqe_count)
+ nes_write32(nesdev->regs+NES_WQE_ALLOC,
+ (wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);
+
+ netdev->trans_start = jiffies;
+ spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+
+/**
+ * nes_netdev_get_stats - Fold the hardware drop and error counters into netstats and return it.
+ */
+static struct net_device_stats *nes_netdev_get_stats(struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ u64 u64temp;
+ u32 u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_DISCARD + (nesvnic->nic_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->endnode_nstat_rx_discard += u32temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO + (nesvnic->nic_index*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
+
+ nesvnic->endnode_nstat_rx_octets += u64temp;
+ nesvnic->netstats.rx_bytes += u64temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO + (nesvnic->nic_index*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
+
+ nesvnic->endnode_nstat_rx_frames += u64temp;
+ nesvnic->netstats.rx_packets += u64temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO + (nesvnic->nic_index*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
+
+ nesvnic->endnode_nstat_tx_octets += u64temp;
+ nesvnic->netstats.tx_bytes += u64temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO + (nesvnic->nic_index*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
+
+ nesvnic->endnode_nstat_tx_frames += u64temp;
+ nesvnic->netstats.tx_packets += u64temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_short_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_length_errors += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_crc_errors += u32temp;
+ nesvnic->netstats.rx_crc_errors += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->nesdev->mac_tx_errors += u32temp;
+ nesvnic->netstats.tx_errors += u32temp;
+
+ return &nesvnic->netstats;
+}
+
+
+/**
+ * nes_netdev_tx_timeout
+ */
+static void nes_netdev_tx_timeout(struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+
+ if (netif_msg_timer(nesvnic))
+ nes_debug(NES_DBG_NIC_TX, "%s: tx timeout\n", netdev->name);
+}
+
+
+/**
+ * nes_netdev_set_mac_address
+ */
+static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct sockaddr *mac_addr = p;
+ int i;
+ u32 macaddr_low;
+ u16 macaddr_high;
+
+ if (!is_valid_ether_addr(mac_addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
+ printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n",
+ __FUNCTION__, netdev->addr_len,
+ mac_addr->sa_data[0], mac_addr->sa_data[1],
+ mac_addr->sa_data[2], mac_addr->sa_data[3],
+ mac_addr->sa_data[4], mac_addr->sa_data[5]);
+ macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
+ macaddr_high += (u16)netdev->dev_addr[1];
+ macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
+ macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
+ macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
+ macaddr_low += (u32)netdev->dev_addr[5];
+
+ for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
+ if (nesvnic->qp_nic_index[i] == 0xf) {
+ break;
+ }
+ nes_write_indexed(nesdev,
+ NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
+ macaddr_low);
+ nes_write_indexed(nesdev,
+ NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
+ (u32)macaddr_high | NES_MAC_ADDR_VALID |
+ ((((u32)nesvnic->nic_index) << 16)));
+ }
+ return 0;
+}
+
+
+/**
+ * nes_netdev_set_multicast_list - Program the promiscuous/allmulti bits and the multicast perfect filters.
+ */
+void nes_netdev_set_multicast_list(struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct dev_mc_list *multicast_addr;
+ u32 nic_active_bit;
+ u32 nic_active;
+ u32 perfect_filter_register_address;
+ u32 macaddr_low;
+ u16 macaddr_high;
+ u8 mc_all_on = 0;
+ u8 mc_index;
+ int mc_nic_index = -1;
+
+ nic_active_bit = 1 << nesvnic->nic_index;
+
+ if (netdev->flags & IFF_PROMISC) {
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
+ nic_active |= nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
+ nic_active |= nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
+ mc_all_on = 1;
+ } else if ((netdev->flags & IFF_ALLMULTI) || (netdev->mc_count > NES_MULTICAST_PF_MAX) ||
+ (nesvnic->nic_index > 3)) {
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
+ nic_active |= nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
+ nic_active &= ~nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
+ mc_all_on = 1;
+ } else {
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
+ nic_active &= ~nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
+ nic_active &= ~nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
+ }
+
+ nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscous = %d, All Multicast = %d.\n",
+ netdev->mc_count, (netdev->flags & IFF_PROMISC)?1:0,
+ (netdev->flags & IFF_ALLMULTI)?1:0);
+ if (!mc_all_on) {
+ multicast_addr = netdev->mc_list;
+ perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW + 0x80;
+ perfect_filter_register_address += nesvnic->nic_index*0x40;
+ for (mc_index=0; mc_index < NES_MULTICAST_PF_MAX; mc_index++) {
+ while (multicast_addr && nesvnic->mcrq_mcast_filter && ((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic, multicast_addr->dmi_addr)) == 0))
+ multicast_addr = multicast_addr->next;
+
+ if (mc_nic_index < 0)
+ mc_nic_index = nesvnic->nic_index;
+ if (multicast_addr) {
+ nes_debug(NES_DBG_NIC_RX, "Assigning MC Address = %02X%02X%02X%02X%02X%02X to register 0x%04X nic_idx=%d\n",
+ multicast_addr->dmi_addr[0], multicast_addr->dmi_addr[1],
+ multicast_addr->dmi_addr[2], multicast_addr->dmi_addr[3],
+ multicast_addr->dmi_addr[4], multicast_addr->dmi_addr[5],
+ perfect_filter_register_address+(mc_index * 8), mc_nic_index);
+ macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8;
+ macaddr_high += (u16)multicast_addr->dmi_addr[1];
+ macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24;
+ macaddr_low += ((u32)multicast_addr->dmi_addr[3]) << 16;
+ macaddr_low += ((u32)multicast_addr->dmi_addr[4]) << 8;
+ macaddr_low += (u32)multicast_addr->dmi_addr[5];
+ nes_write_indexed(nesdev,
+ perfect_filter_register_address+(mc_index * 8),
+ macaddr_low);
+ nes_write_indexed(nesdev,
+ perfect_filter_register_address+4+(mc_index * 8),
+ (u32)macaddr_high | NES_MAC_ADDR_VALID |
+ ((((u32)(1<<mc_nic_index)) << 16)));
+ multicast_addr = multicast_addr->next;
+ } else {
+ nes_debug(NES_DBG_NIC_RX, "Clearing MC Address at register 0x%04X\n",
+ perfect_filter_register_address+(mc_index * 8));
+ nes_write_indexed(nesdev,
+ perfect_filter_register_address+4+(mc_index * 8),
+ 0);
+ }
+ }
+ }
+}
+
+
+/**
+ * nes_netdev_change_mtu - Validate the new MTU, reprogram the timer defaults and restart the interface if it is up.
+ */
+static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ int ret = 0;
+ u8 jumbomode=0;
+
+ if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu))
+ return -EINVAL;
+
+ netdev->mtu = new_mtu;
+ nesvnic->max_frame_size = new_mtu+ETH_HLEN;
+
+ if (netdev->mtu > 1500) {
+ jumbomode=1;
+ }
+ nes_nic_init_timer_defaults(nesdev, jumbomode);
+
+ if (netif_running(netdev)) {
+ nes_netdev_stop(netdev);
+ nes_netdev_open(netdev);
+ }
+
+ return ret;
+}
+
+
+/**
+ * nes_netdev_exit - destroy network device
+ */
+void nes_netdev_exit(struct nes_vnic *nesvnic)
+{
+ struct net_device *netdev = nesvnic->netdev;
+ struct nes_ib_device *nesibdev = nesvnic->nesibdev;
+
+ nes_debug(NES_DBG_SHUTDOWN, "\n");
+
+ /* destroy the ibdevice if RDMA enabled */
+ if ((nesvnic->rdma_enabled) && (nesvnic->of_device_registered)) {
+ nes_destroy_ofa_device(nesibdev);
+ nesvnic->of_device_registered = 0;
+ nesvnic->nesibdev = NULL;
+ }
+ unregister_netdev(netdev);
+ nes_debug(NES_DBG_SHUTDOWN, "\n");
+}
+
+
+#define NES_ETHTOOL_STAT_COUNT 55
+static const char nes_ethtool_stringset[NES_ETHTOOL_STAT_COUNT][ETH_GSTRING_LEN] = {
+ "Link Change Interrupts",
+ "Linearized SKBs",
+ "T/GSO Requests",
+ "Pause Frames Sent",
+ "Pause Frames Received",
+ "Internal Routing Errors",
+ "SQ SW Dropped SKBs",
+ "SQ Locked",
+ "SQ Full",
+ "Segmented TSO Requests",
+ "Rx Symbol Errors",
+ "Rx Jabber Errors",
+ "Rx Oversized Frames",
+ "Rx Short Frames",
+ "Endnode Rx Discards",
+ "Endnode Rx Octets",
+ "Endnode Rx Frames",
+ "Endnode Tx Octets",
+ "Endnode Tx Frames",
+ "mh detected",
+ "mh pauses",
+ "Retransmission Count",
+ "CM Connects",
+ "CM Accepts",
+ "Disconnects",
+ "Connected Events",
+ "Connect Requests",
+ "CM Rejects",
+ "ModifyQP Timeouts",
+ "CreateQPs",
+ "SW DestroyQPs",
+ "DestroyQPs",
+ "CM Closes",
+ "CM Packets Sent",
+ "CM Packets Bounced",
+ "CM Packets Created",
+ "CM Packets Rcvd",
+ "CM Packets Dropped",
+ "CM Packets Retrans",
+ "CM Listens Created",
+ "CM Listens Destroyed",
+ "CM Backlog Drops",
+ "CM Loopbacks",
+ "CM Nodes Created",
+ "CM Nodes Destroyed",
+ "CM Accel Drops",
+ "CM Resets Received",
+ "Timer Inits",
+ "CQ Depth 1",
+ "CQ Depth 4",
+ "CQ Depth 16",
+ "CQ Depth 24",
+ "CQ Depth 32",
+ "CQ Depth 128",
+ "CQ Depth 256",
+};
+
+
+/**
+ * nes_netdev_get_rx_csum
+ */
+static u32 nes_netdev_get_rx_csum (struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+
+ if (nesvnic->rx_checksum_disabled)
+ return 0;
+ else
+ return 1;
+}
+
+
+/**
+ * nes_netdev_set_rx_csum
+ */
+static int nes_netdev_set_rx_csum(struct net_device *netdev, u32 enable)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+
+ if (enable)
+ nesvnic->rx_checksum_disabled = 0;
+ else
+ nesvnic->rx_checksum_disabled = 1;
+ return 0;
+}
+
+
+/**
+ * nes_netdev_get_stats_count
+ */
+static int nes_netdev_get_stats_count(struct net_device *netdev)
+{
+ return NES_ETHTOOL_STAT_COUNT;
+}
+
+
+/**
+ * nes_netdev_get_strings
+ */
+static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *ethtool_strings)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(ethtool_strings,
+ &nes_ethtool_stringset,
+ sizeof(nes_ethtool_stringset));
+}
+
+
+/**
+ * nes_netdev_get_ethtool_stats
+ */
+static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values)
+{
+ u64 u64temp;
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ u32 nic_count;
+ u32 u32temp;
+
+ target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT;
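+ /* the indices below must stay in step with nes_ethtool_stringset[] above */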
+ target_stat_values[0] = nesvnic->nesdev->link_status_interrupts;
+ target_stat_values[1] = nesvnic->linearized_skbs;
+ target_stat_values[2] = nesvnic->tso_requests;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->nesdev->mac_pause_frames_sent += u32temp;
+ target_stat_values[3] = nesvnic->nesdev->mac_pause_frames_sent;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->nesdev->mac_pause_frames_received += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_PORT_RX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
+ nesvnic->nesdev->port_rx_discards += u32temp;
+ nesvnic->netstats.rx_dropped += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_PORT_TX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
+ nesvnic->nesdev->port_tx_discards += u32temp;
+ nesvnic->netstats.tx_dropped += u32temp;
+
+ for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) {
+ if (nesvnic->qp_nic_index[nic_count] == 0xf)
+ break;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_DISCARD +
+ (nesvnic->qp_nic_index[nic_count]*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->endnode_nstat_rx_discard += u32temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO +
+ (nesvnic->qp_nic_index[nic_count]*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI +
+ (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
+
+ nesvnic->endnode_nstat_rx_octets += u64temp;
+ nesvnic->netstats.rx_bytes += u64temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO +
+ (nesvnic->qp_nic_index[nic_count]*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI +
+ (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
+
+ nesvnic->endnode_nstat_rx_frames += u64temp;
+ nesvnic->netstats.rx_packets += u64temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO +
+ (nesvnic->qp_nic_index[nic_count]*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI +
+ (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
+
+ nesvnic->endnode_nstat_tx_octets += u64temp;
+ nesvnic->netstats.tx_bytes += u64temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO +
+ (nesvnic->qp_nic_index[nic_count]*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI +
+ (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
+
+ nesvnic->endnode_nstat_tx_frames += u64temp;
+ nesvnic->netstats.tx_packets += u64temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_IPV4_TCP_REXMITS + (nesvnic->qp_nic_index[nic_count]*0x200));
+ nesvnic->endnode_ipv4_tcp_retransmits += u32temp;
+ }
+
+ target_stat_values[4] = nesvnic->nesdev->mac_pause_frames_received;
+ target_stat_values[5] = nesdev->nesadapter->nic_rx_eth_route_err;
+ target_stat_values[6] = nesvnic->tx_sw_dropped;
+ target_stat_values[7] = nesvnic->sq_locked;
+ target_stat_values[8] = nesvnic->sq_full;
+ target_stat_values[9] = nesvnic->segmented_tso_requests;
+ target_stat_values[10] = nesvnic->nesdev->mac_rx_symbol_err_frames;
+ target_stat_values[11] = nesvnic->nesdev->mac_rx_jabber_frames;
+ target_stat_values[12] = nesvnic->nesdev->mac_rx_oversized_frames;
+ target_stat_values[13] = nesvnic->nesdev->mac_rx_short_frames;
+ target_stat_values[14] = nesvnic->endnode_nstat_rx_discard;
+ target_stat_values[15] = nesvnic->endnode_nstat_rx_octets;
+ target_stat_values[16] = nesvnic->endnode_nstat_rx_frames;
+ target_stat_values[17] = nesvnic->endnode_nstat_tx_octets;
+ target_stat_values[18] = nesvnic->endnode_nstat_tx_frames;
+ target_stat_values[19] = mh_detected;
+ target_stat_values[20] = mh_pauses_sent;
+ target_stat_values[21] = nesvnic->endnode_ipv4_tcp_retransmits;
+ target_stat_values[22] = atomic_read(&cm_connects);
+ target_stat_values[23] = atomic_read(&cm_accepts);
+ target_stat_values[24] = atomic_read(&cm_disconnects);
+ target_stat_values[25] = atomic_read(&cm_connecteds);
+ target_stat_values[26] = atomic_read(&cm_connect_reqs);
+ target_stat_values[27] = atomic_read(&cm_rejects);
+ target_stat_values[28] = atomic_read(&mod_qp_timouts);
+ target_stat_values[29] = atomic_read(&qps_created);
+ target_stat_values[30] = atomic_read(&sw_qps_destroyed);
+ target_stat_values[31] = atomic_read(&qps_destroyed);
+ target_stat_values[32] = atomic_read(&cm_closes);
+ target_stat_values[33] = cm_packets_sent;
+ target_stat_values[34] = cm_packets_bounced;
+ target_stat_values[35] = cm_packets_created;
+ target_stat_values[36] = cm_packets_received;
+ target_stat_values[37] = cm_packets_dropped;
+ target_stat_values[38] = cm_packets_retrans;
+ target_stat_values[39] = cm_listens_created;
+ target_stat_values[40] = cm_listens_destroyed;
+ target_stat_values[41] = cm_backlog_drops;
+ target_stat_values[42] = atomic_read(&cm_loopbacks);
+ target_stat_values[43] = atomic_read(&cm_nodes_created);
+ target_stat_values[44] = atomic_read(&cm_nodes_destroyed);
+ target_stat_values[45] = atomic_read(&cm_accel_dropped_pkts);
+ target_stat_values[46] = atomic_read(&cm_resets_recvd);
+ target_stat_values[47] = int_mod_timer_init;
+ target_stat_values[48] = int_mod_cq_depth_1;
+ target_stat_values[49] = int_mod_cq_depth_4;
+ target_stat_values[50] = int_mod_cq_depth_16;
+ target_stat_values[51] = int_mod_cq_depth_24;
+ target_stat_values[52] = int_mod_cq_depth_32;
+ target_stat_values[53] = int_mod_cq_depth_128;
+ target_stat_values[54] = int_mod_cq_depth_256;
+
+}
+
+
+/**
+ * nes_netdev_get_drvinfo
+ */
+static void nes_netdev_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+
+ strcpy(drvinfo->driver, DRV_NAME);
+ strcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev));
+ strcpy(drvinfo->fw_version, "TBD");
+ strcpy(drvinfo->version, DRV_VERSION);
+ drvinfo->n_stats = nes_netdev_get_stats_count(netdev);
+ drvinfo->testinfo_len = 0;
+ drvinfo->eedump_len = 0;
+ drvinfo->regdump_len = 0;
+}
+
+
+/**
+ * nes_netdev_set_coalesce
+ */
+static int nes_netdev_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *et_coalesce)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
+ if (et_coalesce->rx_max_coalesced_frames_low) {
+ shared_timer->threshold_low = et_coalesce->rx_max_coalesced_frames_low;
+ }
+ if (et_coalesce->rx_max_coalesced_frames_irq) {
+ shared_timer->threshold_target = et_coalesce->rx_max_coalesced_frames_irq;
+ }
+ if (et_coalesce->rx_max_coalesced_frames_high) {
+ shared_timer->threshold_high = et_coalesce->rx_max_coalesced_frames_high;
+ }
+ if (et_coalesce->rx_coalesce_usecs_low) {
+ shared_timer->timer_in_use_min = et_coalesce->rx_coalesce_usecs_low;
+ }
+ if (et_coalesce->rx_coalesce_usecs_high) {
+ shared_timer->timer_in_use_max = et_coalesce->rx_coalesce_usecs_high;
+ }
+ spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
+
+ /* using this to drive total interrupt moderation */
+ nesadapter->et_rx_coalesce_usecs_irq = et_coalesce->rx_coalesce_usecs_irq;
+ if (et_coalesce->use_adaptive_rx_coalesce) {
+ nesadapter->et_use_adaptive_rx_coalesce = 1;
+ nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
+ nesadapter->et_rx_coalesce_usecs_irq = 0;
+ if (et_coalesce->pkt_rate_low) {
+ nesadapter->et_pkt_rate_low = et_coalesce->pkt_rate_low;
+ }
+ } else {
+ nesadapter->et_use_adaptive_rx_coalesce = 0;
+ nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT;
+ if (nesadapter->et_rx_coalesce_usecs_irq) {
+ nes_write32(nesdev->regs+NES_PERIODIC_CONTROL,
+ 0x80000000 | ((u32)(nesadapter->et_rx_coalesce_usecs_irq*8)));
+ }
+ }
+ return 0;
+}
+
+
+/**
+ * nes_netdev_get_coalesce
+ */
+static int nes_netdev_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *et_coalesce)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct ethtool_coalesce temp_et_coalesce;
+ struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
+ unsigned long flags;
+
+ memset(&temp_et_coalesce, 0, sizeof(temp_et_coalesce));
+ temp_et_coalesce.rx_coalesce_usecs_irq = nesadapter->et_rx_coalesce_usecs_irq;
+ temp_et_coalesce.use_adaptive_rx_coalesce = nesadapter->et_use_adaptive_rx_coalesce;
+ temp_et_coalesce.rate_sample_interval = nesadapter->et_rate_sample_interval;
+ temp_et_coalesce.pkt_rate_low = nesadapter->et_pkt_rate_low;
+ spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
+ temp_et_coalesce.rx_max_coalesced_frames_low = shared_timer->threshold_low;
+ temp_et_coalesce.rx_max_coalesced_frames_irq = shared_timer->threshold_target;
+ temp_et_coalesce.rx_max_coalesced_frames_high = shared_timer->threshold_high;
+ temp_et_coalesce.rx_coalesce_usecs_low = shared_timer->timer_in_use_min;
+ temp_et_coalesce.rx_coalesce_usecs_high = shared_timer->timer_in_use_max;
+ if (nesadapter->et_use_adaptive_rx_coalesce) {
+ temp_et_coalesce.rx_coalesce_usecs_irq = shared_timer->timer_in_use;
+ }
+ spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
+ memcpy(et_coalesce, &temp_et_coalesce, sizeof(*et_coalesce));
+ return 0;
+}
+
+
+/**
+ * nes_netdev_get_pauseparam
+ */
+static void nes_netdev_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *et_pauseparam)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+
+ et_pauseparam->autoneg = 0;
+ et_pauseparam->rx_pause = (nesvnic->nesdev->disable_rx_flow_control == 0) ? 1:0;
+ et_pauseparam->tx_pause = (nesvnic->nesdev->disable_tx_flow_control == 0) ? 1:0;
+}
+
+
+/**
+ * nes_netdev_set_pauseparam
+ */
+static int nes_netdev_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *et_pauseparam)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ u32 u32temp;
+
+ if (et_pauseparam->autoneg) {
+ /* TODO: should return unsupported */
+ return 0;
+ }
+ if ((et_pauseparam->tx_pause == 1) && (nesdev->disable_tx_flow_control == 1)) {
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
+ u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
+ nes_write_indexed(nesdev,
+ NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
+ nesdev->disable_tx_flow_control = 0;
+ } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
+ u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
+ nes_write_indexed(nesdev,
+ NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
+ nesdev->disable_tx_flow_control = 1;
+ }
+ if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
+ u32temp &= ~NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
+ nes_write_indexed(nesdev,
+ NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
+ nesdev->disable_rx_flow_control = 0;
+ } else if ((et_pauseparam->rx_pause == 0) && (nesdev->disable_rx_flow_control == 0)) {
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
+ u32temp |= NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
+ nes_write_indexed(nesdev,
+ NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
+ nesdev->disable_rx_flow_control = 1;
+ }
+
+ return 0;
+}
+
+
+/**
+ * nes_netdev_get_settings
+ */
+static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u16 phy_data;
+
+ et_cmd->duplex = DUPLEX_FULL;
+ et_cmd->port = PORT_MII;
+ if (nesadapter->OneG_Mode) {
+ et_cmd->supported = SUPPORTED_1000baseT_Full|SUPPORTED_Autoneg;
+ et_cmd->advertising = ADVERTISED_1000baseT_Full|ADVERTISED_Autoneg;
+ et_cmd->speed = SPEED_1000;
+ nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
+ &phy_data);
+ if (phy_data&0x1000) {
+ et_cmd->autoneg = AUTONEG_ENABLE;
+ } else {
+ et_cmd->autoneg = AUTONEG_DISABLE;
+ }
+ et_cmd->transceiver = XCVR_EXTERNAL;
+ et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
+ } else {
+ if (nesadapter->phy_type[nesvnic->logical_port] == NES_PHY_TYPE_IRIS) {
+ et_cmd->transceiver = XCVR_EXTERNAL;
+ et_cmd->port = PORT_FIBRE;
+ et_cmd->supported = SUPPORTED_FIBRE;
+ et_cmd->advertising = ADVERTISED_FIBRE;
+ et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
+ } else {
+ et_cmd->transceiver = XCVR_INTERNAL;
+ et_cmd->supported = SUPPORTED_10000baseT_Full;
+ et_cmd->advertising = ADVERTISED_10000baseT_Full;
+ et_cmd->phy_address = nesdev->mac_index;
+ }
+ et_cmd->speed = SPEED_10000;
+ et_cmd->autoneg = AUTONEG_DISABLE;
+ }
+ et_cmd->maxtxpkt = 511;
+ et_cmd->maxrxpkt = 511;
+ return 0;
+}
+
+
+/**
+ * nes_netdev_set_settings
+ */
+static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u16 phy_data;
+
+ if (nesadapter->OneG_Mode) {
+ nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
+ &phy_data);
+ if (et_cmd->autoneg) {
+ /* Turn on Full duplex, Autoneg, and restart autonegotiation */
+ phy_data |= 0x1300;
+ } else {
+ /* Turn off autonegotiation */
+ phy_data &= ~0x1000;
+ }
+ nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
+ phy_data);
+ }
+
+ return 0;
+}
+
+
+static struct ethtool_ops nes_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_settings = nes_netdev_get_settings,
+ .set_settings = nes_netdev_set_settings,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .get_rx_csum = nes_netdev_get_rx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .get_strings = nes_netdev_get_strings,
+ .get_stats_count = nes_netdev_get_stats_count,
+ .get_ethtool_stats = nes_netdev_get_ethtool_stats,
+ .get_drvinfo = nes_netdev_get_drvinfo,
+ .get_coalesce = nes_netdev_get_coalesce,
+ .set_coalesce = nes_netdev_set_coalesce,
+ .get_pauseparam = nes_netdev_get_pauseparam,
+ .set_pauseparam = nes_netdev_set_pauseparam,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .set_rx_csum = nes_netdev_set_rx_csum,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ethtool_op_set_tso,
+};
+
+
+static void nes_netdev_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ u32 u32temp;
+
+ nesvnic->vlan_grp = grp;
+
+ /* Enable/Disable VLAN Stripping */
+ u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG);
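+ /* bit 25 (0x02000000) of the PCI-X diag register gates VLAN tag stripping: cleared when a vlan group is registered, set otherwise */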
+ if (grp)
+ u32temp &= 0xfdffffff;
+ else
+ u32temp |= 0x02000000;
+
+ nes_write_indexed(nesdev, NES_IDX_PCIX_DIAG, u32temp);
+}
+
+
+/**
+ * nes_netdev_init - initialize network device
+ */
+struct net_device *nes_netdev_init(struct nes_device *nesdev,
+ void __iomem *mmio_addr)
+{
+ u64 u64temp;
+ struct nes_vnic *nesvnic = NULL;
+ struct net_device *netdev;
+ struct nic_qp_map *curr_qp_map;
+ u32 u32temp;
+ u16 phy_data;
+ u16 temp_phy_data;
+
+ netdev = alloc_etherdev(sizeof(struct nes_vnic));
+ if (!netdev) {
+ printk(KERN_ERR PFX "nesvnic etherdev alloc failed");
+ return NULL;
+ }
+
+ nes_debug(NES_DBG_INIT, "netdev = %p, %s\n", netdev, netdev->name);
+
+ SET_NETDEV_DEV(netdev, &nesdev->pcidev->dev);
+
+ nesvnic = netdev_priv(netdev);
+ memset(nesvnic, 0, sizeof(*nesvnic));
+
+ netdev->open = nes_netdev_open;
+ netdev->stop = nes_netdev_stop;
+ netdev->hard_start_xmit = nes_netdev_start_xmit;
+ netdev->get_stats = nes_netdev_get_stats;
+ netdev->tx_timeout = nes_netdev_tx_timeout;
+ netdev->set_mac_address = nes_netdev_set_mac_address;
+ netdev->set_multicast_list = nes_netdev_set_multicast_list;
+ netdev->change_mtu = nes_netdev_change_mtu;
+ netdev->watchdog_timeo = NES_TX_TIMEOUT;
+ netdev->irq = nesdev->pcidev->irq;
+ netdev->mtu = ETH_DATA_LEN;
+ netdev->hard_header_len = ETH_HLEN;
+ netdev->addr_len = ETH_ALEN;
+ netdev->type = ARPHRD_ETHER;
+ netdev->features = NETIF_F_HIGHDMA;
+ netdev->ethtool_ops = &nes_ethtool_ops;
+ netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128);
+ nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
+ netdev->features |= NETIF_F_LLTX;
+
+ /* Fill in the port structure */
+ nesvnic->netdev = netdev;
+ nesvnic->nesdev = nesdev;
+ nesvnic->msg_enable = netif_msg_init(debug, default_msg);
+ nesvnic->netdev_index = nesdev->netdev_count;
+ nesvnic->perfect_filter_index = nesdev->nesadapter->netdev_count;
+ nesvnic->max_frame_size = netdev->mtu+netdev->hard_header_len;
+
+ curr_qp_map = nic_qp_mapping_per_function[PCI_FUNC(nesdev->pcidev->devfn)];
+ nesvnic->nic.qp_id = curr_qp_map[nesdev->netdev_count].qpid;
+ nesvnic->nic_index = curr_qp_map[nesdev->netdev_count].nic_index;
+ nesvnic->logical_port = curr_qp_map[nesdev->netdev_count].logical_port;
+
+ /* Setup the burned in MAC address */
+ u64temp = (u64)nesdev->nesadapter->mac_addr_low;
+ u64temp += ((u64)nesdev->nesadapter->mac_addr_high) << 32;
+ u64temp += nesvnic->nic_index;
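+ /* each virtual NIC gets a unique address: the burned-in base MAC plus its nic_index */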
+ netdev->dev_addr[0] = (u8)(u64temp>>40);
+ netdev->dev_addr[1] = (u8)(u64temp>>32);
+ netdev->dev_addr[2] = (u8)(u64temp>>24);
+ netdev->dev_addr[3] = (u8)(u64temp>>16);
+ netdev->dev_addr[4] = (u8)(u64temp>>8);
+ netdev->dev_addr[5] = (u8)u64temp;
+ memcpy(netdev->perm_addr, netdev->dev_addr, 6);
+
+ if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
+ netdev->features |= NETIF_F_GSO | NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;
+ } else {
+ netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ }
+
+ nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
+ " nic_index = %d, logical_port = %d, mac_index = %d.\n",
+ nesvnic, (unsigned long)netdev->features, nesvnic->nic.qp_id,
+ nesvnic->nic_index, nesvnic->logical_port, nesdev->mac_index);
+
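+ /* Assign the NIC QP indexes this vNIC may use: up to four on a single-port
+ adapter, two with two ports, one otherwise; 0xf marks an unused slot */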
+ if (nesvnic->nesdev->nesadapter->port_count == 1) {
+ nesvnic->qp_nic_index[0] = nesvnic->nic_index;
+ nesvnic->qp_nic_index[1] = nesvnic->nic_index + 1;
+ if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) {
+ nesvnic->qp_nic_index[2] = 0xf;
+ nesvnic->qp_nic_index[3] = 0xf;
+ } else {
+ nesvnic->qp_nic_index[2] = nesvnic->nic_index + 2;
+ nesvnic->qp_nic_index[3] = nesvnic->nic_index + 3;
+ }
+ } else {
+ if (nesvnic->nesdev->nesadapter->port_count == 2) {
+ nesvnic->qp_nic_index[0] = nesvnic->nic_index;
+ nesvnic->qp_nic_index[1] = nesvnic->nic_index + 2;
+ nesvnic->qp_nic_index[2] = 0xf;
+ nesvnic->qp_nic_index[3] = 0xf;
+ } else {
+ nesvnic->qp_nic_index[0] = nesvnic->nic_index;
+ nesvnic->qp_nic_index[1] = 0xf;
+ nesvnic->qp_nic_index[2] = 0xf;
+ nesvnic->qp_nic_index[3] = 0xf;
+ }
+ }
+ nesvnic->next_qp_nic_index = 0;
+
+ if (nesdev->netdev_count == 0) {
+ nesvnic->rdma_enabled = 1;
+ } else {
+ nesvnic->rdma_enabled = 0;
+ }
+ nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
+ spin_lock_init(&nesvnic->tx_lock);
+ nesdev->netdev[nesdev->netdev_count] = netdev;
+
+ nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n",
+ nesvnic, nesdev->mac_index);
+ list_add_tail(&nesvnic->list, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]);
+
+ if ((nesdev->netdev_count == 0) &&
+ (PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index)) {
+ nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n",
+ NES_IDX_PHY_PCS_CONTROL_STATUS0+(0x200*(nesvnic->logical_port&1)));
+ u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
+ (0x200*(nesvnic->logical_port&1)));
+ u32temp |= 0x00200000;
+ nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
+ (0x200*(nesvnic->logical_port&1)), u32temp);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
+ (0x200*(nesvnic->logical_port&1)) );
+ if ((u32temp&0x0f1f0000) == 0x0f0f0000) {
+ if (nesdev->nesadapter->phy_type[nesvnic->logical_port] == NES_PHY_TYPE_IRIS) {
+ nes_init_phy(nesdev);
+ nes_read_10G_phy_reg(nesdev, 1,
+ nesdev->nesadapter->phy_index[nesvnic->logical_port]);
+ temp_phy_data = (u16)nes_read_indexed(nesdev,
+ NES_IDX_MAC_MDIO_CONTROL);
+ u32temp = 20;
+ do {
+ nes_read_10G_phy_reg(nesdev, 1,
+ nesdev->nesadapter->phy_index[nesvnic->logical_port]);
+ phy_data = (u16)nes_read_indexed(nesdev,
+ NES_IDX_MAC_MDIO_CONTROL);
+ if ((phy_data == temp_phy_data) || (!(--u32temp)))
+ break;
+ temp_phy_data = phy_data;
+ } while (1);
+ if (phy_data & 4) {
+ nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
+ nesvnic->linkup = 1;
+ } else {
+ nes_debug(NES_DBG_INIT, "The Link is DOWN!!.\n");
+ }
+ } else {
+ nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
+ nesvnic->linkup = 1;
+ }
+ }
+ nes_debug(NES_DBG_INIT, "Setting up MAC interrupt mask.\n");
+ /* clear the MAC interrupt status, assumes direct logical to physical mapping */
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS+(0x200*nesvnic->logical_port));
+ nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp);
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS+(0x200*nesvnic->logical_port), u32temp);
+
+ if (nesdev->nesadapter->phy_type[nesvnic->logical_port] != NES_PHY_TYPE_IRIS)
+ nes_init_phy(nesdev);
+
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesvnic->logical_port),
+ ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
+ NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
+ }
+
+ return netdev;
+}
+
+
+/**
+ * nes_netdev_destroy - destroy network device structure
+ */
+void nes_netdev_destroy(struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+
+ /* make sure 'stop' method is called by Linux stack */
+ /* nes_netdev_stop(netdev); */
+
+ list_del(&nesvnic->list);
+
+ if (nesvnic->of_device_registered) {
+ nes_destroy_ofa_device(nesvnic->nesibdev);
+ }
+
+ free_netdev(netdev);
+}
+
+
+/**
+ * nes_nic_cm_xmit -- CM calls this to send out pkts
+ */
+int nes_nic_cm_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ int ret;
+
+ skb->dev = netdev;
+ ret = dev_queue_xmit(skb);
+ if (ret) {
+ nes_debug(NES_DBG_CM, "Bad return code from dev_queue_xmit %d\n", ret);
+ }
+
+ return ret;
+}
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h
new file mode 100644
index 000000000000..e64306bce80b
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_user.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect. All rights reserved.
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef NES_USER_H
+#define NES_USER_H
+
+#include <linux/types.h>
+
+#define NES_ABI_USERSPACE_VER 1
+#define NES_ABI_KERNEL_VER 1
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
+
+struct nes_alloc_ucontext_req {
+ __u32 reserved32;
+ __u8 userspace_ver;
+ __u8 reserved8[3];
+};
+
+struct nes_alloc_ucontext_resp {
+ __u32 max_pds; /* maximum pds allowed for this user process */
+ __u32 max_qps; /* maximum qps allowed for this user process */
+ __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
+ __u8 virtwq; /* flag to indicate if virtual WQ are to be used or not */
+ __u8 kernel_ver;
+ __u8 reserved[2];
+};
+
+struct nes_alloc_pd_resp {
+ __u32 pd_id;
+ __u32 mmap_db_index;
+};
+
+struct nes_create_cq_req {
+ __u64 user_cq_buffer;
+ __u32 mcrqf;
+ __u8 reserved[4];
+};
+
+struct nes_create_qp_req {
+ __u64 user_wqe_buffers;
+};
+
+enum iwnes_memreg_type {
+ IWNES_MEMREG_TYPE_MEM = 0x0000,
+ IWNES_MEMREG_TYPE_QP = 0x0001,
+ IWNES_MEMREG_TYPE_CQ = 0x0002,
+ IWNES_MEMREG_TYPE_MW = 0x0003,
+ IWNES_MEMREG_TYPE_FMR = 0x0004,
+};
+
+struct nes_mem_reg_req {
+ __u32 reg_type; /* indicates if id is memory, QP or CQ */
+ __u32 reserved;
+};
+
+struct nes_create_cq_resp {
+ __u32 cq_id;
+ __u32 cq_size;
+ __u32 mmap_db_index;
+ __u32 reserved;
+};
+
+struct nes_create_qp_resp {
+ __u32 qp_id;
+ __u32 actual_sq_size;
+ __u32 actual_rq_size;
+ __u32 mmap_sq_db_index;
+ __u32 mmap_rq_db_index;
+ __u32 nes_drv_opt;
+};
+
+#endif /* NES_USER_H */
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
new file mode 100644
index 000000000000..c4ec6ac63461
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -0,0 +1,917 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+
+#include "nes.h"
+
+
+
+static u16 nes_read16_eeprom(void __iomem *addr, u16 offset);
+
+u32 mh_detected;
+u32 mh_pauses_sent;
+
+/**
+ * nes_read_eeprom_values - read the adapter configuration from the EEPROM
+ */
+int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesadapter)
+{
+ u32 mac_addr_low;
+ u16 mac_addr_high;
+ u16 eeprom_data;
+ u16 eeprom_offset;
+ u16 next_section_address;
+ u16 sw_section_ver;
+ u8 major_ver = 0;
+ u8 minor_ver = 0;
+
+ /* TODO: deal with EEPROM endian issues */
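+ /*
+ * The EEPROM is organized as a chain of sections.  Each section carries a
+ * 16-bit size word at offset 2 and an ASCII signature word at offset 4
+ * ('WF' = 0x5746, 'WS' = 0x5753, 'AM', 'OR', 'FN'); the code below walks
+ * the chain to locate the firmware revision and software configuration.
+ */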
+ if (nesadapter->firmware_eeprom_offset == 0) {
+ /* Read the EEPROM Parameters */
+ eeprom_data = nes_read16_eeprom(nesdev->regs, 0);
+ nes_debug(NES_DBG_HW, "EEPROM Offset 0 = 0x%04X\n", eeprom_data);
+ eeprom_offset = 2 + (((eeprom_data & 0x007f) << 3) <<
+ ((eeprom_data & 0x0080) >> 7));
+ nes_debug(NES_DBG_HW, "Firmware Offset = 0x%04X\n", eeprom_offset);
+ nesadapter->firmware_eeprom_offset = eeprom_offset;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 4);
+ if (eeprom_data != 0x5746) {
+ nes_debug(NES_DBG_HW, "Not a valid Firmware Image = 0x%04X\n", eeprom_data);
+ return -1;
+ }
+
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
+ nes_debug(NES_DBG_HW, "EEPROM Offset %u = 0x%04X\n",
+ eeprom_offset + 2, eeprom_data);
+ eeprom_offset += ((eeprom_data & 0x00ff) << 3) << ((eeprom_data & 0x0100) >> 8);
+ nes_debug(NES_DBG_HW, "Software Offset = 0x%04X\n", eeprom_offset);
+ nesadapter->software_eeprom_offset = eeprom_offset;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 4);
+ if (eeprom_data != 0x5753) {
+ printk("Not a valid Software Image = 0x%04X\n", eeprom_data);
+ return -1;
+ }
+ sw_section_ver = nes_read16_eeprom(nesdev->regs, nesadapter->software_eeprom_offset + 6);
+ nes_debug(NES_DBG_HW, "Software section version number = 0x%04X\n",
+ sw_section_ver);
+
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
+ nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
+ eeprom_offset + 2, eeprom_data);
+ next_section_address = eeprom_offset + (((eeprom_data & 0x00ff) << 3) <<
+ ((eeprom_data & 0x0100) >> 8));
+ eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
+ if (eeprom_data != 0x414d) {
+ nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x414d but was 0x%04X\n",
+ eeprom_data);
+ goto no_fw_rev;
+ }
+ eeprom_offset = next_section_address;
+
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
+ nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
+ eeprom_offset + 2, eeprom_data);
+ next_section_address = eeprom_offset + (((eeprom_data & 0x00ff) << 3) <<
+ ((eeprom_data & 0x0100) >> 8));
+ eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
+ if (eeprom_data != 0x4f52) {
+ nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x4f52 but was 0x%04X\n",
+ eeprom_data);
+ goto no_fw_rev;
+ }
+ eeprom_offset = next_section_address;
+
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
+ nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
+ eeprom_offset + 2, eeprom_data);
+ next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
+ eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
+ if (eeprom_data != 0x5746) {
+ nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x5746 but was 0x%04X\n",
+ eeprom_data);
+ goto no_fw_rev;
+ }
+ eeprom_offset = next_section_address;
+
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
+ nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
+ eeprom_offset + 2, eeprom_data);
+ next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
+ eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
+ if (eeprom_data != 0x5753) {
+ nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x5753 but was 0x%04X\n",
+ eeprom_data);
+ goto no_fw_rev;
+ }
+ eeprom_offset = next_section_address;
+
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
+ nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
+ eeprom_offset + 2, eeprom_data);
+ next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
+ eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
+ if (eeprom_data != 0x414d) {
+ nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x414d but was 0x%04X\n",
+ eeprom_data);
+ goto no_fw_rev;
+ }
+ eeprom_offset = next_section_address;
+
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
+ nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
+ eeprom_offset + 2, eeprom_data);
+ next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
+ eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
+ if (eeprom_data != 0x464e) {
+ nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x464e but was 0x%04X\n",
+ eeprom_data);
+ goto no_fw_rev;
+ }
+ eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 8);
+ printk(KERN_INFO PFX "Firmware version %u.%u\n", (u8)(eeprom_data>>8), (u8)eeprom_data);
+ major_ver = (u8)(eeprom_data >> 8);
+ minor_ver = (u8)(eeprom_data);
+
+ if (nes_drv_opt & NES_DRV_OPT_DISABLE_VIRT_WQ) {
+ nes_debug(NES_DBG_HW, "Virtual WQs have been disabled\n");
+ } else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) {
+ nesadapter->virtwq = 1;
+ }
+ nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
+ (u32)((u8)eeprom_data);
+
+no_fw_rev:
+ /* eeprom is valid */
+ eeprom_offset = nesadapter->software_eeprom_offset;
+ eeprom_offset += 8;
+ nesadapter->netdev_max = (u8)nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ mac_addr_high = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ mac_addr_low = (u32)nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ mac_addr_low <<= 16;
+ mac_addr_low += (u32)nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "Base MAC Address = 0x%04X%08X\n",
+ mac_addr_high, mac_addr_low);
+ nes_debug(NES_DBG_HW, "MAC Address count = %u\n", nesadapter->netdev_max);
+
+ nesadapter->mac_addr_low = mac_addr_low;
+ nesadapter->mac_addr_high = mac_addr_high;
+
+ /* Read the Phy Type array */
+ eeprom_offset += 10;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->phy_type[0] = (u8)(eeprom_data >> 8);
+ nesadapter->phy_type[1] = (u8)eeprom_data;
+
+ /* Read the port array */
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->phy_type[2] = (u8)(eeprom_data >> 8);
+ nesadapter->phy_type[3] = (u8)eeprom_data;
+ /* port_count is set by soft reset reg */
+ nes_debug(NES_DBG_HW, "port_count = %u, port 0 -> %u, port 1 -> %u,"
+ " port 2 -> %u, port 3 -> %u\n",
+ nesadapter->port_count,
+ nesadapter->phy_type[0], nesadapter->phy_type[1],
+ nesadapter->phy_type[2], nesadapter->phy_type[3]);
+
+ /* Read PD config array */
+ eeprom_offset += 10;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_size[0] = eeprom_data;
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_base[0] = eeprom_data;
+ nes_debug(NES_DBG_HW, "PD0 config, size=0x%04x, base=0x%04x\n",
+ nesadapter->pd_config_size[0], nesadapter->pd_config_base[0]);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_size[1] = eeprom_data;
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_base[1] = eeprom_data;
+ nes_debug(NES_DBG_HW, "PD1 config, size=0x%04x, base=0x%04x\n",
+ nesadapter->pd_config_size[1], nesadapter->pd_config_base[1]);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_size[2] = eeprom_data;
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_base[2] = eeprom_data;
+ nes_debug(NES_DBG_HW, "PD2 config, size=0x%04x, base=0x%04x\n",
+ nesadapter->pd_config_size[2], nesadapter->pd_config_base[2]);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_size[3] = eeprom_data;
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_base[3] = eeprom_data;
+ nes_debug(NES_DBG_HW, "PD3 config, size=0x%04x, base=0x%04x\n",
+ nesadapter->pd_config_size[3], nesadapter->pd_config_base[3]);
+
+ /* Read Rx Pool Size */
+ eeprom_offset += 22; /* 46 */
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->rx_pool_size = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "rx_pool_size = 0x%08X\n", nesadapter->rx_pool_size);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->tx_pool_size = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "tx_pool_size = 0x%08X\n", nesadapter->tx_pool_size);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->rx_threshold = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "rx_threshold = 0x%08X\n", nesadapter->rx_threshold);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->tcp_timer_core_clk_divisor = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "tcp_timer_core_clk_divisor = 0x%08X\n",
+ nesadapter->tcp_timer_core_clk_divisor);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->iwarp_config = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "iwarp_config = 0x%08X\n", nesadapter->iwarp_config);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->cm_config = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "cm_config = 0x%08X\n", nesadapter->cm_config);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->sws_timer_config = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "sws_timer_config = 0x%08X\n", nesadapter->sws_timer_config);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->tcp_config1 = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "tcp_config1 = 0x%08X\n", nesadapter->tcp_config1);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->wqm_wat = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "wqm_wat = 0x%08X\n", nesadapter->wqm_wat);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->core_clock = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "core_clock = 0x%08X\n", nesadapter->core_clock);
+
+ if ((sw_section_ver) && (nesadapter->hw_rev != NE020_REV)) {
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->phy_index[0] = (eeprom_data & 0xff00)>>8;
+ nesadapter->phy_index[1] = eeprom_data & 0x00ff;
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->phy_index[2] = (eeprom_data & 0xff00)>>8;
+ nesadapter->phy_index[3] = eeprom_data & 0x00ff;
+ } else {
+ nesadapter->phy_index[0] = 4;
+ nesadapter->phy_index[1] = 5;
+ nesadapter->phy_index[2] = 6;
+ nesadapter->phy_index[3] = 7;
+ }
+ nes_debug(NES_DBG_HW, "Phy address map = 0 > %u, 1 > %u, 2 > %u, 3 > %u\n",
+ nesadapter->phy_index[0],nesadapter->phy_index[1],
+ nesadapter->phy_index[2],nesadapter->phy_index[3]);
+ }
+
+ return 0;
+}
+
+
+/**
+ * nes_read16_eeprom
+ */
+static u16 nes_read16_eeprom(void __iomem *addr, u16 offset)
+{
+ writel(NES_EEPROM_READ_REQUEST + (offset >> 1),
+ (void __iomem *)addr + NES_EEPROM_COMMAND);
+
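+ /* busy-wait until the hardware clears the read request bit, then the data register holds the result */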
+ do {
+ } while (readl((void __iomem *)addr + NES_EEPROM_COMMAND) &
+ NES_EEPROM_READ_REQUEST);
+
+ return readw((void __iomem *)addr + NES_EEPROM_DATA);
+}
+
+
+/**
+ * nes_write_1G_phy_reg
+ */
+void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 u32temp;
+ u32 counter;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+
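+ /* MDIO write command: 16-bit data in the low word, register number in bits 22:18, PHY address in bits 27:23 */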
+ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
+ 0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
+ for (counter = 0; counter < 100 ; counter++) {
+ udelay(30);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
+ if (u32temp & 1) {
+ /* nes_debug(NES_DBG_PHY, "Phy interrupt status = 0x%X.\n", u32temp); */
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
+ break;
+ }
+ }
+ if (!(u32temp & 1))
+ nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
+ u32temp);
+
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+}
+
+
+/**
+ * nes_read_1G_phy_reg
+ * This routine issues the read and returns the register contents
+ * through the data pointer.
+ */
+void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 u32temp;
+ u32 counter;
+ unsigned long flags;
+
+ /* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n",
+ phy_addr, nesdev->mac_index); */
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+
+ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
+ 0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
+ for (counter = 0; counter < 100 ; counter++) {
+ udelay(30);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
+ if (u32temp & 1) {
+ /* nes_debug(NES_DBG_PHY, "Phy interrupt status = 0x%X.\n", u32temp); */
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
+ break;
+ }
+ }
+ if (!(u32temp & 1)) {
+ nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
+ u32temp);
+ *data = 0xffff;
+ } else {
+ *data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ }
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+}
+
+
+/**
+ * nes_write_10G_phy_reg
+ */
+void nes_write_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg,
+ u8 phy_addr, u16 data)
+{
+ u32 dev_addr;
+ u32 port_addr;
+ u32 u32temp;
+ u32 counter;
+
+ dev_addr = 1;
+ port_addr = phy_addr;
+
+ /* set address */
+ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
+ 0x00020000 | (u32)phy_reg | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
+ for (counter = 0; counter < 100 ; counter++) {
+ udelay(30);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
+ if (u32temp & 1) {
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
+ break;
+ }
+ }
+ if (!(u32temp & 1))
+ nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
+ u32temp);
+
+ /* set data */
+ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
+ 0x10020000 | (u32)data | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
+ for (counter = 0; counter < 100 ; counter++) {
+ udelay(30);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
+ if (u32temp & 1) {
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
+ break;
+ }
+ }
+ if (!(u32temp & 1))
+ nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
+ u32temp);
+}
+
+
+/**
+ * nes_read_10G_phy_reg
+ * This routine only issues the read, the data must be read
+ * separately.
+ */
+void nes_read_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg, u8 phy_addr)
+{
+ u32 dev_addr;
+ u32 port_addr;
+ u32 u32temp;
+ u32 counter;
+
+ dev_addr = 1;
+ port_addr = phy_addr;
+
+ /* set address */
+ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
+ 0x00020000 | (u32)phy_reg | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
+ for (counter = 0; counter < 100 ; counter++) {
+ udelay(30);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
+ if (u32temp & 1) {
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
+ break;
+ }
+ }
+ if (!(u32temp & 1))
+ nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
+ u32temp);
+
+ /* issue read */
+ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
+ 0x30020000 | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
+ for (counter = 0; counter < 100 ; counter++) {
+ udelay(30);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
+ if (u32temp & 1) {
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
+ break;
+ }
+ }
+ if (!(u32temp & 1))
+ nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
+ u32temp);
+}
+
+
+/**
+ * nes_get_cqp_request - get a request from the free list, or allocate a new one
+ */
+struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
+{
+ unsigned long flags;
+ struct nes_cqp_request *cqp_request = NULL;
+
+ if (!list_empty(&nesdev->cqp_avail_reqs)) {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ cqp_request = list_entry(nesdev->cqp_avail_reqs.next,
+ struct nes_cqp_request, list);
+ list_del_init(&cqp_request->list);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ } else {
+ cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL);
+ if (cqp_request) {
+ cqp_request->dynamic = 1;
+ INIT_LIST_HEAD(&cqp_request->list);
+ }
+ }
+
+ if (cqp_request) {
+ init_waitqueue_head(&cqp_request->waitq);
+ cqp_request->waiting = 0;
+ cqp_request->request_done = 0;
+ cqp_request->callback = 0;
+ nes_debug(NES_DBG_CQP, "Got cqp request %p from the available list\n",
+ cqp_request);
+ } else
+ printk(KERN_ERR PFX "%s: Could not allocate a CQP request.\n",
+ __func__);
+
+ return cqp_request;
+}
+
+
+/**
+ * nes_post_cqp_request
+ */
+void nes_post_cqp_request(struct nes_device *nesdev,
+ struct nes_cqp_request *cqp_request, int ring_doorbell)
+{
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ unsigned long flags;
+ u32 cqp_head;
+ u64 u64temp;
+
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+
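+ /* Post directly to the CQP SQ only if there is room and no requests are already pending; otherwise add it to the pending list */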
+ if (((((nesdev->cqp.sq_tail+(nesdev->cqp.sq_size*2))-nesdev->cqp.sq_head) &
+ (nesdev->cqp.sq_size - 1)) != 1)
+ && (list_empty(&nesdev->cqp_pending_reqs))) {
+ cqp_head = nesdev->cqp.sq_head++;
+ nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe));
+ barrier();
+ u64temp = (unsigned long)cqp_request;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_COMP_SCRATCH_LOW_IDX,
+ u64temp);
+ nes_debug(NES_DBG_CQP, "CQP request (opcode 0x%02X), line 1 = 0x%08X put on CQPs SQ,"
+ " request = %p, cqp_head = %u, cqp_tail = %u, cqp_size = %u,"
+ " waiting = %d, refcount = %d.\n",
+ le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f,
+ le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX]), cqp_request,
+ nesdev->cqp.sq_head, nesdev->cqp.sq_tail, nesdev->cqp.sq_size,
+ cqp_request->waiting, atomic_read(&cqp_request->refcount));
+ barrier();
+ if (ring_doorbell) {
+ /* Ring doorbell (1 WQEs) */
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x01800000 | nesdev->cqp.qp_id);
+ }
+
+ barrier();
+ } else {
+ nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X), line 1 = 0x%08X"
+ " put on the pending queue.\n",
+ cqp_request,
+ le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f,
+ le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_ID_IDX]));
+ list_add_tail(&cqp_request->list, &nesdev->cqp_pending_reqs);
+ }
+
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+
+ return;
+}
+
+
+/**
+ * nes_arp_table - add, resolve, or delete an entry in the driver's ARP cache
+ */
+int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 action)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ int arp_index;
+ int err = 0;
+
+ for (arp_index = 0; (u32) arp_index < nesadapter->arp_table_size; arp_index++) {
+ if (nesadapter->arp_table[arp_index].ip_addr == ip_addr)
+ break;
+ }
+
+ if (action == NES_ARP_ADD) {
+ if (arp_index != nesadapter->arp_table_size) {
+ return -1;
+ }
+
+ arp_index = 0;
+ err = nes_alloc_resource(nesadapter, nesadapter->allocated_arps,
+ nesadapter->arp_table_size, (u32 *)&arp_index, &nesadapter->next_arp_index);
+ if (err) {
+ nes_debug(NES_DBG_NETDEV, "nes_alloc_resource returned error = %u\n", err);
+ return err;
+ }
+ nes_debug(NES_DBG_NETDEV, "ADD, arp_index=%d\n", arp_index);
+
+ nesadapter->arp_table[arp_index].ip_addr = ip_addr;
+ memcpy(nesadapter->arp_table[arp_index].mac_addr, mac_addr, ETH_ALEN);
+ return arp_index;
+ }
+
+ /* DELETE or RESOLVE */
+ if (arp_index == nesadapter->arp_table_size) {
+ nes_debug(NES_DBG_NETDEV, "mac address not in ARP table - cannot delete or resolve\n");
+ return -1;
+ }
+
+ if (action == NES_ARP_RESOLVE) {
+ nes_debug(NES_DBG_NETDEV, "RESOLVE, arp_index=%d\n", arp_index);
+ return arp_index;
+ }
+
+ if (action == NES_ARP_DELETE) {
+ nes_debug(NES_DBG_NETDEV, "DELETE, arp_index=%d\n", arp_index);
+ nesadapter->arp_table[arp_index].ip_addr = 0;
+ memset(nesadapter->arp_table[arp_index].mac_addr, 0x00, ETH_ALEN);
+ nes_free_resource(nesadapter, nesadapter->allocated_arps, arp_index);
+ return arp_index;
+ }
+
+ return -1;
+}
+
+
+/**
+ * nes_mh_fix - timer routine that checks for a stalled MAC transmitter and,
+ * when no transmit progress or pause frames are seen, resets the MAC and
+ * reprograms the SerDes to recover.
+ */
+void nes_mh_fix(unsigned long parm)
+{
+ unsigned long flags;
+ struct nes_device *nesdev = (struct nes_device *)parm;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_vnic *nesvnic;
+ u32 used_chunks_tx;
+ u32 temp_used_chunks_tx;
+ u32 temp_last_used_chunks_tx;
+ u32 used_chunks_mask;
+ u32 mac_tx_frames_low;
+ u32 mac_tx_frames_high;
+ u32 mac_tx_pauses;
+ u32 serdes_status;
+ u32 reset_value;
+ u32 tx_control;
+ u32 tx_config;
+ u32 tx_pause_quanta;
+ u32 rx_control;
+ u32 rx_config;
+ u32 mac_exact_match;
+ u32 mpp_debug;
+ u32 i=0;
+ u32 chunks_tx_progress = 0;
+
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ if ((nesadapter->mac_sw_state[0] != NES_MAC_SW_IDLE) || (nesadapter->mac_link_down[0])) {
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ goto no_mh_work;
+ }
+ nesadapter->mac_sw_state[0] = NES_MAC_SW_MH;
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ do {
+ mac_tx_frames_low = nes_read_indexed(nesdev, NES_IDX_MAC_TX_FRAMES_LOW);
+ mac_tx_frames_high = nes_read_indexed(nesdev, NES_IDX_MAC_TX_FRAMES_HIGH);
+ mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES);
+ used_chunks_tx = nes_read_indexed(nesdev, NES_IDX_USED_CHUNKS_TX);
+ nesdev->mac_pause_frames_sent += mac_tx_pauses;
+ used_chunks_mask = 0;
+ temp_used_chunks_tx = used_chunks_tx;
+ temp_last_used_chunks_tx = nesdev->last_used_chunks_tx;
+
+ if (nesdev->netdev[0]) {
+ nesvnic = netdev_priv(nesdev->netdev[0]);
+ } else {
+ break;
+ }
+
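+ /* Build a byte mask covering the active NIC queues and note whether any of them made transmit progress since the last pass */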
+ for (i=0; i<4; i++) {
+ used_chunks_mask <<= 8;
+ if (nesvnic->qp_nic_index[i] != 0xff) {
+ used_chunks_mask |= 0xff;
+ if ((temp_used_chunks_tx&0xff)<(temp_last_used_chunks_tx&0xff)) {
+ chunks_tx_progress = 1;
+ }
+ }
+ temp_used_chunks_tx >>= 8;
+ temp_last_used_chunks_tx >>= 8;
+ }
+ if ((mac_tx_frames_low) || (mac_tx_frames_high) ||
+ (!(used_chunks_tx&used_chunks_mask)) ||
+ (!(nesdev->last_used_chunks_tx&used_chunks_mask)) ||
+ (chunks_tx_progress) ) {
+ nesdev->last_used_chunks_tx = used_chunks_tx;
+ break;
+ }
+ nesdev->last_used_chunks_tx = used_chunks_tx;
+ barrier();
+
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, 0x00000005);
+ mh_pauses_sent++;
+ mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES);
+ if (mac_tx_pauses) {
+ nesdev->mac_pause_frames_sent += mac_tx_pauses;
+ break;
+ }
+
+ tx_control = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONTROL);
+ tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
+ tx_pause_quanta = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_QUANTA);
+ rx_control = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CONTROL);
+ rx_config = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CONFIG);
+ mac_exact_match = nes_read_indexed(nesdev, NES_IDX_MAC_EXACT_MATCH_BOTTOM);
+ mpp_debug = nes_read_indexed(nesdev, NES_IDX_MPP_DEBUG);
+
+ /* one last ditch effort to avoid a false positive */
+ mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES);
+ if (mac_tx_pauses) {
+ nesdev->last_mac_tx_pauses = nesdev->mac_pause_frames_sent;
+ nes_debug(NES_DBG_HW, "failsafe caught slow outbound pause\n");
+ break;
+ }
+ mh_detected++;
+
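+ /* Transmitter appears hung: quiesce the MAC, soft-reset, reprogram the SerDes, then restore the saved MAC registers */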
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, 0x00000000);
+ reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
+
+ nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value | 0x0000001d);
+
+ while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
+ & 0x00000040) != 0x00000040) && (i++ < 5000)) {
+ /* mdelay(1); */
+ }
+
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
+ serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0);
+
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x000bdef7);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE0, 0x9ce73000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE0, 0x0ff00000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET0, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS0, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0, 0x00000000);
+ if (nesadapter->OneG_Mode) {
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0182222);
+ } else {
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0042222);
+ }
+ serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_STATUS0);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000ff);
+
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, tx_control);
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_QUANTA, tx_pause_quanta);
+ nes_write_indexed(nesdev, NES_IDX_MAC_RX_CONTROL, rx_control);
+ nes_write_indexed(nesdev, NES_IDX_MAC_RX_CONFIG, rx_config);
+ nes_write_indexed(nesdev, NES_IDX_MAC_EXACT_MATCH_BOTTOM, mac_exact_match);
+ nes_write_indexed(nesdev, NES_IDX_MPP_DEBUG, mpp_debug);
+
+ } while (0);
+
+ nesadapter->mac_sw_state[0] = NES_MAC_SW_IDLE;
+no_mh_work:
+ nesdev->nesadapter->mh_timer.expires = jiffies + (HZ/5);
+ add_timer(&nesdev->nesadapter->mh_timer);
+}
+
+/**
+ * nes_clc - hourly timer routine that clears the per-port link interrupt counters
+ */
+void nes_clc(unsigned long parm)
+{
+ unsigned long flags;
+ struct nes_device *nesdev = (struct nes_device *)parm;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ nesadapter->link_interrupt_count[0] = 0;
+ nesadapter->link_interrupt_count[1] = 0;
+ nesadapter->link_interrupt_count[2] = 0;
+ nesadapter->link_interrupt_count[3] = 0;
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+
+ nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */
+ add_timer(&nesadapter->lc_timer);
+}
+
+
+/**
+ * nes_dump_mem
+ */
+void nes_dump_mem(unsigned int dump_debug_level, void *addr, int length)
+{
+ char xlate[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'a', 'b', 'c', 'd', 'e', 'f'};
+ char *ptr;
+ char hex_buf[80];
+ char ascii_buf[20];
+ int num_char;
+ int num_ascii;
+ int num_hex;
+
+ if (!(nes_debug_level & dump_debug_level)) {
+ return;
+ }
+
+ ptr = addr;
+ if (length > 0x100) {
+ nes_debug(dump_debug_level, "Length truncated from %x to %x\n", length, 0x100);
+ length = 0x100;
+ }
+ nes_debug(dump_debug_level, "Address=0x%p, length=0x%x (%d)\n", ptr, length, length);
+
+ memset(ascii_buf, 0, 20);
+ memset(hex_buf, 0, 80);
+
+ num_ascii = 0;
+ num_hex = 0;
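+ /* format 16 bytes per line: hex on the left, ASCII on the right, with a separator after the first 8 bytes */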
+ for (num_char = 0; num_char < length; num_char++) {
+ if (num_ascii == 8) {
+ ascii_buf[num_ascii++] = ' ';
+ hex_buf[num_hex++] = '-';
+ hex_buf[num_hex++] = ' ';
+ }
+
+ if (*ptr < 0x20 || *ptr > 0x7e)
+ ascii_buf[num_ascii++] = '.';
+ else
+ ascii_buf[num_ascii++] = *ptr;
+ hex_buf[num_hex++] = xlate[((*ptr & 0xf0) >> 4)];
+ hex_buf[num_hex++] = xlate[*ptr & 0x0f];
+ hex_buf[num_hex++] = ' ';
+ ptr++;
+
+ if (num_ascii >= 17) {
+ /* output line and reset */
+ nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf);
+ memset(ascii_buf, 0, 20);
+ memset(hex_buf, 0, 80);
+ num_ascii = 0;
+ num_hex = 0;
+ }
+ }
+
+ /* output the rest */
+ if (num_ascii) {
+ while (num_ascii < 17) {
+ if (num_ascii == 8) {
+ hex_buf[num_hex++] = ' ';
+ hex_buf[num_hex++] = ' ';
+ }
+ hex_buf[num_hex++] = ' ';
+ hex_buf[num_hex++] = ' ';
+ hex_buf[num_hex++] = ' ';
+ num_ascii++;
+ }
+
+ nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf);
+ }
+}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
new file mode 100644
index 000000000000..ffd4b425567f
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -0,0 +1,3917 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/random.h>
+#include <linux/highmem.h>
+#include <asm/byteorder.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "nes.h"
+
+#include <rdma/ib_umem.h>
+
+atomic_t mod_qp_timouts;
+atomic_t qps_created;
+atomic_t sw_qps_destroyed;
+
+
+/**
+ * nes_alloc_mw
+ */
+static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd)
+{
+ unsigned long flags;
+ struct nes_pd *nespd = to_nespd(ibpd);
+ struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_cqp_request *cqp_request;
+ struct nes_mr *nesmr;
+ struct ib_mw *ibmw;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ int ret;
+ u32 stag;
+ u32 stag_index = 0;
+ u32 next_stag_index = 0;
+ u32 driver_key = 0;
+ u8 stag_key = 0;
+
+ get_random_bytes(&next_stag_index, sizeof(next_stag_index));
+ stag_key = (u8)next_stag_index;
+
+ driver_key = 0;
+
+ next_stag_index >>= 8;
+ next_stag_index %= nesadapter->max_mr;
+
+ ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
+ nesadapter->max_mr, &stag_index, &next_stag_index);
+ if (ret) {
+ return ERR_PTR(ret);
+ }
+
+ nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
+ if (!nesmr) {
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ return ERR_PTR(-ENOMEM);
+ }
+
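+ /* Compose the STag: allocated index in the upper 24 bits, 8-bit key in the low byte */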
+ stag = stag_index << 8;
+ stag |= driver_key;
+ stag += (u32)stag_key;
+
+ nes_debug(NES_DBG_MR, "Registering STag 0x%08X, index = 0x%08X\n",
+ stag, stag_index);
+
+ /* Register the region with the adapter */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ kfree(nesmr);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] =
+ cpu_to_le32( NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_RIGHTS_REMOTE_READ |
+ NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_VA_TO |
+ NES_CQP_STAG_REM_ACC_EN);
+
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX, (nespd->pd_id & 0x00007fff));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+ stag, ret, cqp_request->major_code, cqp_request->minor_code);
+ if ((!ret) || (cqp_request->major_code)) {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ kfree(nesmr);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ if (!ret) {
+ return ERR_PTR(-ETIME);
+ } else {
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ }
+
+ nesmr->ibmw.rkey = stag;
+ nesmr->mode = IWNES_MEMREG_TYPE_MW;
+ ibmw = &nesmr->ibmw;
+ nesmr->pbl_4k = 0;
+ nesmr->pbls_used = 0;
+
+ return ibmw;
+}
+
+
+/**
+ * nes_dealloc_mw
+ */
+static int nes_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct nes_mr *nesmr = to_nesmw(ibmw);
+ struct nes_vnic *nesvnic = to_nesvnic(ibmw->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_cqp_request *cqp_request;
+ int err = 0;
+ unsigned long flags;
+ int ret;
+
+ /* Deallocate the window with the adapter */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
+ return -ENOMEM;
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, NES_CQP_DEALLOCATE_STAG);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, ibmw->rkey);
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ nes_debug(NES_DBG_MR, "Waiting for deallocate STag 0x%08X to complete.\n",
+ ibmw->rkey);
+ ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_MR, "Deallocate STag completed, wait_event_timeout ret = %u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+ ret, cqp_request->major_code, cqp_request->minor_code);
+ if ((!ret) || (cqp_request->major_code)) {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ if (!ret) {
+ err = -ETIME;
+ } else {
+ err = -EIO;
+ }
+ } else {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ }
+
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ (ibmw->rkey & 0x0fffff00) >> 8);
+ kfree(nesmr);
+
+ return err;
+}
+
+
+/**
+ * nes_bind_mw
+ */
+static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
+ struct ib_mw_bind *ibmw_bind)
+{
+ u64 u64temp;
+ struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ /* struct nes_mr *nesmr = to_nesmw(ibmw); */
+ struct nes_qp *nesqp = to_nesqp(ibqp);
+ struct nes_hw_qp_wqe *wqe;
+ unsigned long flags = 0;
+ u32 head;
+ u32 wqe_misc = 0;
+ u32 qsize;
+
+ if (nesqp->ibqp_state > IB_QPS_RTS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+
+ head = nesqp->hwqp.sq_head;
+ qsize = nesqp->hwqp.sq_tail;
+
+ /* Check for SQ overflow */
+ if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ return -EINVAL;
+ }
+
+ wqe = &nesqp->hwqp.sq_vbase[head];
+ /* nes_debug(NES_DBG_MR, "processing sq wqe at %p, head = %u.\n", wqe, head); */
+ nes_fill_init_qp_wqe(wqe, nesqp, head);
+ u64temp = ibmw_bind->wr_id;
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, u64temp);
+ wqe_misc = NES_IWARP_SQ_OP_BIND;
+
+ wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
+
+ if (ibmw_bind->send_flags & IB_SEND_SIGNALED)
+ wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
+
+ if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_WRITE) {
+ wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE;
+ }
+ if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_READ) {
+ wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_READ;
+ }
+
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX, ibmw_bind->mr->lkey);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX,
+ ibmw_bind->length);
+ wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0;
+ u64temp = (u64)ibmw_bind->addr;
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX, u64temp);
+
+ head++;
+ if (head >= qsize)
+ head = 0;
+
+ nesqp->hwqp.sq_head = head;
+ barrier();
+
+ nes_write32(nesdev->regs+NES_WQE_ALLOC,
+ (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
+
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+
+ return 0;
+}
+
+
+/**
+ * nes_alloc_fmr
+ */
+static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
+ int ibmr_access_flags,
+ struct ib_fmr_attr *ibfmr_attr)
+{
+ unsigned long flags;
+ struct nes_pd *nespd = to_nespd(ibpd);
+ struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_fmr *nesfmr;
+ struct nes_cqp_request *cqp_request;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ int ret;
+ u32 stag;
+ u32 stag_index = 0;
+ u32 next_stag_index = 0;
+ u32 driver_key = 0;
+ u32 opcode = 0;
+ u8 stag_key = 0;
+ int i=0;
+ struct nes_vpbl vpbl;
+
+ get_random_bytes(&next_stag_index, sizeof(next_stag_index));
+ stag_key = (u8)next_stag_index;
+
+ driver_key = 0;
+
+ next_stag_index >>= 8;
+ next_stag_index %= nesadapter->max_mr;
+
+ ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
+ nesadapter->max_mr, &stag_index, &next_stag_index);
+ if (ret) {
+ goto failed_resource_alloc;
+ }
+
+ nesfmr = kzalloc(sizeof(*nesfmr), GFP_KERNEL);
+ if (!nesfmr) {
+ ret = -ENOMEM;
+ goto failed_fmr_alloc;
+ }
+
+ nesfmr->nesmr.mode = IWNES_MEMREG_TYPE_FMR;
+ if (ibfmr_attr->max_pages == 1) {
+ /* use zero length PBL */
+ nesfmr->nesmr.pbl_4k = 0;
+ nesfmr->nesmr.pbls_used = 0;
+ } else if (ibfmr_attr->max_pages <= 32) {
+ /* use PBL 256 */
+ nesfmr->nesmr.pbl_4k = 0;
+ nesfmr->nesmr.pbls_used = 1;
+ } else if (ibfmr_attr->max_pages <= 512) {
+ /* use 4K PBLs */
+ nesfmr->nesmr.pbl_4k = 1;
+ nesfmr->nesmr.pbls_used = 1;
+ } else {
+ /* use two level 4K PBLs */
+ /* add support for two level 256B PBLs */
+ nesfmr->nesmr.pbl_4k = 1;
+ nesfmr->nesmr.pbls_used = 1 + (ibfmr_attr->max_pages >> 9) +
+ ((ibfmr_attr->max_pages & 511) ? 1 : 0);
+ }
+ /* Register the region with the adapter */
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+
+ /* track PBL resources */
+ if (nesfmr->nesmr.pbls_used != 0) {
+ if (nesfmr->nesmr.pbl_4k) {
+ if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ ret = -ENOMEM;
+ goto failed_vpbl_alloc;
+ } else {
+ nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used;
+ }
+ } else {
+ if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ ret = -ENOMEM;
+ goto failed_vpbl_alloc;
+ } else {
+ nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used;
+ }
+ }
+ }
+
+ /* one level pbl */
+ if (nesfmr->nesmr.pbls_used == 0) {
+ nesfmr->root_vpbl.pbl_vbase = NULL;
+ nes_debug(NES_DBG_MR, "zero level pbl \n");
+ } else if (nesfmr->nesmr.pbls_used == 1) {
+ /* can change it to kmalloc & dma_map_single */
+ nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
+ &nesfmr->root_vpbl.pbl_pbase);
+ if (!nesfmr->root_vpbl.pbl_vbase) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ ret = -ENOMEM;
+ goto failed_vpbl_alloc;
+ }
+ nesfmr->leaf_pbl_cnt = 0;
+ nes_debug(NES_DBG_MR, "one level pbl, root_vpbl.pbl_vbase=%p \n",
+ nesfmr->root_vpbl.pbl_vbase);
+ }
+ /* two level pbl */
+ else {
+ nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192,
+ &nesfmr->root_vpbl.pbl_pbase);
+ if (!nesfmr->root_vpbl.pbl_vbase) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ ret = -ENOMEM;
+ goto failed_vpbl_alloc;
+ }
+
+ nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
+ if (!nesfmr->root_vpbl.leaf_vpbl) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ ret = -ENOMEM;
+ goto failed_leaf_vpbl_alloc;
+ }
+
+ nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
+ nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p"
+ " leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n",
+ nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl);
+
+ for (i=0; i<nesfmr->leaf_pbl_cnt; i++)
+ nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase = NULL;
+
+ for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
+ vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
+ &vpbl.pbl_pbase);
+
+ if (!vpbl.pbl_vbase) {
+ ret = -ENOMEM;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ goto failed_leaf_vpbl_pages_alloc;
+ }
+
+ nesfmr->root_vpbl.pbl_vbase[i].pa_low = cpu_to_le32((u32)vpbl.pbl_pbase);
+ nesfmr->root_vpbl.pbl_vbase[i].pa_high = cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
+ nesfmr->root_vpbl.leaf_vpbl[i] = vpbl;
+
+ nes_debug(NES_DBG_MR, "pbase_low=0x%x, pbase_high=0x%x, vpbl=%p\n",
+ nesfmr->root_vpbl.pbl_vbase[i].pa_low,
+ nesfmr->root_vpbl.pbl_vbase[i].pa_high,
+ &nesfmr->root_vpbl.leaf_vpbl[i]);
+ }
+ }
+ nesfmr->ib_qp = NULL;
+ nesfmr->access_rights = 0;
+
+ stag = stag_index << 8;
+ stag |= driver_key;
+ stag += (u32)stag_key;
+
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
+ ret = -ENOMEM;
+ goto failed_leaf_vpbl_pages_alloc;
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ nes_debug(NES_DBG_MR, "Registering STag 0x%08X, index = 0x%08X\n",
+ stag, stag_index);
+
+ opcode = NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR;
+
+ if (nesfmr->nesmr.pbl_4k == 1)
+ opcode |= NES_CQP_STAG_PBL_BLK_SIZE;
+
+ if (ibmr_access_flags & IB_ACCESS_REMOTE_WRITE) {
+ opcode |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE |
+ NES_CQP_STAG_RIGHTS_LOCAL_WRITE | NES_CQP_STAG_REM_ACC_EN;
+ nesfmr->access_rights |=
+ NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_RIGHTS_LOCAL_WRITE |
+ NES_CQP_STAG_REM_ACC_EN;
+ }
+
+ if (ibmr_access_flags & IB_ACCESS_REMOTE_READ) {
+ opcode |= NES_CQP_STAG_RIGHTS_REMOTE_READ |
+ NES_CQP_STAG_RIGHTS_LOCAL_READ | NES_CQP_STAG_REM_ACC_EN;
+ nesfmr->access_rights |=
+ NES_CQP_STAG_RIGHTS_REMOTE_READ | NES_CQP_STAG_RIGHTS_LOCAL_READ |
+ NES_CQP_STAG_REM_ACC_EN;
+ }
+
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX, (nespd->pd_id & 0x00007fff));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
+
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] =
+ cpu_to_le32((nesfmr->nesmr.pbls_used>1) ?
+ (nesfmr->nesmr.pbls_used-1) : nesfmr->nesmr.pbls_used);
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+ stag, ret, cqp_request->major_code, cqp_request->minor_code);
+
+ if ((!ret) || (cqp_request->major_code)) {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ ret = (!ret) ? -ETIME : -EIO;
+ goto failed_leaf_vpbl_pages_alloc;
+ } else {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ }
+
+ nesfmr->nesmr.ibfmr.lkey = stag;
+ nesfmr->nesmr.ibfmr.rkey = stag;
+ nesfmr->attr = *ibfmr_attr;
+
+ return &nesfmr->nesmr.ibfmr;
+
+ failed_leaf_vpbl_pages_alloc:
+ /* unroll all allocated pages */
+ for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
+ if (nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase) {
+ pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase,
+ nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase);
+ }
+ }
+ if (nesfmr->root_vpbl.leaf_vpbl)
+ kfree(nesfmr->root_vpbl.leaf_vpbl);
+
+ failed_leaf_vpbl_alloc:
+ if (nesfmr->leaf_pbl_cnt == 0) {
+ if (nesfmr->root_vpbl.pbl_vbase)
+ pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase,
+ nesfmr->root_vpbl.pbl_pbase);
+ } else
+ pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
+ nesfmr->root_vpbl.pbl_pbase);
+
+ failed_vpbl_alloc:
+ kfree(nesfmr);
+
+ failed_fmr_alloc:
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+
+ failed_resource_alloc:
+ return ERR_PTR(ret);
+}
+
+
+/**
+ * nes_dealloc_fmr
+ */
+static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
+{
+ struct nes_mr *nesmr = to_nesmr_from_ibfmr(ibfmr);
+ struct nes_fmr *nesfmr = to_nesfmr(nesmr);
+ struct nes_vnic *nesvnic = to_nesvnic(ibfmr->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_mr temp_nesmr = *nesmr;
+ int i = 0;
+
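+	/* an FMR is torn down through the memory-window path: fill in a */
+	/* temporary ibmw so nes_dealloc_mw() can issue the deallocate CQP */
+	/* request for this STag */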
+ temp_nesmr.ibmw.device = ibfmr->device;
+ temp_nesmr.ibmw.pd = ibfmr->pd;
+ temp_nesmr.ibmw.rkey = ibfmr->rkey;
+ temp_nesmr.ibmw.uobject = NULL;
+
+ /* free the resources */
+ if (nesfmr->leaf_pbl_cnt == 0) {
+ /* single PBL case */
+ if (nesfmr->root_vpbl.pbl_vbase)
+ pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase,
+ nesfmr->root_vpbl.pbl_pbase);
+ } else {
+ for (i = 0; i < nesfmr->leaf_pbl_cnt; i++) {
+ pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase,
+ nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase);
+ }
+ kfree(nesfmr->root_vpbl.leaf_vpbl);
+ pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
+ nesfmr->root_vpbl.pbl_pbase);
+ }
+
+ return nes_dealloc_mw(&temp_nesmr.ibmw);
+}
+
+
+/**
+ * nes_map_phys_fmr
+ */
+static int nes_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int list_len, u64 iova)
+{
+ return 0;
+}
+
+
+/**
+ * nes_unmap_fmr
+ */
+static int nes_unmap_fmr(struct list_head *ibfmr_list)
+{
+ return 0;
+}
+
+
+
+/**
+ * nes_query_device
+ */
+static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
+{
+ struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_ib_device *nesibdev = nesvnic->nesibdev;
+
+ memset(props, 0, sizeof(*props));
+ memcpy(&props->sys_image_guid, nesvnic->netdev->dev_addr, 6);
+
+ props->fw_ver = nesdev->nesadapter->fw_ver;
+ props->device_cap_flags = nesdev->nesadapter->device_cap_flags;
+ props->vendor_id = nesdev->nesadapter->vendor_id;
+ props->vendor_part_id = nesdev->nesadapter->vendor_part_id;
+ props->hw_ver = nesdev->nesadapter->hw_rev;
+ props->max_mr_size = 0x80000000;
+ props->max_qp = nesibdev->max_qp;
+ props->max_qp_wr = nesdev->nesadapter->max_qp_wr - 2;
+ props->max_sge = nesdev->nesadapter->max_sge;
+ props->max_cq = nesibdev->max_cq;
+ props->max_cqe = nesdev->nesadapter->max_cqe - 1;
+ props->max_mr = nesibdev->max_mr;
+ props->max_mw = nesibdev->max_mr;
+ props->max_pd = nesibdev->max_pd;
+ props->max_sge_rd = 1;
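+	/* max_irrq_wr is an encoded value; translate it to the number of */
+	/* inbound RDMA reads that can be outstanding per QP */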
+ switch (nesdev->nesadapter->max_irrq_wr) {
+ case 0:
+ props->max_qp_rd_atom = 1;
+ break;
+ case 1:
+ props->max_qp_rd_atom = 4;
+ break;
+ case 2:
+ props->max_qp_rd_atom = 16;
+ break;
+ case 3:
+ props->max_qp_rd_atom = 32;
+ break;
+ default:
+ props->max_qp_rd_atom = 0;
+ }
+ props->max_qp_init_rd_atom = props->max_qp_wr;
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->max_map_per_fmr = 1;
+
+ return 0;
+}
+
+
+/**
+ * nes_query_port
+ */
+static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
+{
+ memset(props, 0, sizeof(*props));
+
+ props->max_mtu = IB_MTU_2048;
+ props->active_mtu = IB_MTU_2048;
+ props->lid = 1;
+ props->lmc = 0;
+ props->sm_lid = 0;
+ props->sm_sl = 0;
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = 0;
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
+ props->gid_tbl_len = 1;
+ props->pkey_tbl_len = 1;
+ props->qkey_viol_cntr = 0;
+ props->active_width = IB_WIDTH_4X;
+ props->active_speed = 1;
+ props->max_msg_sz = 0x80000000;
+
+ return 0;
+}
+
+
+/**
+ * nes_modify_port
+ */
+static int nes_modify_port(struct ib_device *ibdev, u8 port,
+ int port_modify_mask, struct ib_port_modify *props)
+{
+ return 0;
+}
+
+
+/**
+ * nes_query_pkey
+ */
+static int nes_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+{
+ *pkey = 0;
+ return 0;
+}
+
+
+/**
+ * nes_query_gid
+ */
+static int nes_query_gid(struct ib_device *ibdev, u8 port,
+ int index, union ib_gid *gid)
+{
+ struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+
+ memset(&(gid->raw[0]), 0, sizeof(gid->raw));
+ memcpy(&(gid->raw[0]), nesvnic->netdev->dev_addr, 6);
+
+ return 0;
+}
+
+
+/**
+ * nes_alloc_ucontext - Allocate the user context data structure. This keeps track
+ * of all objects associated with a particular user-mode client.
+ */
+static struct ib_ucontext *nes_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_alloc_ucontext_req req;
+ struct nes_alloc_ucontext_resp uresp;
+ struct nes_ucontext *nes_ucontext;
+ struct nes_ib_device *nesibdev = nesvnic->nesibdev;
+
+
+ if (ib_copy_from_udata(&req, udata, sizeof(struct nes_alloc_ucontext_req))) {
+ printk(KERN_ERR PFX "Invalid structure size on allocate user context.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (req.userspace_ver != NES_ABI_USERSPACE_VER) {
+		printk(KERN_ERR PFX "Invalid userspace driver version: detected %d, expected %d\n",
+ req.userspace_ver, NES_ABI_USERSPACE_VER);
+ return ERR_PTR(-EINVAL);
+ }
+
+
+ memset(&uresp, 0, sizeof uresp);
+
+ uresp.max_qps = nesibdev->max_qp;
+ uresp.max_pds = nesibdev->max_pd;
+ uresp.wq_size = nesdev->nesadapter->max_qp_wr * 2;
+ uresp.virtwq = nesadapter->virtwq;
+ uresp.kernel_ver = NES_ABI_KERNEL_VER;
+
+ nes_ucontext = kzalloc(sizeof *nes_ucontext, GFP_KERNEL);
+ if (!nes_ucontext)
+ return ERR_PTR(-ENOMEM);
+
+ nes_ucontext->nesdev = nesdev;
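+	/* mmap page offsets: offsets below max_pds select a doorbell region; */
+	/* the per-QP work queue regions follow, then the CQ regions */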
+ nes_ucontext->mmap_wq_offset = uresp.max_pds;
+ nes_ucontext->mmap_cq_offset = nes_ucontext->mmap_wq_offset +
+ ((sizeof(struct nes_hw_qp_wqe) * uresp.max_qps * 2) + PAGE_SIZE-1) /
+ PAGE_SIZE;
+
+
+ if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
+ kfree(nes_ucontext);
+ return ERR_PTR(-EFAULT);
+ }
+
+ INIT_LIST_HEAD(&nes_ucontext->cq_reg_mem_list);
+ INIT_LIST_HEAD(&nes_ucontext->qp_reg_mem_list);
+ atomic_set(&nes_ucontext->usecnt, 1);
+ return &nes_ucontext->ibucontext;
+}
+
+
+/**
+ * nes_dealloc_ucontext
+ */
+static int nes_dealloc_ucontext(struct ib_ucontext *context)
+{
+ /* struct nes_vnic *nesvnic = to_nesvnic(context->device); */
+ /* struct nes_device *nesdev = nesvnic->nesdev; */
+ struct nes_ucontext *nes_ucontext = to_nesucontext(context);
+
+ if (!atomic_dec_and_test(&nes_ucontext->usecnt))
+ return 0;
+ kfree(nes_ucontext);
+ return 0;
+}
+
+
+/**
+ * nes_mmap
+ */
+static int nes_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ unsigned long index;
+ struct nes_vnic *nesvnic = to_nesvnic(context->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ /* struct nes_adapter *nesadapter = nesdev->nesadapter; */
+ struct nes_ucontext *nes_ucontext;
+ struct nes_qp *nesqp;
+
+ nes_ucontext = to_nesucontext(context);
+
+
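+	/* offsets at or above mmap_wq_offset map a QP's work queue pages; */
+	/* anything below that maps one of the user doorbell pages */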
+ if (vma->vm_pgoff >= nes_ucontext->mmap_wq_offset) {
+ index = (vma->vm_pgoff - nes_ucontext->mmap_wq_offset) * PAGE_SIZE;
+ index /= ((sizeof(struct nes_hw_qp_wqe) * nesdev->nesadapter->max_qp_wr * 2) +
+ PAGE_SIZE-1) & (~(PAGE_SIZE-1));
+ if (!test_bit(index, nes_ucontext->allocated_wqs)) {
+ nes_debug(NES_DBG_MMAP, "wq %lu not allocated\n", index);
+ return -EFAULT;
+ }
+ nesqp = nes_ucontext->mmap_nesqp[index];
+ if (nesqp == NULL) {
+ nes_debug(NES_DBG_MMAP, "wq %lu has a NULL QP base.\n", index);
+ return -EFAULT;
+ }
+ if (remap_pfn_range(vma, vma->vm_start,
+ virt_to_phys(nesqp->hwqp.sq_vbase) >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ nes_debug(NES_DBG_MMAP, "remap_pfn_range failed.\n");
+ return -EAGAIN;
+ }
+ vma->vm_private_data = nesqp;
+ return 0;
+ } else {
+ index = vma->vm_pgoff;
+ if (!test_bit(index, nes_ucontext->allocated_doorbells))
+ return -EFAULT;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (io_remap_pfn_range(vma, vma->vm_start,
+ (nesdev->doorbell_start +
+ ((nes_ucontext->mmap_db_index[index] - nesdev->base_doorbell_index) * 4096))
+ >> PAGE_SHIFT, PAGE_SIZE, vma->vm_page_prot))
+ return -EAGAIN;
+ vma->vm_private_data = nes_ucontext;
+ return 0;
+ }
+
+ return -ENOSYS;
+}
+
+
+/**
+ * nes_alloc_pd
+ */
+static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context, struct ib_udata *udata)
+{
+ struct nes_pd *nespd;
+ struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_ucontext *nesucontext;
+ struct nes_alloc_pd_resp uresp;
+ u32 pd_num = 0;
+ int err;
+
+ nes_debug(NES_DBG_PD, "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n",
+ nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, context,
+ atomic_read(&nesvnic->netdev->refcnt));
+
+ err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds,
+ nesadapter->max_pd, &pd_num, &nesadapter->next_pd);
+ if (err) {
+ return ERR_PTR(err);
+ }
+
+ nespd = kzalloc(sizeof (struct nes_pd), GFP_KERNEL);
+ if (!nespd) {
+ nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ nes_debug(NES_DBG_PD, "Allocating PD (%p) for ib device %s\n",
+ nespd, nesvnic->nesibdev->ibdev.name);
+
+ nespd->pd_id = (pd_num << (PAGE_SHIFT-12)) + nesadapter->base_pd;
+
+ if (context) {
+ nesucontext = to_nesucontext(context);
+ nespd->mmap_db_index = find_next_zero_bit(nesucontext->allocated_doorbells,
+ NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db);
+		nes_debug(NES_DBG_PD, "find_next_zero_bit on doorbells returned %u, mapping pd_id %u.\n",
+ nespd->mmap_db_index, nespd->pd_id);
+		if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) {
+ nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n");
+ nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
+ kfree(nespd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ uresp.pd_id = nespd->pd_id;
+ uresp.mmap_db_index = nespd->mmap_db_index;
+ if (ib_copy_to_udata(udata, &uresp, sizeof (struct nes_alloc_pd_resp))) {
+ nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
+ kfree(nespd);
+ return ERR_PTR(-EFAULT);
+ }
+
+ set_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells);
+ nesucontext->mmap_db_index[nespd->mmap_db_index] = nespd->pd_id;
+ nesucontext->first_free_db = nespd->mmap_db_index + 1;
+ }
+
+ nes_debug(NES_DBG_PD, "PD%u structure located @%p.\n", nespd->pd_id, nespd);
+ return &nespd->ibpd;
+}
+
+
+/**
+ * nes_dealloc_pd
+ */
+static int nes_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct nes_ucontext *nesucontext;
+ struct nes_pd *nespd = to_nespd(ibpd);
+ struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+
+ if ((ibpd->uobject) && (ibpd->uobject->context)) {
+ nesucontext = to_nesucontext(ibpd->uobject->context);
+ nes_debug(NES_DBG_PD, "Clearing bit %u from allocated doorbells\n",
+ nespd->mmap_db_index);
+ clear_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells);
+ nesucontext->mmap_db_index[nespd->mmap_db_index] = 0;
+ if (nesucontext->first_free_db > nespd->mmap_db_index) {
+ nesucontext->first_free_db = nespd->mmap_db_index;
+ }
+ }
+
+ nes_debug(NES_DBG_PD, "Deallocating PD%u structure located @%p.\n",
+ nespd->pd_id, nespd);
+ nes_free_resource(nesadapter, nesadapter->allocated_pds,
+ (nespd->pd_id-nesadapter->base_pd)>>(PAGE_SHIFT-12));
+ kfree(nespd);
+
+ return 0;
+}
+
+
+/**
+ * nes_create_ah
+ */
+static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+
+/**
+ * nes_destroy_ah
+ */
+static int nes_destroy_ah(struct ib_ah *ah)
+{
+ return -ENOSYS;
+}
+
+
+/**
+ * nes_get_encoded_size
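+ *
+ * Rounds *size up to the nearest supported ring size (32, 128 or 512 WQEs)
+ * and returns the hardware encoding for it, or 0 if the request is too
+ * large to encode.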
+ */
+static inline u8 nes_get_encoded_size(int *size)
+{
+ u8 encoded_size = 0;
+ if (*size <= 32) {
+ *size = 32;
+ encoded_size = 1;
+ } else if (*size <= 128) {
+ *size = 128;
+ encoded_size = 2;
+ } else if (*size <= 512) {
+ *size = 512;
+ encoded_size = 3;
+ }
+ return (encoded_size);
+}
+
+
+
+/**
+ * nes_setup_virt_qp
+ */
+static int nes_setup_virt_qp(struct nes_qp *nesqp, struct nes_pbl *nespbl,
+ struct nes_vnic *nesvnic, int sq_size, int rq_size)
+{
+ unsigned long flags;
+ void *mem;
+ __le64 *pbl = NULL;
+ __le64 *tpbl;
+ __le64 *pblbuffer;
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 pbl_entries;
+ u8 rq_pbl_entries;
+ u8 sq_pbl_entries;
+
+ pbl_entries = nespbl->pbl_size >> 3;
+ nes_debug(NES_DBG_QP, "Userspace PBL, pbl_size=%u, pbl_entries = %d pbl_vbase=%p, pbl_pbase=%p\n",
+ nespbl->pbl_size, pbl_entries,
+ (void *)nespbl->pbl_vbase,
+ (void *)nespbl->pbl_pbase);
+ pbl = (__le64 *) nespbl->pbl_vbase; /* points to first pbl entry */
+	/* now let's set the sq and rq physical bases from the userspace PBL: */
+	/* sq_pbase comes from the first entry and rq_pbase follows the sq entries */
+ rq_pbl_entries = (rq_size * sizeof(struct nes_hw_qp_wqe)) >> 12;
+ sq_pbl_entries = (sq_size * sizeof(struct nes_hw_qp_wqe)) >> 12;
+ nesqp->hwqp.sq_pbase = (le32_to_cpu(((__le32 *)pbl)[0])) | ((u64)((le32_to_cpu(((__le32 *)pbl)[1]))) << 32);
+ if (!nespbl->page) {
+ nes_debug(NES_DBG_QP, "QP nespbl->page is NULL \n");
+ kfree(nespbl);
+ return -ENOMEM;
+ }
+
+ nesqp->hwqp.sq_vbase = kmap(nespbl->page);
+ nesqp->page = nespbl->page;
+ if (!nesqp->hwqp.sq_vbase) {
+ nes_debug(NES_DBG_QP, "QP sq_vbase kmap failed\n");
+ kfree(nespbl);
+ return -ENOMEM;
+ }
+
+	/* Skip past the SQ page entries to reach the start of the RQ pages */
+ pbl += sq_pbl_entries;
+ nesqp->hwqp.rq_pbase = (le32_to_cpu(((__le32 *)pbl)[0])) | ((u64)((le32_to_cpu(((__le32 *)pbl)[1]))) << 32);
+ /* nesqp->hwqp.rq_vbase = bus_to_virt(*pbl); */
+ /*nesqp->hwqp.rq_vbase = phys_to_virt(*pbl); */
+
+ nes_debug(NES_DBG_QP, "QP sq_vbase= %p sq_pbase=%p rq_vbase=%p rq_pbase=%p\n",
+ nesqp->hwqp.sq_vbase, (void *)nesqp->hwqp.sq_pbase,
+ nesqp->hwqp.rq_vbase, (void *)nesqp->hwqp.rq_pbase);
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ if (!nesadapter->free_256pbl) {
+ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
+ nespbl->pbl_pbase);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kunmap(nesqp->page);
+ kfree(nespbl);
+ return -ENOMEM;
+ }
+ nesadapter->free_256pbl--;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+
+ nesqp->pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 256, &nesqp->pbl_pbase);
+ pblbuffer = nesqp->pbl_vbase;
+ if (!nesqp->pbl_vbase) {
+ /* memory allocated during nes_reg_user_mr() */
+ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
+ nespbl->pbl_pbase);
+ kfree(nespbl);
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ nesadapter->free_256pbl++;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kunmap(nesqp->page);
+ return -ENOMEM;
+ }
+ memset(nesqp->pbl_vbase, 0, 256);
+ /* fill in the page address in the pbl buffer.. */
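+	/* the first sq_pbl_entries user entries are copied starting at entry */
+	/* 16 of the 256-byte hardware PBL; the remaining rq entries are */
+	/* copied to the start of the buffer */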
+ tpbl = pblbuffer + 16;
+ pbl = (__le64 *)nespbl->pbl_vbase;
+ while (sq_pbl_entries--)
+ *tpbl++ = *pbl++;
+ tpbl = pblbuffer;
+ while (rq_pbl_entries--)
+ *tpbl++ = *pbl++;
+
+ /* done with memory allocated during nes_reg_user_mr() */
+ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
+ nespbl->pbl_pbase);
+ kfree(nespbl);
+
+ nesqp->qp_mem_size =
+ max((u32)sizeof(struct nes_qp_context), ((u32)256)) + 256; /* this is Q2 */
+ /* Round up to a multiple of a page */
+ nesqp->qp_mem_size += PAGE_SIZE - 1;
+ nesqp->qp_mem_size &= ~(PAGE_SIZE - 1);
+
+ mem = pci_alloc_consistent(nesdev->pcidev, nesqp->qp_mem_size,
+ &nesqp->hwqp.q2_pbase);
+
+ if (!mem) {
+ pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase);
+ nesqp->pbl_vbase = NULL;
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ nesadapter->free_256pbl++;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kunmap(nesqp->page);
+ return -ENOMEM;
+ }
+ nesqp->hwqp.q2_vbase = mem;
+ mem += 256;
+ memset(nesqp->hwqp.q2_vbase, 0, 256);
+ nesqp->nesqp_context = mem;
+ memset(nesqp->nesqp_context, 0, sizeof(*nesqp->nesqp_context));
+ nesqp->nesqp_context_pbase = nesqp->hwqp.q2_pbase + 256;
+
+ return 0;
+}
+
+
+/**
+ * nes_setup_mmap_qp
+ */
+static int nes_setup_mmap_qp(struct nes_qp *nesqp, struct nes_vnic *nesvnic,
+ int sq_size, int rq_size)
+{
+ void *mem;
+ struct nes_device *nesdev = nesvnic->nesdev;
+
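+	/* a single contiguous DMA allocation holds the SQ, the RQ, the */
+	/* 256-byte Q2 area and the QP context, in that order */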
+ nesqp->qp_mem_size = (sizeof(struct nes_hw_qp_wqe) * sq_size) +
+ (sizeof(struct nes_hw_qp_wqe) * rq_size) +
+ max((u32)sizeof(struct nes_qp_context), ((u32)256)) +
+ 256; /* this is Q2 */
+ /* Round up to a multiple of a page */
+ nesqp->qp_mem_size += PAGE_SIZE - 1;
+ nesqp->qp_mem_size &= ~(PAGE_SIZE - 1);
+
+ mem = pci_alloc_consistent(nesdev->pcidev, nesqp->qp_mem_size,
+ &nesqp->hwqp.sq_pbase);
+ if (!mem)
+ return -ENOMEM;
+ nes_debug(NES_DBG_QP, "PCI consistent memory for "
+ "host descriptor rings located @ %p (pa = 0x%08lX.) size = %u.\n",
+ mem, (unsigned long)nesqp->hwqp.sq_pbase, nesqp->qp_mem_size);
+
+ memset(mem, 0, nesqp->qp_mem_size);
+
+ nesqp->hwqp.sq_vbase = mem;
+ mem += sizeof(struct nes_hw_qp_wqe) * sq_size;
+
+ nesqp->hwqp.rq_vbase = mem;
+ nesqp->hwqp.rq_pbase = nesqp->hwqp.sq_pbase +
+ sizeof(struct nes_hw_qp_wqe) * sq_size;
+ mem += sizeof(struct nes_hw_qp_wqe) * rq_size;
+
+ nesqp->hwqp.q2_vbase = mem;
+ nesqp->hwqp.q2_pbase = nesqp->hwqp.rq_pbase +
+ sizeof(struct nes_hw_qp_wqe) * rq_size;
+ mem += 256;
+ memset(nesqp->hwqp.q2_vbase, 0, 256);
+
+ nesqp->nesqp_context = mem;
+ nesqp->nesqp_context_pbase = nesqp->hwqp.q2_pbase + 256;
+ memset(nesqp->nesqp_context, 0, sizeof(*nesqp->nesqp_context));
+ return 0;
+}
+
+
+/**
+ * nes_free_qp_mem() is to free up the qp's pci_alloc_consistent() memory.
+ */
+static inline void nes_free_qp_mem(struct nes_device *nesdev,
+ struct nes_qp *nesqp, int virt_wqs)
+{
+ unsigned long flags;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ if (!virt_wqs) {
+ pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
+ nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase);
+	} else {
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ nesadapter->free_256pbl++;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
+		pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase);
+ nesqp->pbl_vbase = NULL;
+ kunmap(nesqp->page);
+ }
+}
+
+
+/**
+ * nes_create_qp
+ */
+static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr, struct ib_udata *udata)
+{
+ u64 u64temp= 0;
+ u64 u64nesqp = 0;
+ struct nes_pd *nespd = to_nespd(ibpd);
+ struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_qp *nesqp;
+ struct nes_cq *nescq;
+ struct nes_ucontext *nes_ucontext;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_cqp_request *cqp_request;
+ struct nes_create_qp_req req;
+ struct nes_create_qp_resp uresp;
+ struct nes_pbl *nespbl = NULL;
+ u32 qp_num = 0;
+ u32 opcode = 0;
+ /* u32 counter = 0; */
+ void *mem;
+ unsigned long flags;
+ int ret;
+ int err;
+ int virt_wqs = 0;
+ int sq_size;
+ int rq_size;
+ u8 sq_encoded_size;
+ u8 rq_encoded_size;
+ /* int counter; */
+
+ atomic_inc(&qps_created);
+ switch (init_attr->qp_type) {
+ case IB_QPT_RC:
+ if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
+ init_attr->cap.max_inline_data = 0;
+ } else {
+ init_attr->cap.max_inline_data = 64;
+ }
+ sq_size = init_attr->cap.max_send_wr;
+ rq_size = init_attr->cap.max_recv_wr;
+
+			/* check that the requested SQ and RQ sizes can be encoded */
+ sq_encoded_size = nes_get_encoded_size(&sq_size);
+ rq_encoded_size = nes_get_encoded_size(&rq_size);
+
+ if ((!sq_encoded_size) || (!rq_encoded_size)) {
+ nes_debug(NES_DBG_QP, "ERROR bad rq (%u) or sq (%u) size\n",
+ rq_size, sq_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+			init_attr->cap.max_send_wr = sq_size - 2;
+			init_attr->cap.max_recv_wr = rq_size - 1;
+ nes_debug(NES_DBG_QP, "RQ size=%u, SQ Size=%u\n", rq_size, sq_size);
+
+ ret = nes_alloc_resource(nesadapter, nesadapter->allocated_qps,
+ nesadapter->max_qp, &qp_num, &nesadapter->next_qp);
+ if (ret) {
+ return ERR_PTR(ret);
+ }
+
+ /* Need 512 (actually now 1024) byte alignment on this structure */
+ mem = kzalloc(sizeof(*nesqp)+NES_SW_CONTEXT_ALIGN-1, GFP_KERNEL);
+ if (!mem) {
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ nes_debug(NES_DBG_QP, "Unable to allocate QP\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ u64nesqp = (unsigned long)mem;
+ u64nesqp += ((u64)NES_SW_CONTEXT_ALIGN) - 1;
+ u64temp = ((u64)NES_SW_CONTEXT_ALIGN) - 1;
+ u64nesqp &= ~u64temp;
+ nesqp = (struct nes_qp *)(unsigned long)u64nesqp;
+ /* nes_debug(NES_DBG_QP, "nesqp=%p, allocated buffer=%p. Rounded to closest %u\n",
+ nesqp, mem, NES_SW_CONTEXT_ALIGN); */
+ nesqp->allocated_buffer = mem;
+
+ if (udata) {
+ if (ib_copy_from_udata(&req, udata, sizeof(struct nes_create_qp_req))) {
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ kfree(nesqp->allocated_buffer);
+ nes_debug(NES_DBG_QP, "ib_copy_from_udata() Failed \n");
+					return ERR_PTR(-EFAULT);
+ }
+ if (req.user_wqe_buffers) {
+ virt_wqs = 1;
+ }
+ if ((ibpd->uobject) && (ibpd->uobject->context)) {
+ nesqp->user_mode = 1;
+ nes_ucontext = to_nesucontext(ibpd->uobject->context);
+ if (virt_wqs) {
+ err = 1;
+ list_for_each_entry(nespbl, &nes_ucontext->qp_reg_mem_list, list) {
+ if (nespbl->user_base == (unsigned long )req.user_wqe_buffers) {
+ list_del(&nespbl->list);
+ err = 0;
+ nes_debug(NES_DBG_QP, "Found PBL for virtual QP. nespbl=%p. user_base=0x%lx\n",
+ nespbl, nespbl->user_base);
+ break;
+ }
+ }
+ if (err) {
+ nes_debug(NES_DBG_QP, "Didn't Find PBL for virtual QP. address = %llx.\n",
+ (long long unsigned int)req.user_wqe_buffers);
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ kfree(nesqp->allocated_buffer);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ nes_ucontext = to_nesucontext(ibpd->uobject->context);
+ nesqp->mmap_sq_db_index =
+ find_next_zero_bit(nes_ucontext->allocated_wqs,
+ NES_MAX_USER_WQ_REGIONS, nes_ucontext->first_free_wq);
+ /* nes_debug(NES_DBG_QP, "find_first_zero_biton wqs returned %u\n",
+ nespd->mmap_db_index); */
+				if (nesqp->mmap_sq_db_index >= NES_MAX_USER_WQ_REGIONS) {
+ nes_debug(NES_DBG_QP,
+ "db index > max user regions, failing create QP\n");
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ if (virt_wqs) {
+ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
+ nespbl->pbl_pbase);
+ kfree(nespbl);
+ }
+ kfree(nesqp->allocated_buffer);
+ return ERR_PTR(-ENOMEM);
+ }
+ set_bit(nesqp->mmap_sq_db_index, nes_ucontext->allocated_wqs);
+ nes_ucontext->mmap_nesqp[nesqp->mmap_sq_db_index] = nesqp;
+ nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index + 1;
+ } else {
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ kfree(nesqp->allocated_buffer);
+ return ERR_PTR(-EFAULT);
+ }
+ }
+ err = (!virt_wqs) ? nes_setup_mmap_qp(nesqp, nesvnic, sq_size, rq_size) :
+ nes_setup_virt_qp(nesqp, nespbl, nesvnic, sq_size, rq_size);
+ if (err) {
+ nes_debug(NES_DBG_QP,
+					"error getting qp mem, code = %d\n", err);
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ kfree(nesqp->allocated_buffer);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ nesqp->hwqp.sq_size = sq_size;
+ nesqp->hwqp.sq_encoded_size = sq_encoded_size;
+ nesqp->hwqp.sq_head = 1;
+ nesqp->hwqp.rq_size = rq_size;
+ nesqp->hwqp.rq_encoded_size = rq_encoded_size;
+ /* nes_debug(NES_DBG_QP, "nesqp->nesqp_context_pbase = %p\n",
+ (void *)nesqp->nesqp_context_pbase);
+ */
+ nesqp->hwqp.qp_id = qp_num;
+ nesqp->ibqp.qp_num = nesqp->hwqp.qp_id;
+ nesqp->nespd = nespd;
+
+ nescq = to_nescq(init_attr->send_cq);
+ nesqp->nesscq = nescq;
+ nescq = to_nescq(init_attr->recv_cq);
+ nesqp->nesrcq = nescq;
+
+ nesqp->nesqp_context->misc |= cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) <<
+ NES_QPCONTEXT_MISC_PCI_FCN_SHIFT);
+ nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.rq_encoded_size <<
+ NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT);
+ nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.sq_encoded_size <<
+ NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT);
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_PRIV_EN);
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_FAST_REGISTER_EN);
+ nesqp->nesqp_context->cqs = cpu_to_le32(nesqp->nesscq->hw_cq.cq_number +
+ ((u32)nesqp->nesrcq->hw_cq.cq_number << 16));
+ u64temp = (u64)nesqp->hwqp.sq_pbase;
+ nesqp->nesqp_context->sq_addr_low = cpu_to_le32((u32)u64temp);
+ nesqp->nesqp_context->sq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
+
+
+ if (!virt_wqs) {
+ u64temp = (u64)nesqp->hwqp.sq_pbase;
+ nesqp->nesqp_context->sq_addr_low = cpu_to_le32((u32)u64temp);
+ nesqp->nesqp_context->sq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
+ u64temp = (u64)nesqp->hwqp.rq_pbase;
+ nesqp->nesqp_context->rq_addr_low = cpu_to_le32((u32)u64temp);
+ nesqp->nesqp_context->rq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
+ } else {
+ u64temp = (u64)nesqp->pbl_pbase;
+ nesqp->nesqp_context->rq_addr_low = cpu_to_le32((u32)u64temp);
+ nesqp->nesqp_context->rq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
+ }
+
+ /* nes_debug(NES_DBG_QP, "next_qp_nic_index=%u, using nic_index=%d\n",
+ nesvnic->next_qp_nic_index,
+ nesvnic->qp_nic_index[nesvnic->next_qp_nic_index]); */
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ nesqp->nesqp_context->misc2 |= cpu_to_le32(
+ (u32)nesvnic->qp_nic_index[nesvnic->next_qp_nic_index] <<
+ NES_QPCONTEXT_MISC2_NIC_INDEX_SHIFT);
+ nesvnic->next_qp_nic_index++;
+ if ((nesvnic->next_qp_nic_index > 3) ||
+ (nesvnic->qp_nic_index[nesvnic->next_qp_nic_index] == 0xf)) {
+ nesvnic->next_qp_nic_index = 0;
+ }
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+
+ nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32((u32)nesqp->nespd->pd_id << 16);
+ u64temp = (u64)nesqp->hwqp.q2_pbase;
+ nesqp->nesqp_context->q2_addr_low = cpu_to_le32((u32)u64temp);
+ nesqp->nesqp_context->q2_addr_high = cpu_to_le32((u32)(u64temp >> 32));
+ nesqp->nesqp_context->aeq_token_low = cpu_to_le32((u32)((unsigned long)(nesqp)));
+ nesqp->nesqp_context->aeq_token_high = cpu_to_le32((u32)(upper_32_bits((unsigned long)(nesqp))));
+ nesqp->nesqp_context->ird_ord_sizes = cpu_to_le32(NES_QPCONTEXT_ORDIRD_ALSMM |
+ ((((u32)nesadapter->max_irrq_wr) <<
+ NES_QPCONTEXT_ORDIRD_IRDSIZE_SHIFT) & NES_QPCONTEXT_ORDIRD_IRDSIZE_MASK));
+ if (disable_mpa_crc) {
+ nes_debug(NES_DBG_QP, "Disabling MPA crc checking due to module option.\n");
+ nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(NES_QPCONTEXT_ORDIRD_RNMC);
+ }
+
+
+ /* Create the QP */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_QP, "Failed to get a cqp_request\n");
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+				nes_free_qp_mem(nesdev, nesqp, virt_wqs);
+ kfree(nesqp->allocated_buffer);
+ return ERR_PTR(-ENOMEM);
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ if (!virt_wqs) {
+ opcode = NES_CQP_CREATE_QP | NES_CQP_QP_TYPE_IWARP |
+ NES_CQP_QP_IWARP_STATE_IDLE;
+ } else {
+ opcode = NES_CQP_CREATE_QP | NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_VIRT_WQS |
+ NES_CQP_QP_IWARP_STATE_IDLE;
+ }
+ opcode |= NES_CQP_QP_CQS_VALID;
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
+
+ u64temp = (u64)nesqp->nesqp_context_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ nes_debug(NES_DBG_QP, "Waiting for create iWARP QP%u to complete.\n",
+ nesqp->hwqp.qp_id);
+ ret = wait_event_timeout(cqp_request->waitq,
+ (cqp_request->request_done != 0), NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_QP, "Create iwarp QP%u completed, wait_event_timeout ret=%u,"
+ " nesdev->cqp_head = %u, nesdev->cqp.sq_tail = %u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+ nesqp->hwqp.qp_id, ret, nesdev->cqp.sq_head, nesdev->cqp.sq_tail,
+ cqp_request->major_code, cqp_request->minor_code);
+ if ((!ret) || (cqp_request->major_code)) {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+				nes_free_qp_mem(nesdev, nesqp, virt_wqs);
+ kfree(nesqp->allocated_buffer);
+ if (!ret) {
+ return ERR_PTR(-ETIME);
+ } else {
+ return ERR_PTR(-EIO);
+ }
+ } else {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ }
+
+ if (ibpd->uobject) {
+ uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index;
+ uresp.actual_sq_size = sq_size;
+ uresp.actual_rq_size = rq_size;
+ uresp.qp_id = nesqp->hwqp.qp_id;
+ uresp.nes_drv_opt = nes_drv_opt;
+ if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+					nes_free_qp_mem(nesdev, nesqp, virt_wqs);
+ kfree(nesqp->allocated_buffer);
+ return ERR_PTR(-EFAULT);
+ }
+ }
+
+ nes_debug(NES_DBG_QP, "QP%u structure located @%p.Size = %u.\n",
+ nesqp->hwqp.qp_id, nesqp, (u32)sizeof(*nesqp));
+ spin_lock_init(&nesqp->lock);
+ init_waitqueue_head(&nesqp->state_waitq);
+ init_waitqueue_head(&nesqp->kick_waitq);
+ nes_add_ref(&nesqp->ibqp);
+ break;
+ default:
+ nes_debug(NES_DBG_QP, "Invalid QP type: %d\n", init_attr->qp_type);
+ return ERR_PTR(-EINVAL);
+ break;
+ }
+
+ /* update the QP table */
+ nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
+ nes_debug(NES_DBG_QP, "netdev refcnt=%u\n",
+ atomic_read(&nesvnic->netdev->refcnt));
+
+ return &nesqp->ibqp;
+}
+
+
+/**
+ * nes_destroy_qp
+ */
+static int nes_destroy_qp(struct ib_qp *ibqp)
+{
+ struct nes_qp *nesqp = to_nesqp(ibqp);
+ /* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */
+ struct nes_ucontext *nes_ucontext;
+ struct ib_qp_attr attr;
+ struct iw_cm_id *cm_id;
+ struct iw_cm_event cm_event;
+ int ret;
+
+ atomic_inc(&sw_qps_destroyed);
+ nesqp->destroyed = 1;
+
+ /* Blow away the connection if it exists. */
+ if (nesqp->ibqp_state >= IB_QPS_INIT && nesqp->ibqp_state <= IB_QPS_RTS) {
+ /* if (nesqp->ibqp_state == IB_QPS_RTS) { */
+ attr.qp_state = IB_QPS_ERR;
+ nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
+ }
+
+ if (((nesqp->ibqp_state == IB_QPS_INIT) ||
+ (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) {
+ cm_id = nesqp->cm_id;
+ cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
+ cm_event.status = IW_CM_EVENT_STATUS_TIMEOUT;
+ cm_event.local_addr = cm_id->local_addr;
+ cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.private_data = NULL;
+ cm_event.private_data_len = 0;
+
+ nes_debug(NES_DBG_QP, "Generating a CM Timeout Event for "
+ "QP%u. cm_id = %p, refcount = %u. \n",
+ nesqp->hwqp.qp_id, cm_id, atomic_read(&nesqp->refcount));
+
+ cm_id->rem_ref(cm_id);
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ if (ret)
+ nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret);
+ }
+
+
+ if (nesqp->user_mode) {
+ if ((ibqp->uobject)&&(ibqp->uobject->context)) {
+ nes_ucontext = to_nesucontext(ibqp->uobject->context);
+ clear_bit(nesqp->mmap_sq_db_index, nes_ucontext->allocated_wqs);
+ nes_ucontext->mmap_nesqp[nesqp->mmap_sq_db_index] = NULL;
+ if (nes_ucontext->first_free_wq > nesqp->mmap_sq_db_index) {
+ nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index;
+ }
+ }
+ if (nesqp->pbl_pbase)
+ kunmap(nesqp->page);
+ }
+
+ nes_rem_ref(&nesqp->ibqp);
+ return 0;
+}
+
+
+/**
+ * nes_create_cq
+ */
+static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
+ int comp_vector,
+ struct ib_ucontext *context, struct ib_udata *udata)
+{
+ u64 u64temp;
+ struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_cq *nescq;
+ struct nes_ucontext *nes_ucontext = NULL;
+ struct nes_cqp_request *cqp_request;
+ void *mem = NULL;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_pbl *nespbl = NULL;
+ struct nes_create_cq_req req;
+ struct nes_create_cq_resp resp;
+ u32 cq_num = 0;
+ u32 opcode = 0;
+ u32 pbl_entries = 1;
+ int err;
+ unsigned long flags;
+ int ret;
+
+ err = nes_alloc_resource(nesadapter, nesadapter->allocated_cqs,
+ nesadapter->max_cq, &cq_num, &nesadapter->next_cq);
+ if (err) {
+ return ERR_PTR(err);
+ }
+
+ nescq = kzalloc(sizeof(struct nes_cq), GFP_KERNEL);
+ if (!nescq) {
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ nes_debug(NES_DBG_CQ, "Unable to allocate nes_cq struct\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ nescq->hw_cq.cq_size = max(entries + 1, 5);
+ nescq->hw_cq.cq_number = cq_num;
+ nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1;
+
+
+ if (context) {
+ nes_ucontext = to_nesucontext(context);
+ if (ib_copy_from_udata(&req, udata, sizeof (struct nes_create_cq_req))) {
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(-EFAULT);
+ }
+ nesvnic->mcrq_ucontext = nes_ucontext;
+ nes_ucontext->mcrqf = req.mcrqf;
+ if (nes_ucontext->mcrqf) {
+ if (nes_ucontext->mcrqf & 0x80000000)
+ nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 12 + (nes_ucontext->mcrqf & 0xf) - 1;
+ else if (nes_ucontext->mcrqf & 0x40000000)
+ nescq->hw_cq.cq_number = nes_ucontext->mcrqf & 0xffff;
+ else
+ nescq->hw_cq.cq_number = nesvnic->mcrq_qp_id + nes_ucontext->mcrqf-1;
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ }
+ nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n",
+ (unsigned long)req.user_cq_buffer, entries);
+ list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) {
+ if (nespbl->user_base == (unsigned long )req.user_cq_buffer) {
+ list_del(&nespbl->list);
+ err = 0;
+ nes_debug(NES_DBG_CQ, "Found PBL for virtual CQ. nespbl=%p.\n",
+ nespbl);
+ break;
+ }
+ }
+ if (err) {
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(err);
+ }
+
+ pbl_entries = nespbl->pbl_size >> 3;
+ nescq->cq_mem_size = 0;
+ } else {
+ nescq->cq_mem_size = nescq->hw_cq.cq_size * sizeof(struct nes_hw_cqe);
+ nes_debug(NES_DBG_CQ, "Attempting to allocate pci memory (%u entries, %u bytes) for CQ%u.\n",
+ entries, nescq->cq_mem_size, nescq->hw_cq.cq_number);
+
+ /* allocate the physical buffer space */
+ mem = pci_alloc_consistent(nesdev->pcidev, nescq->cq_mem_size,
+ &nescq->hw_cq.cq_pbase);
+ if (!mem) {
+ printk(KERN_ERR PFX "Unable to allocate pci memory for cq\n");
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memset(mem, 0, nescq->cq_mem_size);
+ nescq->hw_cq.cq_vbase = mem;
+ nescq->hw_cq.cq_head = 0;
+ nes_debug(NES_DBG_CQ, "CQ%u virtual address @ %p, phys = 0x%08X\n",
+ nescq->hw_cq.cq_number, nescq->hw_cq.cq_vbase,
+ (u32)nescq->hw_cq.cq_pbase);
+ }
+
+ nescq->hw_cq.ce_handler = nes_iwarp_ce_handler;
+ spin_lock_init(&nescq->lock);
+
+ /* send CreateCQ request to CQP */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n");
+ if (!context)
+ pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
+ nescq->hw_cq.cq_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(-ENOMEM);
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ opcode = NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
+ NES_CQP_CQ_CHK_OVERFLOW |
+ NES_CQP_CQ_CEQE_MASK | ((u32)nescq->hw_cq.cq_size << 16);
+
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+
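+	/* a user (virtually contiguous) CQ needs a PBL: more than 32 page */
+	/* entries requires a 4KB PBL, otherwise a 256-byte PBL is enough */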
+ if (pbl_entries != 1) {
+ if (pbl_entries > 32) {
+ /* use 4k pbl */
+ nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 4k PBL\n", pbl_entries);
+ if (nesadapter->free_4kpbl == 0) {
+ if (cqp_request->dynamic) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kfree(cqp_request);
+ } else {
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ }
+ if (!context)
+ pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
+ nescq->hw_cq.cq_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(-ENOMEM);
+ } else {
+ opcode |= (NES_CQP_CQ_VIRT | NES_CQP_CQ_4KB_CHUNK);
+ nescq->virtual_cq = 2;
+ nesadapter->free_4kpbl--;
+ }
+ } else {
+ /* use 256 byte pbl */
+ nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 256 byte PBL\n", pbl_entries);
+ if (nesadapter->free_256pbl == 0) {
+ if (cqp_request->dynamic) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kfree(cqp_request);
+ } else {
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ }
+ if (!context)
+ pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
+ nescq->hw_cq.cq_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(-ENOMEM);
+ } else {
+ opcode |= NES_CQP_CQ_VIRT;
+ nescq->virtual_cq = 1;
+ nesadapter->free_256pbl--;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
+ (nescq->hw_cq.cq_number | ((u32)nesdev->ceq_index << 16)));
+
+ if (context) {
+ if (pbl_entries != 1)
+ u64temp = (u64)nespbl->pbl_pbase;
+ else
+ u64temp = le64_to_cpu(nespbl->pbl_vbase[0]);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX,
+ nes_ucontext->mmap_db_index[0]);
+ } else {
+ u64temp = (u64)nescq->hw_cq.cq_pbase;
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;
+ }
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
+ u64temp = (u64)(unsigned long)&nescq->hw_cq;
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] =
+ cpu_to_le32((u32)(u64temp >> 1));
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
+ cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ nes_debug(NES_DBG_CQ, "Waiting for create iWARP CQ%u to complete.\n",
+ nescq->hw_cq.cq_number);
+ ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
+ NES_EVENT_TIMEOUT * 2);
+ nes_debug(NES_DBG_CQ, "Create iWARP CQ%u completed, wait_event_timeout ret = %d.\n",
+ nescq->hw_cq.cq_number, ret);
+ if ((!ret) || (cqp_request->major_code)) {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ nes_debug(NES_DBG_CQ, "iWARP CQ%u create timeout expired, major code = 0x%04X,"
+ " minor code = 0x%04X\n",
+ nescq->hw_cq.cq_number, cqp_request->major_code, cqp_request->minor_code);
+ if (!context)
+ pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
+ nescq->hw_cq.cq_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(-EIO);
+ } else {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ }
+
+ if (context) {
+ /* free the nespbl */
+ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
+ nespbl->pbl_pbase);
+ kfree(nespbl);
+ resp.cq_id = nescq->hw_cq.cq_number;
+ resp.cq_size = nescq->hw_cq.cq_size;
+ resp.mmap_db_index = 0;
+ if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(-EFAULT);
+ }
+ }
+
+ return &nescq->ibcq;
+}
+
+
+/**
+ * nes_destroy_cq
+ */
+static int nes_destroy_cq(struct ib_cq *ib_cq)
+{
+ struct nes_cq *nescq;
+ struct nes_device *nesdev;
+ struct nes_vnic *nesvnic;
+ struct nes_adapter *nesadapter;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_cqp_request *cqp_request;
+ unsigned long flags;
+ u32 opcode = 0;
+ int ret;
+
+ if (ib_cq == NULL)
+ return 0;
+
+ nescq = to_nescq(ib_cq);
+ nesvnic = to_nesvnic(ib_cq->device);
+ nesdev = nesvnic->nesdev;
+ nesadapter = nesdev->nesadapter;
+
+ nes_debug(NES_DBG_CQ, "Destroy CQ%u\n", nescq->hw_cq.cq_number);
+
+ /* Send DestroyCQ request to CQP */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n");
+ return -ENOMEM;
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+ opcode = NES_CQP_DESTROY_CQ | (nescq->hw_cq.cq_size << 16);
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ if (nescq->virtual_cq == 1) {
+ nesadapter->free_256pbl++;
+ if (nesadapter->free_256pbl > nesadapter->max_256pbl) {
+ printk(KERN_ERR PFX "%s: free 256B PBLs(%u) has exceeded the max(%u)\n",
+ __FUNCTION__, nesadapter->free_256pbl, nesadapter->max_256pbl);
+ }
+ } else if (nescq->virtual_cq == 2) {
+ nesadapter->free_4kpbl++;
+ if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) {
+ printk(KERN_ERR PFX "%s: free 4K PBLs(%u) has exceeded the max(%u)\n",
+ __FUNCTION__, nesadapter->free_4kpbl, nesadapter->max_4kpbl);
+ }
+ opcode |= NES_CQP_CQ_4KB_CHUNK;
+ }
+
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
+ (nescq->hw_cq.cq_number | ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 16)));
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, nescq->hw_cq.cq_number);
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ nes_debug(NES_DBG_CQ, "Waiting for destroy iWARP CQ%u to complete.\n",
+ nescq->hw_cq.cq_number);
+ ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_CQ, "Destroy iWARP CQ%u completed, wait_event_timeout ret = %u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+ nescq->hw_cq.cq_number, ret, cqp_request->major_code,
+ cqp_request->minor_code);
+ if ((!ret) || (cqp_request->major_code)) {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ if (!ret) {
+ nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy timeout expired\n",
+ nescq->hw_cq.cq_number);
+ ret = -ETIME;
+ } else {
+ nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy failed\n",
+ nescq->hw_cq.cq_number);
+ ret = -EIO;
+ }
+ } else {
+ ret = 0;
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ }
+
+ if (nescq->cq_mem_size)
+ pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size,
+ (void *)nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
+ kfree(nescq);
+
+ return ret;
+}
+
+
+/**
+ * nes_reg_mr
+ */
+static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
+ u32 stag, u64 region_length, struct nes_root_vpbl *root_vpbl,
+ dma_addr_t single_buffer, u16 pbl_count, u16 residual_page_count,
+ int acc, u64 *iova_start)
+{
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_cqp_request *cqp_request;
+ unsigned long flags;
+ int ret;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ /* int count; */
+ u32 opcode = 0;
+ u16 major_code;
+
+ /* Register the region with the adapter */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
+ return -ENOMEM;
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ /* track PBL resources */
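+	/* a two-level registration consumes pbl_count leaf 4KB PBLs plus one */
+	/* root 4KB PBL; a single-level registration uses one 4KB PBL when more */
+	/* than 32 pages are described, otherwise one 256-byte PBL */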
+ if (pbl_count != 0) {
+ if (pbl_count > 1) {
+ /* Two level PBL */
+ if ((pbl_count+1) > nesadapter->free_4kpbl) {
+ nes_debug(NES_DBG_MR, "Out of 4KB Pbls for two level request.\n");
+ if (cqp_request->dynamic) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kfree(cqp_request);
+ } else {
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ }
+ return -ENOMEM;
+ } else {
+ nesadapter->free_4kpbl -= pbl_count+1;
+ }
+ } else if (residual_page_count > 32) {
+ if (pbl_count > nesadapter->free_4kpbl) {
+ nes_debug(NES_DBG_MR, "Out of 4KB Pbls.\n");
+ if (cqp_request->dynamic) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kfree(cqp_request);
+ } else {
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ }
+ return -ENOMEM;
+ } else {
+ nesadapter->free_4kpbl -= pbl_count;
+ }
+ } else {
+ if (pbl_count > nesadapter->free_256pbl) {
+ nes_debug(NES_DBG_MR, "Out of 256B Pbls.\n");
+ if (cqp_request->dynamic) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kfree(cqp_request);
+ } else {
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ }
+ return -ENOMEM;
+ } else {
+ nesadapter->free_256pbl -= pbl_count;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+
+ opcode = NES_CQP_REGISTER_STAG | NES_CQP_STAG_RIGHTS_LOCAL_READ |
+ NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR;
+ if (acc & IB_ACCESS_LOCAL_WRITE)
+ opcode |= NES_CQP_STAG_RIGHTS_LOCAL_WRITE;
+ if (acc & IB_ACCESS_REMOTE_WRITE)
+ opcode |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_REM_ACC_EN;
+ if (acc & IB_ACCESS_REMOTE_READ)
+ opcode |= NES_CQP_STAG_RIGHTS_REMOTE_READ | NES_CQP_STAG_REM_ACC_EN;
+ if (acc & IB_ACCESS_MW_BIND)
+ opcode |= NES_CQP_STAG_RIGHTS_WINDOW_BIND | NES_CQP_STAG_REM_ACC_EN;
+
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_VA_LOW_IDX, *iova_start);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_LOW_IDX, region_length);
+
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] =
+ cpu_to_le32((u32)(region_length >> 8) & 0xff000000);
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] |=
+ cpu_to_le32(nespd->pd_id & 0x00007fff);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
+
+ if (pbl_count == 0) {
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, single_buffer);
+ } else {
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, root_vpbl->pbl_pbase);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, pbl_count);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX,
+ (((pbl_count - 1) * 4096) + (residual_page_count*8)));
+
+ if ((pbl_count > 1) || (residual_page_count > 32))
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE);
+ }
+ barrier();
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+ stag, ret, cqp_request->major_code, cqp_request->minor_code);
+ major_code = cqp_request->major_code;
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+	if (!ret)
+		return -ETIME;
+	else if (major_code)
+		return -EIO;
+
+	return 0;
+}
+
+
+/**
+ * nes_reg_phys_mr
+ */
+static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
+ struct ib_phys_buf *buffer_list, int num_phys_buf, int acc,
+ u64 * iova_start)
+{
+ u64 region_length;
+ struct nes_pd *nespd = to_nespd(ib_pd);
+ struct nes_vnic *nesvnic = to_nesvnic(ib_pd->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_mr *nesmr;
+ struct ib_mr *ibmr;
+ struct nes_vpbl vpbl;
+ struct nes_root_vpbl root_vpbl;
+ u32 stag;
+ u32 i;
+ u32 stag_index = 0;
+ u32 next_stag_index = 0;
+ u32 driver_key = 0;
+ u32 root_pbl_index = 0;
+ u32 cur_pbl_index = 0;
+ int err = 0, pbl_depth = 0;
+ int ret = 0;
+ u16 pbl_count = 0;
+ u8 single_page = 1;
+ u8 stag_key = 0;
+
+ pbl_depth = 0;
+ region_length = 0;
+ vpbl.pbl_vbase = NULL;
+ root_vpbl.pbl_vbase = NULL;
+ root_vpbl.pbl_pbase = 0;
+
+ get_random_bytes(&next_stag_index, sizeof(next_stag_index));
+ stag_key = (u8)next_stag_index;
+
+ driver_key = 0;
+
+ next_stag_index >>= 8;
+ next_stag_index %= nesadapter->max_mr;
+ if (num_phys_buf > (1024*512)) {
+ return ERR_PTR(-E2BIG);
+ }
+
+ err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs, nesadapter->max_mr,
+ &stag_index, &next_stag_index);
+ if (err) {
+ return ERR_PTR(err);
+ }
+
+ nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
+ if (!nesmr) {
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ return ERR_PTR(-ENOMEM);
+ }
+
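+	/* each leaf PBL page holds 512 buffer entries; once a second leaf is */
+	/* needed, allocate a root PBL that points at the leaves */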
+ for (i = 0; i < num_phys_buf; i++) {
+
+ if ((i & 0x01FF) == 0) {
+ if (root_pbl_index == 1) {
+ /* Allocate the root PBL */
+ root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192,
+ &root_vpbl.pbl_pbase);
+ nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
+ root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
+ if (!root_vpbl.pbl_vbase) {
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ kfree(nesmr);
+ return ERR_PTR(-ENOMEM);
+ }
+ root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
+ if (!root_vpbl.leaf_vpbl) {
+ pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
+ root_vpbl.pbl_pbase);
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ kfree(nesmr);
+ return ERR_PTR(-ENOMEM);
+ }
+ root_vpbl.pbl_vbase[0].pa_low = cpu_to_le32((u32)vpbl.pbl_pbase);
+ root_vpbl.pbl_vbase[0].pa_high =
+ cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
+ root_vpbl.leaf_vpbl[0] = vpbl;
+ }
+ /* Allocate a 4K buffer for the PBL */
+ vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
+ &vpbl.pbl_pbase);
+ nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%016lX\n",
+ vpbl.pbl_vbase, (unsigned long)vpbl.pbl_pbase);
+ if (!vpbl.pbl_vbase) {
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ ibmr = ERR_PTR(-ENOMEM);
+ kfree(nesmr);
+ goto reg_phys_err;
+ }
+ /* Fill in the root table */
+ if (1 <= root_pbl_index) {
+ root_vpbl.pbl_vbase[root_pbl_index].pa_low =
+ cpu_to_le32((u32)vpbl.pbl_pbase);
+ root_vpbl.pbl_vbase[root_pbl_index].pa_high =
+ cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
+ root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
+ }
+ root_pbl_index++;
+ cur_pbl_index = 0;
+ }
+ if (buffer_list[i].addr & ~PAGE_MASK) {
+ /* TODO: Unwind allocated buffers */
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n",
+ (unsigned int) buffer_list[i].addr);
+ ibmr = ERR_PTR(-EINVAL);
+ kfree(nesmr);
+ goto reg_phys_err;
+ }
+
+ if (!buffer_list[i].size) {
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ nes_debug(NES_DBG_MR, "Invalid Buffer Size\n");
+ ibmr = ERR_PTR(-EINVAL);
+ kfree(nesmr);
+ goto reg_phys_err;
+ }
+
+ region_length += buffer_list[i].size;
+ if ((i != 0) && (single_page)) {
+ if ((buffer_list[i-1].addr+PAGE_SIZE) != buffer_list[i].addr)
+ single_page = 0;
+ }
+ vpbl.pbl_vbase[cur_pbl_index].pa_low = cpu_to_le32((u32)buffer_list[i].addr);
+ vpbl.pbl_vbase[cur_pbl_index++].pa_high =
+ cpu_to_le32((u32)((((u64)buffer_list[i].addr) >> 32)));
+ }
+
+ stag = stag_index << 8;
+ stag |= driver_key;
+ stag += (u32)stag_key;
+
+ nes_debug(NES_DBG_MR, "Registering STag 0x%08X, VA = 0x%016lX,"
+ " length = 0x%016lX, index = 0x%08X\n",
+ stag, (unsigned long)*iova_start, (unsigned long)region_length, stag_index);
+
+ region_length -= (*iova_start)&PAGE_MASK;
+
+ /* Make the leaf PBL the root if only one PBL */
+ if (root_pbl_index == 1) {
+ root_vpbl.pbl_pbase = vpbl.pbl_pbase;
+ }
+
+ if (single_page) {
+ pbl_count = 0;
+ } else {
+ pbl_count = root_pbl_index;
+ }
+ ret = nes_reg_mr(nesdev, nespd, stag, region_length, &root_vpbl,
+ buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start);
+
+ if (ret == 0) {
+ nesmr->ibmr.rkey = stag;
+ nesmr->ibmr.lkey = stag;
+ nesmr->mode = IWNES_MEMREG_TYPE_MEM;
+ ibmr = &nesmr->ibmr;
+ nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0;
+ nesmr->pbls_used = pbl_count;
+ if (pbl_count > 1) {
+ nesmr->pbls_used++;
+ }
+ } else {
+ kfree(nesmr);
+ ibmr = ERR_PTR(-ENOMEM);
+ }
+
+ reg_phys_err:
+ /* free the resources */
+ if (root_pbl_index == 1) {
+ /* single PBL case */
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase, vpbl.pbl_pbase);
+ } else {
+ for (i=0; i<root_pbl_index; i++) {
+ pci_free_consistent(nesdev->pcidev, 4096, root_vpbl.leaf_vpbl[i].pbl_vbase,
+ root_vpbl.leaf_vpbl[i].pbl_pbase);
+ }
+ kfree(root_vpbl.leaf_vpbl);
+ pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
+ root_vpbl.pbl_pbase);
+ }
+
+ return ibmr;
+}
+
+
+/**
+ * nes_get_dma_mr
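+ * Register a memory region spanning all of physical memory (a single buffer of
+ * maximum size starting at address 0) by reusing nes_reg_phys_mr().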
+ */
+static struct ib_mr *nes_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct ib_phys_buf bl;
+ u64 kva = 0;
+
+ nes_debug(NES_DBG_MR, "\n");
+
+ bl.size = (u64)0xffffffffffULL;
+ bl.addr = 0;
+ return nes_reg_phys_mr(pd, &bl, 1, acc, &kva);
+}
+
+
+/**
+ * nes_reg_user_mr
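+ * Pin the user buffer with ib_umem_get() and either build the leaf/root PBLs and
+ * register an STag with the adapter (IWNES_MEMREG_TYPE_MEM), or save the page list
+ * on the ucontext for a later QP/CQ allocation.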
+ */
+static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt, int acc, struct ib_udata *udata)
+{
+ u64 iova_start;
+ __le64 *pbl;
+ u64 region_length;
+ dma_addr_t last_dma_addr = 0;
+ dma_addr_t first_dma_addr = 0;
+ struct nes_pd *nespd = to_nespd(pd);
+ struct nes_vnic *nesvnic = to_nesvnic(pd->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct ib_mr *ibmr = ERR_PTR(-EINVAL);
+ struct ib_umem_chunk *chunk;
+ struct nes_ucontext *nes_ucontext;
+ struct nes_pbl *nespbl;
+ struct nes_mr *nesmr;
+ struct ib_umem *region;
+ struct nes_mem_reg_req req;
+ struct nes_vpbl vpbl;
+ struct nes_root_vpbl root_vpbl;
+ int nmap_index, page_index;
+ int page_count = 0;
+ int err, pbl_depth = 0;
+ int chunk_pages;
+ int ret;
+ u32 stag;
+ u32 stag_index = 0;
+ u32 next_stag_index;
+ u32 driver_key;
+ u32 root_pbl_index = 0;
+ u32 cur_pbl_index = 0;
+ u32 skip_pages;
+ u16 pbl_count;
+ u8 single_page = 1;
+ u8 stag_key;
+
+ region = ib_umem_get(pd->uobject->context, start, length, acc);
+ if (IS_ERR(region)) {
+ return (struct ib_mr *)region;
+ }
+
+ nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u,"
+ " offset = %u, page size = %u.\n",
+ (unsigned long int)start, (unsigned long int)virt, (u32)length,
+ region->offset, region->page_size);
+
+ skip_pages = ((u32)region->offset) >> 12;
+
+ if (ib_copy_from_udata(&req, udata, sizeof(req)))
+ return ERR_PTR(-EFAULT);
+ nes_debug(NES_DBG_MR, "Memory Registration type = %08X.\n", req.reg_type);
+
+ switch (req.reg_type) {
+ case IWNES_MEMREG_TYPE_MEM:
+ pbl_depth = 0;
+ region_length = 0;
+ vpbl.pbl_vbase = NULL;
+ root_vpbl.pbl_vbase = NULL;
+ root_vpbl.pbl_pbase = 0;
+
+ get_random_bytes(&next_stag_index, sizeof(next_stag_index));
+ stag_key = (u8)next_stag_index;
+
+ driver_key = next_stag_index & 0x70000000;
+
+ next_stag_index >>= 8;
+ next_stag_index %= nesadapter->max_mr;
+
+ err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
+ nesadapter->max_mr, &stag_index, &next_stag_index);
+ if (err) {
+ ib_umem_release(region);
+ return ERR_PTR(err);
+ }
+
+ nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
+ if (!nesmr) {
+ ib_umem_release(region);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ return ERR_PTR(-ENOMEM);
+ }
+ nesmr->region = region;
+
+ list_for_each_entry(chunk, &region->chunk_list, list) {
+ nes_debug(NES_DBG_MR, "Chunk: nents = %u, nmap = %u .\n",
+ chunk->nents, chunk->nmap);
+ for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
+ if (sg_dma_address(&chunk->page_list[nmap_index]) & ~PAGE_MASK) {
+ ib_umem_release(region);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n",
+ (unsigned int) sg_dma_address(&chunk->page_list[nmap_index]));
+ ibmr = ERR_PTR(-EINVAL);
+ kfree(nesmr);
+ goto reg_user_mr_err;
+ }
+
+ if (!sg_dma_len(&chunk->page_list[nmap_index])) {
+ ib_umem_release(region);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ stag_index);
+ nes_debug(NES_DBG_MR, "Invalid Buffer Size\n");
+ ibmr = ERR_PTR(-EINVAL);
+ kfree(nesmr);
+ goto reg_user_mr_err;
+ }
+
+ region_length += sg_dma_len(&chunk->page_list[nmap_index]);
+ chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12;
+ region_length -= skip_pages << 12;
+ for (page_index=skip_pages; page_index < chunk_pages; page_index++) {
+ skip_pages = 0;
+ if ((page_count != 0) && ((page_count << 12) - (region->offset & (4096 - 1)) >= region->length))
+ goto enough_pages;
+ if ((page_count&0x01FF) == 0) {
+ if (page_count>(1024*512)) {
+ ib_umem_release(region);
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter,
+ nesadapter->allocated_mrs, stag_index);
+ kfree(nesmr);
+ ibmr = ERR_PTR(-E2BIG);
+ goto reg_user_mr_err;
+ }
+ if (root_pbl_index == 1) {
+ root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
+ 8192, &root_vpbl.pbl_pbase);
+ nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
+ root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
+ if (!root_vpbl.pbl_vbase) {
+ ib_umem_release(region);
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ stag_index);
+ kfree(nesmr);
+ ibmr = ERR_PTR(-ENOMEM);
+ goto reg_user_mr_err;
+ }
+ root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024,
+ GFP_KERNEL);
+ if (!root_vpbl.leaf_vpbl) {
+ ib_umem_release(region);
+ pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
+ root_vpbl.pbl_pbase);
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ stag_index);
+ kfree(nesmr);
+ ibmr = ERR_PTR(-ENOMEM);
+ goto reg_user_mr_err;
+ }
+ root_vpbl.pbl_vbase[0].pa_low =
+ cpu_to_le32((u32)vpbl.pbl_pbase);
+ root_vpbl.pbl_vbase[0].pa_high =
+ cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
+ root_vpbl.leaf_vpbl[0] = vpbl;
+ }
+ vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
+ &vpbl.pbl_pbase);
+ nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n",
+ vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
+ if (!vpbl.pbl_vbase) {
+ ib_umem_release(region);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ ibmr = ERR_PTR(-ENOMEM);
+ kfree(nesmr);
+ goto reg_user_mr_err;
+ }
+ if (1 <= root_pbl_index) {
+ root_vpbl.pbl_vbase[root_pbl_index].pa_low =
+ cpu_to_le32((u32)vpbl.pbl_pbase);
+ root_vpbl.pbl_vbase[root_pbl_index].pa_high =
+ cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
+ root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
+ }
+ root_pbl_index++;
+ cur_pbl_index = 0;
+ }
+ if (single_page) {
+ if (page_count != 0) {
+ if ((last_dma_addr+4096) !=
+ (sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*4096)))
+ single_page = 0;
+ last_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*4096);
+ } else {
+ first_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*4096);
+ last_dma_addr = first_dma_addr;
+ }
+ }
+
+ vpbl.pbl_vbase[cur_pbl_index].pa_low =
+ cpu_to_le32((u32)(sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*4096)));
+ vpbl.pbl_vbase[cur_pbl_index].pa_high =
+ cpu_to_le32((u32)((((u64)(sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*4096))) >> 32)));
+ cur_pbl_index++;
+ page_count++;
+ }
+ }
+ }
+ enough_pages:
+ nes_debug(NES_DBG_MR, "calculating stag, stag_index=0x%08x, driver_key=0x%08x,"
+ " stag_key=0x%08x\n",
+ stag_index, driver_key, stag_key);
+ stag = stag_index << 8;
+ stag |= driver_key;
+ stag += (u32)stag_key;
+ if (stag == 0) {
+ stag = 1;
+ }
+
+ iova_start = virt;
+ /* Make the leaf PBL the root if only one PBL */
+ if (root_pbl_index == 1) {
+ root_vpbl.pbl_pbase = vpbl.pbl_pbase;
+ }
+
+ if (single_page) {
+ pbl_count = 0;
+ } else {
+ pbl_count = root_pbl_index;
+ first_dma_addr = 0;
+ }
+ nes_debug(NES_DBG_MR, "Registering STag 0x%08X, VA = 0x%08X, length = 0x%08X,"
+ " index = 0x%08X, region->length=0x%08llx, pbl_count = %u\n",
+ stag, (unsigned int)iova_start,
+ (unsigned int)region_length, stag_index,
+ (unsigned long long)region->length, pbl_count);
+ ret = nes_reg_mr( nesdev, nespd, stag, region->length, &root_vpbl,
+ first_dma_addr, pbl_count, (u16)cur_pbl_index, acc, &iova_start);
+
+ nes_debug(NES_DBG_MR, "ret=%d\n", ret);
+
+ if (ret == 0) {
+ nesmr->ibmr.rkey = stag;
+ nesmr->ibmr.lkey = stag;
+ nesmr->mode = IWNES_MEMREG_TYPE_MEM;
+ ibmr = &nesmr->ibmr;
+ nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0;
+ nesmr->pbls_used = pbl_count;
+ if (pbl_count > 1) {
+ nesmr->pbls_used++;
+ }
+ } else {
+ ib_umem_release(region);
+ kfree(nesmr);
+ ibmr = ERR_PTR(-ENOMEM);
+ }
+
+ reg_user_mr_err:
+ /* free the resources */
+ if (root_pbl_index == 1) {
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ } else {
+ for (page_index=0; page_index<root_pbl_index; page_index++) {
+ pci_free_consistent(nesdev->pcidev, 4096,
+ root_vpbl.leaf_vpbl[page_index].pbl_vbase,
+ root_vpbl.leaf_vpbl[page_index].pbl_pbase);
+ }
+ kfree(root_vpbl.leaf_vpbl);
+ pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
+ root_vpbl.pbl_pbase);
+ }
+
+ nes_debug(NES_DBG_MR, "Leaving, ibmr=%p", ibmr);
+
+ return ibmr;
+ case IWNES_MEMREG_TYPE_QP:
+ case IWNES_MEMREG_TYPE_CQ:
+ nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
+ if (!nespbl) {
+ nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
+ ib_umem_release(region);
+ return ERR_PTR(-ENOMEM);
+ }
+ nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
+ if (!nesmr) {
+ ib_umem_release(region);
+ kfree(nespbl);
+ nes_debug(NES_DBG_MR, "Unable to allocate nesmr\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ nesmr->region = region;
+ nes_ucontext = to_nesucontext(pd->uobject->context);
+ pbl_depth = region->length >> 12;
+ pbl_depth += (region->length & (4096-1)) ? 1 : 0;
+ nespbl->pbl_size = pbl_depth*sizeof(u64);
+ if (req.reg_type == IWNES_MEMREG_TYPE_QP) {
+ nes_debug(NES_DBG_MR, "Attempting to allocate QP PBL memory");
+ } else {
+ nes_debug(NES_DBG_MR, "Attempting to allocate CP PBL memory");
+ }
+
+ nes_debug(NES_DBG_MR, " %u bytes, %u entries.\n",
+ nespbl->pbl_size, pbl_depth);
+ pbl = pci_alloc_consistent(nesdev->pcidev, nespbl->pbl_size,
+ &nespbl->pbl_pbase);
+ if (!pbl) {
+ ib_umem_release(region);
+ kfree(nesmr);
+ kfree(nespbl);
+ nes_debug(NES_DBG_MR, "Unable to allocate PBL memory\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ nespbl->pbl_vbase = (u64 *)pbl;
+ nespbl->user_base = start;
+ nes_debug(NES_DBG_MR, "Allocated PBL memory, %u bytes, pbl_pbase=%p,"
+ " pbl_vbase=%p user_base=0x%lx\n",
+ nespbl->pbl_size, (void *)nespbl->pbl_pbase,
+ (void*)nespbl->pbl_vbase, nespbl->user_base);
+
+ list_for_each_entry(chunk, &region->chunk_list, list) {
+ for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
+ chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12;
+ chunk_pages += (sg_dma_len(&chunk->page_list[nmap_index]) & (4096-1)) ? 1 : 0;
+ nespbl->page = sg_page(&chunk->page_list[0]);
+ for (page_index=0; page_index<chunk_pages; page_index++) {
+ ((__le32 *)pbl)[0] = cpu_to_le32((u32)
+ (sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*4096)));
+ ((__le32 *)pbl)[1] = cpu_to_le32(((u64)
+ (sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*4096)))>>32);
+ nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl,
+ (unsigned long long)*pbl,
+ le32_to_cpu(((__le32 *)pbl)[1]), le32_to_cpu(((__le32 *)pbl)[0]));
+ pbl++;
+ }
+ }
+ }
+ if (req.reg_type == IWNES_MEMREG_TYPE_QP) {
+ list_add_tail(&nespbl->list, &nes_ucontext->qp_reg_mem_list);
+ } else {
+ list_add_tail(&nespbl->list, &nes_ucontext->cq_reg_mem_list);
+ }
+ nesmr->ibmr.rkey = -1;
+ nesmr->ibmr.lkey = -1;
+ nesmr->mode = req.reg_type;
+ return &nesmr->ibmr;
+ }
+
+ return ERR_PTR(-ENOSYS);
+}
+
+
+/**
+ * nes_dereg_mr
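+ * Release the pinned user memory, return any PBLs to the adapter's free pools and,
+ * for MEM type regions, issue a CQP deallocate-STag request and wait for it.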
+ */
+static int nes_dereg_mr(struct ib_mr *ib_mr)
+{
+ struct nes_mr *nesmr = to_nesmr(ib_mr);
+ struct nes_vnic *nesvnic = to_nesvnic(ib_mr->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_cqp_request *cqp_request;
+ unsigned long flags;
+ int ret;
+ u16 major_code;
+ u16 minor_code;
+
+ if (nesmr->region) {
+ ib_umem_release(nesmr->region);
+ }
+ if (nesmr->mode != IWNES_MEMREG_TYPE_MEM) {
+ kfree(nesmr);
+ return 0;
+ }
+
+ /* Deallocate the region with the adapter */
+
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
+ return -ENOMEM;
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ if (nesmr->pbls_used != 0) {
+ if (nesmr->pbl_4k) {
+ nesadapter->free_4kpbl += nesmr->pbls_used;
+ if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) {
+ printk(KERN_ERR PFX "free 4KB PBLs(%u) has exceeded the max(%u)\n",
+ nesadapter->free_4kpbl, nesadapter->max_4kpbl);
+ }
+ } else {
+ nesadapter->free_256pbl += nesmr->pbls_used;
+ if (nesadapter->free_256pbl > nesadapter->max_256pbl) {
+ printk(KERN_ERR PFX "free 256B PBLs(%u) has exceeded the max(%u)\n",
+ nesadapter->free_256pbl, nesadapter->max_256pbl);
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ NES_CQP_DEALLOCATE_STAG | NES_CQP_STAG_VA_TO |
+ NES_CQP_STAG_DEALLOC_PBLS | NES_CQP_STAG_MR);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, ib_mr->rkey);
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ nes_debug(NES_DBG_MR, "Waiting for deallocate STag 0x%08X completed\n", ib_mr->rkey);
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_MR, "Deallocate STag 0x%08X completed, wait_event_timeout ret = %u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X\n",
+ ib_mr->rkey, ret, cqp_request->major_code, cqp_request->minor_code);
+
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ (ib_mr->rkey & 0x0fffff00) >> 8);
+
+ kfree(nesmr);
+
+ major_code = cqp_request->major_code;
+ minor_code = cqp_request->minor_code;
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ if (!ret) {
+ nes_debug(NES_DBG_MR, "Timeout waiting to destroy STag,"
+ " ib_mr=%p, rkey = 0x%08X\n",
+ ib_mr, ib_mr->rkey);
+ return -ETIME;
+ } else if (major_code) {
+ nes_debug(NES_DBG_MR, "Error (0x%04X:0x%04X) while attempting"
+ " to destroy STag, ib_mr=%p, rkey = 0x%08X\n",
+ major_code, minor_code, ib_mr, ib_mr->rkey);
+ return -EIO;
+ } else
+ return 0;
+}
+
+
+/**
+ * show_rev
+ */
+static ssize_t show_rev(struct class_device *cdev, char *buf)
+{
+ struct nes_ib_device *nesibdev =
+ container_of(cdev, struct nes_ib_device, ibdev.class_dev);
+ struct nes_vnic *nesvnic = nesibdev->nesvnic;
+
+ nes_debug(NES_DBG_INIT, "\n");
+ return sprintf(buf, "%x\n", nesvnic->nesdev->nesadapter->hw_rev);
+}
+
+
+/**
+ * show_fw_ver
+ */
+static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
+{
+ struct nes_ib_device *nesibdev =
+ container_of(cdev, struct nes_ib_device, ibdev.class_dev);
+ struct nes_vnic *nesvnic = nesibdev->nesvnic;
+
+ nes_debug(NES_DBG_INIT, "\n");
+ return sprintf(buf, "%x.%x.%x\n",
+ (int)(nesvnic->nesdev->nesadapter->fw_ver >> 32),
+ (int)(nesvnic->nesdev->nesadapter->fw_ver >> 16) & 0xffff,
+ (int)(nesvnic->nesdev->nesadapter->fw_ver & 0xffff));
+}
+
+
+/**
+ * show_hca
+ */
+static ssize_t show_hca(struct class_device *cdev, char *buf)
+{
+ nes_debug(NES_DBG_INIT, "\n");
+ return sprintf(buf, "NES020\n");
+}
+
+
+/**
+ * show_board
+ */
+static ssize_t show_board(struct class_device *cdev, char *buf)
+{
+ nes_debug(NES_DBG_INIT, "\n");
+ return sprintf(buf, "%.*s\n", 32, "NES020 Board ID");
+}
+
+
+static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
+static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+
+static struct class_device_attribute *nes_class_attributes[] = {
+ &class_device_attr_hw_rev,
+ &class_device_attr_fw_ver,
+ &class_device_attr_hca_type,
+ &class_device_attr_board_id
+};
+
+
+/**
+ * nes_query_qp
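+ * Report the SQ/RQ sizes and the initial attributes of the QP.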
+ */
+static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct nes_qp *nesqp = to_nesqp(ibqp);
+
+ nes_debug(NES_DBG_QP, "\n");
+
+ attr->qp_access_flags = 0;
+ attr->cap.max_send_wr = nesqp->hwqp.sq_size;
+ attr->cap.max_recv_wr = nesqp->hwqp.rq_size;
+ attr->cap.max_recv_sge = 1;
+ if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
+ init_attr->cap.max_inline_data = 0;
+ } else {
+ init_attr->cap.max_inline_data = 64;
+ }
+
+ init_attr->event_handler = nesqp->ibqp.event_handler;
+ init_attr->qp_context = nesqp->ibqp.qp_context;
+ init_attr->send_cq = nesqp->ibqp.send_cq;
+ init_attr->recv_cq = nesqp->ibqp.recv_cq;
+ init_attr->srq = nesqp->ibqp.srq;
+ init_attr->cap = attr->cap;
+
+ return 0;
+}
+
+
+/**
+ * nes_hw_modify_qp
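+ * Build and post a CQP modify-QP WQE for the requested iWARP state and, if asked,
+ * wait for the CQP completion and return its status.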
+ */
+int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
+ u32 next_iwarp_state, u32 wait_completion)
+{
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ /* struct iw_cm_id *cm_id = nesqp->cm_id; */
+ /* struct iw_cm_event cm_event; */
+ struct nes_cqp_request *cqp_request;
+ unsigned long flags;
+ int ret;
+ u16 major_code;
+
+ nes_debug(NES_DBG_MOD_QP, "QP%u, refcount=%d\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount));
+
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_MOD_QP, "Failed to get a cqp_request.\n");
+ return -ENOMEM;
+ }
+ if (wait_completion) {
+ cqp_request->waiting = 1;
+ } else {
+ cqp_request->waiting = 0;
+ }
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ NES_CQP_MODIFY_QP | NES_CQP_QP_TYPE_IWARP | next_iwarp_state);
+ nes_debug(NES_DBG_MOD_QP, "using next_iwarp_state=%08x, wqe_words=%08x\n",
+ next_iwarp_state, le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX]));
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase);
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ if (wait_completion) {
+ /* nes_debug(NES_DBG_MOD_QP, "Waiting for modify iWARP QP%u to complete.\n",
+ nesqp->hwqp.qp_id); */
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_MOD_QP, "Modify iwarp QP%u completed, wait_event_timeout ret=%u, "
+ "CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+ nesqp->hwqp.qp_id, ret, cqp_request->major_code, cqp_request->minor_code);
+ major_code = cqp_request->major_code;
+ if (major_code) {
+ nes_debug(NES_DBG_MOD_QP, "Modify iwarp QP%u failed"
+ "CQP Major:Minor codes = 0x%04X:0x%04X, intended next state = 0x%08X.\n",
+ nesqp->hwqp.qp_id, cqp_request->major_code,
+ cqp_request->minor_code, next_iwarp_state);
+ }
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ if (!ret)
+ return -ETIME;
+ else if (major_code)
+ return -EIO;
+ else
+ return 0;
+ } else {
+ return 0;
+ }
+}
+
+
+/**
+ * nes_modify_qp
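+ * Map the requested IB QP state to the matching iWARP/CQP state, update the QP
+ * context access flags, issue the hardware modify and handle disconnect bookkeeping.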
+ */
+int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct nes_qp *nesqp = to_nesqp(ibqp);
+ struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ /* u32 cqp_head; */
+ /* u32 counter; */
+ u32 next_iwarp_state = 0;
+ int err;
+ unsigned long qplockflags;
+ int ret;
+ u16 original_last_aeq;
+ u8 issue_modify_qp = 0;
+ u8 issue_disconnect = 0;
+ u8 dont_wait = 0;
+
+ nes_debug(NES_DBG_MOD_QP, "QP%u: QP State=%u, cur QP State=%u,"
+ " iwarp_state=0x%X, refcount=%d\n",
+ nesqp->hwqp.qp_id, attr->qp_state, nesqp->ibqp_state,
+ nesqp->iwarp_state, atomic_read(&nesqp->refcount));
+
+ nes_add_ref(&nesqp->ibqp);
+ spin_lock_irqsave(&nesqp->lock, qplockflags);
+
+ nes_debug(NES_DBG_MOD_QP, "QP%u: hw_iwarp_state=0x%X, hw_tcp_state=0x%X,"
+ " QP Access Flags=0x%X, attr_mask = 0x%0x\n",
+ nesqp->hwqp.qp_id, nesqp->hw_iwarp_state,
+ nesqp->hw_tcp_state, attr->qp_access_flags, attr_mask);
+
+ if (attr_mask & IB_QP_STATE) {
+ switch (attr->qp_state) {
+ case IB_QPS_INIT:
+ nes_debug(NES_DBG_MOD_QP, "QP%u: new state = init\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_IDLE) {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ }
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_RTR:
+ nes_debug(NES_DBG_MOD_QP, "QP%u: new state = rtr\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->iwarp_state>(u32)NES_CQP_QP_IWARP_STATE_IDLE) {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ }
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_RTS:
+ nes_debug(NES_DBG_MOD_QP, "QP%u: new state = rts\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->iwarp_state>(u32)NES_CQP_QP_IWARP_STATE_RTS) {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ }
+ if (nesqp->cm_id == NULL) {
+ nes_debug(NES_DBG_MOD_QP, "QP%u: Failing attempt to move QP to RTS without a CM_ID. \n",
+ nesqp->hwqp.qp_id );
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ }
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_RTS;
+ if (nesqp->iwarp_state != NES_CQP_QP_IWARP_STATE_RTS)
+ next_iwarp_state |= NES_CQP_QP_CONTEXT_VALID |
+ NES_CQP_QP_ARP_VALID | NES_CQP_QP_ORD_VALID;
+ issue_modify_qp = 1;
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_ESTABLISHED;
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_RTS;
+ nesqp->hte_added = 1;
+ break;
+ case IB_QPS_SQD:
+ issue_modify_qp = 1;
+ nes_debug(NES_DBG_MOD_QP, "QP%u: new state=closing. SQ head=%u, SQ tail=%u\n",
+ nesqp->hwqp.qp_id, nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail);
+ if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_CLOSING) {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return 0;
+ } else {
+ if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_CLOSING) {
+ nes_debug(NES_DBG_MOD_QP, "QP%u: State change to closing"
+ " ignored due to current iWARP state\n",
+ nesqp->hwqp.qp_id);
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ }
+ if (nesqp->hw_iwarp_state != NES_AEQE_IWARP_STATE_RTS) {
+ nes_debug(NES_DBG_MOD_QP, "QP%u: State change to closing"
+ " already done based on hw state.\n",
+ nesqp->hwqp.qp_id);
+ issue_modify_qp = 0;
+ nesqp->in_disconnect = 0;
+ }
+ switch (nesqp->hw_iwarp_state) {
+ case NES_AEQE_IWARP_STATE_CLOSING:
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+ break;
+ case NES_AEQE_IWARP_STATE_TERMINATE:
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
+ break;
+ case NES_AEQE_IWARP_STATE_ERROR:
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
+ break;
+ default:
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+ nesqp->in_disconnect = 1;
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+ break;
+ }
+ }
+ break;
+ case IB_QPS_SQE:
+ nes_debug(NES_DBG_MOD_QP, "QP%u: new state = terminate\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->iwarp_state>=(u32)NES_CQP_QP_IWARP_STATE_TERMINATE) {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ }
+ /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE;
+ issue_modify_qp = 1;
+ nesqp->in_disconnect = 1;
+ break;
+ case IB_QPS_ERR:
+ case IB_QPS_RESET:
+ if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_ERROR) {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ }
+ nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n",
+ nesqp->hwqp.qp_id);
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
+ /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
+ if (nesqp->hte_added) {
+ nes_debug(NES_DBG_MOD_QP, "set CQP_QP_DEL_HTE\n");
+ next_iwarp_state |= NES_CQP_QP_DEL_HTE;
+ nesqp->hte_added = 0;
+ }
+ if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) &&
+ (nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) {
+ next_iwarp_state |= NES_CQP_QP_RESET;
+ nesqp->in_disconnect = 1;
+ } else {
+ nes_debug(NES_DBG_MOD_QP, "QP%u NOT setting NES_CQP_QP_RESET since TCP state = %u\n",
+ nesqp->hwqp.qp_id, nesqp->hw_tcp_state);
+ dont_wait = 1;
+ }
+ issue_modify_qp = 1;
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR;
+ break;
+ default:
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ break;
+ }
+
+ nesqp->ibqp_state = attr->qp_state;
+ if (((nesqp->iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) ==
+ (u32)NES_CQP_QP_IWARP_STATE_RTS) &&
+ ((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) >
+ (u32)NES_CQP_QP_IWARP_STATE_RTS)) {
+ nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
+ nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
+ nesqp->iwarp_state);
+ issue_disconnect = 1;
+ } else {
+ nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
+ nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
+ nesqp->iwarp_state);
+ }
+ }
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) {
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_WRITE_EN |
+ NES_QPCONTEXT_MISC_RDMA_READ_EN);
+ issue_modify_qp = 1;
+ }
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) {
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_WRITE_EN);
+ issue_modify_qp = 1;
+ }
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) {
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_READ_EN);
+ issue_modify_qp = 1;
+ }
+ if (attr->qp_access_flags & IB_ACCESS_MW_BIND) {
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WBIND_EN);
+ issue_modify_qp = 1;
+ }
+
+ if (nesqp->user_mode) {
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_WRITE_EN |
+ NES_QPCONTEXT_MISC_RDMA_READ_EN);
+ issue_modify_qp = 1;
+ }
+ }
+
+ original_last_aeq = nesqp->last_aeq;
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+
+ nes_debug(NES_DBG_MOD_QP, "issue_modify_qp=%u\n", issue_modify_qp);
+
+ ret = 0;
+
+
+ if (issue_modify_qp) {
+ nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n");
+ ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 1);
+ if (ret)
+ nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)"
+ " failed for QP%u.\n",
+ next_iwarp_state, nesqp->hwqp.qp_id);
+
+ }
+
+ if ((issue_modify_qp) && (nesqp->ibqp_state > IB_QPS_RTS)) {
+ nes_debug(NES_DBG_MOD_QP, "QP%u Issued ModifyQP refcount (%d),"
+ " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
+ original_last_aeq, nesqp->last_aeq);
+ if ((!ret) ||
+ ((original_last_aeq != NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) &&
+ (ret))) {
+ if (dont_wait) {
+ if (nesqp->cm_id && nesqp->hw_tcp_state != 0) {
+ nes_debug(NES_DBG_MOD_QP, "QP%u Queuing fake disconnect for QP refcount (%d),"
+ " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
+ original_last_aeq, nesqp->last_aeq);
+ /* this one is for the cm_disconnect thread */
+ nes_add_ref(&nesqp->ibqp);
+ spin_lock_irqsave(&nesqp->lock, qplockflags);
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_cm_disconn(nesqp);
+ } else {
+ nes_debug(NES_DBG_MOD_QP, "QP%u No fake disconnect, QP refcount=%d\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount));
+ nes_rem_ref(&nesqp->ibqp);
+ }
+ } else {
+ spin_lock_irqsave(&nesqp->lock, qplockflags);
+ if (nesqp->cm_id) {
+ /* These two are for the timer thread */
+ if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
+ nes_add_ref(&nesqp->ibqp);
+ nesqp->cm_id->add_ref(nesqp->cm_id);
+ nes_debug(NES_DBG_MOD_QP, "QP%u Not decrementing QP refcount (%d),"
+ " need ae to finish up, original_last_aeq = 0x%04X."
+ " last_aeq = 0x%04X, scheduling timer.\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
+ original_last_aeq, nesqp->last_aeq);
+ schedule_nes_timer(nesqp->cm_node, (struct sk_buff *) nesqp, NES_TIMER_TYPE_CLOSE, 1, 0);
+ }
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ } else {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_debug(NES_DBG_MOD_QP, "QP%u Not decrementing QP refcount (%d),"
+ " need ae to finish up, original_last_aeq = 0x%04X."
+ " last_aeq = 0x%04X.\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
+ original_last_aeq, nesqp->last_aeq);
+ }
+ }
+ } else {
+ nes_debug(NES_DBG_MOD_QP, "QP%u Decrementing QP refcount (%d), No ae to finish up,"
+ " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
+ original_last_aeq, nesqp->last_aeq);
+ nes_rem_ref(&nesqp->ibqp);
+ }
+ } else {
+ nes_debug(NES_DBG_MOD_QP, "QP%u Decrementing QP refcount (%d), No ae to finish up,"
+ " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
+ original_last_aeq, nesqp->last_aeq);
+ nes_rem_ref(&nesqp->ibqp);
+ }
+
+ err = 0;
+
+ nes_debug(NES_DBG_MOD_QP, "QP%u Leaving, refcount=%d\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount));
+
+ return err;
+}
+
+
+/**
+ * nes_multicast_attach
+ */
+static int nes_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ nes_debug(NES_DBG_INIT, "\n");
+ return -ENOSYS;
+}
+
+
+/**
+ * nes_multicast_detach
+ */
+static int nes_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ nes_debug(NES_DBG_INIT, "\n");
+ return -ENOSYS;
+}
+
+
+/**
+ * nes_process_mad
+ */
+static int nes_process_mad(struct ib_device *ibdev, int mad_flags,
+ u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ nes_debug(NES_DBG_INIT, "\n");
+ return -ENOSYS;
+}
+
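+/* Copy the work request's SGE list into the WQE fragments and set the total payload length. */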
+static inline void
+fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, struct ib_send_wr *ib_wr, u32 uselkey)
+{
+ int sge_index;
+ int total_payload_length = 0;
+ for (sge_index = 0; sge_index < ib_wr->num_sge; sge_index++) {
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX+(sge_index*4),
+ ib_wr->sg_list[sge_index].addr);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_LENGTH0_IDX + (sge_index*4),
+ ib_wr->sg_list[sge_index].length);
+ if (uselkey)
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX + (sge_index*4),
+ (ib_wr->sg_list[sge_index].lkey));
+ else
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX + (sge_index*4), 0);
+
+ total_payload_length += ib_wr->sg_list[sge_index].length;
+ }
+ nes_debug(NES_DBG_IW_TX, "UC UC UC, sending total_payload_length=%u \n",
+ total_payload_length);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
+ total_payload_length);
+}
+
+/**
+ * nes_post_send
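+ * Build send/RDMA write/RDMA read WQEs on the SQ from the work request chain and
+ * ring the WQE allocate doorbell for the number of WQEs posted.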
+ */
+static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
+ struct ib_send_wr **bad_wr)
+{
+ u64 u64temp;
+ unsigned long flags = 0;
+ struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_qp *nesqp = to_nesqp(ibqp);
+ struct nes_hw_qp_wqe *wqe;
+ int err;
+ u32 qsize = nesqp->hwqp.sq_size;
+ u32 head;
+ u32 wqe_misc;
+ u32 wqe_count;
+ u32 counter;
+ u32 total_payload_length;
+
+ err = 0;
+ wqe_misc = 0;
+ wqe_count = 0;
+ total_payload_length = 0;
+
+ if (nesqp->ibqp_state > IB_QPS_RTS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+
+ head = nesqp->hwqp.sq_head;
+
+ while (ib_wr) {
+ /* Check for SQ overflow */
+ if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
+ err = -EINVAL;
+ break;
+ }
+
+ wqe = &nesqp->hwqp.sq_vbase[head];
+ /* nes_debug(NES_DBG_IW_TX, "processing sq wqe for QP%u at %p, head = %u.\n",
+ nesqp->hwqp.qp_id, wqe, head); */
+ nes_fill_init_qp_wqe(wqe, nesqp, head);
+ u64temp = (u64)(ib_wr->wr_id);
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX,
+ u64temp);
+ switch (ib_wr->opcode) {
+ case IB_WR_SEND:
+ if (ib_wr->send_flags & IB_SEND_SOLICITED) {
+ wqe_misc = NES_IWARP_SQ_OP_SENDSE;
+ } else {
+ wqe_misc = NES_IWARP_SQ_OP_SEND;
+ }
+ if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
+ err = -EINVAL;
+ break;
+ }
+ if (ib_wr->send_flags & IB_SEND_FENCE) {
+ wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
+ }
+ if ((ib_wr->send_flags & IB_SEND_INLINE) &&
+ ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
+ (ib_wr->sg_list[0].length <= 64)) {
+ memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
+ (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
+ ib_wr->sg_list[0].length);
+ wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
+ } else {
+ fill_wqe_sg_send(wqe, ib_wr, 1);
+ }
+
+ break;
+ case IB_WR_RDMA_WRITE:
+ wqe_misc = NES_IWARP_SQ_OP_RDMAW;
+ if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
+ nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=%u\n",
+ ib_wr->num_sge,
+ nesdev->nesadapter->max_sge);
+ err = -EINVAL;
+ break;
+ }
+ if (ib_wr->send_flags & IB_SEND_FENCE) {
+ wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
+ }
+
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
+ ib_wr->wr.rdma.rkey);
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
+ ib_wr->wr.rdma.remote_addr);
+
+ if ((ib_wr->send_flags & IB_SEND_INLINE) &&
+ ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
+ (ib_wr->sg_list[0].length <= 64)) {
+ memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
+ (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
+ ib_wr->sg_list[0].length);
+ wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
+ } else {
+ fill_wqe_sg_send(wqe, ib_wr, 1);
+ }
+ wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] =
+ wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX];
+ break;
+ case IB_WR_RDMA_READ:
+ /* iWARP only supports 1 sge for RDMA reads */
+ if (ib_wr->num_sge > 1) {
+ nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=1\n",
+ ib_wr->num_sge);
+ err = -EINVAL;
+ break;
+ }
+ wqe_misc = NES_IWARP_SQ_OP_RDMAR;
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
+ ib_wr->wr.rdma.remote_addr);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
+ ib_wr->wr.rdma.rkey);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX,
+ ib_wr->sg_list->length);
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
+ ib_wr->sg_list->addr);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX,
+ ib_wr->sg_list->lkey);
+ break;
+ default:
+ /* error */
+ err = -EINVAL;
+ break;
+ }
+
+ if (ib_wr->send_flags & IB_SEND_SIGNALED) {
+ wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
+ }
+ wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(wqe_misc);
+
+ ib_wr = ib_wr->next;
+ head++;
+ wqe_count++;
+ if (head >= qsize)
+ head = 0;
+
+ }
+
+ nesqp->hwqp.sq_head = head;
+ barrier();
+ while (wqe_count) {
+ counter = min(wqe_count, ((u32)255));
+ wqe_count -= counter;
+ nes_write32(nesdev->regs + NES_WQE_ALLOC,
+ (counter << 24) | 0x00800000 | nesqp->hwqp.qp_id);
+ }
+
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+
+ if (err)
+ *bad_wr = ib_wr;
+ return err;
+}
+
+
+/**
+ * nes_post_recv
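+ * Build receive WQEs from the supplied SGE lists and ring the WQE allocate doorbell.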
+ */
+static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
+ struct ib_recv_wr **bad_wr)
+{
+ u64 u64temp;
+ unsigned long flags = 0;
+ struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_qp *nesqp = to_nesqp(ibqp);
+ struct nes_hw_qp_wqe *wqe;
+ int err = 0;
+ int sge_index;
+ u32 qsize = nesqp->hwqp.rq_size;
+ u32 head;
+ u32 wqe_count = 0;
+ u32 counter;
+ u32 total_payload_length;
+
+ if (nesqp->ibqp_state > IB_QPS_RTS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+
+ head = nesqp->hwqp.rq_head;
+
+ while (ib_wr) {
+ if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
+ err = -EINVAL;
+ break;
+ }
+ /* Check for RQ overflow */
+ if (((head + (2 * qsize) - nesqp->hwqp.rq_tail) % qsize) == (qsize - 1)) {
+ err = -EINVAL;
+ break;
+ }
+
+ nes_debug(NES_DBG_IW_RX, "ibwr sge count = %u.\n", ib_wr->num_sge);
+ wqe = &nesqp->hwqp.rq_vbase[head];
+
+ /* nes_debug(NES_DBG_IW_RX, "QP%u:processing rq wqe at %p, head = %u.\n",
+ nesqp->hwqp.qp_id, wqe, head); */
+ nes_fill_init_qp_wqe(wqe, nesqp, head);
+ u64temp = (u64)(ib_wr->wr_id);
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX,
+ u64temp);
+ total_payload_length = 0;
+ for (sge_index=0; sge_index < ib_wr->num_sge; sge_index++) {
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_FRAG0_LOW_IDX+(sge_index*4),
+ ib_wr->sg_list[sge_index].addr);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_LENGTH0_IDX+(sge_index*4),
+ ib_wr->sg_list[sge_index].length);
+ set_wqe_32bit_value(wqe->wqe_words,NES_IWARP_RQ_WQE_STAG0_IDX+(sge_index*4),
+ ib_wr->sg_list[sge_index].lkey);
+
+ total_payload_length += ib_wr->sg_list[sge_index].length;
+ }
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX,
+ total_payload_length);
+
+ ib_wr = ib_wr->next;
+ head++;
+ wqe_count++;
+ if (head >= qsize)
+ head = 0;
+ }
+
+ nesqp->hwqp.rq_head = head;
+ barrier();
+ while (wqe_count) {
+ counter = min(wqe_count, ((u32)255));
+ wqe_count -= counter;
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter<<24) | nesqp->hwqp.qp_id);
+ }
+
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+
+ if (err)
+ *bad_wr = ib_wr;
+ return err;
+}
+
+
+/**
+ * nes_poll_cq
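+ * Drain up to num_entries valid CQEs, translate them into ib_wc entries for SQ or
+ * RQ completions and periodically return the consumed CQEs to the hardware.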
+ */
+static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
+{
+ u64 u64temp;
+ u64 wrid;
+ /* u64 u64temp; */
+ unsigned long flags = 0;
+ struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_cq *nescq = to_nescq(ibcq);
+ struct nes_qp *nesqp;
+ struct nes_hw_cqe cqe;
+ u32 head;
+ u32 wq_tail;
+ u32 cq_size;
+ u32 cqe_count = 0;
+ u32 wqe_index;
+ u32 u32temp;
+ /* u32 counter; */
+
+ nes_debug(NES_DBG_CQ, "\n");
+
+ spin_lock_irqsave(&nescq->lock, flags);
+
+ head = nescq->hw_cq.cq_head;
+ cq_size = nescq->hw_cq.cq_size;
+
+ while (cqe_count < num_entries) {
+ if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
+ NES_CQE_VALID) {
+ cqe = nescq->hw_cq.cq_vbase[head];
+ nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
+ u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+ wqe_index = u32temp &
+ (nesdev->nesadapter->max_qp_wr - 1);
+ u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
+ /* parse CQE, get completion context from WQE (either rq or sq) */
+ u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
+ ((u64)u32temp);
+ nesqp = *((struct nes_qp **)&u64temp);
+ memset(entry, 0, sizeof *entry);
+ if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) {
+ entry->status = IB_WC_SUCCESS;
+ } else {
+ entry->status = IB_WC_WR_FLUSH_ERR;
+ }
+
+ entry->qp = &nesqp->ibqp;
+ entry->src_qp = nesqp->hwqp.qp_id;
+
+ if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) {
+ if (nesqp->skip_lsmm) {
+ nesqp->skip_lsmm = 0;
+ wq_tail = nesqp->hwqp.sq_tail++;
+ }
+
+ /* Working on a SQ Completion*/
+ wq_tail = wqe_index;
+ nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
+ wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
+ wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
+ ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
+ wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])));
+ entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+ wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]);
+
+ switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+ wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) {
+ case NES_IWARP_SQ_OP_RDMAW:
+ nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n");
+ entry->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case NES_IWARP_SQ_OP_RDMAR:
+ nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n");
+ entry->opcode = IB_WC_RDMA_READ;
+ entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+ wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]);
+ break;
+ case NES_IWARP_SQ_OP_SENDINV:
+ case NES_IWARP_SQ_OP_SENDSEINV:
+ case NES_IWARP_SQ_OP_SEND:
+ case NES_IWARP_SQ_OP_SENDSE:
+ nes_debug(NES_DBG_CQ, "Operation = Send.\n");
+ entry->opcode = IB_WC_SEND;
+ break;
+ }
+ } else {
+ /* Working on a RQ Completion*/
+ wq_tail = wqe_index;
+ nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
+ entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
+ wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
+ ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
+ entry->opcode = IB_WC_RECV;
+ }
+ entry->wr_id = wrid;
+
+ if (++head >= cq_size)
+ head = 0;
+ cqe_count++;
+ nescq->polled_completions++;
+ if ((nescq->polled_completions > (cq_size / 2)) ||
+ (nescq->polled_completions == 255)) {
+ nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes"
+ " are pending %u of %u.\n",
+ nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
+ nes_write32(nesdev->regs+NES_CQE_ALLOC,
+ nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
+ nescq->polled_completions = 0;
+ }
+ entry++;
+ } else
+ break;
+ }
+
+ if (nescq->polled_completions) {
+ nes_write32(nesdev->regs+NES_CQE_ALLOC,
+ nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
+ nescq->polled_completions = 0;
+ }
+
+ nescq->hw_cq.cq_head = head;
+ nes_debug(NES_DBG_CQ, "Reporting %u completions for CQ%u.\n",
+ cqe_count, nescq->hw_cq.cq_number);
+
+ spin_unlock_irqrestore(&nescq->lock, flags);
+
+ return cqe_count;
+}
+
+
+/**
+ * nes_req_notify_cq
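+ * Arm the CQ for the next (or next solicited) completion via the CQE allocate register.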
+ */
+static int nes_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
+{
+ struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_cq *nescq = to_nescq(ibcq);
+ u32 cq_arm;
+
+ nes_debug(NES_DBG_CQ, "Requesting notification for CQ%u.\n",
+ nescq->hw_cq.cq_number);
+
+ cq_arm = nescq->hw_cq.cq_number;
+ if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
+ cq_arm |= NES_CQE_ALLOC_NOTIFY_NEXT;
+ else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
+ cq_arm |= NES_CQE_ALLOC_NOTIFY_SE;
+ else
+ return -EINVAL;
+
+ nes_write32(nesdev->regs+NES_CQE_ALLOC, cq_arm);
+ nes_read32(nesdev->regs+NES_CQE_ALLOC);
+
+ return 0;
+}
+
+
+/**
+ * nes_init_ofa_device
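+ * Allocate the ib_device for this netdev and fill in the verbs and iWARP CM callbacks.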
+ */
+struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
+{
+ struct nes_ib_device *nesibdev;
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+
+ nesibdev = (struct nes_ib_device *)ib_alloc_device(sizeof(struct nes_ib_device));
+ if (nesibdev == NULL) {
+ return NULL;
+ }
+ strlcpy(nesibdev->ibdev.name, "nes%d", IB_DEVICE_NAME_MAX);
+ nesibdev->ibdev.owner = THIS_MODULE;
+
+ nesibdev->ibdev.node_type = RDMA_NODE_RNIC;
+ memset(&nesibdev->ibdev.node_guid, 0, sizeof(nesibdev->ibdev.node_guid));
+ memcpy(&nesibdev->ibdev.node_guid, netdev->dev_addr, 6);
+
+ nesibdev->ibdev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
+ (1ull << IB_USER_VERBS_CMD_BIND_MW) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_MW) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND);
+
+ nesibdev->ibdev.phys_port_cnt = 1;
+ nesibdev->ibdev.num_comp_vectors = 1;
+ nesibdev->ibdev.dma_device = &nesdev->pcidev->dev;
+ nesibdev->ibdev.class_dev.dev = &nesdev->pcidev->dev;
+ nesibdev->ibdev.query_device = nes_query_device;
+ nesibdev->ibdev.query_port = nes_query_port;
+ nesibdev->ibdev.modify_port = nes_modify_port;
+ nesibdev->ibdev.query_pkey = nes_query_pkey;
+ nesibdev->ibdev.query_gid = nes_query_gid;
+ nesibdev->ibdev.alloc_ucontext = nes_alloc_ucontext;
+ nesibdev->ibdev.dealloc_ucontext = nes_dealloc_ucontext;
+ nesibdev->ibdev.mmap = nes_mmap;
+ nesibdev->ibdev.alloc_pd = nes_alloc_pd;
+ nesibdev->ibdev.dealloc_pd = nes_dealloc_pd;
+ nesibdev->ibdev.create_ah = nes_create_ah;
+ nesibdev->ibdev.destroy_ah = nes_destroy_ah;
+ nesibdev->ibdev.create_qp = nes_create_qp;
+ nesibdev->ibdev.modify_qp = nes_modify_qp;
+ nesibdev->ibdev.query_qp = nes_query_qp;
+ nesibdev->ibdev.destroy_qp = nes_destroy_qp;
+ nesibdev->ibdev.create_cq = nes_create_cq;
+ nesibdev->ibdev.destroy_cq = nes_destroy_cq;
+ nesibdev->ibdev.poll_cq = nes_poll_cq;
+ nesibdev->ibdev.get_dma_mr = nes_get_dma_mr;
+ nesibdev->ibdev.reg_phys_mr = nes_reg_phys_mr;
+ nesibdev->ibdev.reg_user_mr = nes_reg_user_mr;
+ nesibdev->ibdev.dereg_mr = nes_dereg_mr;
+ nesibdev->ibdev.alloc_mw = nes_alloc_mw;
+ nesibdev->ibdev.dealloc_mw = nes_dealloc_mw;
+ nesibdev->ibdev.bind_mw = nes_bind_mw;
+
+ nesibdev->ibdev.alloc_fmr = nes_alloc_fmr;
+ nesibdev->ibdev.unmap_fmr = nes_unmap_fmr;
+ nesibdev->ibdev.dealloc_fmr = nes_dealloc_fmr;
+ nesibdev->ibdev.map_phys_fmr = nes_map_phys_fmr;
+
+ nesibdev->ibdev.attach_mcast = nes_multicast_attach;
+ nesibdev->ibdev.detach_mcast = nes_multicast_detach;
+ nesibdev->ibdev.process_mad = nes_process_mad;
+
+ nesibdev->ibdev.req_notify_cq = nes_req_notify_cq;
+ nesibdev->ibdev.post_send = nes_post_send;
+ nesibdev->ibdev.post_recv = nes_post_recv;
+
+ nesibdev->ibdev.iwcm = kzalloc(sizeof(*nesibdev->ibdev.iwcm), GFP_KERNEL);
+ if (nesibdev->ibdev.iwcm == NULL) {
+ ib_dealloc_device(&nesibdev->ibdev);
+ return NULL;
+ }
+ nesibdev->ibdev.iwcm->add_ref = nes_add_ref;
+ nesibdev->ibdev.iwcm->rem_ref = nes_rem_ref;
+ nesibdev->ibdev.iwcm->get_qp = nes_get_qp;
+ nesibdev->ibdev.iwcm->connect = nes_connect;
+ nesibdev->ibdev.iwcm->accept = nes_accept;
+ nesibdev->ibdev.iwcm->reject = nes_reject;
+ nesibdev->ibdev.iwcm->create_listen = nes_create_listen;
+ nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen;
+
+ return nesibdev;
+}
+
+
+/**
+ * nes_destroy_ofa_device
+ */
+void nes_destroy_ofa_device(struct nes_ib_device *nesibdev)
+{
+ if (nesibdev == NULL)
+ return;
+
+ nes_unregister_ofa_device(nesibdev);
+
+ kfree(nesibdev->ibdev.iwcm);
+ ib_dealloc_device(&nesibdev->ibdev);
+}
+
+
+/**
+ * nes_register_ofa_device
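+ * Register the ib_device with the IB core, divide the adapter resources among the
+ * ports and create the sysfs class attributes.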
+ */
+int nes_register_ofa_device(struct nes_ib_device *nesibdev)
+{
+ struct nes_vnic *nesvnic = nesibdev->nesvnic;
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ int i, ret;
+
+ ret = ib_register_device(&nesvnic->nesibdev->ibdev);
+ if (ret) {
+ return ret;
+ }
+
+ /* Get the resources allocated to this device */
+ nesibdev->max_cq = (nesadapter->max_cq-NES_FIRST_QPN) / nesadapter->port_count;
+ nesibdev->max_mr = nesadapter->max_mr / nesadapter->port_count;
+ nesibdev->max_qp = (nesadapter->max_qp-NES_FIRST_QPN) / nesadapter->port_count;
+ nesibdev->max_pd = nesadapter->max_pd / nesadapter->port_count;
+
+ for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i) {
+ ret = class_device_create_file(&nesibdev->ibdev.class_dev, nes_class_attributes[i]);
+ if (ret) {
+ while (i > 0) {
+ i--;
+ class_device_remove_file(&nesibdev->ibdev.class_dev,
+ nes_class_attributes[i]);
+ }
+ ib_unregister_device(&nesibdev->ibdev);
+ return ret;
+ }
+ }
+
+ nesvnic->of_device_registered = 1;
+
+ return 0;
+}
+
+
+/**
+ * nes_unregister_ofa_device
+ */
+void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
+{
+ struct nes_vnic *nesvnic = nesibdev->nesvnic;
+ int i;
+
+ if (nesibdev == NULL)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i) {
+ class_device_remove_file(&nesibdev->ibdev.class_dev, nes_class_attributes[i]);
+ }
+
+ if (nesvnic->of_device_registered) {
+ ib_unregister_device(&nesibdev->ibdev);
+ }
+
+ nesvnic->of_device_registered = 0;
+}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
new file mode 100644
index 000000000000..6c6b4da5184f
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef NES_VERBS_H
+#define NES_VERBS_H
+
+struct nes_device;
+
+#define NES_MAX_USER_DB_REGIONS 4096
+#define NES_MAX_USER_WQ_REGIONS 4096
+
+struct nes_ucontext {
+ struct ib_ucontext ibucontext;
+ struct nes_device *nesdev;
+ unsigned long mmap_wq_offset;
+ unsigned long mmap_cq_offset; /* to be removed */
+ int index; /* rnic index (minor) */
+ unsigned long allocated_doorbells[BITS_TO_LONGS(NES_MAX_USER_DB_REGIONS)];
+ u16 mmap_db_index[NES_MAX_USER_DB_REGIONS];
+ u16 first_free_db;
+ unsigned long allocated_wqs[BITS_TO_LONGS(NES_MAX_USER_WQ_REGIONS)];
+ struct nes_qp *mmap_nesqp[NES_MAX_USER_WQ_REGIONS];
+ u16 first_free_wq;
+ struct list_head cq_reg_mem_list;
+ struct list_head qp_reg_mem_list;
+ u32 mcrqf;
+ atomic_t usecnt;
+};
+
+struct nes_pd {
+ struct ib_pd ibpd;
+ u16 pd_id;
+ atomic_t sqp_count;
+ u16 mmap_db_index;
+};
+
+struct nes_mr {
+ union {
+ struct ib_mr ibmr;
+ struct ib_mw ibmw;
+ struct ib_fmr ibfmr;
+ };
+ struct ib_umem *region;
+ u16 pbls_used;
+ u8 mode;
+ u8 pbl_4k;
+};
+
+struct nes_hw_pb {
+ __le32 pa_low;
+ __le32 pa_high;
+};
+
+struct nes_vpbl {
+ dma_addr_t pbl_pbase;
+ struct nes_hw_pb *pbl_vbase;
+};
+
+struct nes_root_vpbl {
+ dma_addr_t pbl_pbase;
+ struct nes_hw_pb *pbl_vbase;
+ struct nes_vpbl *leaf_vpbl;
+};
+
+struct nes_fmr {
+ struct nes_mr nesmr;
+ u32 leaf_pbl_cnt;
+ struct nes_root_vpbl root_vpbl;
+ struct ib_qp *ib_qp;
+ int access_rights;
+ struct ib_fmr_attr attr;
+};
+
+struct nes_av;
+
+struct nes_cq {
+ struct ib_cq ibcq;
+ struct nes_hw_cq hw_cq;
+ u32 polled_completions;
+ u32 cq_mem_size;
+ spinlock_t lock;
+ u8 virtual_cq;
+ u8 pad[3];
+};
+
+struct nes_wq {
+ spinlock_t lock;
+};
+
+struct iw_cm_id;
+struct ietf_mpa_frame;
+
+struct nes_qp {
+ struct ib_qp ibqp;
+ void *allocated_buffer;
+ struct iw_cm_id *cm_id;
+ struct workqueue_struct *wq;
+ struct work_struct disconn_work;
+ struct nes_cq *nesscq;
+ struct nes_cq *nesrcq;
+ struct nes_pd *nespd;
+ void *cm_node; /* handle of the node this QP is associated with */
+ struct ietf_mpa_frame *ietf_frame;
+ dma_addr_t ietf_frame_pbase;
+ wait_queue_head_t state_waitq;
+ unsigned long socket;
+ struct nes_hw_qp hwqp;
+ struct work_struct work;
+ struct work_struct ae_work;
+ enum ib_qp_state ibqp_state;
+ u32 iwarp_state;
+ u32 hte_index;
+ u32 last_aeq;
+ u32 qp_mem_size;
+ atomic_t refcount;
+ atomic_t close_timer_started;
+ u32 mmap_sq_db_index;
+ u32 mmap_rq_db_index;
+ spinlock_t lock;
+ struct nes_qp_context *nesqp_context;
+ dma_addr_t nesqp_context_pbase;
+ void *pbl_vbase;
+ dma_addr_t pbl_pbase;
+ struct page *page;
+ wait_queue_head_t kick_waitq;
+ u16 in_disconnect;
+ u16 private_data_len;
+ u8 active_conn;
+ u8 skip_lsmm;
+ u8 user_mode;
+ u8 hte_added;
+ u8 hw_iwarp_state;
+ u8 flush_issued;
+ u8 hw_tcp_state;
+ u8 disconn_pending;
+ u8 destroyed;
+};
+#endif /* NES_VERBS_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index a082466f4a83..09f5371137a1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -680,12 +680,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
neigh = *to_ipoib_neigh(skb->dst->neighbour);
- if (ipoib_cm_get(neigh)) {
- if (ipoib_cm_up(neigh)) {
- ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
- goto out;
- }
- } else if (neigh->ah) {
+ if (neigh->ah)
if (unlikely((memcmp(&neigh->dgid.raw,
skb->dst->neighbour->ha + 4,
sizeof(union ib_gid))) ||
@@ -706,6 +701,12 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ if (ipoib_cm_get(neigh)) {
+ if (ipoib_cm_up(neigh)) {
+ ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
+ goto out;
+ }
+ } else if (neigh->ah) {
ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
goto out;
}
@@ -813,11 +814,9 @@ static void ipoib_neigh_cleanup(struct neighbour *n)
struct ipoib_ah *ah = NULL;
neigh = *to_ipoib_neigh(n);
- if (neigh) {
+ if (neigh)
priv = netdev_priv(neigh->dev);
- ipoib_dbg(priv, "neigh_destructor for bonding device: %s\n",
- n->dev->name);
- } else
+ else
return;
ipoib_dbg(priv,
"neigh_cleanup for %06x " IPOIB_GID_FMT "\n",
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 195ce7c12319..fd4a49fc4773 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -204,6 +204,22 @@ out:
return ret;
}
+static int srp_new_cm_id(struct srp_target_port *target)
+{
+ struct ib_cm_id *new_cm_id;
+
+ new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
+ srp_cm_handler, target);
+ if (IS_ERR(new_cm_id))
+ return PTR_ERR(new_cm_id);
+
+ if (target->cm_id)
+ ib_destroy_cm_id(target->cm_id);
+ target->cm_id = new_cm_id;
+
+ return 0;
+}
+
static int srp_create_target_ib(struct srp_target_port *target)
{
struct ib_qp_init_attr *init_attr;
@@ -436,6 +452,7 @@ static void srp_remove_work(struct work_struct *work)
static int srp_connect_target(struct srp_target_port *target)
{
+ int retries = 3;
int ret;
ret = srp_lookup_path(target);
@@ -468,6 +485,21 @@ static int srp_connect_target(struct srp_target_port *target)
case SRP_DLID_REDIRECT:
break;
+ case SRP_STALE_CONN:
+ /* Our current CM id was stale, and is now in timewait.
+ * Try to reconnect with a new one.
+ */
+ if (!retries-- || srp_new_cm_id(target)) {
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "giving up on stale connection\n");
+ target->status = -ECONNRESET;
+ return target->status;
+ }
+
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "retrying stale connection\n");
+ break;
+
default:
return target->status;
}
@@ -507,7 +539,6 @@ static void srp_reset_req(struct srp_target_port *target, struct srp_request *re
static int srp_reconnect_target(struct srp_target_port *target)
{
- struct ib_cm_id *new_cm_id;
struct ib_qp_attr qp_attr;
struct srp_request *req, *tmp;
struct ib_wc wc;
@@ -526,14 +557,9 @@ static int srp_reconnect_target(struct srp_target_port *target)
* Now get a new local CM ID so that we avoid confusing the
* target in case things are really fouled up.
*/
- new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
- srp_cm_handler, target);
- if (IS_ERR(new_cm_id)) {
- ret = PTR_ERR(new_cm_id);
+ ret = srp_new_cm_id(target);
+ if (ret)
goto err;
- }
- ib_destroy_cm_id(target->cm_id);
- target->cm_id = new_cm_id;
qp_attr.qp_state = IB_QPS_RESET;
ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
@@ -1171,6 +1197,11 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
target->status = -ECONNRESET;
break;
+ case IB_CM_REJ_STALE_CONN:
+ shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
+ target->status = SRP_STALE_CONN;
+ break;
+
default:
shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
event->param.rej_rcvd.reason);
@@ -1862,11 +1893,9 @@ static ssize_t srp_create_target(struct class_device *class_dev,
if (ret)
goto err;
- target->cm_id = ib_create_cm_id(host->dev->dev, srp_cm_handler, target);
- if (IS_ERR(target->cm_id)) {
- ret = PTR_ERR(target->cm_id);
+ ret = srp_new_cm_id(target);
+ if (ret)
goto err_free;
- }
target->qp_in_error = 0;
ret = srp_connect_target(target);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 4a3c1f37e4c2..cb6eb816024a 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -54,6 +54,7 @@ enum {
SRP_PORT_REDIRECT = 1,
SRP_DLID_REDIRECT = 2,
+ SRP_STALE_CONN = 3,
SRP_MAX_LUN = 512,
SRP_DEF_SG_TABLESIZE = 12,
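One detail worth spelling out in the SRP_STALE_CONN handling added above: the guard "if (!retries-- || srp_new_cm_id(target))" relies on the post-decrement, so exactly three reconnect attempts are made before giving up, and the short-circuit || means no fresh CM ID is created once the retry budget is exhausted. A minimal stand-alone sketch of that counting idiom (plain userspace C, messages invented for illustration):

#include <stdio.h>

int main(void)
{
	int retries = 3;	/* same budget srp_connect_target() starts with */

	for (;;) {
		/* mirrors "if (!retries-- || srp_new_cm_id(target))":
		 * the give-up branch is reached only after three retries */
		if (!retries--) {
			printf("giving up on stale connection\n");
			break;
		}
		printf("retrying stale connection (%d retries left)\n", retries);
	}
	return 0;
}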
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 1dc2ac9f3d1c..c5600ac5feb3 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -17,7 +17,6 @@
#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/wait.h>
-#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/kthread.h>
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index e5f4da928340..05e3494cf8b8 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -42,7 +42,6 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/input.h>
-#include <linux/irq.h>
#include <asm/portmux.h>
#include <asm/mach/bf54x_keys.h>
diff --git a/drivers/input/keyboard/jornada720_kbd.c b/drivers/input/keyboard/jornada720_kbd.c
index e6696b3c9416..986f93cfc6b8 100644
--- a/drivers/input/keyboard/jornada720_kbd.c
+++ b/drivers/input/keyboard/jornada720_kbd.c
@@ -17,7 +17,6 @@
*/
#include <linux/device.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/input.h>
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index b438d998625c..72176f3d49cb 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -998,12 +998,12 @@ static void wistron_wifi_led_set(struct led_classdev *led_cdev,
}
static struct led_classdev wistron_mail_led = {
- .name = "mail:green",
+ .name = "wistron:green:mail",
.brightness_set = wistron_mail_led_set,
};
static struct led_classdev wistron_wifi_led = {
- .name = "wifi:red",
+ .name = "wistron:red:wifi",
.brightness_set = wistron_wifi_led_set,
};
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index 3e99df6be084..adc3bd6e7f7b 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -141,7 +141,7 @@ static void gscps2_flush(struct gscps2port *ps2port)
/*
* gscps2_writeb_output() - write a byte to the port
*
- * returns 1 on sucess, 0 on error
+ * returns 1 on success, 0 on error
*/
static inline int gscps2_writeb_output(struct gscps2port *ps2port, u8 data)
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
index 2ae6c6016a86..28ae15ed12c5 100644
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ b/drivers/input/touchscreen/h3600_ts_input.c
@@ -109,7 +109,7 @@ struct h3600_dev {
static irqreturn_t action_button_handler(int irq, void *dev_id)
{
int down = (GPLR & GPIO_BITSY_ACTION_BUTTON) ? 0 : 1;
- struct input_dev *dev = (struct input_dev *) dev_id;
+ struct input_dev *dev = dev_id;
input_report_key(dev, KEY_ENTER, down);
input_sync(dev);
@@ -120,7 +120,7 @@ static irqreturn_t action_button_handler(int irq, void *dev_id)
static irqreturn_t npower_button_handler(int irq, void *dev_id)
{
int down = (GPLR & GPIO_BITSY_NPOWER_BUTTON) ? 0 : 1;
- struct input_dev *dev = (struct input_dev *) dev_id;
+ struct input_dev *dev = dev_id;
/*
* This interrupt is only called when we release the key. So we have
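The h3600 change above is purely cosmetic: in C a void * converts implicitly to any object-pointer type, so casting the dev_id argument is redundant. A tiny stand-alone illustration (struct fake_dev is a made-up stand-in, not the real struct input_dev):

#include <stdio.h>

struct fake_dev { int id; };	/* stand-in for struct input_dev */

static void handler(void *dev_id)
{
	/* before: struct fake_dev *dev = (struct fake_dev *) dev_id; */
	struct fake_dev *dev = dev_id;	/* implicit conversion, no cast needed */

	printf("handling device %d\n", dev->id);
}

int main(void)
{
	struct fake_dev d = { .id = 42 };

	handler(&d);
	return 0;
}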
diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c
index ee2b0b9f8f46..8325022e2bed 100644
--- a/drivers/isdn/act2000/module.c
+++ b/drivers/isdn/act2000/module.c
@@ -310,7 +310,7 @@ act2000_command(act2000_card * card, isdn_ctrl * c)
}
break;
case ISDN_CMD_DIAL:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
@@ -339,7 +339,7 @@ act2000_command(act2000_card * card, isdn_ctrl * c)
}
return ret;
case ISDN_CMD_ACCEPTD:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
@@ -347,11 +347,11 @@ act2000_command(act2000_card * card, isdn_ctrl * c)
actcapi_select_b2_protocol_req(card, chan);
return 0;
case ISDN_CMD_ACCEPTB:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return 0;
case ISDN_CMD_HANGUP:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
@@ -366,7 +366,7 @@ act2000_command(act2000_card * card, isdn_ctrl * c)
}
return 0;
case ISDN_CMD_SETEAZ:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
@@ -386,7 +386,7 @@ act2000_command(act2000_card * card, isdn_ctrl * c)
actcapi_listen_req(card);
return 0;
case ISDN_CMD_CLREAZ:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
@@ -394,14 +394,14 @@ act2000_command(act2000_card * card, isdn_ctrl * c)
actcapi_listen_req(card);
return 0;
case ISDN_CMD_SETL2:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
chan->l2prot = (c->arg >> 8);
return 0;
case ISDN_CMD_SETL3:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if ((c->arg >> 8) != ISDN_PROTO_L3_TRANS) {
printk(KERN_WARNING "L3 protocol unknown\n");
@@ -524,7 +524,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel)
act2000_card *card = act2000_findcard(id);
if (card) {
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return (len);
}
@@ -539,7 +539,7 @@ if_readstatus(u_char __user * buf, int len, int id, int channel)
act2000_card *card = act2000_findcard(id);
if (card) {
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return (act2000_readstatus(buf, len, card));
}
@@ -554,7 +554,7 @@ if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
act2000_card *card = act2000_findcard(id);
if (card) {
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return (act2000_sendbuf(card, channel, ack, skb));
}
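Every hunk in the act2000 change above fixes the same C precedence bug: ! binds more tightly than &, so "!card->flags & ACT2000_FLAGS_RUNNING" evaluates as "(!card->flags) & ACT2000_FLAGS_RUNNING" and the RUNNING bit itself is never really tested. A minimal stand-alone demonstration, with an assumed flag value used purely for illustration:

#include <stdio.h>

#define FLAGS_RUNNING 0x04	/* assumed bit value, for illustration only */

int main(void)
{
	unsigned int flags = 0;	/* card not running */

	/* buggy form: (!0) & 0x04 == 1 & 4 == 0, so the check never fires */
	if (!flags & FLAGS_RUNNING)
		printf("buggy test: would return -ENODEV\n");
	else
		printf("buggy test: falls through even though the card is down\n");

	/* fixed form: !(0 & 4) == 1, so a stopped card is rejected as intended */
	if (!(flags & FLAGS_RUNNING))
		printf("fixed test: would return -ENODEV\n");
	else
		printf("fixed test: falls through\n");

	return 0;
}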
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index 00a3be5b862b..091deb9d1c47 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -350,8 +350,8 @@ void gigaset_m10x_input(struct inbuf_t *inbuf)
unsigned char *src, c;
int procbytes;
- head = atomic_read(&inbuf->head);
- tail = atomic_read(&inbuf->tail);
+ head = inbuf->head;
+ tail = inbuf->tail;
gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
if (head != tail) {
@@ -361,7 +361,7 @@ void gigaset_m10x_input(struct inbuf_t *inbuf)
gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
while (numbytes) {
- if (atomic_read(&cs->mstate) == MS_LOCKED) {
+ if (cs->mstate == MS_LOCKED) {
procbytes = lock_loop(src, numbytes, inbuf);
src += procbytes;
numbytes -= procbytes;
@@ -436,7 +436,7 @@ nextbyte:
}
gig_dbg(DEBUG_INTR, "setting head to %u", head);
- atomic_set(&inbuf->head, head);
+ inbuf->head = head;
}
}
EXPORT_SYMBOL_GPL(gigaset_m10x_input);
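This and the following gigaset files replace atomic_t fields (the inbuf head/tail indexes, basstate, mstate, commands_pending, ...) with plain integers. The point of the change is that wrapping individual reads and writes in atomic_read()/atomic_set() does not make a read-modify-write sequence atomic, so the wrappers mostly suggested a safety they did not provide; where real protection is needed the code takes a spinlock instead, as update_basstate() in bas-gigaset.c below does. A small stand-alone C11 demonstration of the pitfall (userspace code, not part of the driver):

/* build with: cc -std=c11 -pthread demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int counter;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		int v = atomic_load(&counter);	/* atomic read  */
		atomic_store(&counter, v + 1);	/* atomic write */
		/* ...but the combined read-modify-write is not atomic */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* typically prints far less than the 200000 that a lock or
	 * atomic_fetch_add() would guarantee */
	printf("counter = %d\n", atomic_load(&counter));
	return 0;
}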
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index af7648274b38..5255b5e20e13 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -73,6 +73,14 @@ static int gigaset_probe(struct usb_interface *interface,
/* Function will be called if the device is unplugged */
static void gigaset_disconnect(struct usb_interface *interface);
+/* functions called before/after suspend */
+static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
+static int gigaset_resume(struct usb_interface *intf);
+
+/* functions called before/after device reset */
+static int gigaset_pre_reset(struct usb_interface *intf);
+static int gigaset_post_reset(struct usb_interface *intf);
+
static int atread_submit(struct cardstate *, int);
static void stopurbs(struct bas_bc_state *);
static int req_submit(struct bc_state *, int, int, int);
@@ -105,8 +113,9 @@ struct bas_cardstate {
unsigned char int_in_buf[3];
spinlock_t lock; /* locks all following */
- atomic_t basstate; /* bitmap (BS_*) */
+ int basstate; /* bitmap (BS_*) */
int pending; /* uncompleted base request */
+ wait_queue_head_t waitqueue;
int rcvbuf_size; /* size of AT receive buffer */
/* 0: no receive in progress */
int retry_cmd_in; /* receive req retry count */
@@ -121,10 +130,10 @@ struct bas_cardstate {
#define BS_ATTIMER 0x020 /* waiting for HD_READY_SEND_ATDATA */
#define BS_ATRDPEND 0x040 /* urb_cmd_in in use */
#define BS_ATWRPEND 0x080 /* urb_cmd_out in use */
+#define BS_SUSPEND 0x100 /* USB port suspended */
static struct gigaset_driver *driver = NULL;
-static struct cardstate *cardstate = NULL;
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver gigaset_usb_driver = {
@@ -132,6 +141,11 @@ static struct usb_driver gigaset_usb_driver = {
.probe = gigaset_probe,
.disconnect = gigaset_disconnect,
.id_table = gigaset_table,
+ .suspend = gigaset_suspend,
+ .resume = gigaset_resume,
+ .reset_resume = gigaset_post_reset,
+ .pre_reset = gigaset_pre_reset,
+ .post_reset = gigaset_post_reset,
};
/* get message text for usb_submit_urb return code
@@ -248,12 +262,12 @@ static inline void dump_urb(enum debuglevel level, const char *tag,
if (urb) {
gig_dbg(level,
" dev=0x%08lx, pipe=%s:EP%d/DV%d:%s, "
- "status=%d, hcpriv=0x%08lx, transfer_flags=0x%x,",
+ "hcpriv=0x%08lx, transfer_flags=0x%x,",
(unsigned long) urb->dev,
usb_pipetype_str(urb->pipe),
usb_pipeendpoint(urb->pipe), usb_pipedevice(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
- urb->status, (unsigned long) urb->hcpriv,
+ (unsigned long) urb->hcpriv,
urb->transfer_flags);
gig_dbg(level,
" transfer_buffer=0x%08lx[%d], actual_length=%d, "
@@ -355,27 +369,27 @@ static void check_pending(struct bas_cardstate *ucs)
case 0:
break;
case HD_OPEN_ATCHANNEL:
- if (atomic_read(&ucs->basstate) & BS_ATOPEN)
+ if (ucs->basstate & BS_ATOPEN)
ucs->pending = 0;
break;
case HD_OPEN_B1CHANNEL:
- if (atomic_read(&ucs->basstate) & BS_B1OPEN)
+ if (ucs->basstate & BS_B1OPEN)
ucs->pending = 0;
break;
case HD_OPEN_B2CHANNEL:
- if (atomic_read(&ucs->basstate) & BS_B2OPEN)
+ if (ucs->basstate & BS_B2OPEN)
ucs->pending = 0;
break;
case HD_CLOSE_ATCHANNEL:
- if (!(atomic_read(&ucs->basstate) & BS_ATOPEN))
+ if (!(ucs->basstate & BS_ATOPEN))
ucs->pending = 0;
break;
case HD_CLOSE_B1CHANNEL:
- if (!(atomic_read(&ucs->basstate) & BS_B1OPEN))
+ if (!(ucs->basstate & BS_B1OPEN))
ucs->pending = 0;
break;
case HD_CLOSE_B2CHANNEL:
- if (!(atomic_read(&ucs->basstate) & BS_B2OPEN))
+ if (!(ucs->basstate & BS_B2OPEN))
ucs->pending = 0;
break;
case HD_DEVICE_INIT_ACK: /* no reply expected */
@@ -441,8 +455,8 @@ inline static int update_basstate(struct bas_cardstate *ucs,
int state;
spin_lock_irqsave(&ucs->lock, flags);
- state = atomic_read(&ucs->basstate);
- atomic_set(&ucs->basstate, (state & ~clear) | set);
+ state = ucs->basstate;
+ ucs->basstate = (state & ~clear) | set;
spin_unlock_irqrestore(&ucs->lock, flags);
return state;
}
@@ -459,11 +473,13 @@ static void read_ctrl_callback(struct urb *urb)
struct inbuf_t *inbuf = urb->context;
struct cardstate *cs = inbuf->cs;
struct bas_cardstate *ucs = cs->hw.bas;
+ int status = urb->status;
int have_data = 0;
unsigned numbytes;
int rc;
update_basstate(ucs, 0, BS_ATRDPEND);
+ wake_up(&ucs->waitqueue);
if (!ucs->rcvbuf_size) {
dev_warn(cs->dev, "%s: no receive in progress\n", __func__);
@@ -472,7 +488,7 @@ static void read_ctrl_callback(struct urb *urb)
del_timer(&ucs->timer_cmd_in);
- switch (urb->status) {
+ switch (status) {
case 0: /* normal completion */
numbytes = urb->actual_length;
if (unlikely(numbytes != ucs->rcvbuf_size)) {
@@ -506,12 +522,12 @@ static void read_ctrl_callback(struct urb *urb)
case -ESHUTDOWN: /* device shut down */
/* no action necessary */
gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
break;
default: /* severe trouble */
dev_warn(cs->dev, "control read: %s\n",
- get_usb_statmsg(urb->status));
+ get_usb_statmsg(status));
if (ucs->retry_cmd_in++ < BAS_RETRY) {
dev_notice(cs->dev, "control read: retry %d\n",
ucs->retry_cmd_in);
@@ -550,17 +566,28 @@ static void read_ctrl_callback(struct urb *urb)
static int atread_submit(struct cardstate *cs, int timeout)
{
struct bas_cardstate *ucs = cs->hw.bas;
+ int basstate;
int ret;
gig_dbg(DEBUG_USBREQ, "-------> HD_READ_ATMESSAGE (%d)",
ucs->rcvbuf_size);
- if (update_basstate(ucs, BS_ATRDPEND, 0) & BS_ATRDPEND) {
+ basstate = update_basstate(ucs, BS_ATRDPEND, 0);
+ if (basstate & BS_ATRDPEND) {
dev_err(cs->dev,
"could not submit HD_READ_ATMESSAGE: URB busy\n");
return -EBUSY;
}
+ if (basstate & BS_SUSPEND) {
+ dev_notice(cs->dev,
+ "HD_READ_ATMESSAGE not submitted, "
+ "suspend in progress\n");
+ update_basstate(ucs, 0, BS_ATRDPEND);
+ /* treat like disconnect */
+ return -ENODEV;
+ }
+
ucs->dr_cmd_in.bRequestType = IN_VENDOR_REQ;
ucs->dr_cmd_in.bRequest = HD_READ_ATMESSAGE;
ucs->dr_cmd_in.wValue = 0;
@@ -601,12 +628,13 @@ static void read_int_callback(struct urb *urb)
struct cardstate *cs = urb->context;
struct bas_cardstate *ucs = cs->hw.bas;
struct bc_state *bcs;
+ int status = urb->status;
unsigned long flags;
int rc;
unsigned l;
int channel;
- switch (urb->status) {
+ switch (status) {
case 0: /* success */
break;
case -ENOENT: /* cancelled */
@@ -614,7 +642,7 @@ static void read_int_callback(struct urb *urb)
case -EINPROGRESS: /* pending */
/* ignore silently */
gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
return;
case -ENODEV: /* device removed */
case -ESHUTDOWN: /* device shut down */
@@ -623,7 +651,7 @@ static void read_int_callback(struct urb *urb)
return;
default: /* severe trouble */
dev_warn(cs->dev, "interrupt read: %s\n",
- get_usb_statmsg(urb->status));
+ get_usb_statmsg(status));
//FIXME corrective action? resubmission always ok?
goto resubmit;
}
@@ -745,6 +773,7 @@ static void read_int_callback(struct urb *urb)
}
check_pending(ucs);
+ wake_up(&ucs->waitqueue);
resubmit:
rc = usb_submit_urb(urb, GFP_ATOMIC);
@@ -766,17 +795,18 @@ static void read_iso_callback(struct urb *urb)
{
struct bc_state *bcs;
struct bas_bc_state *ubc;
+ int status = urb->status;
unsigned long flags;
int i, rc;
/* status codes not worth bothering the tasklet with */
- if (unlikely(urb->status == -ENOENT ||
- urb->status == -ECONNRESET ||
- urb->status == -EINPROGRESS ||
- urb->status == -ENODEV ||
- urb->status == -ESHUTDOWN)) {
+ if (unlikely(status == -ENOENT ||
+ status == -ECONNRESET ||
+ status == -EINPROGRESS ||
+ status == -ENODEV ||
+ status == -ESHUTDOWN)) {
gig_dbg(DEBUG_ISO, "%s: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
return;
}
@@ -787,10 +817,11 @@ static void read_iso_callback(struct urb *urb)
if (likely(ubc->isoindone == NULL)) {
/* pass URB to tasklet */
ubc->isoindone = urb;
+ ubc->isoinstatus = status;
tasklet_schedule(&ubc->rcvd_tasklet);
} else {
/* tasklet still busy, drop data and resubmit URB */
- ubc->loststatus = urb->status;
+ ubc->loststatus = status;
for (i = 0; i < BAS_NUMFRAMES; i++) {
ubc->isoinlost += urb->iso_frame_desc[i].actual_length;
if (unlikely(urb->iso_frame_desc[i].status != 0 &&
@@ -800,7 +831,7 @@ static void read_iso_callback(struct urb *urb)
urb->iso_frame_desc[i].status = 0;
urb->iso_frame_desc[i].actual_length = 0;
}
- if (likely(atomic_read(&ubc->running))) {
+ if (likely(ubc->running)) {
/* urb->dev is clobbered by USB subsystem */
urb->dev = bcs->cs->hw.bas->udev;
urb->transfer_flags = URB_ISO_ASAP;
@@ -831,22 +862,24 @@ static void write_iso_callback(struct urb *urb)
{
struct isow_urbctx_t *ucx;
struct bas_bc_state *ubc;
+ int status = urb->status;
unsigned long flags;
/* status codes not worth bothering the tasklet with */
- if (unlikely(urb->status == -ENOENT ||
- urb->status == -ECONNRESET ||
- urb->status == -EINPROGRESS ||
- urb->status == -ENODEV ||
- urb->status == -ESHUTDOWN)) {
+ if (unlikely(status == -ENOENT ||
+ status == -ECONNRESET ||
+ status == -EINPROGRESS ||
+ status == -ENODEV ||
+ status == -ESHUTDOWN)) {
gig_dbg(DEBUG_ISO, "%s: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
return;
}
/* pass URB context to tasklet */
ucx = urb->context;
ubc = ucx->bcs->hw.bas;
+ ucx->status = status;
spin_lock_irqsave(&ubc->isooutlock, flags);
ubc->isooutovfl = ubc->isooutdone;
@@ -875,7 +908,7 @@ static int starturbs(struct bc_state *bcs)
bcs->inputstate |= INS_flag_hunt;
/* submit all isochronous input URBs */
- atomic_set(&ubc->running, 1);
+ ubc->running = 1;
for (k = 0; k < BAS_INURBS; k++) {
urb = ubc->isoinurbs[k];
if (!urb) {
@@ -932,15 +965,15 @@ static int starturbs(struct bc_state *bcs)
ubc->isoouturbs[k].limit = -1;
}
- /* submit two URBs, keep third one */
- for (k = 0; k < 2; ++k) {
+ /* keep one URB free, submit the others */
+ for (k = 0; k < BAS_OUTURBS-1; ++k) {
dump_urb(DEBUG_ISO, "Initial isoc write", urb);
rc = usb_submit_urb(ubc->isoouturbs[k].urb, GFP_ATOMIC);
if (rc != 0)
goto error;
}
dump_urb(DEBUG_ISO, "Initial isoc write (free)", urb);
- ubc->isooutfree = &ubc->isoouturbs[2];
+ ubc->isooutfree = &ubc->isoouturbs[BAS_OUTURBS-1];
ubc->isooutdone = ubc->isooutovfl = NULL;
return 0;
error:
@@ -958,7 +991,7 @@ static void stopurbs(struct bas_bc_state *ubc)
{
int k, rc;
- atomic_set(&ubc->running, 0);
+ ubc->running = 0;
for (k = 0; k < BAS_INURBS; ++k) {
rc = usb_unlink_urb(ubc->isoinurbs[k]);
@@ -1034,7 +1067,7 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
}
break;
}
- ucx->limit = atomic_read(&ubc->isooutbuf->nextread);
+ ucx->limit = ubc->isooutbuf->nextread;
ifd->status = 0;
ifd->actual_length = 0;
}
@@ -1070,6 +1103,7 @@ static void write_iso_tasklet(unsigned long data)
struct cardstate *cs = bcs->cs;
struct isow_urbctx_t *done, *next, *ovfl;
struct urb *urb;
+ int status;
struct usb_iso_packet_descriptor *ifd;
int offset;
unsigned long flags;
@@ -1080,7 +1114,7 @@ static void write_iso_tasklet(unsigned long data)
/* loop while completed URBs arrive in time */
for (;;) {
- if (unlikely(!(atomic_read(&ubc->running)))) {
+ if (unlikely(!(ubc->running))) {
gig_dbg(DEBUG_ISO, "%s: not running", __func__);
return;
}
@@ -1126,7 +1160,8 @@ static void write_iso_tasklet(unsigned long data)
/* process completed URB */
urb = done->urb;
- switch (urb->status) {
+ status = done->status;
+ switch (status) {
case -EXDEV: /* partial completion */
gig_dbg(DEBUG_ISO, "%s: URB partially completed",
__func__);
@@ -1179,12 +1214,12 @@ static void write_iso_tasklet(unsigned long data)
break;
default: /* severe trouble */
dev_warn(cs->dev, "isochronous write: %s\n",
- get_usb_statmsg(urb->status));
+ get_usb_statmsg(status));
}
/* mark the write buffer area covered by this URB as free */
if (done->limit >= 0)
- atomic_set(&ubc->isooutbuf->read, done->limit);
+ ubc->isooutbuf->read = done->limit;
/* mark URB as free */
spin_lock_irqsave(&ubc->isooutlock, flags);
@@ -1233,6 +1268,7 @@ static void read_iso_tasklet(unsigned long data)
struct bas_bc_state *ubc = bcs->hw.bas;
struct cardstate *cs = bcs->cs;
struct urb *urb;
+ int status;
char *rcvbuf;
unsigned long flags;
int totleft, numbytes, offset, frame, rc;
@@ -1245,6 +1281,7 @@ static void read_iso_tasklet(unsigned long data)
spin_unlock_irqrestore(&ubc->isoinlock, flags);
return;
}
+ status = ubc->isoinstatus;
ubc->isoindone = NULL;
if (unlikely(ubc->loststatus != -EINPROGRESS)) {
dev_warn(cs->dev,
@@ -1256,15 +1293,15 @@ static void read_iso_tasklet(unsigned long data)
}
spin_unlock_irqrestore(&ubc->isoinlock, flags);
- if (unlikely(!(atomic_read(&ubc->running)))) {
+ if (unlikely(!(ubc->running))) {
gig_dbg(DEBUG_ISO,
"%s: channel not running, "
"dropped URB with status: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
return;
}
- switch (urb->status) {
+ switch (status) {
case 0: /* normal completion */
break;
case -EXDEV: /* inspect individual frames
@@ -1276,7 +1313,7 @@ static void read_iso_tasklet(unsigned long data)
case -ECONNRESET:
case -EINPROGRESS:
gig_dbg(DEBUG_ISO, "%s: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
continue; /* -> skip */
case -EPIPE:
dev_err(cs->dev, "isochronous read stalled\n");
@@ -1284,7 +1321,7 @@ static void read_iso_tasklet(unsigned long data)
continue; /* -> skip */
default: /* severe trouble */
dev_warn(cs->dev, "isochronous read: %s\n",
- get_usb_statmsg(urb->status));
+ get_usb_statmsg(status));
goto error;
}
@@ -1406,6 +1443,8 @@ static void req_timeout(unsigned long data)
dev_warn(bcs->cs->dev, "request 0x%02x timed out, clearing\n",
pending);
}
+
+ wake_up(&ucs->waitqueue);
}
/* write_ctrl_callback
@@ -1418,11 +1457,12 @@ static void req_timeout(unsigned long data)
static void write_ctrl_callback(struct urb *urb)
{
struct bas_cardstate *ucs = urb->context;
+ int status = urb->status;
int rc;
unsigned long flags;
/* check status */
- switch (urb->status) {
+ switch (status) {
case 0: /* normal completion */
spin_lock_irqsave(&ucs->lock, flags);
switch (ucs->pending) {
@@ -1441,20 +1481,22 @@ static void write_ctrl_callback(struct urb *urb)
case -ESHUTDOWN: /* device shut down */
/* ignore silently */
gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
break;
default: /* any failure */
- if (++ucs->retry_ctrl > BAS_RETRY) {
+ /* don't retry if suspend requested */
+ if (++ucs->retry_ctrl > BAS_RETRY ||
+ (ucs->basstate & BS_SUSPEND)) {
dev_err(&ucs->interface->dev,
"control request 0x%02x failed: %s\n",
ucs->dr_ctrl.bRequest,
- get_usb_statmsg(urb->status));
+ get_usb_statmsg(status));
break; /* give up */
}
dev_notice(&ucs->interface->dev,
"control request 0x%02x: %s, retry %d\n",
- ucs->dr_ctrl.bRequest, get_usb_statmsg(urb->status),
+ ucs->dr_ctrl.bRequest, get_usb_statmsg(status),
ucs->retry_ctrl);
/* urb->dev is clobbered by USB subsystem */
urb->dev = ucs->udev;
@@ -1474,6 +1516,7 @@ static void write_ctrl_callback(struct urb *urb)
del_timer(&ucs->timer_ctrl);
ucs->pending = 0;
spin_unlock_irqrestore(&ucs->lock, flags);
+ wake_up(&ucs->waitqueue);
}
/* req_submit
@@ -1548,37 +1591,46 @@ static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
*/
static int gigaset_init_bchannel(struct bc_state *bcs)
{
+ struct cardstate *cs = bcs->cs;
int req, ret;
unsigned long flags;
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (unlikely(!bcs->cs->connected)) {
+ spin_lock_irqsave(&cs->lock, flags);
+ if (unlikely(!cs->connected)) {
gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ spin_unlock_irqrestore(&cs->lock, flags);
return -ENODEV;
}
+ if (cs->hw.bas->basstate & BS_SUSPEND) {
+ dev_notice(cs->dev,
+ "not starting isochronous I/O, "
+ "suspend in progress\n");
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return -EHOSTUNREACH;
+ }
+
if ((ret = starturbs(bcs)) < 0) {
- dev_err(bcs->cs->dev,
+ dev_err(cs->dev,
"could not start isochronous I/O for channel B%d: %s\n",
bcs->channel + 1,
ret == -EFAULT ? "null URB" : get_usb_rcmsg(ret));
if (ret != -ENODEV)
error_hangup(bcs);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ spin_unlock_irqrestore(&cs->lock, flags);
return ret;
}
req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL;
if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) {
- dev_err(bcs->cs->dev, "could not open channel B%d\n",
+ dev_err(cs->dev, "could not open channel B%d\n",
bcs->channel + 1);
stopurbs(bcs->hw.bas);
if (ret != -ENODEV)
error_hangup(bcs);
}
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ spin_unlock_irqrestore(&cs->lock, flags);
return ret;
}
@@ -1594,20 +1646,20 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
*/
static int gigaset_close_bchannel(struct bc_state *bcs)
{
+ struct cardstate *cs = bcs->cs;
int req, ret;
unsigned long flags;
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (unlikely(!bcs->cs->connected)) {
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ spin_lock_irqsave(&cs->lock, flags);
+ if (unlikely(!cs->connected)) {
+ spin_unlock_irqrestore(&cs->lock, flags);
gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
return -ENODEV;
}
- if (!(atomic_read(&bcs->cs->hw.bas->basstate) &
- (bcs->channel ? BS_B2OPEN : BS_B1OPEN))) {
+ if (!(cs->hw.bas->basstate & (bcs->channel ? BS_B2OPEN : BS_B1OPEN))) {
/* channel not running: just signal common.c */
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ spin_unlock_irqrestore(&cs->lock, flags);
gigaset_bchannel_down(bcs);
return 0;
}
@@ -1615,10 +1667,10 @@ static int gigaset_close_bchannel(struct bc_state *bcs)
/* channel running: tell device to close it */
req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL;
if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0)
- dev_err(bcs->cs->dev, "closing channel B%d failed\n",
+ dev_err(cs->dev, "closing channel B%d failed\n",
bcs->channel + 1);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ spin_unlock_irqrestore(&cs->lock, flags);
return ret;
}
@@ -1665,12 +1717,14 @@ static void write_command_callback(struct urb *urb)
{
struct cardstate *cs = urb->context;
struct bas_cardstate *ucs = cs->hw.bas;
+ int status = urb->status;
unsigned long flags;
update_basstate(ucs, 0, BS_ATWRPEND);
+ wake_up(&ucs->waitqueue);
/* check status */
- switch (urb->status) {
+ switch (status) {
case 0: /* normal completion */
break;
case -ENOENT: /* cancelled */
@@ -1680,26 +1734,33 @@ static void write_command_callback(struct urb *urb)
case -ESHUTDOWN: /* device shut down */
/* ignore silently */
gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
return;
default: /* any failure */
if (++ucs->retry_cmd_out > BAS_RETRY) {
dev_warn(cs->dev,
"command write: %s, "
"giving up after %d retries\n",
- get_usb_statmsg(urb->status),
+ get_usb_statmsg(status),
ucs->retry_cmd_out);
break;
}
+ if (ucs->basstate & BS_SUSPEND) {
+ dev_warn(cs->dev,
+ "command write: %s, "
+ "won't retry - suspend requested\n",
+ get_usb_statmsg(status));
+ break;
+ }
if (cs->cmdbuf == NULL) {
dev_warn(cs->dev,
"command write: %s, "
"cannot retry - cmdbuf gone\n",
- get_usb_statmsg(urb->status));
+ get_usb_statmsg(status));
break;
}
dev_notice(cs->dev, "command write: %s, retry %d\n",
- get_usb_statmsg(urb->status), ucs->retry_cmd_out);
+ get_usb_statmsg(status), ucs->retry_cmd_out);
if (atwrite_submit(cs, cs->cmdbuf->buf, cs->cmdbuf->len) >= 0)
/* resubmitted - bypass regular exit block */
return;
@@ -1799,8 +1860,14 @@ static int start_cbsend(struct cardstate *cs)
int rc;
int retval = 0;
+ /* check if suspend requested */
+ if (ucs->basstate & BS_SUSPEND) {
+ gig_dbg(DEBUG_TRANSCMD|DEBUG_LOCKCMD, "suspending");
+ return -EHOSTUNREACH;
+ }
+
/* check if AT channel is open */
- if (!(atomic_read(&ucs->basstate) & BS_ATOPEN)) {
+ if (!(ucs->basstate & BS_ATOPEN)) {
gig_dbg(DEBUG_TRANSCMD|DEBUG_LOCKCMD, "AT channel not open");
rc = req_submit(cs->bcs, HD_OPEN_ATCHANNEL, 0, BAS_TIMEOUT);
if (rc < 0) {
@@ -1816,8 +1883,7 @@ static int start_cbsend(struct cardstate *cs)
/* try to send first command in queue */
spin_lock_irqsave(&cs->cmdlock, flags);
- while ((cb = cs->cmdbuf) != NULL &&
- atomic_read(&ucs->basstate) & BS_ATREADY) {
+ while ((cb = cs->cmdbuf) != NULL && (ucs->basstate & BS_ATREADY)) {
ucs->retry_cmd_out = 0;
rc = atwrite_submit(cs, cb->buf, cb->len);
if (unlikely(rc)) {
@@ -1855,7 +1921,7 @@ static int gigaset_write_cmd(struct cardstate *cs,
unsigned long flags;
int rc;
- gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ?
+ gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
DEBUG_TRANSCMD : DEBUG_LOCKCMD,
"CMD Transmit", len, buf);
@@ -1970,7 +2036,7 @@ static int gigaset_freebcshw(struct bc_state *bcs)
return 0;
/* kill URBs and tasklets before freeing - better safe than sorry */
- atomic_set(&ubc->running, 0);
+ ubc->running = 0;
gig_dbg(DEBUG_INIT, "%s: killing iso URBs", __func__);
for (i = 0; i < BAS_OUTURBS; ++i) {
usb_kill_urb(ubc->isoouturbs[i].urb);
@@ -2005,7 +2071,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
return 0;
}
- atomic_set(&ubc->running, 0);
+ ubc->running = 0;
atomic_set(&ubc->corrbytes, 0);
spin_lock_init(&ubc->isooutlock);
for (i = 0; i < BAS_OUTURBS; ++i) {
@@ -2050,7 +2116,7 @@ static void gigaset_reinitbcshw(struct bc_state *bcs)
{
struct bas_bc_state *ubc = bcs->hw.bas;
- atomic_set(&bcs->hw.bas->running, 0);
+ bcs->hw.bas->running = 0;
atomic_set(&bcs->hw.bas->corrbytes, 0);
bcs->hw.bas->numsub = 0;
spin_lock_init(&ubc->isooutlock);
@@ -2081,10 +2147,11 @@ static int gigaset_initcshw(struct cardstate *cs)
spin_lock_init(&ucs->lock);
ucs->pending = 0;
- atomic_set(&ucs->basstate, 0);
+ ucs->basstate = 0;
init_timer(&ucs->timer_ctrl);
init_timer(&ucs->timer_atrdy);
init_timer(&ucs->timer_cmd_in);
+ init_waitqueue_head(&ucs->waitqueue);
return 1;
}
@@ -2102,7 +2169,7 @@ static void freeurbs(struct cardstate *cs)
int i, j;
gig_dbg(DEBUG_INIT, "%s: killing URBs", __func__);
- for (j = 0; j < 2; ++j) {
+ for (j = 0; j < BAS_CHANNELS; ++j) {
ubc = cs->bcs[j].hw.bas;
for (i = 0; i < BAS_OUTURBS; ++i) {
usb_kill_urb(ubc->isoouturbs[i].urb);
@@ -2179,11 +2246,11 @@ static int gigaset_probe(struct usb_interface *interface,
__func__, le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
- cs = gigaset_getunassignedcs(driver);
- if (!cs) {
- dev_err(&udev->dev, "no free cardstate\n");
+ /* allocate memory for our device state and initialize it */
+ cs = gigaset_initcs(driver, BAS_CHANNELS, 0, 0, cidmode,
+ GIGASET_MODULENAME);
+ if (!cs)
return -ENODEV;
- }
ucs = cs->hw.bas;
/* save off device structure ptrs for later use */
@@ -2203,7 +2270,7 @@ static int gigaset_probe(struct usb_interface *interface,
!(ucs->urb_ctrl = usb_alloc_urb(0, GFP_KERNEL)))
goto allocerr;
- for (j = 0; j < 2; ++j) {
+ for (j = 0; j < BAS_CHANNELS; ++j) {
ubc = cs->bcs[j].hw.bas;
for (i = 0; i < BAS_OUTURBS; ++i)
if (!(ubc->isoouturbs[i].urb =
@@ -2237,7 +2304,7 @@ static int gigaset_probe(struct usb_interface *interface,
/* tell common part that the device is ready */
if (startmode == SM_LOCKED)
- atomic_set(&cs->mstate, MS_LOCKED);
+ cs->mstate = MS_LOCKED;
/* save address of controller structure */
usb_set_intfdata(interface, cs);
@@ -2252,7 +2319,7 @@ allocerr:
error:
freeurbs(cs);
usb_set_intfdata(interface, NULL);
- gigaset_unassign(cs);
+ gigaset_freecs(cs);
return -ENODEV;
}
@@ -2272,11 +2339,10 @@ static void gigaset_disconnect(struct usb_interface *interface)
dev_info(cs->dev, "disconnecting Gigaset base\n");
/* mark base as not ready, all channels disconnected */
- atomic_set(&ucs->basstate, 0);
+ ucs->basstate = 0;
/* tell LL all channels are down */
- //FIXME shouldn't gigaset_stop() do this?
- for (j = 0; j < 2; ++j)
+ for (j = 0; j < BAS_CHANNELS; ++j)
gigaset_bchannel_down(cs->bcs + j);
/* stop driver (common part) */
@@ -2295,9 +2361,113 @@ static void gigaset_disconnect(struct usb_interface *interface)
ucs->interface = NULL;
ucs->udev = NULL;
cs->dev = NULL;
- gigaset_unassign(cs);
+ gigaset_freecs(cs);
}
+/* gigaset_suspend
+ * This function is called before the USB connection is suspended.
+ */
+static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct cardstate *cs = usb_get_intfdata(intf);
+ struct bas_cardstate *ucs = cs->hw.bas;
+ int rc;
+
+ /* set suspend flag; this stops AT command/response traffic */
+ if (update_basstate(ucs, BS_SUSPEND, 0) & BS_SUSPEND) {
+ gig_dbg(DEBUG_SUSPEND, "already suspended");
+ return 0;
+ }
+
+ /* wait a bit for blocking conditions to go away */
+ rc = wait_event_timeout(ucs->waitqueue,
+ !(ucs->basstate &
+ (BS_B1OPEN|BS_B2OPEN|BS_ATRDPEND|BS_ATWRPEND)),
+ BAS_TIMEOUT*HZ/10);
+ gig_dbg(DEBUG_SUSPEND, "wait_event_timeout() -> %d", rc);
+
+ /* check for conditions preventing suspend */
+ if (ucs->basstate & (BS_B1OPEN|BS_B2OPEN|BS_ATRDPEND|BS_ATWRPEND)) {
+ dev_warn(cs->dev, "cannot suspend:\n");
+ if (ucs->basstate & BS_B1OPEN)
+ dev_warn(cs->dev, " B channel 1 open\n");
+ if (ucs->basstate & BS_B2OPEN)
+ dev_warn(cs->dev, " B channel 2 open\n");
+ if (ucs->basstate & BS_ATRDPEND)
+ dev_warn(cs->dev, " receiving AT reply\n");
+ if (ucs->basstate & BS_ATWRPEND)
+ dev_warn(cs->dev, " sending AT command\n");
+ update_basstate(ucs, 0, BS_SUSPEND);
+ return -EBUSY;
+ }
+
+ /* close AT channel if open */
+ if (ucs->basstate & BS_ATOPEN) {
+ gig_dbg(DEBUG_SUSPEND, "closing AT channel");
+ rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, 0);
+ if (rc) {
+ update_basstate(ucs, 0, BS_SUSPEND);
+ return rc;
+ }
+ wait_event_timeout(ucs->waitqueue, !ucs->pending,
+ BAS_TIMEOUT*HZ/10);
+ /* in case of timeout, proceed anyway */
+ }
+
+ /* kill all URBs and timers that might still be pending */
+ usb_kill_urb(ucs->urb_ctrl);
+ usb_kill_urb(ucs->urb_int_in);
+ del_timer_sync(&ucs->timer_ctrl);
+
+ gig_dbg(DEBUG_SUSPEND, "suspend complete");
+ return 0;
+}
+
+/* gigaset_resume
+ * This function is called after the USB connection has been resumed.
+ */
+static int gigaset_resume(struct usb_interface *intf)
+{
+ struct cardstate *cs = usb_get_intfdata(intf);
+ struct bas_cardstate *ucs = cs->hw.bas;
+ int rc;
+
+ /* resubmit interrupt URB for spontaneous messages from base */
+ rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
+ if (rc) {
+ dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
+ get_usb_rcmsg(rc));
+ return rc;
+ }
+
+ /* clear suspend flag to reallow activity */
+ update_basstate(ucs, 0, BS_SUSPEND);
+
+ gig_dbg(DEBUG_SUSPEND, "resume complete");
+ return 0;
+}
+
+/* gigaset_pre_reset
+ * This function is called before the USB connection is reset.
+ */
+static int gigaset_pre_reset(struct usb_interface *intf)
+{
+ /* handle just like suspend */
+ return gigaset_suspend(intf, PMSG_ON);
+}
+
+/* gigaset_post_reset
+ * This function is called after the USB connection has been reset.
+ */
+static int gigaset_post_reset(struct usb_interface *intf)
+{
+ /* FIXME: send HD_DEVICE_INIT_ACK? */
+
+ /* resume operations */
+ return gigaset_resume(intf);
+}
+
+
static const struct gigaset_ops gigops = {
gigaset_write_cmd,
gigaset_write_room,
@@ -2330,12 +2500,6 @@ static int __init bas_gigaset_init(void)
&gigops, THIS_MODULE)) == NULL)
goto error;
- /* allocate memory for our device state and intialize it */
- cardstate = gigaset_initcs(driver, 2, 0, 0, cidmode,
- GIGASET_MODULENAME);
- if (!cardstate)
- goto error;
-
/* register this driver with the USB subsystem */
result = usb_register(&gigaset_usb_driver);
if (result < 0) {
@@ -2347,9 +2511,7 @@ static int __init bas_gigaset_init(void)
info(DRIVER_DESC);
return 0;
-error: if (cardstate)
- gigaset_freecs(cardstate);
- cardstate = NULL;
+error:
if (driver)
gigaset_freedriver(driver);
driver = NULL;
@@ -2361,43 +2523,50 @@ error: if (cardstate)
*/
static void __exit bas_gigaset_exit(void)
{
- struct bas_cardstate *ucs = cardstate->hw.bas;
+ struct bas_cardstate *ucs;
+ int i;
gigaset_blockdriver(driver); /* => probe will fail
* => no gigaset_start any more
*/
- gigaset_shutdown(cardstate);
- /* from now on, no isdn callback should be possible */
-
- /* close all still open channels */
- if (atomic_read(&ucs->basstate) & BS_B1OPEN) {
- gig_dbg(DEBUG_INIT, "closing B1 channel");
- usb_control_msg(ucs->udev, usb_sndctrlpipe(ucs->udev, 0),
- HD_CLOSE_B1CHANNEL, OUT_VENDOR_REQ, 0, 0,
- NULL, 0, BAS_TIMEOUT);
- }
- if (atomic_read(&ucs->basstate) & BS_B2OPEN) {
- gig_dbg(DEBUG_INIT, "closing B2 channel");
- usb_control_msg(ucs->udev, usb_sndctrlpipe(ucs->udev, 0),
- HD_CLOSE_B2CHANNEL, OUT_VENDOR_REQ, 0, 0,
- NULL, 0, BAS_TIMEOUT);
- }
- if (atomic_read(&ucs->basstate) & BS_ATOPEN) {
- gig_dbg(DEBUG_INIT, "closing AT channel");
- usb_control_msg(ucs->udev, usb_sndctrlpipe(ucs->udev, 0),
- HD_CLOSE_ATCHANNEL, OUT_VENDOR_REQ, 0, 0,
- NULL, 0, BAS_TIMEOUT);
+ /* stop all connected devices */
+ for (i = 0; i < driver->minors; i++) {
+ if (gigaset_shutdown(driver->cs + i) < 0)
+ continue; /* no device */
+ /* from now on, no isdn callback should be possible */
+
+ /* close all still open channels */
+ ucs = driver->cs[i].hw.bas;
+ if (ucs->basstate & BS_B1OPEN) {
+ gig_dbg(DEBUG_INIT, "closing B1 channel");
+ usb_control_msg(ucs->udev,
+ usb_sndctrlpipe(ucs->udev, 0),
+ HD_CLOSE_B1CHANNEL, OUT_VENDOR_REQ,
+ 0, 0, NULL, 0, BAS_TIMEOUT);
+ }
+ if (ucs->basstate & BS_B2OPEN) {
+ gig_dbg(DEBUG_INIT, "closing B2 channel");
+ usb_control_msg(ucs->udev,
+ usb_sndctrlpipe(ucs->udev, 0),
+ HD_CLOSE_B2CHANNEL, OUT_VENDOR_REQ,
+ 0, 0, NULL, 0, BAS_TIMEOUT);
+ }
+ if (ucs->basstate & BS_ATOPEN) {
+ gig_dbg(DEBUG_INIT, "closing AT channel");
+ usb_control_msg(ucs->udev,
+ usb_sndctrlpipe(ucs->udev, 0),
+ HD_CLOSE_ATCHANNEL, OUT_VENDOR_REQ,
+ 0, 0, NULL, 0, BAS_TIMEOUT);
+ }
+ ucs->basstate = 0;
}
- atomic_set(&ucs->basstate, 0);
/* deregister this driver with the USB subsystem */
usb_deregister(&gigaset_usb_driver);
/* this will call the disconnect-callback */
/* from now on, no disconnect/probe callback should be running */
- gigaset_freecs(cardstate);
- cardstate = NULL;
gigaset_freedriver(driver);
driver = NULL;
}
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index acd417197d03..aacedec4986f 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -31,7 +31,6 @@ MODULE_PARM_DESC(debug, "debug level");
/* driver state flags */
#define VALID_MINOR 0x01
#define VALID_ID 0x02
-#define ASSIGNED 0x04
void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
size_t len, const unsigned char *buf)
@@ -178,7 +177,7 @@ int gigaset_get_channel(struct bc_state *bcs)
unsigned long flags;
spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->use_count) {
+ if (bcs->use_count || !try_module_get(bcs->cs->driver->owner)) {
gig_dbg(DEBUG_ANY, "could not allocate channel %d",
bcs->channel);
spin_unlock_irqrestore(&bcs->cs->lock, flags);
@@ -203,6 +202,7 @@ void gigaset_free_channel(struct bc_state *bcs)
}
--bcs->use_count;
bcs->busy = 0;
+ module_put(bcs->cs->driver->owner);
gig_dbg(DEBUG_ANY, "freed channel %d", bcs->channel);
spin_unlock_irqrestore(&bcs->cs->lock, flags);
}
@@ -356,31 +356,28 @@ static struct cardstate *alloc_cs(struct gigaset_driver *drv)
{
unsigned long flags;
unsigned i;
+ struct cardstate *cs;
struct cardstate *ret = NULL;
spin_lock_irqsave(&drv->lock, flags);
+ if (drv->blocked)
+ goto exit;
for (i = 0; i < drv->minors; ++i) {
- if (!(drv->flags[i] & VALID_MINOR)) {
- if (try_module_get(drv->owner)) {
- drv->flags[i] = VALID_MINOR;
- ret = drv->cs + i;
- }
+ cs = drv->cs + i;
+ if (!(cs->flags & VALID_MINOR)) {
+ cs->flags = VALID_MINOR;
+ ret = cs;
break;
}
}
+exit:
spin_unlock_irqrestore(&drv->lock, flags);
return ret;
}
static void free_cs(struct cardstate *cs)
{
- unsigned long flags;
- struct gigaset_driver *drv = cs->driver;
- spin_lock_irqsave(&drv->lock, flags);
- if (drv->flags[cs->minor_index] & VALID_MINOR)
- module_put(drv->owner);
- drv->flags[cs->minor_index] = 0;
- spin_unlock_irqrestore(&drv->lock, flags);
+ cs->flags = 0;
}
static void make_valid(struct cardstate *cs, unsigned mask)
@@ -388,7 +385,7 @@ static void make_valid(struct cardstate *cs, unsigned mask)
unsigned long flags;
struct gigaset_driver *drv = cs->driver;
spin_lock_irqsave(&drv->lock, flags);
- drv->flags[cs->minor_index] |= mask;
+ cs->flags |= mask;
spin_unlock_irqrestore(&drv->lock, flags);
}
@@ -397,7 +394,7 @@ static void make_invalid(struct cardstate *cs, unsigned mask)
unsigned long flags;
struct gigaset_driver *drv = cs->driver;
spin_lock_irqsave(&drv->lock, flags);
- drv->flags[cs->minor_index] &= ~mask;
+ cs->flags &= ~mask;
spin_unlock_irqrestore(&drv->lock, flags);
}
@@ -501,11 +498,11 @@ static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs,
struct cardstate *cs, int inputstate)
/* inbuf->read must be allocated before! */
{
- atomic_set(&inbuf->head, 0);
- atomic_set(&inbuf->tail, 0);
+ inbuf->head = 0;
+ inbuf->tail = 0;
inbuf->cs = cs;
inbuf->bcs = bcs; /*base driver: NULL*/
- inbuf->rcvbuf = NULL; //FIXME
+ inbuf->rcvbuf = NULL;
inbuf->inputstate = inputstate;
}
@@ -521,8 +518,8 @@ int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
return 0;
bytesleft = numbytes;
- tail = atomic_read(&inbuf->tail);
- head = atomic_read(&inbuf->head);
+ tail = inbuf->tail;
+ head = inbuf->head;
gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
while (bytesleft) {
@@ -546,7 +543,7 @@ int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
src += n;
}
gig_dbg(DEBUG_INTR, "setting tail to %u", tail);
- atomic_set(&inbuf->tail, tail);
+ inbuf->tail = tail;
return numbytes != bytesleft;
}
EXPORT_SYMBOL_GPL(gigaset_fill_inbuf);
@@ -668,7 +665,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
tasklet_init(&cs->event_tasklet, &gigaset_handle_event,
(unsigned long) cs);
- atomic_set(&cs->commands_pending, 0);
+ cs->commands_pending = 0;
cs->cur_at_seq = 0;
cs->gotfwver = -1;
cs->open_count = 0;
@@ -688,8 +685,8 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
init_waitqueue_head(&cs->waitqueue);
cs->waiting = 0;
- atomic_set(&cs->mode, M_UNKNOWN);
- atomic_set(&cs->mstate, MS_UNINITIALIZED);
+ cs->mode = M_UNKNOWN;
+ cs->mstate = MS_UNINITIALIZED;
for (i = 0; i < channels; ++i) {
gig_dbg(DEBUG_INIT, "setting up bcs[%d].read", i);
@@ -806,8 +803,8 @@ static void cleanup_cs(struct cardstate *cs)
spin_lock_irqsave(&cs->lock, flags);
- atomic_set(&cs->mode, M_UNKNOWN);
- atomic_set(&cs->mstate, MS_UNINITIALIZED);
+ cs->mode = M_UNKNOWN;
+ cs->mstate = MS_UNINITIALIZED;
clear_at_state(&cs->at_state);
dealloc_at_states(cs);
@@ -817,8 +814,8 @@ static void cleanup_cs(struct cardstate *cs)
kfree(cs->inbuf->rcvbuf);
cs->inbuf->rcvbuf = NULL;
cs->inbuf->inputstate = INS_command;
- atomic_set(&cs->inbuf->head, 0);
- atomic_set(&cs->inbuf->tail, 0);
+ cs->inbuf->head = 0;
+ cs->inbuf->tail = 0;
cb = cs->cmdbuf;
while (cb) {
@@ -832,7 +829,7 @@ static void cleanup_cs(struct cardstate *cs)
cs->gotfwver = -1;
cs->dle = 0;
cs->cur_at_seq = 0;
- atomic_set(&cs->commands_pending, 0);
+ cs->commands_pending = 0;
cs->cbytes = 0;
spin_unlock_irqrestore(&cs->lock, flags);
@@ -862,7 +859,7 @@ int gigaset_start(struct cardstate *cs)
cs->connected = 1;
spin_unlock_irqrestore(&cs->lock, flags);
- if (atomic_read(&cs->mstate) != MS_LOCKED) {
+ if (cs->mstate != MS_LOCKED) {
cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR|TIOCM_RTS);
cs->ops->baud_rate(cs, B115200);
cs->ops->set_line_ctrl(cs, CS8);
@@ -893,10 +890,17 @@ error:
}
EXPORT_SYMBOL_GPL(gigaset_start);
-void gigaset_shutdown(struct cardstate *cs)
+/* gigaset_shutdown
+ * check if a device is associated with the cardstate structure and stop it
+ * return value: 0 if ok, -1 if no device was associated
+ */
+int gigaset_shutdown(struct cardstate *cs)
{
mutex_lock(&cs->mutex);
+ if (!(cs->flags & VALID_MINOR))
+ return -1;
+
cs->waiting = 1;
if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) {
@@ -913,6 +917,7 @@ void gigaset_shutdown(struct cardstate *cs)
exit:
mutex_unlock(&cs->mutex);
+ return 0;
}
EXPORT_SYMBOL_GPL(gigaset_shutdown);
@@ -954,13 +959,11 @@ struct cardstate *gigaset_get_cs_by_id(int id)
list_for_each_entry(drv, &drivers, list) {
spin_lock(&drv->lock);
for (i = 0; i < drv->minors; ++i) {
- if (drv->flags[i] & VALID_ID) {
- cs = drv->cs + i;
- if (cs->myid == id)
- ret = cs;
- }
- if (ret)
+ cs = drv->cs + i;
+ if ((cs->flags & VALID_ID) && cs->myid == id) {
+ ret = cs;
break;
+ }
}
spin_unlock(&drv->lock);
if (ret)
@@ -983,10 +986,9 @@ void gigaset_debugdrivers(void)
spin_lock(&drv->lock);
for (i = 0; i < drv->minors; ++i) {
gig_dbg(DEBUG_DRIVER, " index %u", i);
- gig_dbg(DEBUG_DRIVER, " flags 0x%02x",
- drv->flags[i]);
cs = drv->cs + i;
gig_dbg(DEBUG_DRIVER, " cardstate %p", cs);
+ gig_dbg(DEBUG_DRIVER, " flags 0x%02x", cs->flags);
gig_dbg(DEBUG_DRIVER, " minor_index %u",
cs->minor_index);
gig_dbg(DEBUG_DRIVER, " driver %p", cs->driver);
@@ -1010,7 +1012,7 @@ static struct cardstate *gigaset_get_cs_by_minor(unsigned minor)
continue;
index = minor - drv->minor;
spin_lock(&drv->lock);
- if (drv->flags[index] & VALID_MINOR)
+ if (drv->cs[index].flags & VALID_MINOR)
ret = drv->cs + index;
spin_unlock(&drv->lock);
if (ret)
@@ -1038,7 +1040,6 @@ void gigaset_freedriver(struct gigaset_driver *drv)
gigaset_if_freedriver(drv);
kfree(drv->cs);
- kfree(drv->flags);
kfree(drv);
}
EXPORT_SYMBOL_GPL(gigaset_freedriver);
@@ -1080,12 +1081,8 @@ struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
if (!drv->cs)
goto error;
- drv->flags = kmalloc(minors * sizeof *drv->flags, GFP_KERNEL);
- if (!drv->flags)
- goto error;
-
for (i = 0; i < minors; ++i) {
- drv->flags[i] = 0;
+ drv->cs[i].flags = 0;
drv->cs[i].driver = drv;
drv->cs[i].ops = drv->ops;
drv->cs[i].minor_index = i;
@@ -1106,53 +1103,9 @@ error:
}
EXPORT_SYMBOL_GPL(gigaset_initdriver);
-/* For drivers without fixed assignment device<->cardstate (usb) */
-struct cardstate *gigaset_getunassignedcs(struct gigaset_driver *drv)
-{
- unsigned long flags;
- struct cardstate *cs = NULL;
- unsigned i;
-
- spin_lock_irqsave(&drv->lock, flags);
- if (drv->blocked)
- goto exit;
- for (i = 0; i < drv->minors; ++i) {
- if ((drv->flags[i] & VALID_MINOR) &&
- !(drv->flags[i] & ASSIGNED)) {
- drv->flags[i] |= ASSIGNED;
- cs = drv->cs + i;
- break;
- }
- }
-exit:
- spin_unlock_irqrestore(&drv->lock, flags);
- return cs;
-}
-EXPORT_SYMBOL_GPL(gigaset_getunassignedcs);
-
-void gigaset_unassign(struct cardstate *cs)
-{
- unsigned long flags;
- unsigned *minor_flags;
- struct gigaset_driver *drv;
-
- if (!cs)
- return;
- drv = cs->driver;
- spin_lock_irqsave(&drv->lock, flags);
- minor_flags = drv->flags + cs->minor_index;
- if (*minor_flags & VALID_MINOR)
- *minor_flags &= ~ASSIGNED;
- spin_unlock_irqrestore(&drv->lock, flags);
-}
-EXPORT_SYMBOL_GPL(gigaset_unassign);
-
void gigaset_blockdriver(struct gigaset_driver *drv)
{
- unsigned long flags;
- spin_lock_irqsave(&drv->lock, flags);
drv->blocked = 1;
- spin_unlock_irqrestore(&drv->lock, flags);
}
EXPORT_SYMBOL_GPL(gigaset_blockdriver);
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index cec1ef342fcc..5cbf64d850ee 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -735,7 +735,7 @@ static void disconnect(struct at_state_t **at_state_p)
/* revert to selected idle mode */
if (!cs->cidmode) {
cs->at_state.pending_commands |= PC_UMMODE;
- atomic_set(&cs->commands_pending, 1); //FIXME
+ cs->commands_pending = 1;
gig_dbg(DEBUG_CMD, "Scheduling PC_UMMODE");
}
spin_unlock_irqrestore(&cs->lock, flags);
@@ -793,15 +793,15 @@ static void init_failed(struct cardstate *cs, int mode)
struct at_state_t *at_state;
cs->at_state.pending_commands &= ~PC_INIT;
- atomic_set(&cs->mode, mode);
- atomic_set(&cs->mstate, MS_UNINITIALIZED);
+ cs->mode = mode;
+ cs->mstate = MS_UNINITIALIZED;
gigaset_free_channels(cs);
for (i = 0; i < cs->channels; ++i) {
at_state = &cs->bcs[i].at_state;
if (at_state->pending_commands & PC_CID) {
at_state->pending_commands &= ~PC_CID;
at_state->pending_commands |= PC_NOCID;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
}
}
}
@@ -812,11 +812,11 @@ static void schedule_init(struct cardstate *cs, int state)
gig_dbg(DEBUG_CMD, "not scheduling PC_INIT again");
return;
}
- atomic_set(&cs->mstate, state);
- atomic_set(&cs->mode, M_UNKNOWN);
+ cs->mstate = state;
+ cs->mode = M_UNKNOWN;
gigaset_block_channels(cs);
cs->at_state.pending_commands |= PC_INIT;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
gig_dbg(DEBUG_CMD, "Scheduling PC_INIT");
}
@@ -953,13 +953,13 @@ static void start_dial(struct at_state_t *at_state, void *data, unsigned seq_ind
at_state->pending_commands |= PC_CID;
gig_dbg(DEBUG_CMD, "Scheduling PC_CID");
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
return;
error:
at_state->pending_commands |= PC_NOCID;
gig_dbg(DEBUG_CMD, "Scheduling PC_NOCID");
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
return;
}
@@ -973,12 +973,12 @@ static void start_accept(struct at_state_t *at_state)
if (retval == 0) {
at_state->pending_commands |= PC_ACCEPT;
gig_dbg(DEBUG_CMD, "Scheduling PC_ACCEPT");
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
} else {
- //FIXME
+ /* error reset */
at_state->pending_commands |= PC_HUP;
gig_dbg(DEBUG_CMD, "Scheduling PC_HUP");
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
}
}
@@ -986,7 +986,7 @@ static void do_start(struct cardstate *cs)
{
gigaset_free_channels(cs);
- if (atomic_read(&cs->mstate) != MS_LOCKED)
+ if (cs->mstate != MS_LOCKED)
schedule_init(cs, MS_INIT);
cs->isdn_up = 1;
@@ -1000,9 +1000,9 @@ static void do_start(struct cardstate *cs)
static void finish_shutdown(struct cardstate *cs)
{
- if (atomic_read(&cs->mstate) != MS_LOCKED) {
- atomic_set(&cs->mstate, MS_UNINITIALIZED);
- atomic_set(&cs->mode, M_UNKNOWN);
+ if (cs->mstate != MS_LOCKED) {
+ cs->mstate = MS_UNINITIALIZED;
+ cs->mode = M_UNKNOWN;
}
/* Tell the LL that the device is not available .. */
@@ -1022,10 +1022,10 @@ static void do_shutdown(struct cardstate *cs)
{
gigaset_block_channels(cs);
- if (atomic_read(&cs->mstate) == MS_READY) {
- atomic_set(&cs->mstate, MS_SHUTDOWN);
+ if (cs->mstate == MS_READY) {
+ cs->mstate = MS_SHUTDOWN;
cs->at_state.pending_commands |= PC_SHUTDOWN;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
gig_dbg(DEBUG_CMD, "Scheduling PC_SHUTDOWN");
} else
finish_shutdown(cs);
@@ -1120,7 +1120,7 @@ static void handle_icall(struct cardstate *cs, struct bc_state *bcs,
* In fact it doesn't.
*/
at_state->pending_commands |= PC_HUP;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
break;
}
}
@@ -1130,7 +1130,7 @@ static int do_lock(struct cardstate *cs)
int mode;
int i;
- switch (atomic_read(&cs->mstate)) {
+ switch (cs->mstate) {
case MS_UNINITIALIZED:
case MS_READY:
if (cs->cur_at_seq || !list_empty(&cs->temp_at_states) ||
@@ -1152,20 +1152,20 @@ static int do_lock(struct cardstate *cs)
return -EBUSY;
}
- mode = atomic_read(&cs->mode);
- atomic_set(&cs->mstate, MS_LOCKED);
- atomic_set(&cs->mode, M_UNKNOWN);
+ mode = cs->mode;
+ cs->mstate = MS_LOCKED;
+ cs->mode = M_UNKNOWN;
return mode;
}
static int do_unlock(struct cardstate *cs)
{
- if (atomic_read(&cs->mstate) != MS_LOCKED)
+ if (cs->mstate != MS_LOCKED)
return -EINVAL;
- atomic_set(&cs->mstate, MS_UNINITIALIZED);
- atomic_set(&cs->mode, M_UNKNOWN);
+ cs->mstate = MS_UNINITIALIZED;
+ cs->mode = M_UNKNOWN;
gigaset_free_channels(cs);
if (cs->connected)
schedule_init(cs, MS_INIT);
@@ -1198,17 +1198,17 @@ static void do_action(int action, struct cardstate *cs,
case ACT_INIT:
cs->at_state.pending_commands &= ~PC_INIT;
cs->cur_at_seq = SEQ_NONE;
- atomic_set(&cs->mode, M_UNIMODEM);
+ cs->mode = M_UNIMODEM;
spin_lock_irqsave(&cs->lock, flags);
if (!cs->cidmode) {
spin_unlock_irqrestore(&cs->lock, flags);
gigaset_free_channels(cs);
- atomic_set(&cs->mstate, MS_READY);
+ cs->mstate = MS_READY;
break;
}
spin_unlock_irqrestore(&cs->lock, flags);
cs->at_state.pending_commands |= PC_CIDMODE;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
gig_dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
break;
case ACT_FAILINIT:
@@ -1234,22 +1234,20 @@ static void do_action(int action, struct cardstate *cs,
| INS_command;
break;
case ACT_CMODESET:
- if (atomic_read(&cs->mstate) == MS_INIT ||
- atomic_read(&cs->mstate) == MS_RECOVER) {
+ if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
gigaset_free_channels(cs);
- atomic_set(&cs->mstate, MS_READY);
+ cs->mstate = MS_READY;
}
- atomic_set(&cs->mode, M_CID);
+ cs->mode = M_CID;
cs->cur_at_seq = SEQ_NONE;
break;
case ACT_UMODESET:
- atomic_set(&cs->mode, M_UNIMODEM);
+ cs->mode = M_UNIMODEM;
cs->cur_at_seq = SEQ_NONE;
break;
case ACT_FAILCMODE:
cs->cur_at_seq = SEQ_NONE;
- if (atomic_read(&cs->mstate) == MS_INIT ||
- atomic_read(&cs->mstate) == MS_RECOVER) {
+ if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
init_failed(cs, M_UNKNOWN);
break;
}
@@ -1307,7 +1305,7 @@ static void do_action(int action, struct cardstate *cs,
case ACT_CONNECT:
if (cs->onechannel) {
at_state->pending_commands |= PC_DLE1;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
break;
}
bcs->chstate |= CHS_D_UP;
@@ -1333,7 +1331,7 @@ static void do_action(int action, struct cardstate *cs,
* DLE only used for M10x with one B channel.
*/
at_state->pending_commands |= PC_DLE0;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
} else
disconnect(p_at_state);
break;
@@ -1369,7 +1367,7 @@ static void do_action(int action, struct cardstate *cs,
"Could not enter DLE mode. Trying to hang up.\n");
channel = cs->curchannel;
cs->bcs[channel].at_state.pending_commands |= PC_HUP;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
break;
case ACT_CID: /* got cid; start dialing */
@@ -1379,7 +1377,7 @@ static void do_action(int action, struct cardstate *cs,
cs->bcs[channel].at_state.cid = ev->parameter;
cs->bcs[channel].at_state.pending_commands |=
PC_DIAL;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
break;
}
/* fall through */
@@ -1411,14 +1409,14 @@ static void do_action(int action, struct cardstate *cs,
case ACT_ABORTDIAL: /* error/timeout during dial preparation */
cs->cur_at_seq = SEQ_NONE;
at_state->pending_commands |= PC_HUP;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
break;
case ACT_REMOTEREJECT: /* DISCONNECT_IND after dialling */
case ACT_CONNTIMEOUT: /* timeout waiting for ZSAU=ACTIVE */
case ACT_REMOTEHUP: /* DISCONNECT_IND with established connection */
at_state->pending_commands |= PC_HUP;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
break;
case ACT_GETSTRING: /* warning: RING, ZDLE, ...
are not handled properly anymore */
@@ -1515,7 +1513,7 @@ static void do_action(int action, struct cardstate *cs,
break;
case ACT_HUP:
at_state->pending_commands |= PC_HUP;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
gig_dbg(DEBUG_CMD, "Scheduling PC_HUP");
break;
@@ -1558,7 +1556,7 @@ static void do_action(int action, struct cardstate *cs,
cs->at_state.pending_commands |= PC_UMMODE;
gig_dbg(DEBUG_CMD, "Scheduling PC_UMMODE");
}
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
}
spin_unlock_irqrestore(&cs->lock, flags);
cs->waiting = 0;
@@ -1741,7 +1739,7 @@ static void process_command_flags(struct cardstate *cs)
int sequence;
unsigned long flags;
- atomic_set(&cs->commands_pending, 0);
+ cs->commands_pending = 0;
if (cs->cur_at_seq) {
gig_dbg(DEBUG_CMD, "not searching scheduled commands: busy");
@@ -1779,7 +1777,7 @@ static void process_command_flags(struct cardstate *cs)
~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
if (at_state->cid > 0)
at_state->pending_commands |= PC_HUP;
- if (atomic_read(&cs->mstate) == MS_RECOVER) {
+ if (cs->mstate == MS_RECOVER) {
if (at_state->pending_commands & PC_CID) {
at_state->pending_commands |= PC_NOCID;
at_state->pending_commands &= ~PC_CID;
@@ -1793,7 +1791,7 @@ static void process_command_flags(struct cardstate *cs)
if (cs->at_state.pending_commands == PC_UMMODE
&& !cs->cidmode
&& list_empty(&cs->temp_at_states)
- && atomic_read(&cs->mode) == M_CID) {
+ && cs->mode == M_CID) {
sequence = SEQ_UMMODE;
at_state = &cs->at_state;
for (i = 0; i < cs->channels; ++i) {
@@ -1860,7 +1858,7 @@ static void process_command_flags(struct cardstate *cs)
}
if (cs->at_state.pending_commands & PC_CIDMODE) {
cs->at_state.pending_commands &= ~PC_CIDMODE;
- if (atomic_read(&cs->mode) == M_UNIMODEM) {
+ if (cs->mode == M_UNIMODEM) {
cs->retry_count = 1;
schedule_sequence(cs, &cs->at_state, SEQ_CIDMODE);
return;
@@ -1886,11 +1884,11 @@ static void process_command_flags(struct cardstate *cs)
return;
}
if (bcs->at_state.pending_commands & PC_CID) {
- switch (atomic_read(&cs->mode)) {
+ switch (cs->mode) {
case M_UNIMODEM:
cs->at_state.pending_commands |= PC_CIDMODE;
gig_dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
return;
#ifdef GIG_MAYINITONDIAL
case M_UNKNOWN:
@@ -1926,7 +1924,7 @@ static void process_events(struct cardstate *cs)
for (i = 0; i < 2 * MAX_EVENTS; ++i) {
tail = cs->ev_tail;
if (tail == head) {
- if (!check_flags && !atomic_read(&cs->commands_pending))
+ if (!check_flags && !cs->commands_pending)
break;
check_flags = 0;
spin_unlock_irqrestore(&cs->ev_lock, flags);
@@ -1934,7 +1932,7 @@ static void process_events(struct cardstate *cs)
spin_lock_irqsave(&cs->ev_lock, flags);
tail = cs->ev_tail;
if (tail == head) {
- if (!atomic_read(&cs->commands_pending))
+ if (!cs->commands_pending)
break;
continue;
}
@@ -1971,7 +1969,7 @@ void gigaset_handle_event(unsigned long data)
struct cardstate *cs = (struct cardstate *) data;
/* handle incoming data on control/common channel */
- if (atomic_read(&cs->inbuf->head) != atomic_read(&cs->inbuf->tail)) {
+ if (cs->inbuf->head != cs->inbuf->tail) {
gig_dbg(DEBUG_INTR, "processing new data");
cs->ops->handle_input(cs->inbuf);
}
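The atomic_set()/atomic_read() wrappers removed above added no real protection: cs->commands_pending, cs->mode and cs->mstate are only touched under cs->lock or from the single event tasklet, so plain int accesses are sufficient. A minimal sketch of the resulting pattern, using the driver's field names (the helper name schedule_cid_mode is hypothetical):

	/* sketch: flag set under the existing spinlock, consumed by the event tasklet */
	static void schedule_cid_mode(struct cardstate *cs)
	{
		unsigned long flags;

		spin_lock_irqsave(&cs->lock, flags);
		cs->at_state.pending_commands |= PC_CIDMODE;
		cs->commands_pending = 1;	/* plain int: serialized by cs->lock */
		spin_unlock_irqrestore(&cs->lock, flags);

		tasklet_schedule(&cs->event_tasklet);	/* tasklet reads and clears the flag */
	}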
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 02bdaf22d7ea..f365993161fc 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -70,22 +70,13 @@
extern int gigaset_debuglevel; /* "needs" cast to (enum debuglevel) */
-/* any combination of these can be given with the 'debug=' parameter to insmod,
- * e.g. 'insmod usb_gigaset.o debug=0x2c' will set DEBUG_OPEN, DEBUG_CMD and
- * DEBUG_INTR.
- */
+/* debug flags, combine by adding/bitwise OR */
enum debuglevel {
- DEBUG_REG = 0x0002, /* serial port I/O register operations */
- DEBUG_OPEN = 0x0004, /* open/close serial port */
- DEBUG_INTR = 0x0008, /* interrupt processing */
- DEBUG_INTR_DUMP = 0x0010, /* Activating hexdump debug output on
- interrupt requests, not available as
- run-time option */
+ DEBUG_INTR = 0x00008, /* interrupt processing */
DEBUG_CMD = 0x00020, /* sent/received LL commands */
DEBUG_STREAM = 0x00040, /* application data stream I/O events */
DEBUG_STREAM_DUMP = 0x00080, /* application data stream content */
DEBUG_LLDATA = 0x00100, /* sent/received LL data */
- DEBUG_INTR_0 = 0x00200, /* serial port interrupt processing */
DEBUG_DRIVER = 0x00400, /* driver structure */
DEBUG_HDLC = 0x00800, /* M10x HDLC processing */
DEBUG_WRITE = 0x01000, /* M105 data write */
@@ -93,7 +84,7 @@ enum debuglevel {
DEBUG_MCMD = 0x04000, /* COMMANDS THAT ARE SENT VERY OFTEN */
DEBUG_INIT = 0x08000, /* (de)allocation+initialization of data
structures */
- DEBUG_LOCK = 0x10000, /* semaphore operations */
+ DEBUG_SUSPEND = 0x10000, /* suspend/resume processing */
DEBUG_OUTPUT = 0x20000, /* output to device */
DEBUG_ISO = 0x40000, /* isochronous transfers */
DEBUG_IF = 0x80000, /* character device operations */
@@ -191,6 +182,9 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
#define HD_OPEN_ATCHANNEL (0x28) // 3070
#define HD_CLOSE_ATCHANNEL (0x29) // 3070
+/* number of B channels supported by base driver */
+#define BAS_CHANNELS 2
+
/* USB frames for isochronous transfer */
#define BAS_FRAMETIME 1 /* number of milliseconds between frames */
#define BAS_NUMFRAMES 8 /* number of frames per URB */
@@ -313,7 +307,7 @@ struct inbuf_t {
struct bc_state *bcs;
struct cardstate *cs;
int inputstate;
- atomic_t head, tail;
+ int head, tail;
unsigned char data[RBUFSIZE];
};
@@ -335,9 +329,9 @@ struct inbuf_t {
* are also filled with that value
*/
struct isowbuf_t {
- atomic_t read;
- atomic_t nextread;
- atomic_t write;
+ int read;
+ int nextread;
+ int write;
atomic_t writesem;
int wbits;
unsigned char data[BAS_OUTBUFSIZE + BAS_OUTBUFPAD];
@@ -350,11 +344,13 @@ struct isowbuf_t {
* - urb: pointer to the URB itself
* - bcs: pointer to the B Channel control structure
* - limit: end of write buffer area covered by this URB
+ * - status: URB completion status
*/
struct isow_urbctx_t {
struct urb *urb;
struct bc_state *bcs;
int limit;
+ int status;
};
/* AT state structure
@@ -439,14 +435,15 @@ struct cardstate {
unsigned minor_index;
struct device *dev;
struct device *tty_dev;
+ unsigned flags;
const struct gigaset_ops *ops;
/* Stuff to handle communication */
wait_queue_head_t waitqueue;
int waiting;
- atomic_t mode; /* see M_XXXX */
- atomic_t mstate; /* Modem state: see MS_XXXX */
+ int mode; /* see M_XXXX */
+ int mstate; /* Modem state: see MS_XXXX */
/* only changed by the event layer */
int cmd_result;
@@ -503,7 +500,7 @@ struct cardstate {
processed */
int curchannel; /* channel those commands are meant
for */
- atomic_t commands_pending; /* flag(s) in xxx.commands_pending have
+ int commands_pending; /* flag(s) in xxx.commands_pending have
been set */
struct tasklet_struct event_tasklet;
/* tasklet for serializing AT commands.
@@ -543,7 +540,6 @@ struct gigaset_driver {
unsigned minor;
unsigned minors;
struct cardstate *cs;
- unsigned *flags;
int blocked;
const struct gigaset_ops *ops;
@@ -559,7 +555,7 @@ struct cmdbuf_t {
struct bas_bc_state {
/* isochronous output state */
- atomic_t running;
+ int running;
atomic_t corrbytes;
spinlock_t isooutlock;
struct isow_urbctx_t isoouturbs[BAS_OUTURBS];
@@ -574,6 +570,7 @@ struct bas_bc_state {
struct urb *isoinurbs[BAS_INURBS];
unsigned char isoinbuf[BAS_INBUFSIZE * BAS_INURBS];
struct urb *isoindone; /* completed isoc read URB */
+ int isoinstatus; /* status of completed URB */
int loststatus; /* status of dropped URB */
unsigned isoinlost; /* number of bytes lost */
/* state of bit unstuffing algorithm
@@ -770,10 +767,6 @@ void gigaset_freedriver(struct gigaset_driver *drv);
void gigaset_debugdrivers(void);
struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty);
struct cardstate *gigaset_get_cs_by_id(int id);
-
-/* For drivers without fixed assignment device<->cardstate (usb) */
-struct cardstate *gigaset_getunassignedcs(struct gigaset_driver *drv);
-void gigaset_unassign(struct cardstate *cs);
void gigaset_blockdriver(struct gigaset_driver *drv);
/* Allocate and initialize card state. Calls hardware dependent
@@ -792,7 +785,7 @@ int gigaset_start(struct cardstate *cs);
void gigaset_stop(struct cardstate *cs);
/* Tell common.c that the driver is being unloaded. */
-void gigaset_shutdown(struct cardstate *cs);
+int gigaset_shutdown(struct cardstate *cs);
/* Tell common.c that an skb has been sent. */
void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
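The head/tail indices of struct inbuf_t lose their atomic_t type for the same reason: the buffer is single-producer/single-consumer, so each index is written by exactly one context and only read by the other, and reading a slightly stale value is harmless. A rough sketch of the invariant (bytes_avail is a hypothetical helper, not part of the header):

	/* producer advances tail, consumer advances head; each index has one writer */
	static unsigned bytes_avail(const struct inbuf_t *inbuf)
	{
		unsigned head = inbuf->head;	/* written only by the consumer */
		unsigned tail = inbuf->tail;	/* written only by the producer */

		return (tail >= head) ? tail - head
				      : RBUFSIZE - head + tail;
	}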
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index eb50f3dab5f7..af195b07c191 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -28,12 +28,11 @@ static int if_lock(struct cardstate *cs, int *arg)
return -EINVAL;
if (cmd < 0) {
- *arg = atomic_read(&cs->mstate) == MS_LOCKED; //FIXME remove?
+ *arg = cs->mstate == MS_LOCKED;
return 0;
}
- if (!cmd && atomic_read(&cs->mstate) == MS_LOCKED
- && cs->connected) {
+ if (!cmd && cs->mstate == MS_LOCKED && cs->connected) {
cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR|TIOCM_RTS);
cs->ops->baud_rate(cs, B115200);
cs->ops->set_line_ctrl(cs, CS8);
@@ -104,7 +103,7 @@ static int if_config(struct cardstate *cs, int *arg)
if (*arg != 1)
return -EINVAL;
- if (atomic_read(&cs->mstate) != MS_LOCKED)
+ if (cs->mstate != MS_LOCKED)
return -EBUSY;
if (!cs->connected) {
@@ -162,7 +161,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
tty->driver_data = NULL;
cs = gigaset_get_cs_by_tty(tty);
- if (!cs)
+ if (!cs || !try_module_get(cs->driver->owner))
return -ENODEV;
if (mutex_lock_interruptible(&cs->mutex))
@@ -208,6 +207,8 @@ static void if_close(struct tty_struct *tty, struct file *filp)
}
mutex_unlock(&cs->mutex);
+
+ module_put(cs->driver->owner);
}
static int if_ioctl(struct tty_struct *tty, struct file *file,
@@ -364,7 +365,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
if (!cs->open_count)
warn("%s: device not opened", __func__);
- else if (atomic_read(&cs->mstate) != MS_LOCKED) {
+ else if (cs->mstate != MS_LOCKED) {
warn("can't write to unlocked device");
retval = -EBUSY;
} else if (!cs->connected) {
@@ -398,9 +399,9 @@ static int if_write_room(struct tty_struct *tty)
if (!cs->open_count)
warn("%s: device not opened", __func__);
- else if (atomic_read(&cs->mstate) != MS_LOCKED) {
+ else if (cs->mstate != MS_LOCKED) {
warn("can't write to unlocked device");
- retval = -EBUSY; //FIXME
+ retval = -EBUSY;
} else if (!cs->connected) {
gig_dbg(DEBUG_ANY, "can't write to unplugged device");
retval = -EBUSY; //FIXME
@@ -430,7 +431,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
if (!cs->open_count)
warn("%s: device not opened", __func__);
- else if (atomic_read(&cs->mstate) != MS_LOCKED) {
+ else if (cs->mstate != MS_LOCKED) {
warn("can't write to unlocked device");
retval = -EBUSY;
} else if (!cs->connected) {
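The if_open()/if_close() change pins the owning hardware driver module for as long as the tty is open: try_module_get() in open, module_put() in close. A condensed sketch of the pairing, with error handling and locking omitted:

	static int if_open(struct tty_struct *tty, struct file *filp)
	{
		struct cardstate *cs = gigaset_get_cs_by_tty(tty);

		if (!cs || !try_module_get(cs->driver->owner))
			return -ENODEV;		/* driver module is unloading */
		/* ... normal open ... */
		return 0;
	}

	static void if_close(struct tty_struct *tty, struct file *filp)
	{
		struct cardstate *cs = tty->driver_data;

		/* ... normal close ... */
		module_put(cs->driver->owner);	/* balances try_module_get() */
	}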
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index e0505f238807..e30a7773f93c 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -23,9 +23,9 @@
*/
void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle)
{
- atomic_set(&iwb->read, 0);
- atomic_set(&iwb->nextread, 0);
- atomic_set(&iwb->write, 0);
+ iwb->read = 0;
+ iwb->nextread = 0;
+ iwb->write = 0;
atomic_set(&iwb->writesem, 1);
iwb->wbits = 0;
iwb->idle = idle;
@@ -39,8 +39,8 @@ static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
{
int read, write, freebytes;
- read = atomic_read(&iwb->read);
- write = atomic_read(&iwb->write);
+ read = iwb->read;
+ write = iwb->write;
if ((freebytes = read - write) > 0) {
/* no wraparound: need padding space within regular area */
return freebytes - BAS_OUTBUFPAD;
@@ -62,7 +62,7 @@ static inline int isowbuf_poscmp(struct isowbuf_t *iwb, int a, int b)
int read;
if (a == b)
return 0;
- read = atomic_read(&iwb->read);
+ read = iwb->read;
if (a < b) {
if (a < read && read <= b)
return +1;
@@ -91,18 +91,18 @@ static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
#ifdef CONFIG_GIGASET_DEBUG
gig_dbg(DEBUG_ISO,
"%s: acquired iso write semaphore, data[write]=%02x, nbits=%d",
- __func__, iwb->data[atomic_read(&iwb->write)], iwb->wbits);
+ __func__, iwb->data[iwb->write], iwb->wbits);
#endif
return 1;
}
/* finish writing
- * release the write semaphore and update the maximum buffer fill level
+ * release the write semaphore
* returns the current write position
*/
static inline int isowbuf_donewrite(struct isowbuf_t *iwb)
{
- int write = atomic_read(&iwb->write);
+ int write = iwb->write;
atomic_inc(&iwb->writesem);
return write;
}
@@ -116,7 +116,7 @@ static inline int isowbuf_donewrite(struct isowbuf_t *iwb)
*/
static inline void isowbuf_putbits(struct isowbuf_t *iwb, u32 data, int nbits)
{
- int write = atomic_read(&iwb->write);
+ int write = iwb->write;
data <<= iwb->wbits;
data |= iwb->data[write];
nbits += iwb->wbits;
@@ -128,7 +128,7 @@ static inline void isowbuf_putbits(struct isowbuf_t *iwb, u32 data, int nbits)
}
iwb->wbits = nbits;
iwb->data[write] = data & 0xff;
- atomic_set(&iwb->write, write);
+ iwb->write = write;
}
/* put final flag on HDLC bitstream
@@ -142,7 +142,7 @@ static inline void isowbuf_putflag(struct isowbuf_t *iwb)
/* add two flags, thus reliably covering one byte */
isowbuf_putbits(iwb, 0x7e7e, 8);
/* recover the idle flag byte */
- write = atomic_read(&iwb->write);
+ write = iwb->write;
iwb->idle = iwb->data[write];
gig_dbg(DEBUG_ISO, "idle fill byte %02x", iwb->idle);
/* mask extraneous bits in buffer */
@@ -160,8 +160,8 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
int read, write, limit, src, dst;
unsigned char pbyte;
- read = atomic_read(&iwb->nextread);
- write = atomic_read(&iwb->write);
+ read = iwb->nextread;
+ write = iwb->write;
if (likely(read == write)) {
/* return idle frame */
return read < BAS_OUTBUFPAD ?
@@ -176,7 +176,7 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
err("invalid size %d", size);
return -EINVAL;
}
- src = atomic_read(&iwb->read);
+ src = iwb->read;
if (unlikely(limit > BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
(read < src && limit >= src))) {
err("isoc write buffer frame reservation violated");
@@ -191,7 +191,8 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
if (!isowbuf_startwrite(iwb))
return -EBUSY;
/* write position could have changed */
- if (limit >= (write = atomic_read(&iwb->write))) {
+ write = iwb->write;
+ if (limit >= write) {
pbyte = iwb->data[write]; /* save
partial byte */
limit = write + BAS_OUTBUFPAD;
@@ -213,7 +214,7 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
__func__, pbyte, limit);
iwb->data[limit] = pbyte; /* restore
partial byte */
- atomic_set(&iwb->write, limit);
+ iwb->write = limit;
}
isowbuf_donewrite(iwb);
}
@@ -233,7 +234,7 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
limit = src;
}
}
- atomic_set(&iwb->nextread, limit);
+ iwb->nextread = limit;
return read;
}
@@ -477,7 +478,7 @@ static inline int trans_buildframe(struct isowbuf_t *iwb,
unsigned char c;
if (unlikely(count <= 0))
- return atomic_read(&iwb->write); /* better ideas? */
+ return iwb->write;
if (isowbuf_freebytes(iwb) < count ||
!isowbuf_startwrite(iwb)) {
@@ -486,13 +487,13 @@ static inline int trans_buildframe(struct isowbuf_t *iwb,
}
gig_dbg(DEBUG_STREAM, "put %d bytes", count);
- write = atomic_read(&iwb->write);
+ write = iwb->write;
do {
c = bitrev8(*in++);
iwb->data[write++] = c;
write %= BAS_OUTBUFSIZE;
} while (--count > 0);
- atomic_set(&iwb->write, write);
+ iwb->write = write;
iwb->idle = c;
return isowbuf_donewrite(iwb);
@@ -947,8 +948,8 @@ void gigaset_isoc_input(struct inbuf_t *inbuf)
unsigned tail, head, numbytes;
unsigned char *src;
- head = atomic_read(&inbuf->head);
- while (head != (tail = atomic_read(&inbuf->tail))) {
+ head = inbuf->head;
+ while (head != (tail = inbuf->tail)) {
gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
if (head > tail)
tail = RBUFSIZE;
@@ -956,7 +957,7 @@ void gigaset_isoc_input(struct inbuf_t *inbuf)
numbytes = tail - head;
gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
- if (atomic_read(&cs->mstate) == MS_LOCKED) {
+ if (cs->mstate == MS_LOCKED) {
gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response",
numbytes, src);
gigaset_if_receive(inbuf->cs, src, numbytes);
@@ -970,7 +971,7 @@ void gigaset_isoc_input(struct inbuf_t *inbuf)
if (head == RBUFSIZE)
head = 0;
gig_dbg(DEBUG_INTR, "setting head to %u", head);
- atomic_set(&inbuf->head, head);
+ inbuf->head = head;
}
}
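Only iwb->writesem remains an atomic_t: it acts as a try-lock between the tasklet filling the isochronous write buffer and the URB completion draining it, taken in isowbuf_startwrite() and released in isowbuf_donewrite(). The idiom in isolation (a condensed sketch of the existing helpers, debug output dropped):

	/* try-lock built from an atomic counter initialised to 1 */
	static int isowbuf_startwrite(struct isowbuf_t *iwb)
	{
		if (!atomic_dec_and_test(&iwb->writesem)) {
			atomic_inc(&iwb->writesem);	/* lost the race: undo */
			return 0;			/* caller retries later */
		}
		return 1;				/* exclusive write access */
	}

	static int isowbuf_donewrite(struct isowbuf_t *iwb)
	{
		int write = iwb->write;
		atomic_inc(&iwb->writesem);		/* release */
		return write;
	}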
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index ea44302e6e7e..fceeb1d57682 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/poll.h>
+#include <linux/completion.h>
/* Version Information */
#define DRIVER_AUTHOR "Tilman Schmidt"
@@ -48,7 +49,7 @@ struct ser_cardstate {
struct platform_device dev;
struct tty_struct *tty;
atomic_t refcnt;
- struct mutex dead_mutex;
+ struct completion dead_cmp;
};
static struct platform_driver device_driver = {
@@ -240,7 +241,7 @@ static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
struct cmdbuf_t *cb;
unsigned long flags;
- gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ?
+ gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
DEBUG_TRANSCMD : DEBUG_LOCKCMD,
"CMD Transmit", len, buf);
@@ -498,7 +499,7 @@ static struct cardstate *cs_get(struct tty_struct *tty)
static void cs_put(struct cardstate *cs)
{
if (atomic_dec_and_test(&cs->hw.ser->refcnt))
- mutex_unlock(&cs->hw.ser->dead_mutex);
+ complete(&cs->hw.ser->dead_cmp);
}
/*
@@ -527,8 +528,8 @@ gigaset_tty_open(struct tty_struct *tty)
cs->dev = &cs->hw.ser->dev.dev;
cs->hw.ser->tty = tty;
- mutex_init(&cs->hw.ser->dead_mutex);
atomic_set(&cs->hw.ser->refcnt, 1);
+ init_completion(&cs->hw.ser->dead_cmp);
tty->disc_data = cs;
@@ -536,14 +537,13 @@ gigaset_tty_open(struct tty_struct *tty)
* startup system and notify the LL that we are ready to run
*/
if (startmode == SM_LOCKED)
- atomic_set(&cs->mstate, MS_LOCKED);
+ cs->mstate = MS_LOCKED;
if (!gigaset_start(cs)) {
tasklet_kill(&cs->write_tasklet);
goto error;
}
gig_dbg(DEBUG_INIT, "Startup of HLL done");
- mutex_lock(&cs->hw.ser->dead_mutex);
return 0;
error:
@@ -577,7 +577,7 @@ gigaset_tty_close(struct tty_struct *tty)
else {
/* wait for running methods to finish */
if (!atomic_dec_and_test(&cs->hw.ser->refcnt))
- mutex_lock(&cs->hw.ser->dead_mutex);
+ wait_for_completion(&cs->hw.ser->dead_cmp);
}
/* stop operations */
@@ -714,8 +714,8 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
return;
}
- tail = atomic_read(&inbuf->tail);
- head = atomic_read(&inbuf->head);
+ tail = inbuf->tail;
+ head = inbuf->head;
gig_dbg(DEBUG_INTR, "buffer state: %u -> %u, receive %u bytes",
head, tail, count);
@@ -742,7 +742,7 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
}
gig_dbg(DEBUG_INTR, "setting tail to %u", tail);
- atomic_set(&inbuf->tail, tail);
+ inbuf->tail = tail;
/* Everything was received .. Push data into handler */
gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index ca4bee173cfb..77d20ab0cd4d 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -104,12 +104,17 @@ MODULE_DEVICE_TABLE(usb, gigaset_table);
* flags per packet.
*/
+/* functions called when a device of this driver is connected/disconnected */
static int gigaset_probe(struct usb_interface *interface,
const struct usb_device_id *id);
static void gigaset_disconnect(struct usb_interface *interface);
+/* functions called before/after suspend or reset */
+static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
+static int gigaset_resume(struct usb_interface *intf);
+static int gigaset_pre_reset(struct usb_interface *intf);
+
static struct gigaset_driver *driver = NULL;
-static struct cardstate *cardstate = NULL;
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver gigaset_usb_driver = {
@@ -117,12 +122,17 @@ static struct usb_driver gigaset_usb_driver = {
.probe = gigaset_probe,
.disconnect = gigaset_disconnect,
.id_table = gigaset_table,
+ .suspend = gigaset_suspend,
+ .resume = gigaset_resume,
+ .reset_resume = gigaset_resume,
+ .pre_reset = gigaset_pre_reset,
+ .post_reset = gigaset_resume,
};
struct usb_cardstate {
struct usb_device *udev; /* usb device pointer */
struct usb_interface *interface; /* interface for this device */
- atomic_t busy; /* bulk output in progress */
+ int busy; /* bulk output in progress */
/* Output buffer */
unsigned char *bulk_out_buffer;
@@ -314,7 +324,7 @@ static void gigaset_modem_fill(unsigned long data)
gig_dbg(DEBUG_OUTPUT, "modem_fill");
- if (atomic_read(&cs->hw.usb->busy)) {
+ if (cs->hw.usb->busy) {
gig_dbg(DEBUG_OUTPUT, "modem_fill: busy");
return;
}
@@ -361,18 +371,13 @@ static void gigaset_read_int_callback(struct urb *urb)
{
struct inbuf_t *inbuf = urb->context;
struct cardstate *cs = inbuf->cs;
- int resubmit = 0;
+ int status = urb->status;
int r;
unsigned numbytes;
unsigned char *src;
unsigned long flags;
- if (!urb->status) {
- if (!cs->connected) {
- err("%s: disconnected", __func__); /* should never happen */
- return;
- }
-
+ if (!status) {
numbytes = urb->actual_length;
if (numbytes) {
@@ -389,28 +394,26 @@ static void gigaset_read_int_callback(struct urb *urb)
}
} else
gig_dbg(DEBUG_INTR, "Received zero block length");
- resubmit = 1;
} else {
/* The urb might have been killed. */
- gig_dbg(DEBUG_ANY, "%s - nonzero read bulk status received: %d",
- __func__, urb->status);
- if (urb->status != -ENOENT) { /* not killed */
- if (!cs->connected) {
- err("%s: disconnected", __func__); /* should never happen */
- return;
- }
- resubmit = 1;
- }
+ gig_dbg(DEBUG_ANY, "%s - nonzero status received: %d",
+ __func__, status);
+ if (status == -ENOENT || status == -ESHUTDOWN)
+ /* killed or endpoint shutdown: don't resubmit */
+ return;
}
- if (resubmit) {
- spin_lock_irqsave(&cs->lock, flags);
- r = cs->connected ? usb_submit_urb(urb, GFP_ATOMIC) : -ENODEV;
+ /* resubmit URB */
+ spin_lock_irqsave(&cs->lock, flags);
+ if (!cs->connected) {
spin_unlock_irqrestore(&cs->lock, flags);
- if (r)
- dev_err(cs->dev, "error %d when resubmitting urb.\n",
- -r);
+ err("%s: disconnected", __func__);
+ return;
}
+ r = usb_submit_urb(urb, GFP_ATOMIC);
+ spin_unlock_irqrestore(&cs->lock, flags);
+ if (r)
+ dev_err(cs->dev, "error %d resubmitting URB\n", -r);
}
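The reworked read completion handler distinguishes three classes of status: success, fatal (the URB was killed or the endpoint shut down, so resubmitting is pointless), and transient errors that still warrant a resubmit under cs->lock. Condensed as a helper for illustration (the name urb_needs_resubmit is hypothetical, not part of the driver):

	static int urb_needs_resubmit(int status)
	{
		switch (status) {
		case 0:			/* data received, keep polling */
			return 1;
		case -ENOENT:		/* URB killed by usb_kill_urb() */
		case -ESHUTDOWN:	/* endpoint disabled / device gone */
			return 0;
		default:		/* transient error: log and keep polling */
			return 1;
		}
	}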
@@ -418,19 +421,28 @@ static void gigaset_read_int_callback(struct urb *urb)
static void gigaset_write_bulk_callback(struct urb *urb)
{
struct cardstate *cs = urb->context;
+ int status = urb->status;
unsigned long flags;
- if (urb->status)
+ switch (status) {
+ case 0: /* normal completion */
+ break;
+ case -ENOENT: /* killed */
+ gig_dbg(DEBUG_ANY, "%s: killed", __func__);
+ cs->hw.usb->busy = 0;
+ return;
+ default:
dev_err(cs->dev, "bulk transfer failed (status %d)\n",
- -urb->status);
+ -status);
/* That's all we can do. Communication problems
are handled by timeouts or network protocols. */
+ }
spin_lock_irqsave(&cs->lock, flags);
if (!cs->connected) {
err("%s: not connected", __func__);
} else {
- atomic_set(&cs->hw.usb->busy, 0);
+ cs->hw.usb->busy = 0;
tasklet_schedule(&cs->write_tasklet);
}
spin_unlock_irqrestore(&cs->lock, flags);
@@ -478,14 +490,14 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
cb->offset += count;
cb->len -= count;
- atomic_set(&ucs->busy, 1);
+ ucs->busy = 1;
spin_lock_irqsave(&cs->lock, flags);
status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) : -ENODEV;
spin_unlock_irqrestore(&cs->lock, flags);
if (status) {
- atomic_set(&ucs->busy, 0);
+ ucs->busy = 0;
err("could not submit urb (error %d)\n",
-status);
cb->len = 0; /* skip urb => remove cb+wakeup
@@ -504,7 +516,7 @@ static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
struct cmdbuf_t *cb;
unsigned long flags;
- gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ?
+ gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
DEBUG_TRANSCMD : DEBUG_LOCKCMD,
"CMD Transmit", len, buf);
@@ -641,7 +653,7 @@ static int write_modem(struct cardstate *cs)
count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count);
skb_pull(bcs->tx_skb, count);
- atomic_set(&ucs->busy, 1);
+ ucs->busy = 1;
gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count);
spin_lock_irqsave(&cs->lock, flags);
@@ -659,7 +671,7 @@ static int write_modem(struct cardstate *cs)
if (ret) {
err("could not submit urb (error %d)\n", -ret);
- atomic_set(&ucs->busy, 0);
+ ucs->busy = 0;
}
if (!bcs->tx_skb->len) {
@@ -680,53 +692,44 @@ static int gigaset_probe(struct usb_interface *interface,
{
int retval;
struct usb_device *udev = interface_to_usbdev(interface);
- unsigned int ifnum;
- struct usb_host_interface *hostif;
+ struct usb_host_interface *hostif = interface->cur_altsetting;
struct cardstate *cs = NULL;
struct usb_cardstate *ucs = NULL;
struct usb_endpoint_descriptor *endpoint;
int buffer_size;
- int alt;
- gig_dbg(DEBUG_ANY,
- "%s: Check if device matches .. (Vendor: 0x%x, Product: 0x%x)",
- __func__, le16_to_cpu(udev->descriptor.idVendor),
- le16_to_cpu(udev->descriptor.idProduct));
-
- retval = -ENODEV; //FIXME
+ gig_dbg(DEBUG_ANY, "%s: Check if device matches ...", __func__);
/* See if the device offered us matches what we can accept */
if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) ||
- (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID))
+ (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID)) {
+ gig_dbg(DEBUG_ANY, "device ID (0x%x, 0x%x) not for me - skip",
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct));
return -ENODEV;
-
- /* this starts to become ascii art... */
- hostif = interface->cur_altsetting;
- alt = hostif->desc.bAlternateSetting;
- ifnum = hostif->desc.bInterfaceNumber; // FIXME ?
-
- if (alt != 0 || ifnum != 0) {
- dev_warn(&udev->dev, "ifnum %d, alt %d\n", ifnum, alt);
+ }
+ if (hostif->desc.bInterfaceNumber != 0) {
+ gig_dbg(DEBUG_ANY, "interface %d not for me - skip",
+ hostif->desc.bInterfaceNumber);
+ return -ENODEV;
+ }
+ if (hostif->desc.bAlternateSetting != 0) {
+ dev_notice(&udev->dev, "unsupported altsetting %d - skip",
+ hostif->desc.bAlternateSetting);
return -ENODEV;
}
-
- /* Reject application specific intefaces
- *
- */
if (hostif->desc.bInterfaceClass != 255) {
- dev_info(&udev->dev,
- "%s: Device matched but iface_desc[%d]->bInterfaceClass==%d!\n",
- __func__, ifnum, hostif->desc.bInterfaceClass);
+ dev_notice(&udev->dev, "unsupported interface class %d - skip",
+ hostif->desc.bInterfaceClass);
return -ENODEV;
}
dev_info(&udev->dev, "%s: Device matched ... !\n", __func__);
- cs = gigaset_getunassignedcs(driver);
- if (!cs) {
- dev_warn(&udev->dev, "no free cardstate\n");
+ /* allocate memory for our device state and initialize it */
+ cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
+ if (!cs)
return -ENODEV;
- }
ucs = cs->hw.usb;
/* save off device structure ptrs for later use */
@@ -759,7 +762,7 @@ static int gigaset_probe(struct usb_interface *interface,
endpoint = &hostif->endpoint[1].desc;
- atomic_set(&ucs->busy, 0);
+ ucs->busy = 0;
ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!ucs->read_urb) {
@@ -792,7 +795,7 @@ static int gigaset_probe(struct usb_interface *interface,
/* tell common part that the device is ready */
if (startmode == SM_LOCKED)
- atomic_set(&cs->mstate, MS_LOCKED);
+ cs->mstate = MS_LOCKED;
if (!gigaset_start(cs)) {
tasklet_kill(&cs->write_tasklet);
@@ -813,7 +816,7 @@ error:
usb_put_dev(ucs->udev);
ucs->udev = NULL;
ucs->interface = NULL;
- gigaset_unassign(cs);
+ gigaset_freecs(cs);
return retval;
}
@@ -824,6 +827,9 @@ static void gigaset_disconnect(struct usb_interface *interface)
cs = usb_get_intfdata(interface);
ucs = cs->hw.usb;
+
+ dev_info(cs->dev, "disconnecting Gigaset USB adapter\n");
+
usb_kill_urb(ucs->read_urb);
gigaset_stop(cs);
@@ -831,7 +837,7 @@ static void gigaset_disconnect(struct usb_interface *interface)
usb_set_intfdata(interface, NULL);
tasklet_kill(&cs->write_tasklet);
- usb_kill_urb(ucs->bulk_out_urb); /* FIXME: only if active? */
+ usb_kill_urb(ucs->bulk_out_urb);
kfree(ucs->bulk_out_buffer);
usb_free_urb(ucs->bulk_out_urb);
@@ -844,7 +850,53 @@ static void gigaset_disconnect(struct usb_interface *interface)
ucs->interface = NULL;
ucs->udev = NULL;
cs->dev = NULL;
- gigaset_unassign(cs);
+ gigaset_freecs(cs);
+}
+
+/* gigaset_suspend
+ * This function is called before the USB connection is suspended or reset.
+ */
+static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct cardstate *cs = usb_get_intfdata(intf);
+
+ /* stop activity */
+ cs->connected = 0; /* prevent rescheduling */
+ usb_kill_urb(cs->hw.usb->read_urb);
+ tasklet_kill(&cs->write_tasklet);
+ usb_kill_urb(cs->hw.usb->bulk_out_urb);
+
+ gig_dbg(DEBUG_SUSPEND, "suspend complete");
+ return 0;
+}
+
+/* gigaset_resume
+ * This function is called after the USB connection has been resumed or reset.
+ */
+static int gigaset_resume(struct usb_interface *intf)
+{
+ struct cardstate *cs = usb_get_intfdata(intf);
+ int rc;
+
+ /* resubmit interrupt URB */
+ cs->connected = 1;
+ rc = usb_submit_urb(cs->hw.usb->read_urb, GFP_KERNEL);
+ if (rc) {
+ dev_err(cs->dev, "Could not submit read URB (error %d)\n", -rc);
+ return rc;
+ }
+
+ gig_dbg(DEBUG_SUSPEND, "resume complete");
+ return 0;
+}
+
+/* gigaset_pre_reset
+ * This function is called before the USB connection is reset.
+ */
+static int gigaset_pre_reset(struct usb_interface *intf)
+{
+ /* same as suspend */
+ return gigaset_suspend(intf, PMSG_ON);
}
static const struct gigaset_ops ops = {
@@ -880,11 +932,6 @@ static int __init usb_gigaset_init(void)
&ops, THIS_MODULE)) == NULL)
goto error;
- /* allocate memory for our device state and intialize it */
- cardstate = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
- if (!cardstate)
- goto error;
-
/* register this driver with the USB subsystem */
result = usb_register(&gigaset_usb_driver);
if (result < 0) {
@@ -897,9 +944,7 @@ static int __init usb_gigaset_init(void)
info(DRIVER_DESC);
return 0;
-error: if (cardstate)
- gigaset_freecs(cardstate);
- cardstate = NULL;
+error:
if (driver)
gigaset_freedriver(driver);
driver = NULL;
@@ -913,11 +958,16 @@ error: if (cardstate)
*/
static void __exit usb_gigaset_exit(void)
{
+ int i;
+
gigaset_blockdriver(driver); /* => probe will fail
* => no gigaset_start any more
*/
- gigaset_shutdown(cardstate);
+ /* stop all connected devices */
+ for (i = 0; i < driver->minors; i++)
+ gigaset_shutdown(driver->cs + i);
+
/* from now on, no isdn callback should be possible */
/* deregister this driver with the USB subsystem */
@@ -925,8 +975,6 @@ static void __exit usb_gigaset_exit(void)
/* this will call the disconnect-callback */
/* from now on, no disconnect/probe callback should be running */
- gigaset_freecs(cardstate);
- cardstate = NULL;
gigaset_freedriver(driver);
driver = NULL;
}
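With the single static cardstate gone, usb_gigaset_exit() has to walk every minor owned by the driver and shut each allocated cardstate down before unregistering from the USB core. A condensed sketch of that teardown order, assuming (as the loop above does) that driver->cs is an array of driver->minors entries:

	static void __exit usb_gigaset_exit(void)
	{
		int i;

		gigaset_blockdriver(driver);		/* no new probes from now on */

		for (i = 0; i < driver->minors; i++)
			gigaset_shutdown(driver->cs + i); /* stop each device's ISDN side */

		usb_deregister(&gigaset_usb_driver);	/* runs the disconnect callbacks */
		gigaset_freedriver(driver);
		driver = NULL;
	}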
diff --git a/drivers/isdn/hardware/eicon/debug.c b/drivers/isdn/hardware/eicon/debug.c
index 0db9cc661e28..84318ec8d13e 100644
--- a/drivers/isdn/hardware/eicon/debug.c
+++ b/drivers/isdn/hardware/eicon/debug.c
@@ -1188,7 +1188,7 @@ int SuperTraceASSIGN (void* AdapterHandle, byte* data) {
if ((features[0] & DIVA_XDI_EXTENDED_FEATURES_VALID) &&
(features[0] & DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA)) {
- dword rx_dma_magic;
+ dword uninitialized_var(rx_dma_magic);
if ((pC->dma_handle = diva_get_dma_descriptor (pC->request, &rx_dma_magic)) >= 0) {
pC->xbuffer[0] = LLI;
pC->xbuffer[1] = 8;
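uninitialized_var() is the kernel idiom of this era for silencing a false "may be used uninitialized" warning without actually pre-initialising the variable; rx_dma_magic is still only consumed on the success path of diva_get_dma_descriptor(). Its conventional definition, quoted here from memory of the contemporary compiler headers (shown for reference, not part of this patch):

	#define uninitialized_var(x) x = x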
diff --git a/drivers/isdn/hardware/eicon/debuglib.c b/drivers/isdn/hardware/eicon/debuglib.c
index a19b7ffe9ace..e39c5c1f623e 100644
--- a/drivers/isdn/hardware/eicon/debuglib.c
+++ b/drivers/isdn/hardware/eicon/debuglib.c
@@ -106,7 +106,7 @@ DbgRegister (char *drvName, char *drvTag, unsigned long dbgMask)
return (1) ;
}
/*
- * Check if we registered whith an old maint driver (see debuglib.h)
+ * Check if we registered with an old maint driver (see debuglib.h)
*/
if ( myDriverDebugHandle.dbg_end != NULL
/* location of 'dbg_prt' in _OldDbgHandle_ struct */
diff --git a/drivers/isdn/hardware/eicon/debuglib.h b/drivers/isdn/hardware/eicon/debuglib.h
index 11b3b9edd1d6..016410cf2273 100644
--- a/drivers/isdn/hardware/eicon/debuglib.h
+++ b/drivers/isdn/hardware/eicon/debuglib.h
@@ -177,7 +177,7 @@ DBG_DECL(PRV3)
} }
#endif
/*
- * For event level debug use a separate define, the paramete are
+ * For event level debug use a separate define, the parameters are
* different and cause compiler errors on some systems.
*/
#define DBG_EVL_ID(args) \
diff --git a/drivers/isdn/hardware/eicon/di.c b/drivers/isdn/hardware/eicon/di.c
index ce8df3878908..10760b3c5eb5 100644
--- a/drivers/isdn/hardware/eicon/di.c
+++ b/drivers/isdn/hardware/eicon/di.c
@@ -285,7 +285,7 @@ byte pr_dpc(ADAPTER * a)
a->ram_in(a, &RcIn->RcId),
a->ram_in(a, &RcIn->RcCh),
a->ram_inw(a, &RcIn->Reference),
- tmp[0], /* type of extended informtion */
+ tmp[0], /* type of extended information */
tmp[1]); /* extended information */
a->ram_out(a, &RcIn->Rc, 0);
}
diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c
index ffa2afa77c2f..1403a5458e68 100644
--- a/drivers/isdn/hardware/eicon/diva.c
+++ b/drivers/isdn/hardware/eicon/diva.c
@@ -515,12 +515,11 @@ diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
irqreturn_t diva_os_irq_wrapper(int irq, void *context)
{
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) context;
+ diva_os_xdi_adapter_t *a = context;
diva_xdi_clear_interrupts_proc_t clear_int_proc;
- if (!a || !a->xdi_adapter.diva_isr_handler) {
+ if (!a || !a->xdi_adapter.diva_isr_handler)
return IRQ_NONE;
- }
if ((clear_int_proc = a->clear_interrupts_proc)) {
(*clear_int_proc) (a);
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index ccd35d047ec8..1ff98e7eb794 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -4941,7 +4941,7 @@ void sig_ind(PLCI * plci)
/* b = IE1 */
/* S = IE1 length + cont. */
/* b = IE2 */
- /* S = IE2 lenght + cont. */
+ /* S = IE2 length + cont. */
sendf(plci->appl,
_MANUFACTURER_I,
Id,
@@ -9027,7 +9027,7 @@ static byte AddInfo(byte **add_i,
/* facility is a nested structure */
/* FTY can be more than once */
- if(esc_chi[0] && !(esc_chi[esc_chi[0]])&0x7f )
+ if (esc_chi[0] && !(esc_chi[esc_chi[0]] & 0x7f))
{
add_i[0] = (byte *)"\x02\x02\x00"; /* use neither b nor d channel */
}
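The message.c change, and the long series of identical fixes in icn.c and isdnloop.c further down, correct an operator-precedence bug: '!' binds tighter than '&', so the old tests negated the whole flags word before masking it. A minimal illustration (flag name and value are hypothetical):

	#define FLAG_RUNNING 0x04		/* hypothetical flag bit */

	static int check_running(unsigned int flags)
	{
		/* buggy form: parsed as (!flags) & 0x04, which is always 0,
		 * so the -ENODEV branch can never be taken */
		if (!flags & FLAG_RUNNING)
			return -ENODEV;

		/* fixed form: taken exactly when the bit is clear */
		if (!(flags & FLAG_RUNNING))
			return -ENODEV;

		return 0;
	}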
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index 035d158779df..0f1db1f669b2 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -263,11 +263,7 @@ hdlc_empty_fifo(struct BCState *bcs, int count)
outl(idx, cs->hw.avm.cfg_reg + 4);
while (cnt < count) {
#ifdef __powerpc__
-#ifdef CONFIG_APUS
- *ptr++ = in_le32((unsigned *)(cs->hw.avm.isac +_IO_BASE));
-#else
*ptr++ = in_be32((unsigned *)(cs->hw.avm.isac +_IO_BASE));
-#endif /* CONFIG_APUS */
#else
*ptr++ = inl(cs->hw.avm.isac);
#endif /* __powerpc__ */
@@ -328,11 +324,7 @@ hdlc_fill_fifo(struct BCState *bcs)
if (cs->subtyp == AVM_FRITZ_PCI) {
while (cnt<count) {
#ifdef __powerpc__
-#ifdef CONFIG_APUS
- out_le32((unsigned *)(cs->hw.avm.isac +_IO_BASE), *ptr++);
-#else
out_be32((unsigned *)(cs->hw.avm.isac +_IO_BASE), *ptr++);
-#endif /* CONFIG_APUS */
#else
outl(*ptr++, cs->hw.avm.isac);
#endif /* __powerpc__ */
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c
index f85450146bdc..d3999a8e9f88 100644
--- a/drivers/isdn/hysdn/hycapi.c
+++ b/drivers/isdn/hysdn/hycapi.c
@@ -541,7 +541,7 @@ hycapi_rx_capipkt(hysdn_card * card, unsigned char *buf, unsigned short len)
}
ctrl = &cinfo->capi_ctrl;
if(len < CAPI_MSG_BASELEN) {
- printk(KERN_ERR "HYSDN Card%d: invalid CAPI-message, lenght %d!\n",
+ printk(KERN_ERR "HYSDN Card%d: invalid CAPI-message, length %d!\n",
card->myid, len);
return;
}
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 9cb6e5021adb..133eb18e65cc 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1917,7 +1917,6 @@ isdn_tty_modem_init(void)
info->owner = THIS_MODULE;
#endif
spin_lock_init(&info->readlock);
- init_MUTEX(&info->write_sem);
sprintf(info->last_cause, "0000");
sprintf(info->last_num, "none");
info->last_dir = 0;
diff --git a/drivers/isdn/i4l/isdn_ttyfax.c b/drivers/isdn/i4l/isdn_ttyfax.c
index a943d078bacc..f93de4a30355 100644
--- a/drivers/isdn/i4l/isdn_ttyfax.c
+++ b/drivers/isdn/i4l/isdn_ttyfax.c
@@ -834,7 +834,7 @@ isdn_tty_cmd_FCLASS2(char **p, modem_info * info)
char *rp = &f->resolution;
p[0] += 2;
- if (!info->faxonline & 1) /* not outgoing connection */
+ if (!(info->faxonline & 1)) /* not outgoing connection */
PARSE_ERROR1;
for (i = 0; (((*p[0] >= '0') && (*p[0] <= '9')) || (*p[0] == ',')) && (i < 4); i++) {
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 82d957bde299..bf7997abc4ac 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1302,7 +1302,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_DIAL:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
if (card->leased)
break;
@@ -1328,7 +1328,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_ACCEPTD:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
if (c->arg < ICN_BCH) {
a = c->arg + 1;
@@ -1348,7 +1348,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_ACCEPTB:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
if (c->arg < ICN_BCH) {
a = c->arg + 1;
@@ -1366,7 +1366,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_HANGUP:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
if (c->arg < ICN_BCH) {
a = c->arg + 1;
@@ -1375,7 +1375,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_SETEAZ:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
if (card->leased)
break;
@@ -1391,7 +1391,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_CLREAZ:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
if (card->leased)
break;
@@ -1405,7 +1405,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_SETL2:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
if ((c->arg & 255) < ICN_BCH) {
a = c->arg;
@@ -1424,7 +1424,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_SETL3:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
return 0;
default:
@@ -1471,7 +1471,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel)
icn_card *card = icn_findcard(id);
if (card) {
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
return (icn_writecmd(buf, len, 1, card));
}
@@ -1486,7 +1486,7 @@ if_readstatus(u_char __user *buf, int len, int id, int channel)
icn_card *card = icn_findcard(id);
if (card) {
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
return (icn_readstatus(buf, len, card));
}
@@ -1501,7 +1501,7 @@ if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
icn_card *card = icn_findcard(id);
if (card) {
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
return (icn_sendbuf(channel, ack, skb, card));
}
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index bb92e3cd9334..655ef9a3f4df 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -1184,7 +1184,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
}
break;
case ISDN_CMD_DIAL:
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (card->leased)
break;
@@ -1210,7 +1210,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
}
break;
case ISDN_CMD_ACCEPTD:
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (c->arg < ISDNLOOP_BCH) {
a = c->arg + 1;
@@ -1238,7 +1238,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
}
break;
case ISDN_CMD_ACCEPTB:
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (c->arg < ISDNLOOP_BCH) {
a = c->arg + 1;
@@ -1264,7 +1264,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
break;
case ISDN_CMD_HANGUP:
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (c->arg < ISDNLOOP_BCH) {
a = c->arg + 1;
@@ -1273,7 +1273,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
}
break;
case ISDN_CMD_SETEAZ:
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (card->leased)
break;
@@ -1303,7 +1303,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
}
break;
case ISDN_CMD_SETL2:
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if ((c->arg & 255) < ISDNLOOP_BCH) {
a = c->arg;
@@ -1395,7 +1395,7 @@ if_readstatus(u_char __user *buf, int len, int id, int channel)
isdnloop_card *card = isdnloop_findcard(id);
if (card) {
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
return (isdnloop_readstatus(buf, len, card));
}
@@ -1410,7 +1410,7 @@ if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
isdnloop_card *card = isdnloop_findcard(id);
if (card) {
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
/* ack request stored in skb scratch area */
*(skb->head) = ack;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ec568fa1c6cc..851a3b01781e 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -39,15 +39,6 @@ config LEDS_SPITZ
This option enables support for the LEDs on Sharp Zaurus
SL-Cxx00 series (C1000, C3000, C3100).
-config LEDS_IXP4XX
- tristate "LED Support for GPIO connected LEDs on IXP4XX processors"
- depends on LEDS_CLASS && ARCH_IXP4XX
- help
- This option enables support for the LEDs connected to GPIO
- outputs of the Intel IXP4XX processors. To be useful the
- particular board must have LEDs and they must be connected
- to the GPIO lines. If unsure, say Y.
-
config LEDS_TOSA
tristate "LED Support for the Sharp SL-6000 series"
depends on LEDS_CLASS && PXA_SHARPSL
@@ -100,6 +91,13 @@ config LEDS_COBALT_RAQ
help
This option enables support for the Cobalt Raq series LEDs.
+config LEDS_HP6XX
+ tristate "LED Support for the HP Jornada 6xx"
+ depends on LEDS_CLASS && SH_HP6XX
+ help
+ This option enables LED support for the handheld
+ HP Jornada 620/660/680/690.
+
config LEDS_GPIO
tristate "LED Support for GPIO connected LEDs"
depends on LEDS_CLASS && GENERIC_GPIO
@@ -114,6 +112,32 @@ config LEDS_CM_X270
help
This option enables support for the CM-X270 LEDs.
+config LEDS_CLEVO_MAIL
+ tristate "Mail LED on Clevo notebook (EXPERIMENTAL)"
+ depends on LEDS_CLASS && X86 && SERIO_I8042 && DMI && EXPERIMENTAL
+ help
+ This driver makes the mail LED accessible from userspace
+ programs through the leds subsystem. This LED has three
+ known modes: off, blink at 0.5Hz and blink at 1Hz.
+
+ The driver supports two kinds of interfaces: using ledtrig-timer
+ or through /sys/class/leds/clevo::mail/brightness. As this LED
+ cannot change its brightness it blinks instead. The brightness
+ value 0 means off, 1..127 means blink at 0.5Hz and 128..255 means
+ blink at 1Hz.
+
+ This module can drive the mail LED for the following notebooks:
+
+ Clevo D410J
+ Clevo D410V
+ Clevo D400V/D470V (not tested, but might work)
+ Clevo M540N
+ Clevo M5x0N (not tested, but might work)
+ Positivo Mobile (Clevo M5x0V)
+
+ To compile this driver as a module, choose M here: the
+ module will be called leds-clevo-mail.
+
comment "LED Triggers"
config LEDS_TRIGGERS
@@ -128,7 +152,11 @@ config LEDS_TRIGGER_TIMER
depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by a programmable timer
- via sysfs. If unsure, say Y.
+ via sysfs. Some LED hardware can be programmed to start
+ blinking the LED without any further software interaction.
+ For more details read Documentation/leds-class.txt.
+
+ If unsure, say Y.
config LEDS_TRIGGER_IDE_DISK
bool "LED IDE Disk Trigger"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index a60de1b46c2c..bc6afc8dcb27 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
obj-$(CONFIG_LEDS_CORGI) += leds-corgi.o
obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_SPITZ) += leds-spitz.o
-obj-$(CONFIG_LEDS_IXP4XX) += leds-ixp4xx-gpio.o
obj-$(CONFIG_LEDS_TOSA) += leds-tosa.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
@@ -19,6 +18,8 @@ obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o
obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
obj-$(CONFIG_LEDS_CM_X270) += leds-cm-x270.o
+obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
+obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
# LED Triggers
obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 64c66b3769c9..4a938780dfc3 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -137,12 +137,14 @@ err_out:
EXPORT_SYMBOL_GPL(led_classdev_register);
/**
- * led_classdev_unregister - unregisters a object of led_properties class.
+ * __led_classdev_unregister - unregisters a object of led_properties class.
* @led_cdev: the led device to unregister
+ * @suspended: indicates whether system-wide suspend or resume is in progress
*
* Unregisters a previously registered via led_classdev_register object.
*/
-void led_classdev_unregister(struct led_classdev *led_cdev)
+void __led_classdev_unregister(struct led_classdev *led_cdev,
+ bool suspended)
{
device_remove_file(led_cdev->dev, &dev_attr_brightness);
#ifdef CONFIG_LEDS_TRIGGERS
@@ -153,13 +155,16 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
up_write(&led_cdev->trigger_lock);
#endif
- device_unregister(led_cdev->dev);
+ if (suspended)
+ device_pm_schedule_removal(led_cdev->dev);
+ else
+ device_unregister(led_cdev->dev);
down_write(&leds_list_lock);
list_del(&led_cdev->node);
up_write(&leds_list_lock);
}
-EXPORT_SYMBOL_GPL(led_classdev_unregister);
+EXPORT_SYMBOL_GPL(__led_classdev_unregister);
static int __init leds_init(void)
{
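led_classdev_unregister() becomes __led_classdev_unregister() with a 'suspended' flag so that callers tearing down LEDs during a system sleep transition can defer the removal through device_pm_schedule_removal() instead of calling device_unregister() while the PM core holds the device. Presumably the public API stays available as thin wrappers in include/linux/leds.h, along these lines (an assumption, not shown in this hunk):

	static inline void led_classdev_unregister(struct led_classdev *lcd)
	{
		__led_classdev_unregister(lcd, false);
	}

	static inline void led_classdev_unregister_suspended(struct led_classdev *lcd)
	{
		__led_classdev_unregister(lcd, true);
	}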
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
index 599878c8e714..9e3077463d84 100644
--- a/drivers/leds/leds-ams-delta.c
+++ b/drivers/leds/leds-ams-delta.c
@@ -37,42 +37,42 @@ static void ams_delta_led_set(struct led_classdev *led_cdev,
static struct ams_delta_led ams_delta_leds[] = {
{
.cdev = {
- .name = "ams-delta:camera",
+ .name = "ams-delta::camera",
.brightness_set = ams_delta_led_set,
},
.bitmask = AMS_DELTA_LATCH1_LED_CAMERA,
},
{
.cdev = {
- .name = "ams-delta:advert",
+ .name = "ams-delta::advert",
.brightness_set = ams_delta_led_set,
},
.bitmask = AMS_DELTA_LATCH1_LED_ADVERT,
},
{
.cdev = {
- .name = "ams-delta:email",
+ .name = "ams-delta::email",
.brightness_set = ams_delta_led_set,
},
.bitmask = AMS_DELTA_LATCH1_LED_EMAIL,
},
{
.cdev = {
- .name = "ams-delta:handsfree",
+ .name = "ams-delta::handsfree",
.brightness_set = ams_delta_led_set,
},
.bitmask = AMS_DELTA_LATCH1_LED_HANDSFREE,
},
{
.cdev = {
- .name = "ams-delta:voicemail",
+ .name = "ams-delta::voicemail",
.brightness_set = ams_delta_led_set,
},
.bitmask = AMS_DELTA_LATCH1_LED_VOICEMAIL,
},
{
.cdev = {
- .name = "ams-delta:voice",
+ .name = "ams-delta::voice",
.brightness_set = ams_delta_led_set,
},
.bitmask = AMS_DELTA_LATCH1_LED_VOICE,
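The ams-delta renames follow the documented "devicename:colour:function" LED naming scheme; the double colon marks an intentionally empty colour field, not a typo. For example, a registration under that convention looks like (a sketch using names from the hunk above):

	static struct led_classdev camera_led = {
		.name		= "ams-delta::camera",	/* device "ams-delta", no colour, function "camera" */
		.brightness_set	= ams_delta_led_set,
	};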
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
new file mode 100644
index 000000000000..6c3d33b8e383
--- /dev/null
+++ b/drivers/leds/leds-clevo-mail.c
@@ -0,0 +1,219 @@
+
+#include <linux/module.h>
+
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/leds.h>
+
+#include <linux/io.h>
+#include <linux/dmi.h>
+
+#include <linux/i8042.h>
+
+#define CLEVO_MAIL_LED_OFF 0x0084
+#define CLEVO_MAIL_LED_BLINK_1HZ 0x008A
+#define CLEVO_MAIL_LED_BLINK_0_5HZ 0x0083
+
+MODULE_AUTHOR("Márton Németh <nm127@freemail.hu>");
+MODULE_DESCRIPTION("Clevo mail LED driver");
+MODULE_LICENSE("GPL");
+
+static unsigned int __initdata nodetect;
+module_param_named(nodetect, nodetect, bool, 0);
+MODULE_PARM_DESC(nodetect, "Skip DMI hardware detection");
+
+static struct platform_device *pdev;
+
+static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
+{
+ printk(KERN_INFO KBUILD_MODNAME ": '%s' found\n", id->ident);
+ return 1;
+}
+
+/*
+ * struct mail_led_whitelist - List of known good models
+ *
+ * Contains the known good models this driver is compatible with.
+ * When adding a new model try to be as strict as possible. This
+ * makes it possible to keep the false positives (the model is
+ * detected as working, but in reality it is not) as low as
+ * possible.
+ */
+static struct dmi_system_id __initdata mail_led_whitelist[] = {
+ {
+ .callback = clevo_mail_led_dmi_callback,
+ .ident = "Clevo D410J",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "VIA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K8N800"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "VT8204B")
+ }
+ },
+ {
+ .callback = clevo_mail_led_dmi_callback,
+ .ident = "Clevo M5x0N",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M5x0N")
+ }
+ },
+ {
+ .callback = clevo_mail_led_dmi_callback,
+ .ident = "Positivo Mobile",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "CLEVO Co. "),
+ DMI_MATCH(DMI_BOARD_NAME, "M5X0V "),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Positivo Mobile"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "VT6198")
+ }
+ },
+ {
+ .callback = clevo_mail_led_dmi_callback,
+ .ident = "Clevo D410V",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Clevo, Co."),
+ DMI_MATCH(DMI_BOARD_NAME, "D400V/D470V"),
+ DMI_MATCH(DMI_BOARD_VERSION, "SS78B"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Rev. A1")
+ }
+ },
+ { }
+};
+
+static void clevo_mail_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (value == LED_OFF)
+ i8042_command(NULL, CLEVO_MAIL_LED_OFF);
+ else if (value <= LED_HALF)
+ i8042_command(NULL, CLEVO_MAIL_LED_BLINK_0_5HZ);
+ else
+ i8042_command(NULL, CLEVO_MAIL_LED_BLINK_1HZ);
+
+}
+
+static int clevo_mail_led_blink(struct led_classdev *led_cdev,
+ unsigned long* delay_on,
+ unsigned long* delay_off)
+{
+ int status = -EINVAL;
+
+ if (*delay_on == 0 /* ms */ && *delay_off == 0 /* ms */) {
+ /* Special case: the leds subsystem requested us to
+ * choose one user-friendly blinking of the LED, and
+ * start it. Let's blink the LED slowly (0.5Hz).
+ */
+ *delay_on = 1000; /* ms */
+ *delay_off = 1000; /* ms */
+ i8042_command(NULL, CLEVO_MAIL_LED_BLINK_0_5HZ);
+ status = 0;
+
+ } else if (*delay_on == 500 /* ms */ && *delay_off == 500 /* ms */) {
+ /* blink the led with 1Hz */
+ i8042_command(NULL, CLEVO_MAIL_LED_BLINK_1HZ);
+ status = 0;
+
+ } else if (*delay_on == 1000 /* ms */ && *delay_off == 1000 /* ms */) {
+ /* blink the led with 0.5Hz */
+ i8042_command(NULL, CLEVO_MAIL_LED_BLINK_0_5HZ);
+ status = 0;
+
+ } else {
+ printk(KERN_DEBUG KBUILD_MODNAME
+ ": clevo_mail_led_blink(..., %lu, %lu),"
+ " returning -EINVAL (unsupported)\n",
+ *delay_on, *delay_off);
+ }
+
+ return status;
+}
+
+static struct led_classdev clevo_mail_led = {
+ .name = "clevo::mail",
+ .brightness_set = clevo_mail_led_set,
+ .blink_set = clevo_mail_led_blink,
+};
+
+static int __init clevo_mail_led_probe(struct platform_device *pdev)
+{
+ return led_classdev_register(&pdev->dev, &clevo_mail_led);
+}
+
+static int clevo_mail_led_remove(struct platform_device *pdev)
+{
+ led_classdev_unregister(&clevo_mail_led);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int clevo_mail_led_suspend(struct platform_device *dev,
+ pm_message_t state)
+{
+ led_classdev_suspend(&clevo_mail_led);
+ return 0;
+}
+
+static int clevo_mail_led_resume(struct platform_device *dev)
+{
+ led_classdev_resume(&clevo_mail_led);
+ return 0;
+}
+#else
+#define clevo_mail_led_suspend NULL
+#define clevo_mail_led_resume NULL
+#endif
+
+static struct platform_driver clevo_mail_led_driver = {
+ .probe = clevo_mail_led_probe,
+ .remove = clevo_mail_led_remove,
+ .suspend = clevo_mail_led_suspend,
+ .resume = clevo_mail_led_resume,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+
+static int __init clevo_mail_led_init(void)
+{
+ int error = 0;
+ int count = 0;
+
+ /* Check with the help of DMI if we are running on supported hardware */
+ if (!nodetect) {
+ count = dmi_check_system(mail_led_whitelist);
+ } else {
+ count = 1;
+ printk(KERN_ERR KBUILD_MODNAME ": Skipping DMI detection. "
+ "If the driver works on your hardware please "
+ "report model and the output of dmidecode in tracker "
+ "at http://sourceforge.net/projects/clevo-mailled/\n");
+ }
+
+ if (!count)
+ return -ENODEV;
+
+ pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
+ if (!IS_ERR(pdev)) {
+ error = platform_driver_probe(&clevo_mail_led_driver,
+ clevo_mail_led_probe);
+ if (error) {
+ printk(KERN_ERR KBUILD_MODNAME
+ ": Can't probe platform driver\n");
+ platform_device_unregister(pdev);
+ }
+ } else
+ error = PTR_ERR(pdev);
+
+ return error;
+}
+
+static void __exit clevo_mail_led_exit(void)
+{
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&clevo_mail_led_driver);
+
+ clevo_mail_led_set(NULL, LED_OFF);
+}
+
+module_init(clevo_mail_led_init);
+module_exit(clevo_mail_led_exit);
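leds-clevo-mail has no real bus device, so its init code manufactures one: platform_device_register_simple() creates a throwaway platform device and platform_driver_probe() binds the driver to it exactly once, which is also what allows the probe routine to stay in __init memory. The registration and unwind order in isolation (a condensed sketch of clevo_mail_led_init()):

	pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	error = platform_driver_probe(&clevo_mail_led_driver, clevo_mail_led_probe);
	if (error)
		platform_device_unregister(pdev);	/* undo the device on failure */

	return error;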
diff --git a/drivers/leds/leds-corgi.c b/drivers/leds/leds-corgi.c
index cf1dcd719a28..e45f6c4b59ba 100644
--- a/drivers/leds/leds-corgi.c
+++ b/drivers/leds/leds-corgi.c
@@ -38,13 +38,13 @@ static void corgiled_green_set(struct led_classdev *led_cdev, enum led_brightnes
}
static struct led_classdev corgi_amber_led = {
- .name = "corgi:amber",
+ .name = "corgi:amber:charge",
.default_trigger = "sharpsl-charge",
.brightness_set = corgiled_amber_set,
};
static struct led_classdev corgi_green_led = {
- .name = "corgi:green",
+ .name = "corgi:green:mail",
.default_trigger = "nand-disk",
.brightness_set = corgiled_green_set,
};
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 99bc50059d35..6c0a9c4761ee 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -85,7 +85,7 @@ static int gpio_led_probe(struct platform_device *pdev)
led_dat->can_sleep = gpio_cansleep(cur_led->gpio);
led_dat->active_low = cur_led->active_low;
led_dat->cdev.brightness_set = gpio_led_set;
- led_dat->cdev.brightness = cur_led->active_low ? LED_FULL : LED_OFF;
+ led_dat->cdev.brightness = LED_OFF;
ret = gpio_request(led_dat->gpio, led_dat->cdev.name);
if (ret < 0)
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
new file mode 100644
index 000000000000..82d4ec384797
--- /dev/null
+++ b/drivers/leds/leds-hp6xx.c
@@ -0,0 +1,120 @@
+/*
+ * LED driver for the HP Jornada 620/660/680/690 handhelds
+ *
+ * Copyright 2008 Kristoffer Ericson <kristoffer.ericson@gmail.com>
+ * this driver is based on leds-spitz.c by Richard Purdie.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <asm/hd64461.h>
+#include <asm/hp6xx.h>
+
+static void hp6xxled_green_set(struct led_classdev *led_cdev, enum led_brightness value)
+{
+ u8 v8;
+
+ v8 = inb(PKDR);
+ if (value)
+ outb(v8 & (~PKDR_LED_GREEN), PKDR);
+ else
+ outb(v8 | PKDR_LED_GREEN, PKDR);
+}
+
+static void hp6xxled_red_set(struct led_classdev *led_cdev, enum led_brightness value)
+{
+ u16 v16;
+
+ v16 = inw(HD64461_GPBDR);
+ if (value)
+ outw(v16 & (~HD64461_GPBDR_LED_RED), HD64461_GPBDR);
+ else
+ outw(v16 | HD64461_GPBDR_LED_RED, HD64461_GPBDR);
+}
+
+static struct led_classdev hp6xx_red_led = {
+ .name = "hp6xx:red",
+ .default_trigger = "hp6xx-charge",
+ .brightness_set = hp6xxled_red_set,
+};
+
+static struct led_classdev hp6xx_green_led = {
+ .name = "hp6xx:green",
+ .default_trigger = "ide-disk",
+ .brightness_set = hp6xxled_green_set,
+};
+
+#ifdef CONFIG_PM
+static int hp6xxled_suspend(struct platform_device *dev, pm_message_t state)
+{
+ led_classdev_suspend(&hp6xx_red_led);
+ led_classdev_suspend(&hp6xx_green_led);
+ return 0;
+}
+
+static int hp6xxled_resume(struct platform_device *dev)
+{
+ led_classdev_resume(&hp6xx_red_led);
+ led_classdev_resume(&hp6xx_green_led);
+ return 0;
+}
+#endif
+
+static int hp6xxled_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = led_classdev_register(&pdev->dev, &hp6xx_red_led);
+ if (ret < 0)
+ return ret;
+
+ ret = led_classdev_register(&pdev->dev, &hp6xx_green_led);
+ if (ret < 0)
+ led_classdev_unregister(&hp6xx_red_led);
+
+ return ret;
+}
+
+static int hp6xxled_remove(struct platform_device *pdev)
+{
+ led_classdev_unregister(&hp6xx_red_led);
+ led_classdev_unregister(&hp6xx_green_led);
+
+ return 0;
+}
+
+static struct platform_driver hp6xxled_driver = {
+ .probe = hp6xxled_probe,
+ .remove = hp6xxled_remove,
+#ifdef CONFIG_PM
+ .suspend = hp6xxled_suspend,
+ .resume = hp6xxled_resume,
+#endif
+ .driver = {
+ .name = "hp6xx-led",
+ },
+};
+
+static int __init hp6xxled_init(void)
+{
+ return platform_driver_register(&hp6xxled_driver);
+}
+
+static void __exit hp6xxled_exit(void)
+{
+ platform_driver_unregister(&hp6xxled_driver);
+}
+
+module_init(hp6xxled_init);
+module_exit(hp6xxled_exit);
+
+MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
+MODULE_DESCRIPTION("HP Jornada 6xx LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-ixp4xx-gpio.c b/drivers/leds/leds-ixp4xx-gpio.c
deleted file mode 100644
index 7dcf0b92c460..000000000000
--- a/drivers/leds/leds-ixp4xx-gpio.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * IXP4XX GPIO driver LED driver
- *
- * Author: John Bowler <jbowler@acm.org>
- *
- * Copyright (c) 2006 John Bowler
- *
- * Permission is hereby granted, free of charge, to any
- * person obtaining a copy of this software and associated
- * documentation files (the "Software"), to deal in the
- * Software without restriction, including without
- * limitation the rights to use, copy, modify, merge,
- * publish, distribute, sublicense, and/or sell copies of
- * the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the
- * following conditions:
- *
- * The above copyright notice and this permission notice
- * shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
- * ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
- * TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
- * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
- * SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/leds.h>
-#include <asm/arch/hardware.h>
-
-extern spinlock_t gpio_lock;
-
-/* Up to 16 gpio lines are possible. */
-#define GPIO_MAX 16
-static struct ixp4xxgpioled_device {
- struct led_classdev ancestor;
- int flags;
-} ixp4xxgpioled_devices[GPIO_MAX];
-
-void ixp4xxgpioled_brightness_set(struct led_classdev *pled,
- enum led_brightness value)
-{
- const struct ixp4xxgpioled_device *const ixp4xx_dev =
- container_of(pled, struct ixp4xxgpioled_device, ancestor);
- const u32 gpio_pin = ixp4xx_dev - ixp4xxgpioled_devices;
-
- if (gpio_pin < GPIO_MAX && ixp4xx_dev->ancestor.name != 0) {
- /* Set or clear the 'gpio_pin' bit according to the style
- * and the required setting (value > 0 == on)
- */
- const int gpio_value =
- (value > 0) == (ixp4xx_dev->flags != IXP4XX_GPIO_LOW) ?
- IXP4XX_GPIO_HIGH : IXP4XX_GPIO_LOW;
-
- {
- unsigned long flags;
- spin_lock_irqsave(&gpio_lock, flags);
- gpio_line_set(gpio_pin, gpio_value);
- spin_unlock_irqrestore(&gpio_lock, flags);
- }
- }
-}
-
-/* LEDs are described in resources, the following iterates over the valid
- * LED resources.
- */
-#define for_all_leds(i, pdev) \
- for (i=0; i<pdev->num_resources; ++i) \
- if (pdev->resource[i].start < GPIO_MAX && \
- pdev->resource[i].name != 0)
-
-/* The following applies 'operation' to each LED from the given platform,
- * the function always returns 0 to allow tail call elimination.
- */
-static int apply_to_all_leds(struct platform_device *pdev,
- void (*operation)(struct led_classdev *pled))
-{
- int i;
-
- for_all_leds(i, pdev)
- operation(&ixp4xxgpioled_devices[pdev->resource[i].start].ancestor);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ixp4xxgpioled_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- return apply_to_all_leds(pdev, led_classdev_suspend);
-}
-
-static int ixp4xxgpioled_resume(struct platform_device *pdev)
-{
- return apply_to_all_leds(pdev, led_classdev_resume);
-}
-#endif
-
-static void ixp4xxgpioled_remove_one_led(struct led_classdev *pled)
-{
- led_classdev_unregister(pled);
- pled->name = 0;
-}
-
-static int ixp4xxgpioled_remove(struct platform_device *pdev)
-{
- return apply_to_all_leds(pdev, ixp4xxgpioled_remove_one_led);
-}
-
-static int ixp4xxgpioled_probe(struct platform_device *pdev)
-{
- /* The board level has to tell the driver where the
- * LEDs are connected - there is no way to find out
- * electrically. It must also say whether the GPIO
- * lines are active high or active low.
- *
- * To do this read the num_resources (the number of
- * LEDs) and the struct resource (the data for each
- * LED). The name comes from the resource, and it
- * isn't copied.
- */
- int i;
-
- for_all_leds(i, pdev) {
- const u8 gpio_pin = pdev->resource[i].start;
- int rc;
-
- if (ixp4xxgpioled_devices[gpio_pin].ancestor.name == 0) {
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
- gpio_line_config(gpio_pin, IXP4XX_GPIO_OUT);
- /* The config can, apparently, reset the state,
- * I suspect the gpio line may be an input and
- * the config may cause the line to be latched,
- * so the setting depends on how the LED is
- * connected to the line (which affects how it
- * floats if not driven).
- */
- gpio_line_set(gpio_pin, IXP4XX_GPIO_HIGH);
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- ixp4xxgpioled_devices[gpio_pin].flags =
- pdev->resource[i].flags & IORESOURCE_BITS;
-
- ixp4xxgpioled_devices[gpio_pin].ancestor.name =
- pdev->resource[i].name;
-
- /* This is how a board manufacturer makes the LED
- * come on on reset - the GPIO line will be high, so
- * make the LED light when the line is low...
- */
- if (ixp4xxgpioled_devices[gpio_pin].flags != IXP4XX_GPIO_LOW)
- ixp4xxgpioled_devices[gpio_pin].ancestor.brightness = 100;
- else
- ixp4xxgpioled_devices[gpio_pin].ancestor.brightness = 0;
-
- ixp4xxgpioled_devices[gpio_pin].ancestor.flags = 0;
-
- ixp4xxgpioled_devices[gpio_pin].ancestor.brightness_set =
- ixp4xxgpioled_brightness_set;
-
- ixp4xxgpioled_devices[gpio_pin].ancestor.default_trigger = 0;
- }
-
- rc = led_classdev_register(&pdev->dev,
- &ixp4xxgpioled_devices[gpio_pin].ancestor);
- if (rc < 0) {
- ixp4xxgpioled_devices[gpio_pin].ancestor.name = 0;
- ixp4xxgpioled_remove(pdev);
- return rc;
- }
- }
-
- return 0;
-}
-
-static struct platform_driver ixp4xxgpioled_driver = {
- .probe = ixp4xxgpioled_probe,
- .remove = ixp4xxgpioled_remove,
-#ifdef CONFIG_PM
- .suspend = ixp4xxgpioled_suspend,
- .resume = ixp4xxgpioled_resume,
-#endif
- .driver = {
- .name = "IXP4XX-GPIO-LED",
- },
-};
-
-static int __init ixp4xxgpioled_init(void)
-{
- return platform_driver_register(&ixp4xxgpioled_driver);
-}
-
-static void __exit ixp4xxgpioled_exit(void)
-{
- platform_driver_unregister(&ixp4xxgpioled_driver);
-}
-
-module_init(ixp4xxgpioled_init);
-module_exit(ixp4xxgpioled_exit);
-
-MODULE_AUTHOR("John Bowler <jbowler@acm.org>");
-MODULE_DESCRIPTION("IXP4XX GPIO LED driver");
-MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/leds/leds-locomo.c b/drivers/leds/leds-locomo.c
index 2207335e9212..7295f7f52185 100644
--- a/drivers/leds/leds-locomo.c
+++ b/drivers/leds/leds-locomo.c
@@ -43,13 +43,13 @@ static void locomoled_brightness_set1(struct led_classdev *led_cdev,
}
static struct led_classdev locomo_led0 = {
- .name = "locomo:amber",
+ .name = "locomo:amber:charge",
.default_trigger = "sharpsl-charge",
.brightness_set = locomoled_brightness_set0,
};
static struct led_classdev locomo_led1 = {
- .name = "locomo:green",
+ .name = "locomo:green:mail",
.default_trigger = "nand-disk",
.brightness_set = locomoled_brightness_set1,
};
diff --git a/drivers/leds/leds-net48xx.c b/drivers/leds/leds-net48xx.c
index 45ba3d45bcb8..054360473c94 100644
--- a/drivers/leds/leds-net48xx.c
+++ b/drivers/leds/leds-net48xx.c
@@ -31,7 +31,7 @@ static void net48xx_error_led_set(struct led_classdev *led_cdev,
}
static struct led_classdev net48xx_error_led = {
- .name = "net48xx:error",
+ .name = "net48xx::error",
.brightness_set = net48xx_error_led_set,
};
diff --git a/drivers/leds/leds-spitz.c b/drivers/leds/leds-spitz.c
index 126d09cc96ec..93e1012b17e6 100644
--- a/drivers/leds/leds-spitz.c
+++ b/drivers/leds/leds-spitz.c
@@ -38,13 +38,13 @@ static void spitzled_green_set(struct led_classdev *led_cdev, enum led_brightnes
}
static struct led_classdev spitz_amber_led = {
- .name = "spitz:amber",
+ .name = "spitz:amber:charge",
.default_trigger = "sharpsl-charge",
.brightness_set = spitzled_amber_set,
};
static struct led_classdev spitz_green_led = {
- .name = "spitz:green",
+ .name = "spitz:green:hddactivity",
.default_trigger = "ide-disk",
.brightness_set = spitzled_green_set,
};
@@ -72,8 +72,10 @@ static int spitzled_probe(struct platform_device *pdev)
{
int ret;
- if (machine_is_akita())
+ if (machine_is_akita()) {
+ spitz_green_led.name = "spitz:green:mail";
spitz_green_led.default_trigger = "nand-disk";
+ }
ret = led_classdev_register(&pdev->dev, &spitz_amber_led);
if (ret < 0)
diff --git a/drivers/leds/leds-tosa.c b/drivers/leds/leds-tosa.c
index fb2416a38303..9e0a188fbb0a 100644
--- a/drivers/leds/leds-tosa.c
+++ b/drivers/leds/leds-tosa.c
@@ -45,13 +45,13 @@ static void tosaled_green_set(struct led_classdev *led_cdev,
}
static struct led_classdev tosa_amber_led = {
- .name = "tosa:amber",
+ .name = "tosa:amber:charge",
.default_trigger = "sharpsl-charge",
.brightness_set = tosaled_amber_set,
};
static struct led_classdev tosa_green_led = {
- .name = "tosa:green",
+ .name = "tosa:green:mail",
.default_trigger = "nand-disk",
.brightness_set = tosaled_green_set,
};
diff --git a/drivers/leds/leds-wrap.c b/drivers/leds/leds-wrap.c
index 27fb2d8e991f..7ac61a7b56ad 100644
--- a/drivers/leds/leds-wrap.c
+++ b/drivers/leds/leds-wrap.c
@@ -19,11 +19,21 @@
#include <linux/scx200_gpio.h>
#define DRVNAME "wrap-led"
+#define WRAP_POWER_LED_GPIO 2
#define WRAP_ERROR_LED_GPIO 3
-#define WRAP_EXTRA_LED_GPIO 18
+#define WRAP_EXTRA_LED_GPIO 18
static struct platform_device *pdev;
+static void wrap_power_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (value)
+ scx200_gpio_set_low(WRAP_POWER_LED_GPIO);
+ else
+ scx200_gpio_set_high(WRAP_POWER_LED_GPIO);
+}
+
static void wrap_error_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
@@ -42,13 +52,18 @@ static void wrap_extra_led_set(struct led_classdev *led_cdev,
scx200_gpio_set_high(WRAP_EXTRA_LED_GPIO);
}
+static struct led_classdev wrap_power_led = {
+ .name = "wrap::power",
+ .brightness_set = wrap_power_led_set,
+};
+
static struct led_classdev wrap_error_led = {
- .name = "wrap:error",
+ .name = "wrap::error",
.brightness_set = wrap_error_led_set,
};
static struct led_classdev wrap_extra_led = {
- .name = "wrap:extra",
+ .name = "wrap::extra",
.brightness_set = wrap_extra_led_set,
};
@@ -56,6 +71,7 @@ static struct led_classdev wrap_extra_led = {
static int wrap_led_suspend(struct platform_device *dev,
pm_message_t state)
{
+ led_classdev_suspend(&wrap_power_led);
led_classdev_suspend(&wrap_error_led);
led_classdev_suspend(&wrap_extra_led);
return 0;
@@ -63,6 +79,7 @@ static int wrap_led_suspend(struct platform_device *dev,
static int wrap_led_resume(struct platform_device *dev)
{
+ led_classdev_resume(&wrap_power_led);
led_classdev_resume(&wrap_error_led);
led_classdev_resume(&wrap_extra_led);
return 0;
@@ -76,17 +93,31 @@ static int wrap_led_probe(struct platform_device *pdev)
{
int ret;
+ ret = led_classdev_register(&pdev->dev, &wrap_power_led);
+ if (ret < 0)
+ return ret;
+
ret = led_classdev_register(&pdev->dev, &wrap_error_led);
- if (ret == 0) {
- ret = led_classdev_register(&pdev->dev, &wrap_extra_led);
- if (ret < 0)
- led_classdev_unregister(&wrap_error_led);
- }
+ if (ret < 0)
+ goto err1;
+
+ ret = led_classdev_register(&pdev->dev, &wrap_extra_led);
+ if (ret < 0)
+ goto err2;
+
+ return ret;
+
+err2:
+ led_classdev_unregister(&wrap_error_led);
+err1:
+ led_classdev_unregister(&wrap_power_led);
+
return ret;
}
static int wrap_led_remove(struct platform_device *pdev)
{
+ led_classdev_unregister(&wrap_power_led);
led_classdev_unregister(&wrap_error_led);
led_classdev_unregister(&wrap_extra_led);
return 0;
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index ed9ff02c77ea..82c55d6e4902 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -77,8 +77,21 @@ static ssize_t led_delay_on_store(struct device *dev,
count++;
if (count == size) {
- timer_data->delay_on = state;
- mod_timer(&timer_data->timer, jiffies + 1);
+ if (timer_data->delay_on != state) {
+ /* the new value differs from the previous */
+ timer_data->delay_on = state;
+
+ /* deactivate previous settings */
+ del_timer_sync(&timer_data->timer);
+
+ /* try to activate hardware acceleration, if any */
+ if (!led_cdev->blink_set ||
+ led_cdev->blink_set(led_cdev,
+ &timer_data->delay_on, &timer_data->delay_off)) {
+ /* no hardware acceleration, blink via timer */
+ mod_timer(&timer_data->timer, jiffies + 1);
+ }
+ }
ret = count;
}
@@ -110,8 +123,21 @@ static ssize_t led_delay_off_store(struct device *dev,
count++;
if (count == size) {
- timer_data->delay_off = state;
- mod_timer(&timer_data->timer, jiffies + 1);
+ if (timer_data->delay_off != state) {
+ /* the new value differs from the previous */
+ timer_data->delay_off = state;
+
+ /* deactivate previous settings */
+ del_timer_sync(&timer_data->timer);
+
+ /* try to activate hardware acceleration, if any */
+ if (!led_cdev->blink_set ||
+ led_cdev->blink_set(led_cdev,
+ &timer_data->delay_on, &timer_data->delay_off)) {
+ /* no hardware acceleration, blink via timer */
+ mod_timer(&timer_data->timer, jiffies + 1);
+ }
+ }
ret = count;
}
@@ -143,6 +169,13 @@ static void timer_trig_activate(struct led_classdev *led_cdev)
if (rc)
goto err_out_delayon;
+ /* If there is hardware support for blinking, start it at a
+ * user-friendly blink rate chosen by the driver.
+ */
+ if (led_cdev->blink_set)
+ led_cdev->blink_set(led_cdev,
+ &timer_data->delay_on, &timer_data->delay_off);
+
return;
err_out_delayon:
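
(Reader's note, not part of the patch: the ledtrig-timer changes above rely on the optional led_classdev->blink_set() hook. A driver that can blink in hardware returns 0, and may rewrite *delay_on / *delay_off to the rate it actually programmed; a non-zero return tells the trigger to keep using its software timer, and both delays being zero asks the driver to pick a sensible default. A rough sketch of such a hook follows; the 500 ms-only limit and demo_hw_program_blink() are assumptions for illustration.)

#include <linux/leds.h>

/* Hypothetical register poke; stands in for real hardware programming. */
static void demo_hw_program_blink(unsigned long on_ms, unsigned long off_ms)
{
}

static int demo_blink_set(struct led_classdev *led_cdev,
			  unsigned long *delay_on, unsigned long *delay_off)
{
	/* 0/0 asks the driver to choose a sensible default rate. */
	if (*delay_on == 0 && *delay_off == 0) {
		*delay_on = 500;
		*delay_off = 500;
	}

	/* Assume this controller only supports symmetric 500 ms blinking;
	 * returning non-zero makes the trigger fall back to its software timer. */
	if (*delay_on != 500 || *delay_off != 500)
		return -EINVAL;

	demo_hw_program_blink(*delay_on, *delay_off);
	return 0;	/* hardware is now blinking the LED */
}
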
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index e2eec38c83c2..84f85e23cca7 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -52,57 +52,82 @@ struct lguest_device {
/*D:130
* Device configurations
*
- * The configuration information for a device consists of a series of fields.
- * We don't really care what they are: the Launcher set them up, and the driver
- * will look at them during setup.
+ * The configuration information for a device consists of one or more
+ * virtqueues, a feature bitmask, and some configuration bytes. The
+ * configuration bytes don't really matter to us: the Launcher sets them up, and
+ * the driver will look at them during setup.
*
- * For us these fields come immediately after that device's descriptor in the
- * lguest_devices page.
- *
- * Each field starts with a "type" byte, a "length" byte, then that number of
- * bytes of configuration information. The device descriptor tells us the
- * total configuration length so we know when we've reached the last field. */
+ * A convenient routine to return the device's virtqueue config array:
+ * immediately after the descriptor. */
+static struct lguest_vqconfig *lg_vq(const struct lguest_device_desc *desc)
+{
+ return (void *)(desc + 1);
+}
-/* type + length bytes */
-#define FHDR_LEN 2
+/* The features come immediately after the virtqueues. */
+static u8 *lg_features(const struct lguest_device_desc *desc)
+{
+ return (void *)(lg_vq(desc) + desc->num_vq);
+}
-/* This finds the first field of a given type for a device's configuration. */
-static void *lg_find(struct virtio_device *vdev, u8 type, unsigned int *len)
+/* The config space comes after the two feature bitmasks. */
+static u8 *lg_config(const struct lguest_device_desc *desc)
{
- struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
- int i;
-
- for (i = 0; i < desc->config_len; i += FHDR_LEN + desc->config[i+1]) {
- if (desc->config[i] == type) {
- /* Mark it used, so Host can know we looked at it, and
- * also so we won't find the same one twice. */
- desc->config[i] |= 0x80;
- /* Remember, the second byte is the length. */
- *len = desc->config[i+1];
- /* We return a pointer to the field header. */
- return desc->config + i;
- }
- }
+ return lg_features(desc) + desc->feature_len * 2;
+}
- /* Not found: return NULL for failure. */
- return NULL;
+/* The total size of the config page used by this device (incl. desc) */
+static unsigned desc_size(const struct lguest_device_desc *desc)
+{
+ return sizeof(*desc)
+ + desc->num_vq * sizeof(struct lguest_vqconfig)
+ + desc->feature_len * 2
+ + desc->config_len;
+}
+
+/* This tests (and acknowledges) a feature bit. */
+static bool lg_feature(struct virtio_device *vdev, unsigned fbit)
+{
+ struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
+ u8 *features;
+
+ /* Obviously if they ask for a feature off the end of our feature
+ * bitmap, it's not set. */
+ if (fbit / 8 > desc->feature_len)
+ return false;
+
+ /* The feature bitmap comes after the virtqueues. */
+ features = lg_features(desc);
+ if (!(features[fbit / 8] & (1 << (fbit % 8))))
+ return false;
+
+ /* We set the matching bit in the other half of the bitmap to tell the
+ * Host we want to use this feature. We don't use this yet, but we
+ * could in future. */
+ features[desc->feature_len + fbit / 8] |= (1 << (fbit % 8));
+ return true;
}
/* Once they've found a field, getting a copy of it is easy. */
-static void lg_get(struct virtio_device *vdev, void *token,
+static void lg_get(struct virtio_device *vdev, unsigned int offset,
void *buf, unsigned len)
{
- /* Check they didn't ask for more than the length of the field! */
- BUG_ON(len > ((u8 *)token)[1]);
- memcpy(buf, token + FHDR_LEN, len);
+ struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
+
+ /* Check they didn't ask for more than the length of the config! */
+ BUG_ON(offset + len > desc->config_len);
+ memcpy(buf, lg_config(desc) + offset, len);
}
/* Setting the contents is also trivial. */
-static void lg_set(struct virtio_device *vdev, void *token,
+static void lg_set(struct virtio_device *vdev, unsigned int offset,
const void *buf, unsigned len)
{
- BUG_ON(len > ((u8 *)token)[1]);
- memcpy(token + FHDR_LEN, buf, len);
+ struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
+
+ /* Check they didn't ask for more than the length of the config! */
+ BUG_ON(offset + len > desc->config_len);
+ memcpy(lg_config(desc) + offset, buf, len);
}
/* The operations to get and set the status word just access the status field
@@ -114,9 +139,20 @@ static u8 lg_get_status(struct virtio_device *vdev)
static void lg_set_status(struct virtio_device *vdev, u8 status)
{
+ BUG_ON(!status);
to_lgdev(vdev)->desc->status = status;
}
+/* To reset the device, we (ab)use the NOTIFY hypercall, with the descriptor
+ * address of the device. The Host will zero the status and all the
+ * features. */
+static void lg_reset(struct virtio_device *vdev)
+{
+ unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices;
+
+ hcall(LHCALL_NOTIFY, (max_pfn<<PAGE_SHIFT) + offset, 0, 0);
+}
+
/*
* Virtqueues
*
@@ -165,39 +201,29 @@ static void lg_notify(struct virtqueue *vq)
*
* So we provide devices with a "find virtqueue and set it up" function. */
static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
- bool (*callback)(struct virtqueue *vq))
+ unsigned index,
+ void (*callback)(struct virtqueue *vq))
{
+ struct lguest_device *ldev = to_lgdev(vdev);
struct lguest_vq_info *lvq;
struct virtqueue *vq;
- unsigned int len;
- void *token;
int err;
- /* Look for a field of the correct type to mark a virtqueue. Note that
- * if this succeeds, then the type will be changed so it won't be found
- * again, and future lg_find_vq() calls will find the next
- * virtqueue (if any). */
- token = vdev->config->find(vdev, VIRTIO_CONFIG_F_VIRTQUEUE, &len);
- if (!token)
+ /* We must have this many virtqueues. */
+ if (index >= ldev->desc->num_vq)
return ERR_PTR(-ENOENT);
lvq = kmalloc(sizeof(*lvq), GFP_KERNEL);
if (!lvq)
return ERR_PTR(-ENOMEM);
- /* Note: we could use a configuration space inside here, just like we
- * do for the device. This would allow expansion in future, because
- * our configuration system is designed to be expansible. But this is
- * way easier. */
- if (len != sizeof(lvq->config)) {
- dev_err(&vdev->dev, "Unexpected virtio config len %u\n", len);
- err = -EIO;
- goto free_lvq;
- }
- /* Make a copy of the "struct lguest_vqconfig" field. We need a copy
- * because the config space might not be aligned correctly. */
- vdev->config->get(vdev, token, &lvq->config, sizeof(lvq->config));
+ /* Make a copy of the "struct lguest_vqconfig" entry, which sits after
+ * the descriptor. We need a copy because the config space might not
+ * be aligned correctly. */
+ memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config));
+ printk("Mapping virtqueue %i addr %lx\n", index,
+ (unsigned long)lvq->config.pfn << PAGE_SHIFT);
/* Figure out how many pages the ring will take, and map that memory */
lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT,
DIV_ROUND_UP(vring_size(lvq->config.num,
@@ -259,11 +285,12 @@ static void lg_del_vq(struct virtqueue *vq)
/* The ops structure which hooks everything together. */
static struct virtio_config_ops lguest_config_ops = {
- .find = lg_find,
+ .feature = lg_feature,
.get = lg_get,
.set = lg_set,
.get_status = lg_get_status,
.set_status = lg_set_status,
+ .reset = lg_reset,
.find_vq = lg_find_vq,
.del_vq = lg_del_vq,
};
@@ -329,13 +356,14 @@ static void scan_devices(void)
struct lguest_device_desc *d;
/* We start at the page beginning, and skip over each entry. */
- for (i = 0; i < PAGE_SIZE; i += sizeof(*d) + d->config_len) {
+ for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
d = lguest_devices + i;
/* Once we hit a zero, stop. */
if (d->type == 0)
break;
+ printk("Device at %i has size %u\n", i, desc_size(d));
add_lguest_device(d);
}
}
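
(Reader's note, not part of the patch: the per-device layout walked by lg_vq(), lg_features(), lg_config() and desc_size() above is, in order:

/*
 *   struct lguest_device_desc          <- type, num_vq, feature_len,
 *                                         config_len, status
 *   struct lguest_vqconfig [num_vq]    <- one entry per virtqueue
 *   u8 features[feature_len]           <- features the Host offers
 *   u8 acks[feature_len]               <- bits the Guest acknowledges
 *   u8 config[config_len]              <- device config space
 *
 * Worked example: with num_vq = 2, feature_len = 1 and config_len = 8,
 * desc_size() = sizeof(desc) + 2 * sizeof(struct lguest_vqconfig) + 1 * 2 + 8.
 */
)
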
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 7ce0ea64465c..28958101061f 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -36,6 +36,7 @@
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/kthread.h>
+#include <linux/platform_device.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index de9ebbfbf122..936788272a5f 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -78,12 +78,14 @@ struct media_bay_info {
int cached_gpio;
int sleeping;
struct semaphore lock;
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
void __iomem *cd_base;
- int cd_index;
int cd_irq;
int cd_retry;
#endif
+#if defined(CONFIG_BLK_DEV_IDE_PMAC) || defined(CONFIG_MAC_FLOPPY)
+ int cd_index;
+#endif
};
#define MAX_BAYS 2
@@ -91,7 +93,7 @@ struct media_bay_info {
static struct media_bay_info media_bays[MAX_BAYS];
int media_bay_count = 0;
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
/* check the busy bit in the media-bay ide interface
(assumes the media-bay contains an ide device) */
#define MB_IDE_READY(i) ((readb(media_bays[i].cd_base + 0x70) & 0x80) == 0)
@@ -401,7 +403,7 @@ static void poll_media_bay(struct media_bay_info* bay)
set_mb_power(bay, id != MB_NO);
bay->content_id = id;
if (id == MB_NO) {
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
bay->cd_retry = 0;
#endif
printk(KERN_INFO "media bay %d is empty\n", bay->index);
@@ -414,9 +416,9 @@ static void poll_media_bay(struct media_bay_info* bay)
}
}
+#ifdef CONFIG_MAC_FLOPPY
int check_media_bay(struct device_node *which_bay, int what)
{
-#ifdef CONFIG_BLK_DEV_IDE
int i;
for (i=0; i<media_bay_count; i++)
@@ -426,14 +428,14 @@ int check_media_bay(struct device_node *which_bay, int what)
media_bays[i].cd_index = -1;
return -EINVAL;
}
-#endif /* CONFIG_BLK_DEV_IDE */
return -ENODEV;
}
EXPORT_SYMBOL(check_media_bay);
+#endif /* CONFIG_MAC_FLOPPY */
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
int check_media_bay_by_base(unsigned long base, int what)
{
-#ifdef CONFIG_BLK_DEV_IDE
int i;
for (i=0; i<media_bay_count; i++)
@@ -443,15 +445,13 @@ int check_media_bay_by_base(unsigned long base, int what)
media_bays[i].cd_index = -1;
return -EINVAL;
}
-#endif
-
+
return -ENODEV;
}
int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base,
- int irq, int index)
+ int irq, int index)
{
-#ifdef CONFIG_BLK_DEV_IDE
int i;
for (i=0; i<media_bay_count; i++) {
@@ -483,10 +483,10 @@ int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base,
return -ENODEV;
}
}
-#endif /* CONFIG_BLK_DEV_IDE */
-
+
return -ENODEV;
}
+#endif /* CONFIG_BLK_DEV_IDE_PMAC */
static void media_bay_step(int i)
{
@@ -521,14 +521,13 @@ static void media_bay_step(int i)
bay->state = mb_resetting;
MBDBG("mediabay%d: waiting reset (kind:%d)\n", i, bay->content_id);
break;
-
case mb_resetting:
if (bay->content_id != MB_CD) {
MBDBG("mediabay%d: bay is up (kind:%d)\n", i, bay->content_id);
bay->state = mb_up;
break;
}
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
MBDBG("mediabay%d: waiting IDE reset (kind:%d)\n", i, bay->content_id);
bay->ops->un_reset_ide(bay);
bay->timer = msecs_to_jiffies(MB_IDE_WAIT);
@@ -536,16 +535,14 @@ static void media_bay_step(int i)
#else
printk(KERN_DEBUG "media-bay %d is ide (not compiled in kernel)\n", i);
set_mb_power(bay, 0);
-#endif /* CONFIG_BLK_DEV_IDE */
+#endif /* CONFIG_BLK_DEV_IDE_PMAC */
break;
-
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
case mb_ide_resetting:
bay->timer = msecs_to_jiffies(MB_IDE_TIMEOUT);
bay->state = mb_ide_waiting;
MBDBG("mediabay%d: waiting IDE ready (kind:%d)\n", i, bay->content_id);
break;
-
case mb_ide_waiting:
if (bay->cd_base == NULL) {
bay->timer = 0;
@@ -587,11 +584,10 @@ static void media_bay_step(int i)
bay->timer = 0;
}
break;
-#endif /* CONFIG_BLK_DEV_IDE */
-
+#endif /* CONFIG_BLK_DEV_IDE_PMAC */
case mb_powering_down:
bay->state = mb_empty;
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
if (bay->cd_index >= 0) {
printk(KERN_DEBUG "Unregistering mb %d ide, index:%d\n", i,
bay->cd_index);
@@ -607,7 +603,7 @@ static void media_bay_step(int i)
bay->content_id = MB_NO;
}
}
-#endif /* CONFIG_BLK_DEV_IDE */
+#endif /* CONFIG_BLK_DEV_IDE_PMAC */
MBDBG("mediabay%d: end of power down\n", i);
break;
}
@@ -739,7 +735,7 @@ static int media_bay_resume(struct macio_dev *mdev)
bay->last_value = bay->content_id;
bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY);
bay->timer = msecs_to_jiffies(MB_POWER_DELAY);
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
bay->cd_retry = 0;
#endif
do {
@@ -829,7 +825,7 @@ static int __init media_bay_init(void)
for (i=0; i<MAX_BAYS; i++) {
memset((char *)&media_bays[i], 0, sizeof(struct media_bay_info));
media_bays[i].content_id = -1;
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
media_bays[i].cd_index = -1;
#endif
}
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index d409f6759482..8ba49385c3ff 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -12,7 +12,7 @@
* - maybe add timeout to commands ?
* - blocking version of time functions
* - polling version of i2c commands (including timer that works with
- * interrutps off)
+ * interrupts off)
* - maybe avoid some data copies with i2c by directly using the smu cmd
* buffer and a lower level internal interface
* - understand SMU -> CPU events and implement reception of them via
@@ -179,7 +179,7 @@ static irqreturn_t smu_db_intr(int irq, void *arg)
/* CPU might have brought back the cache line, so we need
* to flush again before peeking at the SMU response. We
* flush the entire buffer for now as we haven't read the
- * reply lenght (it's only 2 cache lines anyway)
+ * reply length (it's only 2 cache lines anyway)
*/
faddr = (unsigned long)smu->cmd_buf;
flush_inval_dcache_range(faddr, faddr + 256);
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index 01b8eca7ccd5..6e6dd17ab572 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -111,7 +111,7 @@ static enum macii_state {
static struct adb_request *current_req; /* first request struct in the queue */
static struct adb_request *last_req; /* last request struct in the queue */
static unsigned char reply_buf[16]; /* storage for autopolled replies */
-static unsigned char *reply_ptr; /* next byte in req->data or reply_buf */
+static unsigned char *reply_ptr; /* next byte in reply_buf or req->reply */
static int reading_reply; /* store reply in reply_buf else req->reply */
static int data_index; /* index of the next byte to send from req->data */
static int reply_len; /* number of bytes received in reply_buf or req->reply */
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 1b1ef3130e6e..a0585fb6da94 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -237,7 +237,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde
if (!page)
return ERR_PTR(-ENOMEM);
- ITERATE_RDEV(mddev, rdev, tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (! test_bit(In_sync, &rdev->flags)
|| test_bit(Faulty, &rdev->flags))
continue;
@@ -261,7 +261,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
struct list_head *tmp;
mddev_t *mddev = bitmap->mddev;
- ITERATE_RDEV(mddev, rdev, tmp)
+ rdev_for_each(rdev, tmp, mddev)
if (test_bit(In_sync, &rdev->flags)
&& !test_bit(Faulty, &rdev->flags)) {
int size = PAGE_SIZE;
@@ -1348,14 +1348,38 @@ void bitmap_close_sync(struct bitmap *bitmap)
*/
sector_t sector = 0;
int blocks;
- if (!bitmap) return;
+ if (!bitmap)
+ return;
while (sector < bitmap->mddev->resync_max_sectors) {
bitmap_end_sync(bitmap, sector, &blocks, 0);
-/*
- if (sector < 500) printk("bitmap_close_sync: sec %llu blks %d\n",
- (unsigned long long)sector, blocks);
-*/ sector += blocks;
+ sector += blocks;
+ }
+}
+
+void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
+{
+ sector_t s = 0;
+ int blocks;
+
+ if (!bitmap)
+ return;
+ if (sector == 0) {
+ bitmap->last_end_sync = jiffies;
+ return;
+ }
+ if (time_before(jiffies, (bitmap->last_end_sync
+ + bitmap->daemon_sleep * HZ)))
+ return;
+ wait_event(bitmap->mddev->recovery_wait,
+ atomic_read(&bitmap->mddev->recovery_active) == 0);
+
+ sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
+ s = 0;
+ while (s < sector && s < bitmap->mddev->resync_max_sectors) {
+ bitmap_end_sync(bitmap, s, &blocks, 0);
+ s += blocks;
}
+ bitmap->last_end_sync = jiffies;
}
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
@@ -1565,3 +1589,4 @@ EXPORT_SYMBOL(bitmap_start_sync);
EXPORT_SYMBOL(bitmap_end_sync);
EXPORT_SYMBOL(bitmap_unplug);
EXPORT_SYMBOL(bitmap_close_sync);
+EXPORT_SYMBOL(bitmap_cond_end_sync);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index cf2ddce34118..d107ddceefcd 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -294,7 +294,7 @@ static int run(mddev_t *mddev)
}
conf->nfaults = 0;
- ITERATE_RDEV(mddev, rdev, tmp)
+ rdev_for_each(rdev, tmp, mddev)
conf->rdev = rdev;
mddev->array_size = mddev->size;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 3dac1cfb8189..0b8511776b3e 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -122,7 +122,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
cnt = 0;
conf->array_size = 0;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
int j = rdev->raid_disk;
dev_info_t *disk = conf->disks + j;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c28a120b4161..5fc326d3970e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -195,7 +195,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
*/
-#define ITERATE_MDDEV(mddev,tmp) \
+#define for_each_mddev(mddev,tmp) \
\
for (({ spin_lock(&all_mddevs_lock); \
tmp = all_mddevs.next; \
@@ -275,6 +275,7 @@ static mddev_t * mddev_find(dev_t unit)
spin_lock_init(&new->write_lock);
init_waitqueue_head(&new->sb_wait);
new->reshape_position = MaxSector;
+ new->resync_max = MaxSector;
new->queue = blk_alloc_queue(GFP_KERNEL);
if (!new->queue) {
@@ -310,7 +311,7 @@ static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
mdk_rdev_t * rdev;
struct list_head *tmp;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (rdev->desc_nr == nr)
return rdev;
}
@@ -322,7 +323,7 @@ static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
struct list_head *tmp;
mdk_rdev_t *rdev;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (rdev->bdev->bd_dev == dev)
return rdev;
}
@@ -773,12 +774,16 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
__u64 ev1 = md_event(sb);
rdev->raid_disk = -1;
- rdev->flags = 0;
+ clear_bit(Faulty, &rdev->flags);
+ clear_bit(In_sync, &rdev->flags);
+ clear_bit(WriteMostly, &rdev->flags);
+ clear_bit(BarriersNotsupp, &rdev->flags);
+
if (mddev->raid_disks == 0) {
mddev->major_version = 0;
mddev->minor_version = sb->minor_version;
mddev->patch_version = sb->patch_version;
- mddev->persistent = ! sb->not_persistent;
+ mddev->external = 0;
mddev->chunk_size = sb->chunk_size;
mddev->ctime = sb->ctime;
mddev->utime = sb->utime;
@@ -904,7 +909,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->size = mddev->size;
sb->raid_disks = mddev->raid_disks;
sb->md_minor = mddev->md_minor;
- sb->not_persistent = !mddev->persistent;
+ sb->not_persistent = 0;
sb->utime = mddev->utime;
sb->state = 0;
sb->events_hi = (mddev->events>>32);
@@ -938,7 +943,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->state |= (1<<MD_SB_BITMAP_PRESENT);
sb->disks[0].state = (1<<MD_DISK_REMOVED);
- ITERATE_RDEV(mddev,rdev2,tmp) {
+ rdev_for_each(rdev2, tmp, mddev) {
mdp_disk_t *d;
int desc_nr;
if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
@@ -1153,11 +1158,15 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
__u64 ev1 = le64_to_cpu(sb->events);
rdev->raid_disk = -1;
- rdev->flags = 0;
+ clear_bit(Faulty, &rdev->flags);
+ clear_bit(In_sync, &rdev->flags);
+ clear_bit(WriteMostly, &rdev->flags);
+ clear_bit(BarriersNotsupp, &rdev->flags);
+
if (mddev->raid_disks == 0) {
mddev->major_version = 1;
mddev->patch_version = 0;
- mddev->persistent = 1;
+ mddev->external = 0;
mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
@@ -1286,7 +1295,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
}
max_dev = 0;
- ITERATE_RDEV(mddev,rdev2,tmp)
+ rdev_for_each(rdev2, tmp, mddev)
if (rdev2->desc_nr+1 > max_dev)
max_dev = rdev2->desc_nr+1;
@@ -1295,7 +1304,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
for (i=0; i<max_dev;i++)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
- ITERATE_RDEV(mddev,rdev2,tmp) {
+ rdev_for_each(rdev2, tmp, mddev) {
i = rdev2->desc_nr;
if (test_bit(Faulty, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(0xfffe);
@@ -1333,8 +1342,8 @@ static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
struct list_head *tmp, *tmp2;
mdk_rdev_t *rdev, *rdev2;
- ITERATE_RDEV(mddev1,rdev,tmp)
- ITERATE_RDEV(mddev2, rdev2, tmp2)
+ rdev_for_each(rdev, tmp, mddev1)
+ rdev_for_each(rdev2, tmp2, mddev2)
if (rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains)
return 1;
@@ -1401,7 +1410,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
goto fail;
}
list_add(&rdev->same_set, &mddev->disks);
- bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
+ bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
return 0;
fail:
@@ -1410,10 +1419,11 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
return err;
}
-static void delayed_delete(struct work_struct *ws)
+static void md_delayed_delete(struct work_struct *ws)
{
mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
kobject_del(&rdev->kobj);
+ kobject_put(&rdev->kobj);
}
static void unbind_rdev_from_array(mdk_rdev_t * rdev)
@@ -1432,7 +1442,8 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
/* We need to delay this, otherwise we can deadlock when
* writing to 'remove' to "dev/state"
*/
- INIT_WORK(&rdev->del_work, delayed_delete);
+ INIT_WORK(&rdev->del_work, md_delayed_delete);
+ kobject_get(&rdev->kobj);
schedule_work(&rdev->del_work);
}
@@ -1441,7 +1452,7 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
* otherwise reused by a RAID array (or any other kernel
* subsystem), by bd_claiming the device.
*/
-static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
+static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
int err = 0;
struct block_device *bdev;
@@ -1453,13 +1464,15 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
__bdevname(dev, b));
return PTR_ERR(bdev);
}
- err = bd_claim(bdev, rdev);
+ err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
if (err) {
printk(KERN_ERR "md: could not bd_claim %s.\n",
bdevname(bdev, b));
blkdev_put(bdev);
return err;
}
+ if (!shared)
+ set_bit(AllReserved, &rdev->flags);
rdev->bdev = bdev;
return err;
}
@@ -1503,7 +1516,7 @@ static void export_array(mddev_t *mddev)
struct list_head *tmp;
mdk_rdev_t *rdev;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (!rdev->mddev) {
MD_BUG();
continue;
@@ -1581,17 +1594,17 @@ static void md_print_devices(void)
printk("md: **********************************\n");
printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
printk("md: **********************************\n");
- ITERATE_MDDEV(mddev,tmp) {
+ for_each_mddev(mddev, tmp) {
if (mddev->bitmap)
bitmap_print_sb(mddev->bitmap);
else
printk("%s: ", mdname(mddev));
- ITERATE_RDEV(mddev,rdev,tmp2)
+ rdev_for_each(rdev, tmp2, mddev)
printk("<%s>", bdevname(rdev->bdev,b));
printk("\n");
- ITERATE_RDEV(mddev,rdev,tmp2)
+ rdev_for_each(rdev, tmp2, mddev)
print_rdev(rdev);
}
printk("md: **********************************\n");
@@ -1610,7 +1623,7 @@ static void sync_sbs(mddev_t * mddev, int nospares)
mdk_rdev_t *rdev;
struct list_head *tmp;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (rdev->sb_events == mddev->events ||
(nospares &&
rdev->raid_disk < 0 &&
@@ -1696,18 +1709,20 @@ repeat:
MD_BUG();
mddev->events --;
}
- sync_sbs(mddev, nospares);
/*
* do not write anything to disk if using
* nonpersistent superblocks
*/
if (!mddev->persistent) {
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ if (!mddev->external)
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+
spin_unlock_irq(&mddev->write_lock);
wake_up(&mddev->sb_wait);
return;
}
+ sync_sbs(mddev, nospares);
spin_unlock_irq(&mddev->write_lock);
dprintk(KERN_INFO
@@ -1715,7 +1730,7 @@ repeat:
mdname(mddev),mddev->in_sync);
bitmap_update_sb(mddev->bitmap);
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
char b[BDEVNAME_SIZE];
dprintk(KERN_INFO "md: ");
if (rdev->sb_loaded != 1)
@@ -1785,7 +1800,7 @@ static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
char *sep = "";
- int len=0;
+ size_t len = 0;
if (test_bit(Faulty, &rdev->flags)) {
len+= sprintf(page+len, "%sfaulty",sep);
@@ -1887,20 +1902,45 @@ static ssize_t
slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
char *e;
+ int err;
+ char nm[20];
int slot = simple_strtoul(buf, &e, 10);
if (strncmp(buf, "none", 4)==0)
slot = -1;
else if (e==buf || (*e && *e!= '\n'))
return -EINVAL;
- if (rdev->mddev->pers)
- /* Cannot set slot in active array (yet) */
- return -EBUSY;
- if (slot >= rdev->mddev->raid_disks)
- return -ENOSPC;
- rdev->raid_disk = slot;
- /* assume it is working */
- rdev->flags = 0;
- set_bit(In_sync, &rdev->flags);
+ if (rdev->mddev->pers) {
+ /* Setting 'slot' on an active array requires also
+ * updating the 'rd%d' link, and communicating
+ * with the personality with ->hot_*_disk.
+ * For now we only support removing
+ * failed/spare devices. This normally happens automatically,
+ * but not when the metadata is externally managed.
+ */
+ if (slot != -1)
+ return -EBUSY;
+ if (rdev->raid_disk == -1)
+ return -EEXIST;
+ /* personality does all needed checks */
+ if (rdev->mddev->pers->hot_add_disk == NULL)
+ return -EINVAL;
+ err = rdev->mddev->pers->
+ hot_remove_disk(rdev->mddev, rdev->raid_disk);
+ if (err)
+ return err;
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_remove_link(&rdev->mddev->kobj, nm);
+ set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
+ md_wakeup_thread(rdev->mddev->thread);
+ } else {
+ if (slot >= rdev->mddev->raid_disks)
+ return -ENOSPC;
+ rdev->raid_disk = slot;
+ /* assume it is working */
+ clear_bit(Faulty, &rdev->flags);
+ clear_bit(WriteMostly, &rdev->flags);
+ set_bit(In_sync, &rdev->flags);
+ }
return len;
}
@@ -1923,6 +1963,10 @@ offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
return -EINVAL;
if (rdev->mddev->pers)
return -EBUSY;
+ if (rdev->size && rdev->mddev->external)
+ /* Must set offset before size, so overlap checks
+ * can be sane */
+ return -EBUSY;
rdev->data_offset = offset;
return len;
}
@@ -1936,16 +1980,69 @@ rdev_size_show(mdk_rdev_t *rdev, char *page)
return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
}
+static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
+{
+ /* check if two start/length pairs overlap */
+ if (s1+l1 <= s2)
+ return 0;
+ if (s2+l2 <= s1)
+ return 0;
+ return 1;
+}
+
static ssize_t
rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
char *e;
unsigned long long size = simple_strtoull(buf, &e, 10);
+ unsigned long long oldsize = rdev->size;
if (e==buf || (*e && *e != '\n'))
return -EINVAL;
if (rdev->mddev->pers)
return -EBUSY;
rdev->size = size;
+ if (size > oldsize && rdev->mddev->external) {
+ /* need to check that all other rdevs with the same ->bdev
+ * do not overlap. We need to unlock the mddev to avoid
+ * a deadlock. We have already changed rdev->size, and if
+ * we have to change it back, we will have the lock again.
+ */
+ mddev_t *mddev;
+ int overlap = 0;
+ struct list_head *tmp, *tmp2;
+
+ mddev_unlock(rdev->mddev);
+ for_each_mddev(mddev, tmp) {
+ mdk_rdev_t *rdev2;
+
+ mddev_lock(mddev);
+ rdev_for_each(rdev2, tmp2, mddev)
+ if (test_bit(AllReserved, &rdev2->flags) ||
+ (rdev->bdev == rdev2->bdev &&
+ rdev != rdev2 &&
+ overlaps(rdev->data_offset, rdev->size,
+ rdev2->data_offset, rdev2->size))) {
+ overlap = 1;
+ break;
+ }
+ mddev_unlock(mddev);
+ if (overlap) {
+ mddev_put(mddev);
+ break;
+ }
+ }
+ mddev_lock(rdev->mddev);
+ if (overlap) {
+ /* Someone else could have slipped in a size
+ * change here, but doing so is just silly.
+ * We put oldsize back because we *know* it is
+ * safe, and trust userspace not to race with
+ * itself
+ */
+ rdev->size = oldsize;
+ return -EBUSY;
+ }
+ }
if (size < rdev->mddev->size || rdev->mddev->size == 0)
rdev->mddev->size = size;
return len;
@@ -1980,12 +2077,18 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
{
struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
+ int rv;
if (!entry->store)
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- return entry->store(rdev, page, length);
+ rv = mddev_lock(rdev->mddev);
+ if (!rv) {
+ rv = entry->store(rdev, page, length);
+ mddev_unlock(rdev->mddev);
+ }
+ return rv;
}
static void rdev_free(struct kobject *ko)
@@ -2029,7 +2132,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
if ((err = alloc_disk_sb(rdev)))
goto abort_free;
- err = lock_rdev(rdev, newdev);
+ err = lock_rdev(rdev, newdev, super_format == -2);
if (err)
goto abort_free;
@@ -2099,7 +2202,7 @@ static void analyze_sbs(mddev_t * mddev)
char b[BDEVNAME_SIZE];
freshest = NULL;
- ITERATE_RDEV(mddev,rdev,tmp)
+ rdev_for_each(rdev, tmp, mddev)
switch (super_types[mddev->major_version].
load_super(rdev, freshest, mddev->minor_version)) {
case 1:
@@ -2120,7 +2223,7 @@ static void analyze_sbs(mddev_t * mddev)
validate_super(mddev, freshest);
i = 0;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (rdev != freshest)
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
@@ -2215,7 +2318,7 @@ level_show(mddev_t *mddev, char *page)
static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
- int rv = len;
+ ssize_t rv = len;
if (mddev->pers)
return -EBUSY;
if (len == 0)
@@ -2425,6 +2528,8 @@ array_state_show(mddev_t *mddev, char *page)
case 0:
if (mddev->in_sync)
st = clean;
+ else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ st = write_pending;
else if (mddev->safemode)
st = active_idle;
else
@@ -2455,11 +2560,9 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
break;
case clear:
/* stopping an active array */
- if (mddev->pers) {
- if (atomic_read(&mddev->active) > 1)
- return -EBUSY;
- err = do_md_stop(mddev, 0);
- }
+ if (atomic_read(&mddev->active) > 1)
+ return -EBUSY;
+ err = do_md_stop(mddev, 0);
break;
case inactive:
/* stopping an active array */
@@ -2467,7 +2570,8 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
if (atomic_read(&mddev->active) > 1)
return -EBUSY;
err = do_md_stop(mddev, 2);
- }
+ } else
+ err = 0; /* already inactive */
break;
case suspended:
break; /* not supported yet */
@@ -2495,9 +2599,15 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
restart_array(mddev);
spin_lock_irq(&mddev->write_lock);
if (atomic_read(&mddev->writes_pending) == 0) {
- mddev->in_sync = 1;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
- }
+ if (mddev->in_sync == 0) {
+ mddev->in_sync = 1;
+ if (mddev->persistent)
+ set_bit(MD_CHANGE_CLEAN,
+ &mddev->flags);
+ }
+ err = 0;
+ } else
+ err = -EBUSY;
spin_unlock_irq(&mddev->write_lock);
} else {
mddev->ro = 0;
@@ -2508,7 +2618,8 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
case active:
if (mddev->pers) {
restart_array(mddev);
- clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ if (mddev->external)
+ clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
@@ -2574,7 +2685,9 @@ new_dev_store(mddev_t *mddev, const char *buf, size_t len)
if (err < 0)
goto out;
}
- } else
+ } else if (mddev->external)
+ rdev = md_import_device(dev, -2, -1);
+ else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev))
@@ -2659,7 +2772,9 @@ __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
 /* Metadata version.
- * This is either 'none' for arrays with externally managed metadata,
+ * This is one of
+ * 'none' for arrays with no metadata (good luck...)
+ * 'external' for arrays with externally managed metadata,
* or N.M for internally known formats
*/
static ssize_t
@@ -2668,6 +2783,8 @@ metadata_show(mddev_t *mddev, char *page)
if (mddev->persistent)
return sprintf(page, "%d.%d\n",
mddev->major_version, mddev->minor_version);
+ else if (mddev->external)
+ return sprintf(page, "external:%s\n", mddev->metadata_type);
else
return sprintf(page, "none\n");
}
@@ -2682,6 +2799,21 @@ metadata_store(mddev_t *mddev, const char *buf, size_t len)
if (cmd_match(buf, "none")) {
mddev->persistent = 0;
+ mddev->external = 0;
+ mddev->major_version = 0;
+ mddev->minor_version = 90;
+ return len;
+ }
+ if (strncmp(buf, "external:", 9) == 0) {
+ size_t namelen = len-9;
+ if (namelen >= sizeof(mddev->metadata_type))
+ namelen = sizeof(mddev->metadata_type)-1;
+ strncpy(mddev->metadata_type, buf+9, namelen);
+ mddev->metadata_type[namelen] = 0;
+ if (namelen && mddev->metadata_type[namelen-1] == '\n')
+ mddev->metadata_type[--namelen] = 0;
+ mddev->persistent = 0;
+ mddev->external = 1;
mddev->major_version = 0;
mddev->minor_version = 90;
return len;
@@ -2698,6 +2830,7 @@ metadata_store(mddev_t *mddev, const char *buf, size_t len)
mddev->major_version = major;
mddev->minor_version = minor;
mddev->persistent = 1;
+ mddev->external = 0;
return len;
}
@@ -2865,6 +2998,43 @@ sync_completed_show(mddev_t *mddev, char *page)
static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
static ssize_t
+max_sync_show(mddev_t *mddev, char *page)
+{
+ if (mddev->resync_max == MaxSector)
+ return sprintf(page, "max\n");
+ else
+ return sprintf(page, "%llu\n",
+ (unsigned long long)mddev->resync_max);
+}
+static ssize_t
+max_sync_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ if (strncmp(buf, "max", 3) == 0)
+ mddev->resync_max = MaxSector;
+ else {
+ char *ep;
+ unsigned long long max = simple_strtoull(buf, &ep, 10);
+ if (ep == buf || (*ep != 0 && *ep != '\n'))
+ return -EINVAL;
+ if (max < mddev->resync_max &&
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ return -EBUSY;
+
+ /* Must be a multiple of chunk_size */
+ if (mddev->chunk_size) {
+ if (max & (sector_t)((mddev->chunk_size>>9)-1))
+ return -EINVAL;
+ }
+ mddev->resync_max = max;
+ }
+ wake_up(&mddev->recovery_wait);
+ return len;
+}
+
+static struct md_sysfs_entry md_max_sync =
+__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
+
+static ssize_t
suspend_lo_show(mddev_t *mddev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
@@ -2974,6 +3144,7 @@ static struct attribute *md_redundancy_attrs[] = {
&md_sync_max.attr,
&md_sync_speed.attr,
&md_sync_completed.attr,
+ &md_max_sync.attr,
&md_suspend_lo.attr,
&md_suspend_hi.attr,
&md_bitmap.attr,
@@ -3118,8 +3289,11 @@ static int do_md_run(mddev_t * mddev)
/*
* Analyze all RAID superblock(s)
*/
- if (!mddev->raid_disks)
+ if (!mddev->raid_disks) {
+ if (!mddev->persistent)
+ return -EINVAL;
analyze_sbs(mddev);
+ }
chunk_size = mddev->chunk_size;
@@ -3143,7 +3317,7 @@ static int do_md_run(mddev_t * mddev)
}
/* devices must have minimum size of one chunk */
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (test_bit(Faulty, &rdev->flags))
continue;
if (rdev->size < chunk_size / 1024) {
@@ -3170,7 +3344,7 @@ static int do_md_run(mddev_t * mddev)
* the only valid external interface is through the md
* device.
*/
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (test_bit(Faulty, &rdev->flags))
continue;
sync_blockdev(rdev->bdev);
@@ -3236,8 +3410,8 @@ static int do_md_run(mddev_t * mddev)
mdk_rdev_t *rdev2;
struct list_head *tmp2;
int warned = 0;
- ITERATE_RDEV(mddev, rdev, tmp) {
- ITERATE_RDEV(mddev, rdev2, tmp2) {
+ rdev_for_each(rdev, tmp, mddev) {
+ rdev_for_each(rdev2, tmp2, mddev) {
if (rdev < rdev2 &&
rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
@@ -3297,7 +3471,7 @@ static int do_md_run(mddev_t * mddev)
mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
mddev->in_sync = 1;
- ITERATE_RDEV(mddev,rdev,tmp)
+ rdev_for_each(rdev, tmp, mddev)
if (rdev->raid_disk >= 0) {
char nm[20];
sprintf(nm, "rd%d", rdev->raid_disk);
@@ -3330,7 +3504,7 @@ static int do_md_run(mddev_t * mddev)
if (mddev->degraded && !mddev->sync_thread) {
struct list_head *rtmp;
int spares = 0;
- ITERATE_RDEV(mddev,rdev,rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags))
@@ -3507,14 +3681,14 @@ static int do_md_stop(mddev_t * mddev, int mode)
}
mddev->bitmap_offset = 0;
- ITERATE_RDEV(mddev,rdev,tmp)
+ rdev_for_each(rdev, tmp, mddev)
if (rdev->raid_disk >= 0) {
char nm[20];
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&mddev->kobj, nm);
}
- /* make sure all delayed_delete calls have finished */
+ /* make sure all md_delayed_delete calls have finished */
flush_scheduled_work();
export_array(mddev);
@@ -3523,7 +3697,10 @@ static int do_md_stop(mddev_t * mddev, int mode)
mddev->size = 0;
mddev->raid_disks = 0;
mddev->recovery_cp = 0;
+ mddev->resync_max = MaxSector;
mddev->reshape_position = MaxSector;
+ mddev->external = 0;
+ mddev->persistent = 0;
} else if (mddev->pers)
printk(KERN_INFO "md: %s switched to read-only mode.\n",
@@ -3546,7 +3723,7 @@ static void autorun_array(mddev_t *mddev)
printk(KERN_INFO "md: running: ");
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
char b[BDEVNAME_SIZE];
printk("<%s>", bdevname(rdev->bdev,b));
}
@@ -3589,7 +3766,7 @@ static void autorun_devices(int part)
printk(KERN_INFO "md: considering %s ...\n",
bdevname(rdev0->bdev,b));
INIT_LIST_HEAD(&candidates);
- ITERATE_RDEV_PENDING(rdev,tmp)
+ rdev_for_each_list(rdev, tmp, pending_raid_disks)
if (super_90_load(rdev, rdev0, 0) >= 0) {
printk(KERN_INFO "md: adding %s ...\n",
bdevname(rdev->bdev,b));
@@ -3632,7 +3809,8 @@ static void autorun_devices(int part)
mddev_unlock(mddev);
} else {
printk(KERN_INFO "md: created %s\n", mdname(mddev));
- ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
+ mddev->persistent = 1;
+ rdev_for_each_list(rdev, tmp, candidates) {
list_del_init(&rdev->same_set);
if (bind_rdev_to_array(rdev, mddev))
export_rdev(rdev);
@@ -3643,7 +3821,7 @@ static void autorun_devices(int part)
/* on success, candidates will be empty, on error
* it won't...
*/
- ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
+ rdev_for_each_list(rdev, tmp, candidates)
export_rdev(rdev);
mddev_put(mddev);
}
@@ -3673,7 +3851,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
struct list_head *tmp;
nr=working=active=failed=spare=0;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
nr++;
if (test_bit(Faulty, &rdev->flags))
failed++;
@@ -3919,8 +4097,6 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
else
rdev->raid_disk = -1;
- rdev->flags = 0;
-
if (rdev->raid_disk < mddev->raid_disks)
if (info->state & (1<<MD_DISK_SYNC))
set_bit(In_sync, &rdev->flags);
@@ -4165,13 +4341,15 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
else
mddev->recovery_cp = 0;
mddev->persistent = ! info->not_persistent;
+ mddev->external = 0;
mddev->layout = info->layout;
mddev->chunk_size = info->chunk_size;
mddev->max_disks = MD_SB_DISKS;
- mddev->flags = 0;
+ if (mddev->persistent)
+ mddev->flags = 0;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
@@ -4213,7 +4391,7 @@ static int update_size(mddev_t *mddev, unsigned long size)
*/
if (mddev->sync_thread)
return -EBUSY;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
sector_t avail;
avail = rdev->size * 2;
@@ -4471,9 +4649,10 @@ static int md_ioctl(struct inode *inode, struct file *file,
*/
/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
* RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
- if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
- && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
- && cmd != GET_BITMAP_FILE) {
+ if ((!mddev->raid_disks && !mddev->external)
+ && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
+ && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
+ && cmd != GET_BITMAP_FILE) {
err = -ENODEV;
goto abort_unlock;
}
@@ -4757,7 +4936,7 @@ static void status_unused(struct seq_file *seq)
seq_printf(seq, "unused devices: ");
- ITERATE_RDEV_PENDING(rdev,tmp) {
+ rdev_for_each_list(rdev, tmp, pending_raid_disks) {
char b[BDEVNAME_SIZE];
i++;
seq_printf(seq, "%s ",
@@ -4953,7 +5132,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
}
size = 0;
- ITERATE_RDEV(mddev,rdev,tmp2) {
+ rdev_for_each(rdev, tmp2, mddev) {
char b[BDEVNAME_SIZE];
seq_printf(seq, " %s[%d]",
bdevname(rdev->bdev,b), rdev->desc_nr);
@@ -4982,7 +5161,10 @@ static int md_seq_show(struct seq_file *seq, void *v)
mddev->major_version,
mddev->minor_version);
}
- } else
+ } else if (mddev->external)
+ seq_printf(seq, " super external:%s",
+ mddev->metadata_type);
+ else
seq_printf(seq, " super non-persistent");
if (mddev->pers) {
@@ -5106,7 +5288,7 @@ static int is_mddev_idle(mddev_t *mddev)
long curr_events;
idle = 1;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
curr_events = disk_stat_read(disk, sectors[0]) +
disk_stat_read(disk, sectors[1]) -
@@ -5283,7 +5465,7 @@ void md_do_sync(mddev_t *mddev)
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
goto skip;
}
- ITERATE_MDDEV(mddev2,tmp) {
+ for_each_mddev(mddev2, tmp) {
if (mddev2 == mddev)
continue;
if (mddev2->curr_resync &&
@@ -5333,7 +5515,7 @@ void md_do_sync(mddev_t *mddev)
/* recovery follows the physical size of devices */
max_sectors = mddev->size << 1;
j = MaxSector;
- ITERATE_RDEV(mddev,rdev,rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
@@ -5381,8 +5563,16 @@ void md_do_sync(mddev_t *mddev)
sector_t sectors;
skipped = 0;
+ if (j >= mddev->resync_max) {
+ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ wait_event(mddev->recovery_wait,
+ mddev->resync_max > j
+ || kthread_should_stop());
+ }
+ if (kthread_should_stop())
+ goto interrupted;
sectors = mddev->pers->sync_request(mddev, j, &skipped,
- currspeed < speed_min(mddev));
+ currspeed < speed_min(mddev));
if (sectors == 0) {
set_bit(MD_RECOVERY_ERR, &mddev->recovery);
goto out;
@@ -5424,15 +5614,9 @@ void md_do_sync(mddev_t *mddev)
}
- if (kthread_should_stop()) {
- /*
- * got a signal, exit.
- */
- printk(KERN_INFO
- "md: md_do_sync() got signal ... exiting\n");
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- goto out;
- }
+ if (kthread_should_stop())
+ goto interrupted;
+
/*
* this loop exits only if either when we are slower than
@@ -5484,7 +5668,7 @@ void md_do_sync(mddev_t *mddev)
} else {
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev->curr_resync = MaxSector;
- ITERATE_RDEV(mddev,rdev,rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
@@ -5496,9 +5680,22 @@ void md_do_sync(mddev_t *mddev)
skip:
mddev->curr_resync = 0;
+ mddev->resync_max = MaxSector;
+ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
wake_up(&resync_wait);
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
md_wakeup_thread(mddev->thread);
+ return;
+
+ interrupted:
+ /*
+ * got a signal, exit.
+ */
+ printk(KERN_INFO
+ "md: md_do_sync() got signal ... exiting\n");
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ goto out;
+
}
EXPORT_SYMBOL_GPL(md_do_sync);
@@ -5509,8 +5706,9 @@ static int remove_and_add_spares(mddev_t *mddev)
struct list_head *rtmp;
int spares = 0;
- ITERATE_RDEV(mddev,rdev,rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
if (rdev->raid_disk >= 0 &&
+ !mddev->external &&
(test_bit(Faulty, &rdev->flags) ||
! test_bit(In_sync, &rdev->flags)) &&
atomic_read(&rdev->nr_pending)==0) {
@@ -5524,7 +5722,7 @@ static int remove_and_add_spares(mddev_t *mddev)
}
if (mddev->degraded) {
- ITERATE_RDEV(mddev,rdev,rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
if (rdev->raid_disk < 0
&& !test_bit(Faulty, &rdev->flags)) {
rdev->recovery_offset = 0;
@@ -5589,7 +5787,7 @@ void md_check_recovery(mddev_t *mddev)
}
if ( ! (
- mddev->flags ||
+ (mddev->flags && !mddev->external) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->safemode == 1) ||
@@ -5605,7 +5803,8 @@ void md_check_recovery(mddev_t *mddev)
if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
!mddev->in_sync && mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ if (mddev->persistent)
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
@@ -5637,7 +5836,7 @@ void md_check_recovery(mddev_t *mddev)
* information must be scrapped
*/
if (!mddev->degraded)
- ITERATE_RDEV(mddev,rdev,rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
rdev->saved_raid_disk = -1;
mddev->recovery = 0;
@@ -5714,7 +5913,7 @@ static int md_notify_reboot(struct notifier_block *this,
printk(KERN_INFO "md: stopping all md devices.\n");
- ITERATE_MDDEV(mddev,tmp)
+ for_each_mddev(mddev, tmp)
if (mddev_trylock(mddev)) {
do_md_stop (mddev, 1);
mddev_unlock(mddev);
@@ -5848,7 +6047,7 @@ static __exit void md_exit(void)
unregister_reboot_notifier(&md_notifier);
unregister_sysctl_table(raid_table_header);
remove_proc_entry("mdstat", NULL);
- ITERATE_MDDEV(mddev,tmp) {
+ for_each_mddev(mddev, tmp) {
struct gendisk *disk = mddev->gendisk;
if (!disk)
continue;
diff --git a/drivers/md/mktables.c b/drivers/md/mktables.c
index adef299908cf..b61d5767aae7 100644
--- a/drivers/md/mktables.c
+++ b/drivers/md/mktables.c
@@ -1,13 +1,10 @@
-#ident "$Id: mktables.c,v 1.2 2002/12/12 22:41:27 hpa Exp $"
-/* ----------------------------------------------------------------------- *
+/* -*- linux-c -*- ------------------------------------------------------- *
*
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
+ * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Bostom MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2 or (at your
+ * option) any later version; incorporated herein by reference.
*
* ----------------------------------------------------------------------- */
@@ -26,100 +23,98 @@
static uint8_t gfmul(uint8_t a, uint8_t b)
{
- uint8_t v = 0;
-
- while ( b ) {
- if ( b & 1 ) v ^= a;
- a = (a << 1) ^ (a & 0x80 ? 0x1d : 0);
- b >>= 1;
- }
- return v;
+ uint8_t v = 0;
+
+ while (b) {
+ if (b & 1)
+ v ^= a;
+ a = (a << 1) ^ (a & 0x80 ? 0x1d : 0);
+ b >>= 1;
+ }
+
+ return v;
}
static uint8_t gfpow(uint8_t a, int b)
{
- uint8_t v = 1;
-
- b %= 255;
- if ( b < 0 )
- b += 255;
-
- while ( b ) {
- if ( b & 1 ) v = gfmul(v,a);
- a = gfmul(a,a);
- b >>= 1;
- }
- return v;
+ uint8_t v = 1;
+
+ b %= 255;
+ if (b < 0)
+ b += 255;
+
+ while (b) {
+ if (b & 1)
+ v = gfmul(v, a);
+ a = gfmul(a, a);
+ b >>= 1;
+ }
+
+ return v;
}
int main(int argc, char *argv[])
{
- int i, j, k;
- uint8_t v;
- uint8_t exptbl[256], invtbl[256];
-
- printf("#include \"raid6.h\"\n");
-
- /* Compute multiplication table */
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfmul[256][256] =\n"
- "{\n");
- for ( i = 0 ; i < 256 ; i++ ) {
- printf("\t{\n");
- for ( j = 0 ; j < 256 ; j += 8 ) {
- printf("\t\t");
- for ( k = 0 ; k < 8 ; k++ ) {
- printf("0x%02x, ", gfmul(i,j+k));
- }
- printf("\n");
- }
- printf("\t},\n");
- }
- printf("};\n");
-
- /* Compute power-of-2 table (exponent) */
- v = 1;
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfexp[256] =\n"
- "{\n");
- for ( i = 0 ; i < 256 ; i += 8 ) {
- printf("\t");
- for ( j = 0 ; j < 8 ; j++ ) {
- exptbl[i+j] = v;
- printf("0x%02x, ", v);
- v = gfmul(v,2);
- if ( v == 1 ) v = 0; /* For entry 255, not a real entry */
- }
- printf("\n");
- }
- printf("};\n");
-
- /* Compute inverse table x^-1 == x^254 */
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfinv[256] =\n"
- "{\n");
- for ( i = 0 ; i < 256 ; i += 8 ) {
- printf("\t");
- for ( j = 0 ; j < 8 ; j++ ) {
- invtbl[i+j] = v = gfpow(i+j,254);
- printf("0x%02x, ", v);
- }
- printf("\n");
- }
- printf("};\n");
-
- /* Compute inv(2^x + 1) (exponent-xor-inverse) table */
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfexi[256] =\n"
- "{\n");
- for ( i = 0 ; i < 256 ; i += 8 ) {
- printf("\t");
- for ( j = 0 ; j < 8 ; j++ ) {
- printf("0x%02x, ", invtbl[exptbl[i+j]^1]);
- }
- printf("\n");
- }
- printf("};\n\n");
-
- return 0;
+ int i, j, k;
+ uint8_t v;
+ uint8_t exptbl[256], invtbl[256];
+
+ printf("#include \"raid6.h\"\n");
+
+ /* Compute multiplication table */
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gfmul[256][256] =\n"
+ "{\n");
+ for (i = 0; i < 256; i++) {
+ printf("\t{\n");
+ for (j = 0; j < 256; j += 8) {
+ printf("\t\t");
+ for (k = 0; k < 8; k++)
+ printf("0x%02x,%c", gfmul(i, j + k),
+ (k == 7) ? '\n' : ' ');
+ }
+ printf("\t},\n");
+ }
+ printf("};\n");
+
+ /* Compute power-of-2 table (exponent) */
+ v = 1;
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gfexp[256] =\n" "{\n");
+ for (i = 0; i < 256; i += 8) {
+ printf("\t");
+ for (j = 0; j < 8; j++) {
+ exptbl[i + j] = v;
+ printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
+ v = gfmul(v, 2);
+ if (v == 1)
+ v = 0; /* For entry 255, not a real entry */
+ }
+ }
+ printf("};\n");
+
+ /* Compute inverse table x^-1 == x^254 */
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gfinv[256] =\n" "{\n");
+ for (i = 0; i < 256; i += 8) {
+ printf("\t");
+ for (j = 0; j < 8; j++) {
+ invtbl[i + j] = v = gfpow(i + j, 254);
+ printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
+ }
+ }
+ printf("};\n");
+
+ /* Compute inv(2^x + 1) (exponent-xor-inverse) table */
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gfexi[256] =\n" "{\n");
+ for (i = 0; i < 256; i += 8) {
+ printf("\t");
+ for (j = 0; j < 8; j++)
+ printf("0x%02x,%c", invtbl[exptbl[i + j] ^ 1],
+ (j == 7) ? '\n' : ' ');
+ }
+ printf("};\n");
+
+ return 0;
}
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index eb631ebed686..3f299d835a2b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -436,7 +436,7 @@ static int multipath_run (mddev_t *mddev)
}
conf->working_disks = 0;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
disk_idx = rdev->raid_disk;
if (disk_idx < 0 ||
disk_idx >= mddev->raid_disks)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f8e591708d1f..818b48284096 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -72,11 +72,11 @@ static int create_strip_zones (mddev_t *mddev)
*/
conf->nr_strip_zones = 0;
- ITERATE_RDEV(mddev,rdev1,tmp1) {
+ rdev_for_each(rdev1, tmp1, mddev) {
printk("raid0: looking at %s\n",
bdevname(rdev1->bdev,b));
c = 0;
- ITERATE_RDEV(mddev,rdev2,tmp2) {
+ rdev_for_each(rdev2, tmp2, mddev) {
printk("raid0: comparing %s(%llu)",
bdevname(rdev1->bdev,b),
(unsigned long long)rdev1->size);
@@ -124,7 +124,7 @@ static int create_strip_zones (mddev_t *mddev)
cnt = 0;
smallest = NULL;
zone->dev = conf->devlist;
- ITERATE_RDEV(mddev, rdev1, tmp1) {
+ rdev_for_each(rdev1, tmp1, mddev) {
int j = rdev1->raid_disk;
if (j < 0 || j >= mddev->raid_disks) {
@@ -293,7 +293,7 @@ static int raid0_run (mddev_t *mddev)
/* calculate array device size */
mddev->array_size = 0;
- ITERATE_RDEV(mddev,rdev,tmp)
+ rdev_for_each(rdev, tmp, mddev)
mddev->array_size += rdev->size;
printk("raid0 : md_size is %llu blocks.\n",
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4a69c416e045..5c7fef091cec 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1684,6 +1684,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
if (!go_faster && conf->nr_waiting)
msleep_interruptible(1000);
+ bitmap_cond_end_sync(mddev->bitmap, sector_nr);
raise_barrier(conf);
conf->next_resync = sector_nr;
@@ -1766,6 +1767,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
return rv;
}
+ if (max_sector > mddev->resync_max)
+ max_sector = mddev->resync_max; /* Don't do IO beyond here */
nr_sectors = 0;
sync_blocks = 0;
do {
@@ -1884,7 +1887,7 @@ static int run(mddev_t *mddev)
if (!conf->r1bio_pool)
goto out_no_mem;
- ITERATE_RDEV(mddev, rdev, tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
|| disk_idx < 0)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5cdcc9386200..017f58113c33 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1657,6 +1657,9 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
return (max_sector - sector_nr) + sectors_skipped;
}
+ if (max_sector > mddev->resync_max)
+ max_sector = mddev->resync_max; /* Don't do IO beyond here */
+
/* make sure whole request will fit in a chunk - if chunks
* are meaningful
*/
@@ -1670,6 +1673,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
if (!go_faster && conf->nr_waiting)
msleep_interruptible(1000);
+ bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+
/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
* have bi_end_io, bi_sector, bi_bdev set,
@@ -2021,7 +2026,7 @@ static int run(mddev_t *mddev)
goto out_free_conf;
}
- ITERATE_RDEV(mddev, rdev, tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
|| disk_idx < 0)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e8c8157b02fc..2d6f1a51359c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3159,7 +3159,8 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
atomic_inc(&conf->preread_active_stripes);
list_add_tail(&sh->lru, &conf->handle_list);
}
- }
+ } else
+ blk_plug_device(conf->mddev->queue);
}
static void activate_bit_delay(raid5_conf_t *conf)
@@ -3549,7 +3550,8 @@ static int make_request(struct request_queue *q, struct bio * bi)
goto retry;
}
finish_wait(&conf->wait_for_overlap, &w);
- handle_stripe(sh, NULL);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ clear_bit(STRIPE_DELAYED, &sh->state);
release_stripe(sh);
} else {
/* cannot get stripe for read-ahead, just give-up */
@@ -3698,6 +3700,25 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
release_stripe(sh);
first_sector += STRIPE_SECTORS;
}
+ /* If this takes us to the resync_max point where we have to pause,
+ * then we need to write out the superblock.
+ */
+ sector_nr += conf->chunk_size>>9;
+ if (sector_nr >= mddev->resync_max) {
+ /* Cannot proceed until we've updated the superblock... */
+ wait_event(conf->wait_for_overlap,
+ atomic_read(&conf->reshape_stripes) == 0);
+ mddev->reshape_position = conf->expand_progress;
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ md_wakeup_thread(mddev->thread);
+ wait_event(mddev->sb_wait,
+ !test_bit(MD_CHANGE_DEVS, &mddev->flags)
+ || kthread_should_stop());
+ spin_lock_irq(&conf->device_lock);
+ conf->expand_lo = mddev->reshape_position;
+ spin_unlock_irq(&conf->device_lock);
+ wake_up(&conf->wait_for_overlap);
+ }
return conf->chunk_size>>9;
}
@@ -3734,6 +3755,12 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
return reshape_request(mddev, sector_nr, skipped);
+ /* No need to check resync_max as we never do more than one
+ * stripe, and as resync_max will always be on a chunk boundary,
+ * if the check in md_do_sync didn't fire, there is no chance
+ * of overstepping resync_max here
+ */
+
/* if there is too many failed drives and we are trying
* to resync, then assert that we are finished, because there is
* nothing we can do.
@@ -3753,6 +3780,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
}
+
+ bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+
pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks);
sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1);
if (sh == NULL) {
@@ -3864,7 +3894,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
* During the scan, completed stripes are saved for us by the interrupt
* handler, so that they will not have to wait for our next wakeup.
*/
-static void raid5d (mddev_t *mddev)
+static void raid5d(mddev_t *mddev)
{
struct stripe_head *sh;
raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3889,12 +3919,6 @@ static void raid5d (mddev_t *mddev)
activate_bit_delay(conf);
}
- if (list_empty(&conf->handle_list) &&
- atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
- !blk_queue_plugged(mddev->queue) &&
- !list_empty(&conf->delayed_list))
- raid5_activate_delayed(conf);
-
while ((bio = remove_bio_from_retry(conf))) {
int ok;
spin_unlock_irq(&conf->device_lock);
@@ -4108,7 +4132,7 @@ static int run(mddev_t *mddev)
pr_debug("raid5: run(%s) called.\n", mdname(mddev));
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
raid_disk = rdev->raid_disk;
if (raid_disk >= conf->raid_disks
|| raid_disk < 0)
@@ -4521,7 +4545,7 @@ static int raid5_start_reshape(mddev_t *mddev)
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
- ITERATE_RDEV(mddev, rdev, rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
if (rdev->raid_disk < 0 &&
!test_bit(Faulty, &rdev->flags))
spares++;
@@ -4543,7 +4567,7 @@ static int raid5_start_reshape(mddev_t *mddev)
/* Add some new drives, as many as will fit.
* We know there are enough to make the newly sized array work.
*/
- ITERATE_RDEV(mddev, rdev, rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
if (rdev->raid_disk < 0 &&
!test_bit(Faulty, &rdev->flags)) {
if (raid5_add_disk(mddev, rdev)) {
diff --git a/drivers/md/raid6test/test.c b/drivers/md/raid6test/test.c
index 0d5cd57accd7..559cc41b2585 100644
--- a/drivers/md/raid6test/test.c
+++ b/drivers/md/raid6test/test.c
@@ -1,12 +1,10 @@
/* -*- linux-c -*- ------------------------------------------------------- *
*
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
+ * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Bostom MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2 or (at your
+ * option) any later version; incorporated herein by reference.
*
* ----------------------------------------------------------------------- */
@@ -30,67 +28,87 @@ char *dataptrs[NDISKS];
char data[NDISKS][PAGE_SIZE];
char recovi[PAGE_SIZE], recovj[PAGE_SIZE];
-void makedata(void)
+static void makedata(void)
{
int i, j;
- for ( i = 0 ; i < NDISKS ; i++ ) {
- for ( j = 0 ; j < PAGE_SIZE ; j++ ) {
+ for (i = 0; i < NDISKS; i++) {
+ for (j = 0; j < PAGE_SIZE; j++)
data[i][j] = rand();
- }
+
dataptrs[i] = data[i];
}
}
+static char disk_type(int d)
+{
+ switch (d) {
+ case NDISKS-2:
+ return 'P';
+ case NDISKS-1:
+ return 'Q';
+ default:
+ return 'D';
+ }
+}
+
+static int test_disks(int i, int j)
+{
+ int erra, errb;
+
+ memset(recovi, 0xf0, PAGE_SIZE);
+ memset(recovj, 0xba, PAGE_SIZE);
+
+ dataptrs[i] = recovi;
+ dataptrs[j] = recovj;
+
+ raid6_dual_recov(NDISKS, PAGE_SIZE, i, j, (void **)&dataptrs);
+
+ erra = memcmp(data[i], recovi, PAGE_SIZE);
+ errb = memcmp(data[j], recovj, PAGE_SIZE);
+
+ if (i < NDISKS-2 && j == NDISKS-1) {
+ /* We don't implement the DQ failure scenario, since it's
+ equivalent to a RAID-5 failure (XOR, then recompute Q) */
+ erra = errb = 0;
+ } else {
+ printf("algo=%-8s faila=%3d(%c) failb=%3d(%c) %s\n",
+ raid6_call.name,
+ i, disk_type(i),
+ j, disk_type(j),
+ (!erra && !errb) ? "OK" :
+ !erra ? "ERRB" :
+ !errb ? "ERRA" : "ERRAB");
+ }
+
+ dataptrs[i] = data[i];
+ dataptrs[j] = data[j];
+
+ return erra || errb;
+}
+
int main(int argc, char *argv[])
{
- const struct raid6_calls * const * algo;
+ const struct raid6_calls *const *algo;
int i, j;
- int erra, errb;
+ int err = 0;
makedata();
- for ( algo = raid6_algos ; *algo ; algo++ ) {
- if ( !(*algo)->valid || (*algo)->valid() ) {
+ for (algo = raid6_algos; *algo; algo++) {
+ if (!(*algo)->valid || (*algo)->valid()) {
raid6_call = **algo;
/* Nuke syndromes */
memset(data[NDISKS-2], 0xee, 2*PAGE_SIZE);
/* Generate assumed good syndrome */
- raid6_call.gen_syndrome(NDISKS, PAGE_SIZE, (void **)&dataptrs);
-
- for ( i = 0 ; i < NDISKS-1 ; i++ ) {
- for ( j = i+1 ; j < NDISKS ; j++ ) {
- memset(recovi, 0xf0, PAGE_SIZE);
- memset(recovj, 0xba, PAGE_SIZE);
-
- dataptrs[i] = recovi;
- dataptrs[j] = recovj;
-
- raid6_dual_recov(NDISKS, PAGE_SIZE, i, j, (void **)&dataptrs);
-
- erra = memcmp(data[i], recovi, PAGE_SIZE);
- errb = memcmp(data[j], recovj, PAGE_SIZE);
-
- if ( i < NDISKS-2 && j == NDISKS-1 ) {
- /* We don't implement the DQ failure scenario, since it's
- equivalent to a RAID-5 failure (XOR, then recompute Q) */
- } else {
- printf("algo=%-8s faila=%3d(%c) failb=%3d(%c) %s\n",
- raid6_call.name,
- i, (i==NDISKS-2)?'P':'D',
- j, (j==NDISKS-1)?'Q':(j==NDISKS-2)?'P':'D',
- (!erra && !errb) ? "OK" :
- !erra ? "ERRB" :
- !errb ? "ERRA" :
- "ERRAB");
- }
-
- dataptrs[i] = data[i];
- dataptrs[j] = data[j];
- }
- }
+ raid6_call.gen_syndrome(NDISKS, PAGE_SIZE,
+ (void **)&dataptrs);
+
+ for (i = 0; i < NDISKS-1; i++)
+ for (j = i+1; j < NDISKS; j++)
+ err += test_disks(i, j);
}
printf("\n");
}
@@ -99,5 +117,8 @@ int main(int argc, char *argv[])
/* Pick the best algorithm test */
raid6_select_algo();
- return 0;
+ if (err)
+ printf("\n*** ERRORS FOUND ***\n");
+
+ return err;
}
diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c
index 7d04a6fd1acb..168a8d3a5e55 100644
--- a/drivers/media/common/saa7146_core.c
+++ b/drivers/media/common/saa7146_core.c
@@ -388,7 +388,7 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
}
dev->revision &= 0xf;
- /* remap the memory from virtual to physical adress */
+ /* remap the memory from virtual to physical address */
err = pci_request_region(pci, 0, "saa7146");
if (err < 0)
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index a33eb5988c42..ed3f8268ed11 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -681,7 +681,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
drop = 1;
/* else: destination address matches the MAC address of our receiver device */
}
- /* else: promiscious mode; pass everything up the stack */
+ /* else: promiscuous mode; pass everything up the stack */
if (drop) {
#ifdef ULE_DEBUG
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 28ddd146c1c5..850b8c6f4577 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -22,7 +22,6 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o
-obj-$(CONFIG_SOUND_TVMIXER) += tvmixer.o
obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o
obj-$(CONFIG_VIDEO_SAA5246A) += saa5246a.o
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 63a47cd4c161..7374c02dd183 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -4344,7 +4344,7 @@ static void rv605_muxsel(struct bttv *btv, unsigned int input)
gpio_bits(0x200,0x000);
mdelay(1);
- /* create a new conection */
+ /* create a new connection */
gpio_bits(0x480,0x080);
gpio_bits(0x480,0x480);
mdelay(1);
diff --git a/drivers/media/video/indycam.c b/drivers/media/video/indycam.c
index 5c2c4029ff86..84b9e4f2b3b3 100644
--- a/drivers/media/video/indycam.c
+++ b/drivers/media/video/indycam.c
@@ -326,7 +326,7 @@ static int indycam_attach(struct i2c_adapter *adap, int addr, int kind)
// initialize
err = indycam_write_block(client, 0, sizeof(initseq), (u8 *)&initseq);
if (err) {
- printk(KERN_ERR "IndyCam initalization failed\n");
+ printk(KERN_ERR "IndyCam initialization failed\n");
err = -EIO;
goto out_detach_client;
}
diff --git a/drivers/media/video/mt20xx.c b/drivers/media/video/mt20xx.c
index b630c26cfe85..58bab653330f 100644
--- a/drivers/media/video/mt20xx.c
+++ b/drivers/media/video/mt20xx.c
@@ -369,7 +369,7 @@ static struct dvb_tuner_ops mt2032_tuner_ops = {
.get_frequency = microtune_get_frequency,
};
-// Initalization as described in "MT203x Programming Procedures", Rev 1.2, Feb.2001
+// Initialization as described in "MT203x Programming Procedures", Rev 1.2, Feb.2001
static int mt2032_init(struct dvb_frontend *fe)
{
struct microtune_priv *priv = fe->tuner_priv;
diff --git a/drivers/media/video/pvrusb2/pvrusb2.h b/drivers/media/video/pvrusb2/pvrusb2.h
index 074533e9c21e..1a9a4baf12b8 100644
--- a/drivers/media/video/pvrusb2/pvrusb2.h
+++ b/drivers/media/video/pvrusb2/pvrusb2.h
@@ -27,7 +27,7 @@
might want to increase this - however the driver operation will not
be impaired if it is too small. Instead additional units just
won't have an ID assigned and it might not be possible to specify
- module paramters for those extra units. */
+ module parameters for those extra units. */
#define PVR_NUM 20
#endif /* __PVRUSB2_H */
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 7300ace8f44e..f991d72fe108 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -542,7 +542,7 @@ int pwc_handle_frame(struct pwc_device *pdev)
}
if (pdev->read_frame != NULL) {
- /* Decompression is a lenghty process, so it's outside of the lock.
+ /* Decompression is a lengthy process, so it's outside of the lock.
This gives the isoc_handler the opportunity to fill more frames
in the mean time.
*/
diff --git a/drivers/media/video/tea6420.c b/drivers/media/video/tea6420.c
index e0ff811fab6f..ca05cd655087 100644
--- a/drivers/media/video/tea6420.c
+++ b/drivers/media/video/tea6420.c
@@ -57,7 +57,7 @@ static int tea6420_switch(struct i2c_client *client, int i, int o, int g)
dprintk("adr:0x%02x, i:%d, o:%d, g:%d\n", client->addr, i, o, g);
- /* check if the paramters are valid */
+ /* check if the parameters are valid */
if (i < 1 || i > 6 || o < 1 || o > 4 || g < 0 || g > 6 || g % 2 != 0)
return -1;
diff --git a/drivers/media/video/tvmixer.c b/drivers/media/video/tvmixer.c
deleted file mode 100644
index 9fa5b702e073..000000000000
--- a/drivers/media/video/tvmixer.c
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/videodev.h>
-#include <linux/init.h>
-#include <linux/kdev_t.h>
-#include <linux/sound.h>
-#include <linux/soundcard.h>
-
-#include <asm/semaphore.h>
-#include <asm/uaccess.h>
-
-
-#define DEV_MAX 4
-
-static int devnr = -1;
-module_param(devnr, int, 0644);
-
-MODULE_AUTHOR("Gerd Knorr");
-MODULE_LICENSE("GPL");
-
-/* ----------------------------------------------------------------------- */
-
-struct TVMIXER {
- struct i2c_client *dev;
- int minor;
- int count;
-};
-
-static struct TVMIXER devices[DEV_MAX];
-
-static int tvmixer_adapters(struct i2c_adapter *adap);
-static int tvmixer_clients(struct i2c_client *client);
-
-/* ----------------------------------------------------------------------- */
-
-static int mix_to_v4l(int i)
-{
- int r;
-
- r = ((i & 0xff) * 65536 + 50) / 100;
- if (r > 65535) r = 65535;
- if (r < 0) r = 0;
- return r;
-}
-
-static int v4l_to_mix(int i)
-{
- int r;
-
- r = (i * 100 + 32768) / 65536;
- if (r > 100) r = 100;
- if (r < 0) r = 0;
- return r | (r << 8);
-}
-
-static int v4l_to_mix2(int l, int r)
-{
- r = (r * 100 + 32768) / 65536;
- if (r > 100) r = 100;
- if (r < 0) r = 0;
- l = (l * 100 + 32768) / 65536;
- if (l > 100) l = 100;
- if (l < 0) l = 0;
- return (r << 8) | l;
-}
-
-static int tvmixer_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct video_audio va;
- int left,right,ret,val = 0;
- struct TVMIXER *mix = file->private_data;
- struct i2c_client *client = mix->dev;
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
-
- if (NULL == client)
- return -ENODEV;
-
- if (cmd == SOUND_MIXER_INFO) {
- mixer_info info;
- strlcpy(info.id, "tv card", sizeof(info.id));
- strlcpy(info.name, client->name, sizeof(info.name));
- info.modify_counter = 42 /* FIXME */;
- if (copy_to_user(argp, &info, sizeof(info)))
- return -EFAULT;
- return 0;
- }
- if (cmd == SOUND_OLD_MIXER_INFO) {
- _old_mixer_info info;
- strlcpy(info.id, "tv card", sizeof(info.id));
- strlcpy(info.name, client->name, sizeof(info.name));
- if (copy_to_user(argp, &info, sizeof(info)))
- return -EFAULT;
- return 0;
- }
- if (cmd == OSS_GETVERSION)
- return put_user(SOUND_VERSION, p);
-
- if (_SIOC_DIR(cmd) & _SIOC_WRITE)
- if (get_user(val, p))
- return -EFAULT;
-
- /* read state */
- memset(&va,0,sizeof(va));
- client->driver->command(client,VIDIOCGAUDIO,&va);
-
- switch (cmd) {
- case MIXER_READ(SOUND_MIXER_RECMASK):
- case MIXER_READ(SOUND_MIXER_CAPS):
- case MIXER_READ(SOUND_MIXER_RECSRC):
- case MIXER_WRITE(SOUND_MIXER_RECSRC):
- ret = 0;
- break;
-
- case MIXER_READ(SOUND_MIXER_STEREODEVS):
- ret = SOUND_MASK_VOLUME;
- break;
- case MIXER_READ(SOUND_MIXER_DEVMASK):
- ret = SOUND_MASK_VOLUME;
- if (va.flags & VIDEO_AUDIO_BASS)
- ret |= SOUND_MASK_BASS;
- if (va.flags & VIDEO_AUDIO_TREBLE)
- ret |= SOUND_MASK_TREBLE;
- break;
-
- case MIXER_WRITE(SOUND_MIXER_VOLUME):
- left = mix_to_v4l(val);
- right = mix_to_v4l(val >> 8);
- va.volume = max(left,right);
- va.balance = (32768*min(left,right)) / (va.volume ? va.volume : 1);
- va.balance = (left<right) ? (65535-va.balance) : va.balance;
- if (va.volume)
- va.flags &= ~VIDEO_AUDIO_MUTE;
- client->driver->command(client,VIDIOCSAUDIO,&va);
- client->driver->command(client,VIDIOCGAUDIO,&va);
- /* fall throuth */
- case MIXER_READ(SOUND_MIXER_VOLUME):
- left = (min(65536 - va.balance,32768) *
- va.volume) / 32768;
- right = (min(va.balance,(u16)32768) *
- va.volume) / 32768;
- ret = v4l_to_mix2(left,right);
- break;
-
- case MIXER_WRITE(SOUND_MIXER_BASS):
- va.bass = mix_to_v4l(val);
- client->driver->command(client,VIDIOCSAUDIO,&va);
- client->driver->command(client,VIDIOCGAUDIO,&va);
- /* fall throuth */
- case MIXER_READ(SOUND_MIXER_BASS):
- ret = v4l_to_mix(va.bass);
- break;
-
- case MIXER_WRITE(SOUND_MIXER_TREBLE):
- va.treble = mix_to_v4l(val);
- client->driver->command(client,VIDIOCSAUDIO,&va);
- client->driver->command(client,VIDIOCGAUDIO,&va);
- /* fall throuth */
- case MIXER_READ(SOUND_MIXER_TREBLE):
- ret = v4l_to_mix(va.treble);
- break;
-
- default:
- return -EINVAL;
- }
- if (put_user(ret, p))
- return -EFAULT;
- return 0;
-}
-
-static int tvmixer_open(struct inode *inode, struct file *file)
-{
- int i, minor = iminor(inode);
- struct TVMIXER *mix = NULL;
- struct i2c_client *client = NULL;
-
- for (i = 0; i < DEV_MAX; i++) {
- if (devices[i].minor == minor) {
- mix = devices+i;
- client = mix->dev;
- break;
- }
- }
-
- if (NULL == client)
- return -ENODEV;
-
- /* lock bttv in memory while the mixer is in use */
- file->private_data = mix;
- if (client->adapter->owner)
- try_module_get(client->adapter->owner);
- return 0;
-}
-
-static int tvmixer_release(struct inode *inode, struct file *file)
-{
- struct TVMIXER *mix = file->private_data;
- struct i2c_client *client;
-
- client = mix->dev;
- if (NULL == client) {
- return -ENODEV;
- }
-
- module_put(client->adapter->owner);
- return 0;
-}
-
-static struct i2c_driver driver = {
- .driver = {
- .name = "tvmixer",
- },
- .id = I2C_DRIVERID_TVMIXER,
- .detach_adapter = tvmixer_adapters,
- .attach_adapter = tvmixer_adapters,
- .detach_client = tvmixer_clients,
-};
-
-static const struct file_operations tvmixer_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .ioctl = tvmixer_ioctl,
- .open = tvmixer_open,
- .release = tvmixer_release,
-};
-
-/* ----------------------------------------------------------------------- */
-
-static int tvmixer_adapters(struct i2c_adapter *adap)
-{
- struct i2c_client *client;
-
- list_for_each_entry(client, &adap->clients, list)
- tvmixer_clients(client);
- return 0;
-}
-
-static int tvmixer_clients(struct i2c_client *client)
-{
- struct video_audio va;
- int i,minor;
-
- if (!(client->adapter->class & I2C_CLASS_TV_ANALOG))
- return -1;
-
- /* unregister ?? */
- for (i = 0; i < DEV_MAX; i++) {
- if (devices[i].dev == client) {
- /* unregister */
- unregister_sound_mixer(devices[i].minor);
- devices[i].dev = NULL;
- devices[i].minor = -1;
- printk("tvmixer: %s unregistered (#1)\n",
- client->name);
- return 0;
- }
- }
-
- /* look for a free slot */
- for (i = 0; i < DEV_MAX; i++)
- if (NULL == devices[i].dev)
- break;
- if (i == DEV_MAX) {
- printk(KERN_WARNING "tvmixer: DEV_MAX too small\n");
- return -1;
- }
-
- /* audio chip with mixer ??? */
- if (NULL == client->driver->command)
- return -1;
- memset(&va,0,sizeof(va));
- if (0 != client->driver->command(client,VIDIOCGAUDIO,&va))
- return -1;
- if (0 == (va.flags & VIDEO_AUDIO_VOLUME))
- return -1;
-
- /* everything is fine, register */
- if ((minor = register_sound_mixer(&tvmixer_fops,devnr)) < 0) {
- printk(KERN_ERR "tvmixer: cannot allocate mixer device\n");
- return -1;
- }
-
- devices[i].minor = minor;
- devices[i].count = 0;
- devices[i].dev = client;
- printk("tvmixer: %s (%s) registered with minor %d\n",
- client->name,client->adapter->name,minor);
-
- return 0;
-}
-
-/* ----------------------------------------------------------------------- */
-
-static int __init tvmixer_init_module(void)
-{
- int i;
-
- for (i = 0; i < DEV_MAX; i++)
- devices[i].minor = -1;
-
- return i2c_add_driver(&driver);
-}
-
-static void __exit tvmixer_cleanup_module(void)
-{
- int i;
-
- i2c_del_driver(&driver);
- for (i = 0; i < DEV_MAX; i++) {
- if (devices[i].minor != -1) {
- unregister_sound_mixer(devices[i].minor);
- printk("tvmixer: %s unregistered (#2)\n",
- devices[i].dev->name);
- }
- }
-}
-
-module_init(tvmixer_init_module);
-module_exit(tvmixer_cleanup_module);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index d847273eeba0..5e7b79501370 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -258,7 +258,7 @@ static void qcm_hsv2rgb(u16 hue, u16 sat, u16 val, u16 *r, u16 *g, u16 *b)
unsigned int p;
/*
- the registers controling gain are 8 bit of which
+ the registers controlling gain are 8 bit of which
we affect only the last 4 bits with our gain.
we know that if saturation is 0, (unsaturated) then
we're grayscale (center axis of the colour cone) so
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index b52b826a30be..df52f8a60215 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -131,7 +131,7 @@ static struct usbvision_v4l2_format_st usbvision_v4l2_format[] = {
/* Function prototypes */
static void usbvision_release(struct usb_usbvision *usbvision);
-/* Default initalization of device driver parameters */
+/* Default initialization of device driver parameters */
/* Set the default format for ISOC endpoint */
static int isocMode = ISOC_MODE_COMPRESS;
/* Set the default Debug Mode of the device driver */
diff --git a/drivers/media/video/vpx3220.c b/drivers/media/video/vpx3220.c
index 8ef31ed7d3f1..a9133858e913 100644
--- a/drivers/media/video/vpx3220.c
+++ b/drivers/media/video/vpx3220.c
@@ -566,7 +566,7 @@ vpx3220_init_client (struct i2c_client *client)
}
/* -----------------------------------------------------------------------
- * Client managment code
+ * Client management code
*/
/*
diff --git a/drivers/media/video/zoran_card.c b/drivers/media/video/zoran_card.c
index 6e0ac4c5c379..690281bb59ee 100644
--- a/drivers/media/video/zoran_card.c
+++ b/drivers/media/video/zoran_card.c
@@ -1270,7 +1270,7 @@ zoran_setup_videocodec (struct zoran *zr,
}
/*
- * Scan for a Buz card (actually for the PCI contoler ZR36057),
+ * Scan for a Buz card (actually for the PCI controller ZR36057),
* request the irq and map the io memory
*/
static int __devinit
diff --git a/drivers/media/video/zr36050.c b/drivers/media/video/zr36050.c
index 9f622e00c479..faae4ec3ea0b 100644
--- a/drivers/media/video/zr36050.c
+++ b/drivers/media/video/zr36050.c
@@ -161,7 +161,7 @@ zr36050_wait_end (struct zr36050 *ptr)
udelay(1);
if (i++ > 200000) { // 200ms, there is for sure something wrong!!!
dprintk(1,
- "%s: timout at wait_end (last status: 0x%02x)\n",
+ "%s: timeout at wait_end (last status: 0x%02x)\n",
ptr->name, ptr->status1);
break;
}
diff --git a/drivers/media/video/zr36060.c b/drivers/media/video/zr36060.c
index 1ef14fef08e6..7849b65969d0 100644
--- a/drivers/media/video/zr36060.c
+++ b/drivers/media/video/zr36060.c
@@ -163,7 +163,7 @@ zr36060_wait_end (struct zr36060 *ptr)
udelay(1);
if (i++ > 200000) { // 200ms, there is for sure something wrong!!!
dprintk(1,
- "%s: timout at wait_end (last status: 0x%02x)\n",
+ "%s: timeout at wait_end (last status: 0x%02x)\n",
ptr->name, ptr->status);
break;
}
diff --git a/drivers/message/fusion/lsi/mpi_log_sas.h b/drivers/message/fusion/lsi/mpi_log_sas.h
index 6be1f6b65777..af9da03e95e5 100644
--- a/drivers/message/fusion/lsi/mpi_log_sas.h
+++ b/drivers/message/fusion/lsi/mpi_log_sas.h
@@ -162,7 +162,7 @@
#define PL_LOGINFO_SUB_CODE_FRAME_XFER_ERROR (0x00000400) /* Bits 0-3 encode Transport Status Register (offset 0x08) */
/* Bit 0 is Status Bit 0: FrameXferErr */
/* Bit 1 & 2 are Status Bits 16 and 17: FrameXmitErrStatus */
- /* Bit 3 is Status Bit 18 WriteDataLenghtGTDataLengthErr */
+ /* Bit 3 is Status Bit 18 WriteDataLengthGTDataLengthErr */
#define PL_LOGINFO_SUB_CODE_TX_FM_CONNECTED_LOW (0x00000500)
#define PL_LOGINFO_SUB_CODE_SATA_NON_NCQ_RW_ERR_BIT_SET (0x00000600)
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 6029509702d3..e630b50966ec 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1708,7 +1708,7 @@ mptctl_replace_fw (unsigned long arg)
*
* Outputs: None.
* Return: 0 if successful
- * -EBUSY if previous command timout and IOC reset is not complete.
+ * -EBUSY if previous command timeout and IOC reset is not complete.
* -EFAULT if data unavailable
* -ENODEV if no such device/adapter
* -ETIME if timer expires
@@ -1748,7 +1748,7 @@ mptctl_mpt_command (unsigned long arg)
*
* Outputs: None.
* Return: 0 if successful
- * -EBUSY if previous command timout and IOC reset is not complete.
+ * -EBUSY if previous command timeout and IOC reset is not complete.
* -EFAULT if data unavailable
* -ENODEV if no such device/adapter
* -ETIME if timer expires
@@ -2316,7 +2316,7 @@ done_free_mem:
* Outputs: None.
* Return: 0 if successful
* -EFAULT if data unavailable
- * -EBUSY if previous command timout and IOC reset is not complete.
+ * -EBUSY if previous command timeout and IOC reset is not complete.
* -ENODEV if no such device/adapter
* -ETIME if timer expires
* -ENOMEM if memory allocation error
@@ -2553,7 +2553,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
* Outputs: None.
* Return: 0 if successful
* -EFAULT if data unavailable
- * -EBUSY if previous command timout and IOC reset is not complete.
+ * -EBUSY if previous command timeout and IOC reset is not complete.
* -ENODEV if no such device/adapter
* -ETIME if timer expires
* -ENOMEM if memory allocation error
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 5c614ec38cc4..af1de0ccee2f 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1736,7 +1736,7 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, i
fail_out:
/*
- * Free task managment mf, and corresponding tm flags
+ * Free task management mf, and corresponding tm flags
*/
mpt_free_msg_frame(ioc, mf);
hd->tmPending = 0;
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 7814a06ae970..da715e11c1b2 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -916,7 +916,7 @@ static int i2o_parse_hrt(struct i2o_controller *c)
* status block. The status block could then be accessed through
* c->status_block.
*
- * Returns 0 on sucess or negative error code on failure.
+ * Returns 0 on success or negative error code on failure.
*/
int i2o_status_get(struct i2o_controller *c)
{
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 25716193a534..0c886c882385 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -15,6 +15,13 @@ config MFD_SM501
interface. The device may be connected by PCI or local bus with
varying functions enabled.
+config MFD_ASIC3
+ bool "Support for Compaq ASIC3"
+ depends on GENERIC_HARDIRQS && ARM
+ ---help---
+	  This driver supports the ASIC3 multifunction chip found on many
+	  PDAs (mainly iPAQ and HTC-based ones).
+
endmenu
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 51432091b323..521cd5cb68af 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_MFD_SM501) += sm501.o
+obj-$(CONFIG_MFD_ASIC3) += asic3.o
obj-$(CONFIG_MCP) += mcp-core.o
obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
new file mode 100644
index 000000000000..63fb1ff3ad10
--- /dev/null
+++ b/drivers/mfd/asic3.c
@@ -0,0 +1,588 @@
+/*
+ * drivers/mfd/asic3.c
+ *
+ * Compaq ASIC3 support.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright 2001 Compaq Computer Corporation.
+ * Copyright 2004-2005 Phil Blundell
+ * Copyright 2007 OpenedHand Ltd.
+ *
+ * Authors: Phil Blundell <pb@handhelds.org>,
+ * Samuel Ortiz <sameo@openedhand.com>
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/asic3.h>
+
+static inline void asic3_write_register(struct asic3 *asic,
+ unsigned int reg, u32 value)
+{
+ iowrite16(value, (unsigned long)asic->mapping +
+ (reg >> asic->bus_shift));
+}
+
+static inline u32 asic3_read_register(struct asic3 *asic,
+ unsigned int reg)
+{
+ return ioread16((unsigned long)asic->mapping +
+ (reg >> asic->bus_shift));
+}
+
+/* IRQs */
+#define MAX_ASIC_ISR_LOOPS 20
+#define ASIC3_GPIO_Base_INCR \
+ (ASIC3_GPIO_B_Base - ASIC3_GPIO_A_Base)
+
+static void asic3_irq_flip_edge(struct asic3 *asic,
+ u32 base, int bit)
+{
+ u16 edge;
+ unsigned long flags;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ edge = asic3_read_register(asic,
+ base + ASIC3_GPIO_EdgeTrigger);
+ edge ^= bit;
+ asic3_write_register(asic,
+ base + ASIC3_GPIO_EdgeTrigger, edge);
+ spin_unlock_irqrestore(&asic->lock, flags);
+}
+
+static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
+{
+ int iter, i;
+ unsigned long flags;
+ struct asic3 *asic;
+
+ desc->chip->ack(irq);
+
+ asic = desc->handler_data;
+
+ for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
+ u32 status;
+ int bank;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ status = asic3_read_register(asic,
+ ASIC3_OFFSET(INTR, PIntStat));
+ spin_unlock_irqrestore(&asic->lock, flags);
+
+ /* Check all ten register bits */
+ if ((status & 0x3ff) == 0)
+ break;
+
+ /* Handle GPIO IRQs */
+ for (bank = 0; bank < ASIC3_NUM_GPIO_BANKS; bank++) {
+ if (status & (1 << bank)) {
+ unsigned long base, istat;
+
+ base = ASIC3_GPIO_A_Base
+ + bank * ASIC3_GPIO_Base_INCR;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ istat = asic3_read_register(asic,
+ base +
+ ASIC3_GPIO_IntStatus);
+ /* Clearing IntStatus */
+ asic3_write_register(asic,
+ base +
+ ASIC3_GPIO_IntStatus, 0);
+ spin_unlock_irqrestore(&asic->lock, flags);
+
+ for (i = 0; i < ASIC3_GPIOS_PER_BANK; i++) {
+ int bit = (1 << i);
+ unsigned int irqnr;
+
+ if (!(istat & bit))
+ continue;
+
+ irqnr = asic->irq_base +
+ (ASIC3_GPIOS_PER_BANK * bank)
+ + i;
+ desc = irq_desc + irqnr;
+ desc->handle_irq(irqnr, desc);
+ if (asic->irq_bothedge[bank] & bit)
+ asic3_irq_flip_edge(asic, base,
+ bit);
+ }
+ }
+ }
+
+ /* Handle remaining IRQs in the status register */
+ for (i = ASIC3_NUM_GPIOS; i < ASIC3_NR_IRQS; i++) {
+ /* They start at bit 4 and go up */
+ if (status & (1 << (i - ASIC3_NUM_GPIOS + 4))) {
+				desc = irq_desc + asic->irq_base + i;
+ desc->handle_irq(asic->irq_base + i,
+ desc);
+ }
+ }
+ }
+
+ if (iter >= MAX_ASIC_ISR_LOOPS)
+ printk(KERN_ERR "%s: interrupt processing overrun\n",
+ __FUNCTION__);
+}
+
+static inline int asic3_irq_to_bank(struct asic3 *asic, int irq)
+{
+ int n;
+
+ n = (irq - asic->irq_base) >> 4;
+
+ return (n * (ASIC3_GPIO_B_Base - ASIC3_GPIO_A_Base));
+}
+
+static inline int asic3_irq_to_index(struct asic3 *asic, int irq)
+{
+ return (irq - asic->irq_base) & 0xf;
+}
+
+static void asic3_mask_gpio_irq(unsigned int irq)
+{
+ struct asic3 *asic = get_irq_chip_data(irq);
+ u32 val, bank, index;
+ unsigned long flags;
+
+ bank = asic3_irq_to_bank(asic, irq);
+ index = asic3_irq_to_index(asic, irq);
+
+ spin_lock_irqsave(&asic->lock, flags);
+ val = asic3_read_register(asic, bank + ASIC3_GPIO_Mask);
+ val |= 1 << index;
+ asic3_write_register(asic, bank + ASIC3_GPIO_Mask, val);
+ spin_unlock_irqrestore(&asic->lock, flags);
+}
+
+static void asic3_mask_irq(unsigned int irq)
+{
+ struct asic3 *asic = get_irq_chip_data(irq);
+ int regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ regval = asic3_read_register(asic,
+ ASIC3_INTR_Base +
+ ASIC3_INTR_IntMask);
+
+ regval &= ~(ASIC3_INTMASK_MASK0 <<
+ (irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
+
+ asic3_write_register(asic,
+ ASIC3_INTR_Base +
+ ASIC3_INTR_IntMask,
+ regval);
+ spin_unlock_irqrestore(&asic->lock, flags);
+}
+
+static void asic3_unmask_gpio_irq(unsigned int irq)
+{
+ struct asic3 *asic = get_irq_chip_data(irq);
+ u32 val, bank, index;
+ unsigned long flags;
+
+ bank = asic3_irq_to_bank(asic, irq);
+ index = asic3_irq_to_index(asic, irq);
+
+ spin_lock_irqsave(&asic->lock, flags);
+ val = asic3_read_register(asic, bank + ASIC3_GPIO_Mask);
+ val &= ~(1 << index);
+ asic3_write_register(asic, bank + ASIC3_GPIO_Mask, val);
+ spin_unlock_irqrestore(&asic->lock, flags);
+}
+
+static void asic3_unmask_irq(unsigned int irq)
+{
+ struct asic3 *asic = get_irq_chip_data(irq);
+ int regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ regval = asic3_read_register(asic,
+ ASIC3_INTR_Base +
+ ASIC3_INTR_IntMask);
+
+ regval |= (ASIC3_INTMASK_MASK0 <<
+ (irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
+
+ asic3_write_register(asic,
+ ASIC3_INTR_Base +
+ ASIC3_INTR_IntMask,
+ regval);
+ spin_unlock_irqrestore(&asic->lock, flags);
+}
+
+static int asic3_gpio_irq_type(unsigned int irq, unsigned int type)
+{
+ struct asic3 *asic = get_irq_chip_data(irq);
+ u32 bank, index;
+ u16 trigger, level, edge, bit;
+ unsigned long flags;
+
+ bank = asic3_irq_to_bank(asic, irq);
+ index = asic3_irq_to_index(asic, irq);
+ bit = 1<<index;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ level = asic3_read_register(asic,
+ bank + ASIC3_GPIO_LevelTrigger);
+ edge = asic3_read_register(asic,
+ bank + ASIC3_GPIO_EdgeTrigger);
+ trigger = asic3_read_register(asic,
+ bank + ASIC3_GPIO_TriggerType);
+ asic->irq_bothedge[(irq - asic->irq_base) >> 4] &= ~bit;
+
+ if (type == IRQT_RISING) {
+ trigger |= bit;
+ edge |= bit;
+ } else if (type == IRQT_FALLING) {
+ trigger |= bit;
+ edge &= ~bit;
+ } else if (type == IRQT_BOTHEDGE) {
+ trigger |= bit;
+ if (asic3_gpio_get_value(asic, irq - asic->irq_base))
+ edge &= ~bit;
+ else
+ edge |= bit;
+ asic->irq_bothedge[(irq - asic->irq_base) >> 4] |= bit;
+ } else if (type == IRQT_LOW) {
+ trigger &= ~bit;
+ level &= ~bit;
+ } else if (type == IRQT_HIGH) {
+ trigger &= ~bit;
+ level |= bit;
+ } else {
+ /*
+ * if type == IRQT_NOEDGE, we should mask interrupts, but
+ * be careful to not unmask them if mask was also called.
+ * Probably need internal state for mask.
+ */
+ printk(KERN_NOTICE "asic3: irq type not changed.\n");
+ }
+ asic3_write_register(asic, bank + ASIC3_GPIO_LevelTrigger,
+ level);
+ asic3_write_register(asic, bank + ASIC3_GPIO_EdgeTrigger,
+ edge);
+ asic3_write_register(asic, bank + ASIC3_GPIO_TriggerType,
+ trigger);
+ spin_unlock_irqrestore(&asic->lock, flags);
+ return 0;
+}
+
+static struct irq_chip asic3_gpio_irq_chip = {
+ .name = "ASIC3-GPIO",
+ .ack = asic3_mask_gpio_irq,
+ .mask = asic3_mask_gpio_irq,
+ .unmask = asic3_unmask_gpio_irq,
+ .set_type = asic3_gpio_irq_type,
+};
+
+static struct irq_chip asic3_irq_chip = {
+ .name = "ASIC3",
+ .ack = asic3_mask_irq,
+ .mask = asic3_mask_irq,
+ .unmask = asic3_unmask_irq,
+};
+
+static int asic3_irq_probe(struct platform_device *pdev)
+{
+ struct asic3 *asic = platform_get_drvdata(pdev);
+ unsigned long clksel = 0;
+ unsigned int irq, irq_base;
+
+ asic->irq_nr = platform_get_irq(pdev, 0);
+ if (asic->irq_nr < 0)
+ return asic->irq_nr;
+
+ /* turn on clock to IRQ controller */
+ clksel |= CLOCK_SEL_CX;
+ asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL),
+ clksel);
+
+ irq_base = asic->irq_base;
+
+ for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) {
+ if (irq < asic->irq_base + ASIC3_NUM_GPIOS)
+ set_irq_chip(irq, &asic3_gpio_irq_chip);
+ else
+ set_irq_chip(irq, &asic3_irq_chip);
+
+ set_irq_chip_data(irq, asic);
+ set_irq_handler(irq, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+
+ asic3_write_register(asic, ASIC3_OFFSET(INTR, IntMask),
+ ASIC3_INTMASK_GINTMASK);
+
+ set_irq_chained_handler(asic->irq_nr, asic3_irq_demux);
+ set_irq_type(asic->irq_nr, IRQT_RISING);
+ set_irq_data(asic->irq_nr, asic);
+
+ return 0;
+}
+
+static void asic3_irq_remove(struct platform_device *pdev)
+{
+ struct asic3 *asic = platform_get_drvdata(pdev);
+ unsigned int irq, irq_base;
+
+ irq_base = asic->irq_base;
+
+ for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) {
+ set_irq_flags(irq, 0);
+ set_irq_handler(irq, NULL);
+ set_irq_chip(irq, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+ set_irq_chained_handler(asic->irq_nr, NULL);
+}
+
+/* GPIOs */
+static inline u32 asic3_get_gpio(struct asic3 *asic, unsigned int base,
+ unsigned int function)
+{
+ return asic3_read_register(asic, base + function);
+}
+
+static void asic3_set_gpio(struct asic3 *asic, unsigned int base,
+ unsigned int function, u32 bits, u32 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ val |= (asic3_read_register(asic, base + function) & ~bits);
+
+ asic3_write_register(asic, base + function, val);
+ spin_unlock_irqrestore(&asic->lock, flags);
+}
+
+#define asic3_get_gpio_a(asic, fn) \
+ asic3_get_gpio(asic, ASIC3_GPIO_A_Base, ASIC3_GPIO_##fn)
+#define asic3_get_gpio_b(asic, fn) \
+ asic3_get_gpio(asic, ASIC3_GPIO_B_Base, ASIC3_GPIO_##fn)
+#define asic3_get_gpio_c(asic, fn) \
+ asic3_get_gpio(asic, ASIC3_GPIO_C_Base, ASIC3_GPIO_##fn)
+#define asic3_get_gpio_d(asic, fn) \
+ asic3_get_gpio(asic, ASIC3_GPIO_D_Base, ASIC3_GPIO_##fn)
+
+#define asic3_set_gpio_a(asic, fn, bits, val) \
+ asic3_set_gpio(asic, ASIC3_GPIO_A_Base, ASIC3_GPIO_##fn, bits, val)
+#define asic3_set_gpio_b(asic, fn, bits, val) \
+ asic3_set_gpio(asic, ASIC3_GPIO_B_Base, ASIC3_GPIO_##fn, bits, val)
+#define asic3_set_gpio_c(asic, fn, bits, val) \
+ asic3_set_gpio(asic, ASIC3_GPIO_C_Base, ASIC3_GPIO_##fn, bits, val)
+#define asic3_set_gpio_d(asic, fn, bits, val) \
+ asic3_set_gpio(asic, ASIC3_GPIO_D_Base, ASIC3_GPIO_##fn, bits, val)
+
+#define asic3_set_gpio_banks(asic, fn, bits, pdata, field) \
+ do { \
+ asic3_set_gpio_a((asic), fn, (bits), (pdata)->gpio_a.field); \
+ asic3_set_gpio_b((asic), fn, (bits), (pdata)->gpio_b.field); \
+ asic3_set_gpio_c((asic), fn, (bits), (pdata)->gpio_c.field); \
+ asic3_set_gpio_d((asic), fn, (bits), (pdata)->gpio_d.field); \
+ } while (0)
+
+int asic3_gpio_get_value(struct asic3 *asic, unsigned gpio)
+{
+ u32 mask = ASIC3_GPIO_bit(gpio);
+
+ switch (gpio >> 4) {
+ case ASIC3_GPIO_BANK_A:
+ return asic3_get_gpio_a(asic, Status) & mask;
+ case ASIC3_GPIO_BANK_B:
+ return asic3_get_gpio_b(asic, Status) & mask;
+ case ASIC3_GPIO_BANK_C:
+ return asic3_get_gpio_c(asic, Status) & mask;
+ case ASIC3_GPIO_BANK_D:
+ return asic3_get_gpio_d(asic, Status) & mask;
+ default:
+		printk(KERN_ERR "%s: invalid GPIO value 0x%x\n",
+ __FUNCTION__, gpio);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(asic3_gpio_get_value);
+
+void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val)
+{
+ u32 mask = ASIC3_GPIO_bit(gpio);
+ u32 bitval = 0;
+ if (val)
+ bitval = mask;
+
+ switch (gpio >> 4) {
+ case ASIC3_GPIO_BANK_A:
+ asic3_set_gpio_a(asic, Out, mask, bitval);
+ return;
+ case ASIC3_GPIO_BANK_B:
+ asic3_set_gpio_b(asic, Out, mask, bitval);
+ return;
+ case ASIC3_GPIO_BANK_C:
+ asic3_set_gpio_c(asic, Out, mask, bitval);
+ return;
+ case ASIC3_GPIO_BANK_D:
+ asic3_set_gpio_d(asic, Out, mask, bitval);
+ return;
+ default:
+		printk(KERN_ERR "%s: invalid GPIO value 0x%x\n",
+ __FUNCTION__, gpio);
+ return;
+ }
+}
+EXPORT_SYMBOL(asic3_gpio_set_value);
+
+static int asic3_gpio_probe(struct platform_device *pdev)
+{
+ struct asic3_platform_data *pdata = pdev->dev.platform_data;
+ struct asic3 *asic = platform_get_drvdata(pdev);
+
+ asic3_write_register(asic, ASIC3_GPIO_OFFSET(A, Mask), 0xffff);
+ asic3_write_register(asic, ASIC3_GPIO_OFFSET(B, Mask), 0xffff);
+ asic3_write_register(asic, ASIC3_GPIO_OFFSET(C, Mask), 0xffff);
+ asic3_write_register(asic, ASIC3_GPIO_OFFSET(D, Mask), 0xffff);
+
+ asic3_set_gpio_a(asic, SleepMask, 0xffff, 0xffff);
+ asic3_set_gpio_b(asic, SleepMask, 0xffff, 0xffff);
+ asic3_set_gpio_c(asic, SleepMask, 0xffff, 0xffff);
+ asic3_set_gpio_d(asic, SleepMask, 0xffff, 0xffff);
+
+ if (pdata) {
+ asic3_set_gpio_banks(asic, Out, 0xffff, pdata, init);
+ asic3_set_gpio_banks(asic, Direction, 0xffff, pdata, dir);
+ asic3_set_gpio_banks(asic, SleepMask, 0xffff, pdata,
+ sleep_mask);
+ asic3_set_gpio_banks(asic, SleepOut, 0xffff, pdata, sleep_out);
+ asic3_set_gpio_banks(asic, BattFaultOut, 0xffff, pdata,
+ batt_fault_out);
+ asic3_set_gpio_banks(asic, SleepConf, 0xffff, pdata,
+ sleep_conf);
+ asic3_set_gpio_banks(asic, AltFunction, 0xffff, pdata,
+ alt_function);
+ }
+
+ return 0;
+}
+
+static void asic3_gpio_remove(struct platform_device *pdev)
+{
+ return;
+}
+
+
+/* Core */
+static int asic3_probe(struct platform_device *pdev)
+{
+ struct asic3_platform_data *pdata = pdev->dev.platform_data;
+ struct asic3 *asic;
+ struct resource *mem;
+ unsigned long clksel;
+ int ret;
+
+ asic = kzalloc(sizeof(struct asic3), GFP_KERNEL);
+ if (!asic)
+ return -ENOMEM;
+
+ spin_lock_init(&asic->lock);
+ platform_set_drvdata(pdev, asic);
+ asic->dev = &pdev->dev;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ ret = -ENOMEM;
+ printk(KERN_ERR "asic3: no MEM resource\n");
+ goto err_out_1;
+ }
+
+ asic->mapping = ioremap(mem->start, PAGE_SIZE);
+ if (!asic->mapping) {
+ ret = -ENOMEM;
+ printk(KERN_ERR "asic3: couldn't ioremap\n");
+ goto err_out_1;
+ }
+
+ asic->irq_base = pdata->irq_base;
+
+ if (pdata && pdata->bus_shift)
+ asic->bus_shift = 2 - pdata->bus_shift;
+ else
+ asic->bus_shift = 0;
+
+ clksel = 0;
+ asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), clksel);
+
+ ret = asic3_irq_probe(pdev);
+ if (ret < 0) {
+ printk(KERN_ERR "asic3: couldn't probe IRQs\n");
+ goto err_out_2;
+ }
+ asic3_gpio_probe(pdev);
+
+ if (pdata->children) {
+ int i;
+ for (i = 0; i < pdata->n_children; i++) {
+ pdata->children[i]->dev.parent = &pdev->dev;
+ platform_device_register(pdata->children[i]);
+ }
+ }
+
+ printk(KERN_INFO "ASIC3 Core driver\n");
+
+ return 0;
+
+ err_out_2:
+ iounmap(asic->mapping);
+ err_out_1:
+ kfree(asic);
+
+ return ret;
+}
+
+static int asic3_remove(struct platform_device *pdev)
+{
+ struct asic3 *asic = platform_get_drvdata(pdev);
+
+ asic3_gpio_remove(pdev);
+ asic3_irq_remove(pdev);
+
+ asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), 0);
+
+ iounmap(asic->mapping);
+
+ kfree(asic);
+
+ return 0;
+}
+
+static void asic3_shutdown(struct platform_device *pdev)
+{
+}
+
+static struct platform_driver asic3_device_driver = {
+ .driver = {
+ .name = "asic3",
+ },
+ .probe = asic3_probe,
+ .remove = __devexit_p(asic3_remove),
+ .shutdown = asic3_shutdown,
+};
+
+static int __init asic3_init(void)
+{
+ int retval = 0;
+ retval = platform_driver_register(&asic3_device_driver);
+ return retval;
+}
+
+subsys_initcall(asic3_init);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index b5e67c0ff433..78cd33861766 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -92,6 +92,22 @@ config TIFM_7XX1
To compile this driver as a module, choose M here: the module will
be called tifm_7xx1.
+config ACER_WMI
+ tristate "Acer WMI Laptop Extras (EXPERIMENTAL)"
+ depends on X86
+ depends on EXPERIMENTAL
+ depends on ACPI
+ depends on ACPI_WMI
+ depends on LEDS_CLASS
+ depends on BACKLIGHT_CLASS_DEVICE
+ ---help---
+ This is a driver for newer Acer (and Wistron) laptops. It adds
+ wireless radio and bluetooth control, and on some laptops,
+ exposes the mail LED and LCD backlight.
+
+	  If you have an ACPI-WMI compatible Acer/Wistron laptop, say Y or M
+ here.
+
config ASUS_LAPTOP
tristate "Asus Laptop Extras (EXPERIMENTAL)"
depends on X86
@@ -126,6 +142,15 @@ config FUJITSU_LAPTOP
If you have a Fujitsu laptop, say Y or M here.
+config TC1100_WMI
+ tristate "HP Compaq TC1100 Tablet WMI Extras"
+ depends on X86 && !X86_64
+ depends on ACPI
+ depends on ACPI_WMI
+ ---help---
+ This is a driver for the WMI extensions (wireless and bluetooth power
+ control) of the HP Compaq TC1100 tablet.
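+
+ To compile this driver as a module, choose M here: the module will
+ be called tc1100-wmi.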
+
config MSI_LAPTOP
tristate "MSI Laptop Extras"
depends on X86
@@ -219,6 +244,25 @@ config THINKPAD_ACPI_BAY
If you are not sure, say Y here.
+config THINKPAD_ACPI_HOTKEY_POLL
+ bool "Suport NVRAM polling for hot keys"
+ depends on THINKPAD_ACPI
+ default y
+ ---help---
+ Some ThinkPad models benefit from NVRAM polling to detect a few of
+ the hot key press events. If you know your ThinkPad model does not
+ need to do NVRAM polling to support any of the hot keys you use,
+ unselecting this option will save about 1kB of memory.
+
+ ThinkPads T40 and newer, R52 and newer, and X31 and newer are
+ unlikely to need NVRAM polling in their latest BIOS versions.
+
+ NVRAM polling can detect at most the following keys: ThinkPad/Access
+ IBM, Zoom, Switch Display (fn+F7), ThinkLight, Volume up/down/mute,
+ Brightness up/down, Display Expand (fn+F8), Hibernate (fn+F12).
+
+ If you are not sure, say Y here. The driver enables polling only if
+ it is strictly necessary to do so.
config ATMEL_SSC
tristate "Device driver for Atmel SSC peripheral"
@@ -232,4 +276,13 @@ config ATMEL_SSC
If unsure, say N.
+config INTEL_MENLOW
+ tristate "Thermal Management driver for Intel menlow platform"
+ depends on ACPI_THERMAL
+ ---help---
+ ACPI thermal management enhancement driver for the
+ Intel Menlow platform.
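+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_menlow.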
+
+ If unsure, say N.
+
endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 87f2685d728f..1f41654aae4d 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -6,8 +6,10 @@ obj- := misc.o # Dummy rule to force built-in.o to be made
obj-$(CONFIG_IBM_ASM) += ibmasm/
obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
+obj-$(CONFIG_ACER_WMI) += acer-wmi.o
obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
+obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
obj-$(CONFIG_LKDTM) += lkdtm.o
obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
@@ -17,3 +19,4 @@ obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
+obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c
new file mode 100644
index 000000000000..a4d677504250
--- /dev/null
+++ b/drivers/misc/acer-wmi.c
@@ -0,0 +1,1109 @@
+/*
+ * Acer WMI Laptop Extras
+ *
+ * Copyright (C) 2007-2008 Carlos Corbacho <carlos@strangeworlds.co.uk>
+ *
+ * Based on acer_acpi:
+ * Copyright (C) 2005-2007 E.M. Smith
+ * Copyright (C) 2007-2008 Carlos Corbacho <cathectic@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define ACER_WMI_VERSION "0.1"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/dmi.h>
+#include <linux/backlight.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/i8042.h>
+
+#include <acpi/acpi_drivers.h>
+
+MODULE_AUTHOR("Carlos Corbacho");
+MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver");
+MODULE_LICENSE("GPL");
+
+#define ACER_LOGPREFIX "acer-wmi: "
+#define ACER_ERR KERN_ERR ACER_LOGPREFIX
+#define ACER_NOTICE KERN_NOTICE ACER_LOGPREFIX
+#define ACER_INFO KERN_INFO ACER_LOGPREFIX
+
+/*
+ * The following defines quirks to get some specific functions to work
+ * which are known not to be supported over ACPI-WMI (such as the mail LED
+ * on WMID-based Acer laptops)
+ */
+struct acer_quirks {
+ const char *vendor;
+ const char *model;
+ u16 quirks;
+};
+
+/*
+ * Magic Number
+ * Meaning is unknown - this number is required for writing to ACPI for AMW0
+ * (it's also used in acerhk when directly accessing the BIOS)
+ */
+#define ACER_AMW0_WRITE 0x9610
+
+/*
+ * Bit masks for the AMW0 interface
+ */
+#define ACER_AMW0_WIRELESS_MASK 0x35
+#define ACER_AMW0_BLUETOOTH_MASK 0x34
+#define ACER_AMW0_MAILLED_MASK 0x31
+
+/*
+ * Method IDs for WMID interface
+ */
+#define ACER_WMID_GET_WIRELESS_METHODID 1
+#define ACER_WMID_GET_BLUETOOTH_METHODID 2
+#define ACER_WMID_GET_BRIGHTNESS_METHODID 3
+#define ACER_WMID_SET_WIRELESS_METHODID 4
+#define ACER_WMID_SET_BLUETOOTH_METHODID 5
+#define ACER_WMID_SET_BRIGHTNESS_METHODID 6
+#define ACER_WMID_GET_THREEG_METHODID 10
+#define ACER_WMID_SET_THREEG_METHODID 11
+
+/*
+ * Acer ACPI method GUIDs
+ */
+#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB"
+#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"
+#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A"
+
+MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
+MODULE_ALIAS("wmi:6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3");
+
+/* Temporary workaround until the WMI sysfs interface goes in */
+MODULE_ALIAS("dmi:*:*Acer*:*:");
+
+/*
+ * Interface capability flags
+ */
+#define ACER_CAP_MAILLED (1<<0)
+#define ACER_CAP_WIRELESS (1<<1)
+#define ACER_CAP_BLUETOOTH (1<<2)
+#define ACER_CAP_BRIGHTNESS (1<<3)
+#define ACER_CAP_THREEG (1<<4)
+#define ACER_CAP_ANY (0xFFFFFFFF)
+
+/*
+ * Interface type flags
+ */
+enum interface_flags {
+ ACER_AMW0,
+ ACER_AMW0_V2,
+ ACER_WMID,
+};
+
+#define ACER_DEFAULT_WIRELESS 0
+#define ACER_DEFAULT_BLUETOOTH 0
+#define ACER_DEFAULT_MAILLED 0
+#define ACER_DEFAULT_THREEG 0
+
+static int max_brightness = 0xF;
+
+static int wireless = -1;
+static int bluetooth = -1;
+static int mailled = -1;
+static int brightness = -1;
+static int threeg = -1;
+static int force_series;
+
+module_param(mailled, int, 0444);
+module_param(wireless, int, 0444);
+module_param(bluetooth, int, 0444);
+module_param(brightness, int, 0444);
+module_param(threeg, int, 0444);
+module_param(force_series, int, 0444);
+MODULE_PARM_DESC(wireless, "Set initial state of Wireless hardware");
+MODULE_PARM_DESC(bluetooth, "Set initial state of Bluetooth hardware");
+MODULE_PARM_DESC(mailled, "Set initial state of Mail LED");
+MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness");
+MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware");
+MODULE_PARM_DESC(force_series, "Force a different laptop series");
+
+struct acer_data {
+ int mailled;
+ int wireless;
+ int bluetooth;
+ int threeg;
+ int brightness;
+};
+
+/* Each low-level interface must define at least some of the following */
+struct wmi_interface {
+ /* The WMI device type */
+ u32 type;
+
+ /* The capabilities this interface provides */
+ u32 capability;
+
+ /* Private data for the current interface */
+ struct acer_data data;
+};
+
+/* The static interface pointer, points to the currently detected interface */
+static struct wmi_interface *interface;
+
+/*
+ * Embedded Controller quirks
+ * Some laptops require us to directly access the EC to either enable or query
+ * features that are not available through WMI.
+ */
+
+struct quirk_entry {
+ u8 wireless;
+ u8 mailled;
+ u8 brightness;
+ u8 bluetooth;
+};
+
+static struct quirk_entry *quirks;
+
+static void set_quirks(void)
+{
+ if (quirks->mailled)
+ interface->capability |= ACER_CAP_MAILLED;
+
+ if (quirks->brightness)
+ interface->capability |= ACER_CAP_BRIGHTNESS;
+}
+
+static int dmi_matched(const struct dmi_system_id *dmi)
+{
+ quirks = dmi->driver_data;
+ return 0;
+}
+
+static struct quirk_entry quirk_unknown = {
+};
+
+static struct quirk_entry quirk_acer_travelmate_2490 = {
+ .mailled = 1,
+};
+
+/* This AMW0 laptop has no bluetooth */
+static struct quirk_entry quirk_medion_md_98300 = {
+ .wireless = 1,
+};
+
+static struct dmi_system_id acer_quirks[] = {
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 3100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3100"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 5100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 5630",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 5650",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 5680",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 9110",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer TravelMate 2490",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Medion MD 98300",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "WAM2030"),
+ },
+ .driver_data = &quirk_medion_md_98300,
+ },
+ {}
+};
+
+/* Find which quirks are needed for a particular vendor/model pair */
+static void find_quirks(void)
+{
+ if (!force_series) {
+ dmi_check_system(acer_quirks);
+ } else if (force_series == 2490) {
+ quirks = &quirk_acer_travelmate_2490;
+ }
+
+ if (quirks == NULL)
+ quirks = &quirk_unknown;
+
+ set_quirks();
+}
+
+/*
+ * General interface convenience methods
+ */
+
+static bool has_cap(u32 cap)
+{
+ if ((interface->capability & cap) != 0)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * AMW0 (V1) interface
+ */
+struct wmab_args {
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+};
+
+struct wmab_ret {
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+ u32 eex;
+};
+
+static acpi_status wmab_execute(struct wmab_args *regbuf,
+struct acpi_buffer *result)
+{
+ struct acpi_buffer input;
+ acpi_status status;
+ input.length = sizeof(struct wmab_args);
+ input.pointer = (u8 *)regbuf;
+
+ status = wmi_evaluate_method(AMW0_GUID1, 1, 1, &input, result);
+
+ return status;
+}
+
+static acpi_status AMW0_get_u32(u32 *value, u32 cap,
+struct wmi_interface *iface)
+{
+ int err;
+ u8 result;
+
+ switch (cap) {
+ case ACER_CAP_MAILLED:
+ switch (quirks->mailled) {
+ default:
+ err = ec_read(0xA, &result);
+ if (err)
+ return AE_ERROR;
+ *value = (result >> 7) & 0x1;
+ return AE_OK;
+ }
+ break;
+ case ACER_CAP_WIRELESS:
+ switch (quirks->wireless) {
+ case 1:
+ err = ec_read(0x7B, &result);
+ if (err)
+ return AE_ERROR;
+ *value = result & 0x1;
+ return AE_OK;
+ default:
+ err = ec_read(0xA, &result);
+ if (err)
+ return AE_ERROR;
+ *value = (result >> 2) & 0x1;
+ return AE_OK;
+ }
+ break;
+ case ACER_CAP_BLUETOOTH:
+ switch (quirks->bluetooth) {
+ default:
+ err = ec_read(0xA, &result);
+ if (err)
+ return AE_ERROR;
+ *value = (result >> 4) & 0x1;
+ return AE_OK;
+ }
+ break;
+ case ACER_CAP_BRIGHTNESS:
+ switch (quirks->brightness) {
+ default:
+ err = ec_read(0x83, &result);
+ if (err)
+ return AE_ERROR;
+ *value = result;
+ return AE_OK;
+ }
+ break;
+ default:
+ return AE_BAD_ADDRESS;
+ }
+ return AE_OK;
+}
+
+static acpi_status AMW0_set_u32(u32 value, u32 cap, struct wmi_interface *iface)
+{
+ struct wmab_args args;
+
+ args.eax = ACER_AMW0_WRITE;
+ args.ebx = value ? (1<<8) : 0;
+ args.ecx = args.edx = 0;
+
+ switch (cap) {
+ case ACER_CAP_MAILLED:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ args.ebx |= ACER_AMW0_MAILLED_MASK;
+ break;
+ case ACER_CAP_WIRELESS:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ args.ebx |= ACER_AMW0_WIRELESS_MASK;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ args.ebx |= ACER_AMW0_BLUETOOTH_MASK;
+ break;
+ case ACER_CAP_BRIGHTNESS:
+ if (value > max_brightness)
+ return AE_BAD_PARAMETER;
+ switch (quirks->brightness) {
+ case 1:
+ return ec_write(0x83, value);
+ default:
+ return AE_BAD_ADDRESS;
+ break;
+ }
+ default:
+ return AE_BAD_ADDRESS;
+ }
+
+ /* Actually do the set */
+ return wmab_execute(&args, NULL);
+}
+
+static acpi_status AMW0_find_mailled(void)
+{
+ struct wmab_args args;
+ struct wmab_ret ret;
+ acpi_status status = AE_OK;
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+
+ args.eax = 0x86;
+ args.ebx = args.ecx = args.edx = 0;
+
+ status = wmab_execute(&args, &out);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) out.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER &&
+ obj->buffer.length == sizeof(struct wmab_ret)) {
+ ret = *((struct wmab_ret *) obj->buffer.pointer);
+ } else {
+ return AE_ERROR;
+ }
+
+ if (ret.eex & 0x1)
+ interface->capability |= ACER_CAP_MAILLED;
+
+ kfree(out.pointer);
+
+ return AE_OK;
+}
+
+static acpi_status AMW0_set_capabilities(void)
+{
+ struct wmab_args args;
+ struct wmab_ret ret;
+ acpi_status status = AE_OK;
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+
+ args.eax = ACER_AMW0_WRITE;
+ args.ecx = args.edx = 0;
+
+ args.ebx = 0xa2 << 8;
+ args.ebx |= ACER_AMW0_WIRELESS_MASK;
+
+ status = wmab_execute(&args, &out);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) out.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER &&
+ obj->buffer.length == sizeof(struct wmab_ret)) {
+ ret = *((struct wmab_ret *) obj->buffer.pointer);
+ } else {
+ return AE_ERROR;
+ }
+
+ if (ret.eax & 0x1)
+ interface->capability |= ACER_CAP_WIRELESS;
+
+ args.ebx = 2 << 8;
+ args.ebx |= ACER_AMW0_BLUETOOTH_MASK;
+
+ status = wmab_execute(&args, &out);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) out.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER
+ && obj->buffer.length == sizeof(struct wmab_ret)) {
+ ret = *((struct wmab_ret *) obj->buffer.pointer);
+ } else {
+ return AE_ERROR;
+ }
+
+ if (ret.eax & 0x1)
+ interface->capability |= ACER_CAP_BLUETOOTH;
+
+ kfree(out.pointer);
+
+ /*
+ * This appears to be safe to enable, since all Wistron-based laptops
+ * appear to use the same EC register for brightness, even if they
+ * differ for wireless, etc.
+ */
+ interface->capability |= ACER_CAP_BRIGHTNESS;
+
+ return AE_OK;
+}
+
+static struct wmi_interface AMW0_interface = {
+ .type = ACER_AMW0,
+};
+
+static struct wmi_interface AMW0_V2_interface = {
+ .type = ACER_AMW0_V2,
+};
+
+/*
+ * New interface (The WMID interface)
+ */
+static acpi_status
+WMI_execute_u32(u32 method_id, u32 in, u32 *out)
+{
+ struct acpi_buffer input = { (acpi_size) sizeof(u32), (void *)(&in) };
+ struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ u32 tmp;
+ acpi_status status;
+
+ status = wmi_evaluate_method(WMID_GUID1, 1, method_id, &input, &result);
+
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) result.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER &&
+ obj->buffer.length == sizeof(u32)) {
+ tmp = *((u32 *) obj->buffer.pointer);
+ } else {
+ tmp = 0;
+ }
+
+ if (out)
+ *out = tmp;
+
+ kfree(result.pointer);
+
+ return status;
+}
+
+static acpi_status WMID_get_u32(u32 *value, u32 cap,
+struct wmi_interface *iface)
+{
+ acpi_status status;
+ u8 tmp;
+ u32 result, method_id = 0;
+
+ switch (cap) {
+ case ACER_CAP_WIRELESS:
+ method_id = ACER_WMID_GET_WIRELESS_METHODID;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ method_id = ACER_WMID_GET_BLUETOOTH_METHODID;
+ break;
+ case ACER_CAP_BRIGHTNESS:
+ method_id = ACER_WMID_GET_BRIGHTNESS_METHODID;
+ break;
+ case ACER_CAP_THREEG:
+ method_id = ACER_WMID_GET_THREEG_METHODID;
+ break;
+ case ACER_CAP_MAILLED:
+ if (quirks->mailled == 1) {
+ ec_read(0x9f, &tmp);
+ *value = tmp & 0x1;
+ return 0;
+ }
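+ /* fall through: there is no WMID method for the mail LED */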
+ default:
+ return AE_BAD_ADDRESS;
+ }
+ status = WMI_execute_u32(method_id, 0, &result);
+
+ if (ACPI_SUCCESS(status))
+ *value = (u8)result;
+
+ return status;
+}
+
+static acpi_status WMID_set_u32(u32 value, u32 cap, struct wmi_interface *iface)
+{
+ u32 method_id = 0;
+ char param;
+
+ switch (cap) {
+ case ACER_CAP_BRIGHTNESS:
+ if (value > max_brightness)
+ return AE_BAD_PARAMETER;
+ method_id = ACER_WMID_SET_BRIGHTNESS_METHODID;
+ break;
+ case ACER_CAP_WIRELESS:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ method_id = ACER_WMID_SET_WIRELESS_METHODID;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ method_id = ACER_WMID_SET_BLUETOOTH_METHODID;
+ break;
+ case ACER_CAP_THREEG:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ method_id = ACER_WMID_SET_THREEG_METHODID;
+ break;
+ case ACER_CAP_MAILLED:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ if (quirks->mailled == 1) {
+ param = value ? 0x92 : 0x93;
+ i8042_command(&param, 0x1059);
+ return 0;
+ }
+ break;
+ default:
+ return AE_BAD_ADDRESS;
+ }
+ return WMI_execute_u32(method_id, (u32)value, NULL);
+}
+
+static acpi_status WMID_set_capabilities(void)
+{
+ struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *obj;
+ acpi_status status;
+ u32 devices;
+
+ status = wmi_query_block(WMID_GUID2, 1, &out);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) out.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER &&
+ obj->buffer.length == sizeof(u32)) {
+ devices = *((u32 *) obj->buffer.pointer);
+ } else {
+ return AE_ERROR;
+ }
+
+ /* Not yet sure which bits detect these devices, so assume both are present */
+ interface->capability |= ACER_CAP_WIRELESS;
+ interface->capability |= ACER_CAP_THREEG;
+
+ /* WMID always provides brightness methods */
+ interface->capability |= ACER_CAP_BRIGHTNESS;
+
+ if (devices & 0x10)
+ interface->capability |= ACER_CAP_BLUETOOTH;
+
+ if (!(devices & 0x20))
+ max_brightness = 0x9;
+
+ return status;
+}
+
+static struct wmi_interface wmid_interface = {
+ .type = ACER_WMID,
+};
+
+/*
+ * Generic Device (interface-independent)
+ */
+
+static acpi_status get_u32(u32 *value, u32 cap)
+{
+ acpi_status status = AE_BAD_ADDRESS;
+
+ switch (interface->type) {
+ case ACER_AMW0:
+ status = AMW0_get_u32(value, cap, interface);
+ break;
+ case ACER_AMW0_V2:
+ if (cap == ACER_CAP_MAILLED) {
+ status = AMW0_get_u32(value, cap, interface);
+ break;
+ }
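+ /* fall through: everything else is handled via the WMID interface */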
+ case ACER_WMID:
+ status = WMID_get_u32(value, cap, interface);
+ break;
+ }
+
+ return status;
+}
+
+static acpi_status set_u32(u32 value, u32 cap)
+{
+ if (interface->capability & cap) {
+ switch (interface->type) {
+ case ACER_AMW0:
+ return AMW0_set_u32(value, cap, interface);
+ case ACER_AMW0_V2:
+ case ACER_WMID:
+ return WMID_set_u32(value, cap, interface);
+ default:
+ return AE_BAD_PARAMETER;
+ }
+ }
+ return AE_BAD_PARAMETER;
+}
+
+static void __init acer_commandline_init(void)
+{
+ /*
+ * These will all fail silently if the value given is invalid, or the
+ * capability isn't available on the given interface
+ */
+ set_u32(mailled, ACER_CAP_MAILLED);
+ set_u32(wireless, ACER_CAP_WIRELESS);
+ set_u32(bluetooth, ACER_CAP_BLUETOOTH);
+ set_u32(threeg, ACER_CAP_THREEG);
+ set_u32(brightness, ACER_CAP_BRIGHTNESS);
+}
+
+/*
+ * LED device (Mail LED only, no other LEDs known yet)
+ */
+static void mail_led_set(struct led_classdev *led_cdev,
+enum led_brightness value)
+{
+ set_u32(value, ACER_CAP_MAILLED);
+}
+
+static struct led_classdev mail_led = {
+ .name = "acer-mail:green",
+ .brightness_set = mail_led_set,
+};
+
+static int __init acer_led_init(struct device *dev)
+{
+ return led_classdev_register(dev, &mail_led);
+}
+
+static void acer_led_exit(void)
+{
+ led_classdev_unregister(&mail_led);
+}
+
+/*
+ * Backlight device
+ */
+static struct backlight_device *acer_backlight_device;
+
+static int read_brightness(struct backlight_device *bd)
+{
+ u32 value;
+ get_u32(&value, ACER_CAP_BRIGHTNESS);
+ return value;
+}
+
+static int update_bl_status(struct backlight_device *bd)
+{
+ set_u32(bd->props.brightness, ACER_CAP_BRIGHTNESS);
+ return 0;
+}
+
+static struct backlight_ops acer_bl_ops = {
+ .get_brightness = read_brightness,
+ .update_status = update_bl_status,
+};
+
+static int __init acer_backlight_init(struct device *dev)
+{
+ struct backlight_device *bd;
+
+ bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops);
+ if (IS_ERR(bd)) {
+ printk(ACER_ERR "Could not register Acer backlight device\n");
+ acer_backlight_device = NULL;
+ return PTR_ERR(bd);
+ }
+
+ acer_backlight_device = bd;
+
+ bd->props.max_brightness = max_brightness;
+ bd->props.brightness = read_brightness(NULL);
+ backlight_update_status(bd);
+ return 0;
+}
+
+static void __exit acer_backlight_exit(void)
+{
+ backlight_device_unregister(acer_backlight_device);
+}
+
+/*
+ * Read/write bool sysfs macro
+ */
+#define show_set_bool(value, cap) \
+static ssize_t \
+show_bool_##value(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ u32 result; \
+ acpi_status status = get_u32(&result, cap); \
+ if (ACPI_SUCCESS(status)) \
+ return sprintf(buf, "%u\n", result); \
+ return sprintf(buf, "Read error\n"); \
+} \
+\
+static ssize_t \
+set_bool_##value(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ u32 tmp = simple_strtoul(buf, NULL, 10); \
+ acpi_status status = set_u32(tmp, cap); \
+ if (ACPI_FAILURE(status)) \
+ return -EINVAL; \
+ return count; \
+} \
+static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
+ show_bool_##value, set_bool_##value);
+
+show_set_bool(wireless, ACER_CAP_WIRELESS);
+show_set_bool(bluetooth, ACER_CAP_BLUETOOTH);
+show_set_bool(threeg, ACER_CAP_THREEG);
+
+/*
+ * Read interface sysfs macro
+ */
+static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ switch (interface->type) {
+ case ACER_AMW0:
+ return sprintf(buf, "AMW0\n");
+ case ACER_AMW0_V2:
+ return sprintf(buf, "AMW0 v2\n");
+ case ACER_WMID:
+ return sprintf(buf, "WMID\n");
+ default:
+ return sprintf(buf, "Error!\n");
+ }
+}
+
+static DEVICE_ATTR(interface, S_IWUGO | S_IRUGO | S_IWUSR,
+ show_interface, NULL);
+
+/*
+ * Platform device
+ */
+static int __devinit acer_platform_probe(struct platform_device *device)
+{
+ int err;
+
+ if (has_cap(ACER_CAP_MAILLED)) {
+ err = acer_led_init(&device->dev);
+ if (err)
+ goto error_mailled;
+ }
+
+ if (has_cap(ACER_CAP_BRIGHTNESS)) {
+ err = acer_backlight_init(&device->dev);
+ if (err)
+ goto error_brightness;
+ }
+
+ return 0;
+
+error_brightness:
+ acer_led_exit();
+error_mailled:
+ return err;
+}
+
+static int acer_platform_remove(struct platform_device *device)
+{
+ if (has_cap(ACER_CAP_MAILLED))
+ acer_led_exit();
+ if (has_cap(ACER_CAP_BRIGHTNESS))
+ acer_backlight_exit();
+ return 0;
+}
+
+static int acer_platform_suspend(struct platform_device *dev,
+pm_message_t state)
+{
+ u32 value;
+ struct acer_data *data = &interface->data;
+
+ if (!data)
+ return -ENOMEM;
+
+ if (has_cap(ACER_CAP_WIRELESS)) {
+ get_u32(&value, ACER_CAP_WIRELESS);
+ data->wireless = value;
+ }
+
+ if (has_cap(ACER_CAP_BLUETOOTH)) {
+ get_u32(&value, ACER_CAP_BLUETOOTH);
+ data->bluetooth = value;
+ }
+
+ if (has_cap(ACER_CAP_MAILLED)) {
+ get_u32(&value, ACER_CAP_MAILLED);
+ data->mailled = value;
+ }
+
+ if (has_cap(ACER_CAP_BRIGHTNESS)) {
+ get_u32(&value, ACER_CAP_BRIGHTNESS);
+ data->brightness = value;
+ }
+
+ return 0;
+}
+
+static int acer_platform_resume(struct platform_device *device)
+{
+ struct acer_data *data = &interface->data;
+
+ if (!data)
+ return -ENOMEM;
+
+ if (has_cap(ACER_CAP_WIRELESS))
+ set_u32(data->wireless, ACER_CAP_WIRELESS);
+
+ if (has_cap(ACER_CAP_BLUETOOTH))
+ set_u32(data->bluetooth, ACER_CAP_BLUETOOTH);
+
+ if (has_cap(ACER_CAP_THREEG))
+ set_u32(data->threeg, ACER_CAP_THREEG);
+
+ if (has_cap(ACER_CAP_MAILLED))
+ set_u32(data->mailled, ACER_CAP_MAILLED);
+
+ if (has_cap(ACER_CAP_BRIGHTNESS))
+ set_u32(data->brightness, ACER_CAP_BRIGHTNESS);
+
+ return 0;
+}
+
+static struct platform_driver acer_platform_driver = {
+ .driver = {
+ .name = "acer-wmi",
+ .owner = THIS_MODULE,
+ },
+ .probe = acer_platform_probe,
+ .remove = acer_platform_remove,
+ .suspend = acer_platform_suspend,
+ .resume = acer_platform_resume,
+};
+
+static struct platform_device *acer_platform_device;
+
+static int remove_sysfs(struct platform_device *device)
+{
+ if (has_cap(ACER_CAP_WIRELESS))
+ device_remove_file(&device->dev, &dev_attr_wireless);
+
+ if (has_cap(ACER_CAP_BLUETOOTH))
+ device_remove_file(&device->dev, &dev_attr_bluetooth);
+
+ if (has_cap(ACER_CAP_THREEG))
+ device_remove_file(&device->dev, &dev_attr_threeg);
+
+ device_remove_file(&device->dev, &dev_attr_interface);
+
+ return 0;
+}
+
+static int create_sysfs(void)
+{
+ int retval = -ENOMEM;
+
+ if (has_cap(ACER_CAP_WIRELESS)) {
+ retval = device_create_file(&acer_platform_device->dev,
+ &dev_attr_wireless);
+ if (retval)
+ goto error_sysfs;
+ }
+
+ if (has_cap(ACER_CAP_BLUETOOTH)) {
+ retval = device_create_file(&acer_platform_device->dev,
+ &dev_attr_bluetooth);
+ if (retval)
+ goto error_sysfs;
+ }
+
+ if (has_cap(ACER_CAP_THREEG)) {
+ retval = device_create_file(&acer_platform_device->dev,
+ &dev_attr_threeg);
+ if (retval)
+ goto error_sysfs;
+ }
+
+ retval = device_create_file(&acer_platform_device->dev,
+ &dev_attr_interface);
+ if (retval)
+ goto error_sysfs;
+
+ return 0;
+
+error_sysfs:
+ remove_sysfs(acer_platform_device);
+ return retval;
+}
+
+static int __init acer_wmi_init(void)
+{
+ int err;
+
+ printk(ACER_INFO "Acer Laptop ACPI-WMI Extras version %s\n",
+ ACER_WMI_VERSION);
+
+ /*
+ * Detect which ACPI-WMI interface we're using.
+ */
+ if (wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1))
+ interface = &AMW0_V2_interface;
+
+ if (!wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1))
+ interface = &wmid_interface;
+
+ if (wmi_has_guid(WMID_GUID2) && interface) {
+ if (ACPI_FAILURE(WMID_set_capabilities())) {
+ printk(ACER_ERR "Unable to detect available devices\n");
+ return -ENODEV;
+ }
+ } else if (!wmi_has_guid(WMID_GUID2) && interface) {
+ printk(ACER_ERR "Unable to detect available devices\n");
+ return -ENODEV;
+ }
+
+ if (wmi_has_guid(AMW0_GUID1) && !wmi_has_guid(WMID_GUID1)) {
+ interface = &AMW0_interface;
+
+ if (ACPI_FAILURE(AMW0_set_capabilities())) {
+ printk(ACER_ERR "Unable to detect available devices\n");
+ return -ENODEV;
+ }
+ }
+
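+ /* The mail LED capability is detected via the AMW0 interface when present */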
+ if (wmi_has_guid(AMW0_GUID1)) {
+ if (ACPI_FAILURE(AMW0_find_mailled()))
+ printk(ACER_ERR "Unable to detect mail LED\n");
+ }
+
+ find_quirks();
+
+ if (!interface) {
+ printk(ACER_ERR "No or unsupported WMI interface, unable to ");
+ printk(KERN_CONT "load.\n");
+ return -ENODEV;
+ }
+
+ if (platform_driver_register(&acer_platform_driver)) {
+ printk(ACER_ERR "Unable to register platform driver.\n");
+ goto error_platform_register;
+ }
+ acer_platform_device = platform_device_alloc("acer-wmi", -1);
+ platform_device_add(acer_platform_device);
+
+ err = create_sysfs();
+ if (err)
+ return err;
+
+ /* Override any initial settings with values from the commandline */
+ acer_commandline_init();
+
+ return 0;
+
+error_platform_register:
+ return -ENODEV;
+}
+
+static void __exit acer_wmi_exit(void)
+{
+ remove_sysfs(acer_platform_device);
+ platform_device_del(acer_platform_device);
+ platform_driver_unregister(&acer_platform_driver);
+
+ printk(ACER_INFO "Acer Laptop WMI Extras unloaded\n");
+ return;
+}
+
+module_init(acer_wmi_init);
+module_exit(acer_wmi_exit);
diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c
index 7dce318df1bd..7c6dfd03de9f 100644
--- a/drivers/misc/asus-laptop.c
+++ b/drivers/misc/asus-laptop.c
@@ -33,7 +33,6 @@
* Sam Lin - GPS support
*/
-#include <linux/autoconf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -240,7 +239,7 @@ static struct workqueue_struct *led_workqueue;
static int object##_led_wk; \
static DECLARE_WORK(object##_led_work, object##_led_update); \
static struct led_classdev object##_led = { \
- .name = "asus:" ledname, \
+ .name = "asus::" ledname, \
.brightness_set = object##_led_set, \
}
@@ -255,7 +254,7 @@ ASUS_LED(gled, "gaming");
* method is searched within the scope of the handle, can be NULL. The output
* of the method is written in output, which can also be NULL
*
- * returns 1 if write is successful, 0 else.
+ * returns 0 if write is successful, -1 else.
*/
static int write_acpi_int(acpi_handle handle, const char *method, int val,
struct acpi_buffer *output)
@@ -264,13 +263,19 @@ static int write_acpi_int(acpi_handle handle, const char *method, int val,
union acpi_object in_obj; //the only param we use
acpi_status status;
+ if (!handle)
+ return 0;
+
params.count = 1;
params.pointer = &in_obj;
in_obj.type = ACPI_TYPE_INTEGER;
in_obj.integer.value = val;
status = acpi_evaluate_object(handle, (char *)method, &params, output);
- return (status == AE_OK);
+ if (status == AE_OK)
+ return 0;
+ else
+ return -1;
}
static int read_wireless_status(int mask)
@@ -322,7 +327,7 @@ static void write_status(acpi_handle handle, int out, int mask)
switch (mask) {
case MLED_ON:
- out = !out & 0x1;
+ out = !(out & 0x1);
break;
case GLED_ON:
out = (out & 0x1) + 1;
@@ -336,7 +341,7 @@ static void write_status(acpi_handle handle, int out, int mask)
break;
}
- if (handle && !write_acpi_int(handle, NULL, out, NULL))
+ if (write_acpi_int(handle, NULL, out, NULL))
printk(ASUS_WARNING " write failed %x\n", mask);
}
@@ -416,7 +421,7 @@ static int set_brightness(struct backlight_device *bd, int value)
value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
/* 0 <= value <= 15 */
- if (!write_acpi_int(brightness_set_handle, NULL, value, NULL)) {
+ if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) {
printk(ASUS_WARNING "Error changing brightness\n");
ret = -EIO;
}
@@ -546,7 +551,7 @@ static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
rv = parse_arg(buf, count, &value);
if (rv > 0) {
- if (!write_acpi_int(ledd_set_handle, NULL, value, NULL))
+ if (write_acpi_int(ledd_set_handle, NULL, value, NULL))
printk(ASUS_WARNING "LED display write failed\n");
else
hotk->ledd_status = (u32) value;
@@ -591,7 +596,7 @@ static ssize_t store_bluetooth(struct device *dev,
static void set_display(int value)
{
/* no sanity check needed for now */
- if (!write_acpi_int(display_set_handle, NULL, value, NULL))
+ if (write_acpi_int(display_set_handle, NULL, value, NULL))
printk(ASUS_WARNING "Error setting display\n");
return;
}
@@ -648,7 +653,7 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
*/
static void set_light_sens_switch(int value)
{
- if (!write_acpi_int(ls_switch_handle, NULL, value, NULL))
+ if (write_acpi_int(ls_switch_handle, NULL, value, NULL))
printk(ASUS_WARNING "Error setting light sensor switch\n");
hotk->light_switch = value;
}
@@ -673,7 +678,7 @@ static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
static void set_light_sens_level(int value)
{
- if (!write_acpi_int(ls_level_handle, NULL, value, NULL))
+ if (write_acpi_int(ls_level_handle, NULL, value, NULL))
printk(ASUS_WARNING "Error setting light sensor level\n");
hotk->light_level = value;
}
@@ -861,7 +866,7 @@ static int asus_hotk_get_info(void)
printk(ASUS_WARNING "Couldn't get the DSDT table header\n");
/* We have to write 0 on init this far for all ASUS models */
- if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
+ if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
printk(ASUS_ERR "Hotkey initialization failed\n");
return -ENODEV;
}
diff --git a/drivers/misc/fujitsu-laptop.c b/drivers/misc/fujitsu-laptop.c
index c8d62c268b11..1cfd7f3f1294 100644
--- a/drivers/misc/fujitsu-laptop.c
+++ b/drivers/misc/fujitsu-laptop.c
@@ -50,7 +50,6 @@
#include <linux/dmi.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>
-#include <linux/autoconf.h>
#define FUJITSU_DRIVER_VERSION "0.3"
diff --git a/drivers/misc/intel_menlow.c b/drivers/misc/intel_menlow.c
new file mode 100644
index 000000000000..f70984ab1e1b
--- /dev/null
+++ b/drivers/misc/intel_menlow.c
@@ -0,0 +1,526 @@
+/*
+ * intel_menlow.c - Intel Menlow driver for thermal management extension
+ *
+ * Copyright (C) 2008 Intel Corp
+ * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
+ * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This driver creates the sysfs interface for programming the sensors.
+ * It also implements the driver for the Intel Menlow memory controller
+ * (hardware id INT0002), which makes use of the platform-specific ACPI
+ * methods to get/set bandwidth.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+
+#include <linux/thermal.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+MODULE_AUTHOR("Thomas Sujith");
+MODULE_AUTHOR("Zhang Rui");
+MODULE_DESCRIPTION("Intel Menlow platform specific driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Memory controller device control
+ */
+
+#define MEMORY_GET_BANDWIDTH "GTHS"
+#define MEMORY_SET_BANDWIDTH "STHS"
+#define MEMORY_ARG_CUR_BANDWIDTH 1
+#define MEMORY_ARG_MAX_BANDWIDTH 0
+
+static int memory_get_int_max_bandwidth(struct thermal_cooling_device *cdev,
+ unsigned long *max_state)
+{
+ struct acpi_device *device = cdev->devdata;
+ acpi_handle handle = device->handle;
+ unsigned long value;
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ acpi_status status = AE_OK;
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = MEMORY_ARG_MAX_BANDWIDTH;
+ status = acpi_evaluate_integer(handle, MEMORY_GET_BANDWIDTH,
+ &arg_list, &value);
+ if (ACPI_FAILURE(status))
+ return -EFAULT;
+
+ *max_state = value - 1;
+ return 0;
+}
+
+static int memory_get_max_bandwidth(struct thermal_cooling_device *cdev,
+ char *buf)
+{
+ unsigned long value;
+ if (memory_get_int_max_bandwidth(cdev, &value))
+ return -EINVAL;
+
+ return sprintf(buf, "%ld\n", value);
+}
+
+static int memory_get_cur_bandwidth(struct thermal_cooling_device *cdev,
+ char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ acpi_handle handle = device->handle;
+ unsigned long value;
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ acpi_status status = AE_OK;
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = MEMORY_ARG_CUR_BANDWIDTH;
+ status = acpi_evaluate_integer(handle, MEMORY_GET_BANDWIDTH,
+ &arg_list, &value);
+ if (ACPI_FAILURE(status))
+ return -EFAULT;
+
+ return sprintf(buf, "%ld\n", value);
+}
+
+static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev,
+ unsigned int state)
+{
+ struct acpi_device *device = cdev->devdata;
+ acpi_handle handle = device->handle;
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ acpi_status status;
+ int temp;
+ unsigned long max_state;
+
+ if (memory_get_int_max_bandwidth(cdev, &max_state))
+ return -EFAULT;
+
+ if (max_state < 0 || state > max_state)
+ return -EINVAL;
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = state;
+
+ status =
+ acpi_evaluate_integer(handle, MEMORY_SET_BANDWIDTH, &arg_list,
+ (unsigned long *)&temp);
+
+ printk(KERN_INFO
+ "Bandwidth value was %d: status is %d\n", state, status);
+ if (ACPI_FAILURE(status))
+ return -EFAULT;
+
+ return 0;
+}
+
+static struct thermal_cooling_device_ops memory_cooling_ops = {
+ .get_max_state = memory_get_max_bandwidth,
+ .get_cur_state = memory_get_cur_bandwidth,
+ .set_cur_state = memory_set_cur_bandwidth,
+};
+
+/*
+ * Memory Device Management
+ */
+static int intel_menlow_memory_add(struct acpi_device *device)
+{
+ int result = -ENODEV;
+ acpi_status status = AE_OK;
+ acpi_handle dummy;
+ struct thermal_cooling_device *cdev;
+
+ if (!device)
+ return -EINVAL;
+
+ status = acpi_get_handle(device->handle, MEMORY_GET_BANDWIDTH, &dummy);
+ if (ACPI_FAILURE(status))
+ goto end;
+
+ status = acpi_get_handle(device->handle, MEMORY_SET_BANDWIDTH, &dummy);
+ if (ACPI_FAILURE(status))
+ goto end;
+
+ cdev = thermal_cooling_device_register("Memory controller", device,
+ &memory_cooling_ops);
+ acpi_driver_data(device) = cdev;
+ if (!cdev)
+ result = -ENODEV;
+ else {
+ result = sysfs_create_link(&device->dev.kobj,
+ &cdev->device.kobj, "thermal_cooling");
+ if (result)
+ goto unregister;
+
+ result = sysfs_create_link(&cdev->device.kobj,
+ &device->dev.kobj, "device");
+ if (result) {
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+ goto unregister;
+ }
+ }
+
+ end:
+ return result;
+
+ unregister:
+ thermal_cooling_device_unregister(cdev);
+ return result;
+
+}
+
+static int intel_menlow_memory_remove(struct acpi_device *device, int type)
+{
+ struct thermal_cooling_device *cdev = acpi_driver_data(device);
+
+ if (!device || !cdev)
+ return -EINVAL;
+
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+ sysfs_remove_link(&cdev->device.kobj, "device");
+ thermal_cooling_device_unregister(cdev);
+
+ return 0;
+}
+
+const static struct acpi_device_id intel_menlow_memory_ids[] = {
+ {"INT0002", 0},
+ {"", 0},
+};
+
+static struct acpi_driver intel_menlow_memory_driver = {
+ .name = "intel_menlow_thermal_control",
+ .ids = intel_menlow_memory_ids,
+ .ops = {
+ .add = intel_menlow_memory_add,
+ .remove = intel_menlow_memory_remove,
+ },
+};
+
+/*
+ * Sensor control on menlow platform
+ */
+
+#define THERMAL_AUX0 0
+#define THERMAL_AUX1 1
+#define GET_AUX0 "GAX0"
+#define GET_AUX1 "GAX1"
+#define SET_AUX0 "SAX0"
+#define SET_AUX1 "SAX1"
+
+struct intel_menlow_attribute {
+ struct device_attribute attr;
+ struct device *device;
+ acpi_handle handle;
+ struct list_head node;
+};
+
+static LIST_HEAD(intel_menlow_attr_list);
+static DEFINE_MUTEX(intel_menlow_attr_lock);
+
+/*
+ * sensor_get_auxtrip - get the current auxtrip value from the sensor
+ * @handle: ACPI handle of the thermal zone
+ * @index: 0 for AUX0, 1 for AUX1
+ * @value: filled in with the current auxtrip value
+ */
+static int sensor_get_auxtrip(acpi_handle handle, int index, int *value)
+{
+ acpi_status status;
+
+ if ((index != 0 && index != 1) || !value)
+ return -EINVAL;
+
+ status = acpi_evaluate_integer(handle, index ? GET_AUX1 : GET_AUX0,
+ NULL, (unsigned long *)value);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * sensor_set_auxtrip - set a new auxtrip value on the sensor
+ * @handle: ACPI handle of the thermal zone
+ * @index: 0 for AUX0, 1 for AUX1
+ * @value: the new auxtrip value
+ */
+static int sensor_set_auxtrip(acpi_handle handle, int index, int value)
+{
+ acpi_status status;
+ union acpi_object arg = {
+ ACPI_TYPE_INTEGER
+ };
+ struct acpi_object_list args = {
+ 1, &arg
+ };
+ int temp;
+
+ if (index != 0 && index != 1)
+ return -EINVAL;
+
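+ /* Read the other trip point so that AUX0 <= AUX1 can be enforced */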
+ status = acpi_evaluate_integer(handle, index ? GET_AUX0 : GET_AUX1,
+ NULL, (unsigned long *)&temp);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ if ((index && value < temp) || (!index && value > temp))
+ return -EINVAL;
+
+ arg.integer.value = value;
+ status = acpi_evaluate_integer(handle, index ? SET_AUX1 : SET_AUX0,
+ &args, (unsigned long *)&temp);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ /* do we need to check the return value of SAX0/SAX1 ? */
+
+ return 0;
+}
+
+#define to_intel_menlow_attr(_attr) \
+ container_of(_attr, struct intel_menlow_attribute, attr)
+
+static ssize_t aux0_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
+ int value;
+ int result;
+
+ result = sensor_get_auxtrip(attr->handle, 0, &value);
+
+ return result ? result : sprintf(buf, "%lu", KELVIN_TO_CELSIUS(value));
+}
+
+static ssize_t aux1_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
+ int value;
+ int result;
+
+ result = sensor_get_auxtrip(attr->handle, 1, &value);
+
+ return result ? result : sprintf(buf, "%lu", KELVIN_TO_CELSIUS(value));
+}
+
+static ssize_t aux0_store(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
+ int value;
+ int result;
+
+ /* Sanity check: value should be a positive integer */
+ if (!sscanf(buf, "%d", &value))
+ return -EINVAL;
+
+ if (value < 0)
+ return -EINVAL;
+
+ result = sensor_set_auxtrip(attr->handle, 0, CELSIUS_TO_KELVIN(value));
+ return result ? result : count;
+}
+
+static ssize_t aux1_store(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
+ int value;
+ int result;
+
+ /* Sanity check: value should be a positive integer */
+ if (!sscanf(buf, "%d", &value))
+ return -EINVAL;
+
+ if (value < 0)
+ return -EINVAL;
+
+ result = sensor_set_auxtrip(attr->handle, 1, CELSIUS_TO_KELVIN(value));
+ return result ? result : count;
+}
+
+/* The BIOS can enable/disable the thermal user application on the Dabney platform */
+#define BIOS_ENABLED "\\_TZ.GSTS"
+static ssize_t bios_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ acpi_status status;
+ unsigned long bios_enabled;
+
+ status = acpi_evaluate_integer(NULL, BIOS_ENABLED, NULL, &bios_enabled);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return sprintf(buf, "%s\n", bios_enabled ? "enabled" : "disabled");
+}
+
+static int intel_menlow_add_one_attribute(char *name, int mode, void *show,
+ void *store, struct device *dev,
+ acpi_handle handle)
+{
+ struct intel_menlow_attribute *attr;
+ int result;
+
+ attr = kzalloc(sizeof(struct intel_menlow_attribute), GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ attr->attr.attr.name = name;
+ attr->attr.attr.mode = mode;
+ attr->attr.show = show;
+ attr->attr.store = store;
+ attr->device = dev;
+ attr->handle = handle;
+
+ result = device_create_file(dev, &attr->attr);
+ if (result)
+ return result;
+
+ mutex_lock(&intel_menlow_attr_lock);
+ list_add_tail(&attr->node, &intel_menlow_attr_list);
+ mutex_unlock(&intel_menlow_attr_lock);
+
+ return 0;
+}
+
+static acpi_status intel_menlow_register_sensor(acpi_handle handle, u32 lvl,
+ void *context, void **rv)
+{
+ acpi_status status;
+ acpi_handle dummy;
+ struct thermal_zone_device *thermal;
+ int result;
+
+ result = acpi_bus_get_private_data(handle, (void **)&thermal);
+ if (result)
+ return 0;
+
+ /* _TZ must have the AUX0/1 methods */
+ status = acpi_get_handle(handle, GET_AUX0, &dummy);
+ if (ACPI_FAILURE(status))
+ goto not_found;
+
+ status = acpi_get_handle(handle, SET_AUX0, &dummy);
+ if (ACPI_FAILURE(status))
+ goto not_found;
+
+ result = intel_menlow_add_one_attribute("aux0", 0644,
+ aux0_show, aux0_store,
+ &thermal->device, handle);
+ if (result)
+ return AE_ERROR;
+
+ status = acpi_get_handle(handle, GET_AUX1, &dummy);
+ if (ACPI_FAILURE(status))
+ goto not_found;
+
+ status = acpi_get_handle(handle, SET_AUX1, &dummy);
+ if (ACPI_FAILURE(status))
+ goto not_found;
+
+ result = intel_menlow_add_one_attribute("aux1", 0644,
+ aux1_show, aux1_store,
+ &thermal->device, handle);
+ if (result)
+ return AE_ERROR;
+
+ /*
+ * create the "dabney_enabled" attribute which means the user app
+ * should be loaded or not
+ */
+
+ result = intel_menlow_add_one_attribute("bios_enabled", 0444,
+ bios_enabled_show, NULL,
+ &thermal->device, handle);
+ if (result)
+ return AE_ERROR;
+
+ not_found:
+ if (status == AE_NOT_FOUND)
+ return AE_OK;
+ else
+ return status;
+}
+
+static void intel_menlow_unregister_sensor(void)
+{
+ struct intel_menlow_attribute *pos, *next;
+
+ mutex_lock(&intel_menlow_attr_lock);
+ list_for_each_entry_safe(pos, next, &intel_menlow_attr_list, node) {
+ list_del(&pos->node);
+ device_remove_file(pos->device, &pos->attr);
+ kfree(pos);
+ }
+ mutex_unlock(&intel_menlow_attr_lock);
+
+ return;
+}
+
+static int __init intel_menlow_module_init(void)
+{
+ int result = -ENODEV;
+ acpi_status status;
+ unsigned long enable;
+
+ if (acpi_disabled)
+ return result;
+
+ /* Looking for the \_TZ.GSTS method */
+ status = acpi_evaluate_integer(NULL, BIOS_ENABLED, NULL, &enable);
+ if (ACPI_FAILURE(status) || !enable)
+ return -ENODEV;
+
+ /* Looking for ACPI device MEM0 with hardware id INT0002 */
+ result = acpi_bus_register_driver(&intel_menlow_memory_driver);
+ if (result)
+ return result;
+
+ /* Looking for sensors in each ACPI thermal zone */
+ status = acpi_walk_namespace(ACPI_TYPE_THERMAL, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ intel_menlow_register_sensor, NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit intel_menlow_module_exit(void)
+{
+ acpi_bus_unregister_driver(&intel_menlow_memory_driver);
+ intel_menlow_unregister_sensor();
+}
+
+module_init(intel_menlow_module_init);
+module_exit(intel_menlow_module_exit);
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 552b7957a92a..c884730c5eaf 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -129,27 +129,28 @@ module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
"crash point is to be hit to trigger action");
-unsigned int jp_do_irq(unsigned int irq)
+static unsigned int jp_do_irq(unsigned int irq)
{
lkdtm_handler();
jprobe_return();
return 0;
}
-irqreturn_t jp_handle_irq_event(unsigned int irq, struct irqaction *action)
+static irqreturn_t jp_handle_irq_event(unsigned int irq,
+ struct irqaction *action)
{
lkdtm_handler();
jprobe_return();
return 0;
}
-void jp_tasklet_action(struct softirq_action *a)
+static void jp_tasklet_action(struct softirq_action *a)
{
lkdtm_handler();
jprobe_return();
}
-void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
+static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
lkdtm_handler();
jprobe_return();
@@ -157,23 +158,24 @@ void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
struct scan_control;
-unsigned long jp_shrink_inactive_list(unsigned long max_scan,
- struct zone *zone, struct scan_control *sc)
+static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
+ struct zone *zone,
+ struct scan_control *sc)
{
lkdtm_handler();
jprobe_return();
return 0;
}
-int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
- const enum hrtimer_mode mode)
+static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode)
{
lkdtm_handler();
jprobe_return();
return 0;
}
-int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
+static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
lkdtm_handler();
jprobe_return();
@@ -270,7 +272,7 @@ void lkdtm_handler(void)
}
}
-int lkdtm_module_init(void)
+static int __init lkdtm_module_init(void)
{
int ret;
@@ -331,7 +333,7 @@ int lkdtm_module_init(void)
return 0;
}
-void lkdtm_module_exit(void)
+static void __exit lkdtm_module_exit(void)
{
unregister_jprobe(&lkdtm);
printk(KERN_INFO "lkdtm : Crash point unregistered\n");
diff --git a/drivers/misc/msi-laptop.c b/drivers/misc/msi-laptop.c
index 83679c762925..de898c6938f3 100644
--- a/drivers/misc/msi-laptop.c
+++ b/drivers/misc/msi-laptop.c
@@ -58,7 +58,6 @@
#include <linux/dmi.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>
-#include <linux/autoconf.h>
#define MSI_DRIVER_VERSION "0.5"
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index cd221fd0fb94..7fa61e907e1c 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -25,7 +25,7 @@
#include <asm/atomic.h>
#include <asm/io.h>
-#define PHANTOM_VERSION "n0.9.7"
+#define PHANTOM_VERSION "n0.9.8"
#define PHANTOM_MAX_MINORS 8
@@ -456,8 +456,9 @@ static int phantom_resume(struct pci_dev *pdev)
#endif
static struct pci_device_id phantom_pci_tbl[] __devinitdata = {
- { PCI_DEVICE(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050),
- .class = PCI_CLASS_BRIDGE_OTHER << 8, .class_mask = 0xffff00 },
+ { .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9050,
+ .subvendor = PCI_VENDOR_ID_PLX, .subdevice = PCI_DEVICE_ID_PLX_9050,
+ .class = PCI_CLASS_BRIDGE_OTHER << 8, .class_mask = 0xffff00 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, phantom_pci_tbl);
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
index b0f68031b49d..899e3f75f288 100644
--- a/drivers/misc/sony-laptop.c
+++ b/drivers/misc/sony-laptop.c
@@ -73,7 +73,7 @@
if (debug) printk(KERN_WARNING DRV_PFX msg); \
} while (0)
-#define SONY_LAPTOP_DRIVER_VERSION "0.5"
+#define SONY_LAPTOP_DRIVER_VERSION "0.6"
#define SONY_NC_CLASS "sony-nc"
#define SONY_NC_HID "SNY5001"
@@ -146,68 +146,70 @@ struct sony_laptop_keypress {
* and input layer indexes in the keymap
*/
static int sony_laptop_input_index[] = {
- -1, /* no event */
- -1, /* SONYPI_EVENT_JOGDIAL_DOWN */
- -1, /* SONYPI_EVENT_JOGDIAL_UP */
- -1, /* SONYPI_EVENT_JOGDIAL_DOWN_PRESSED */
- -1, /* SONYPI_EVENT_JOGDIAL_UP_PRESSED */
- -1, /* SONYPI_EVENT_JOGDIAL_PRESSED */
- -1, /* SONYPI_EVENT_JOGDIAL_RELEASED */
- 0, /* SONYPI_EVENT_CAPTURE_PRESSED */
- 1, /* SONYPI_EVENT_CAPTURE_RELEASED */
- 2, /* SONYPI_EVENT_CAPTURE_PARTIALPRESSED */
- 3, /* SONYPI_EVENT_CAPTURE_PARTIALRELEASED */
- 4, /* SONYPI_EVENT_FNKEY_ESC */
- 5, /* SONYPI_EVENT_FNKEY_F1 */
- 6, /* SONYPI_EVENT_FNKEY_F2 */
- 7, /* SONYPI_EVENT_FNKEY_F3 */
- 8, /* SONYPI_EVENT_FNKEY_F4 */
- 9, /* SONYPI_EVENT_FNKEY_F5 */
- 10, /* SONYPI_EVENT_FNKEY_F6 */
- 11, /* SONYPI_EVENT_FNKEY_F7 */
- 12, /* SONYPI_EVENT_FNKEY_F8 */
- 13, /* SONYPI_EVENT_FNKEY_F9 */
- 14, /* SONYPI_EVENT_FNKEY_F10 */
- 15, /* SONYPI_EVENT_FNKEY_F11 */
- 16, /* SONYPI_EVENT_FNKEY_F12 */
- 17, /* SONYPI_EVENT_FNKEY_1 */
- 18, /* SONYPI_EVENT_FNKEY_2 */
- 19, /* SONYPI_EVENT_FNKEY_D */
- 20, /* SONYPI_EVENT_FNKEY_E */
- 21, /* SONYPI_EVENT_FNKEY_F */
- 22, /* SONYPI_EVENT_FNKEY_S */
- 23, /* SONYPI_EVENT_FNKEY_B */
- 24, /* SONYPI_EVENT_BLUETOOTH_PRESSED */
- 25, /* SONYPI_EVENT_PKEY_P1 */
- 26, /* SONYPI_EVENT_PKEY_P2 */
- 27, /* SONYPI_EVENT_PKEY_P3 */
- 28, /* SONYPI_EVENT_BACK_PRESSED */
- -1, /* SONYPI_EVENT_LID_CLOSED */
- -1, /* SONYPI_EVENT_LID_OPENED */
- 29, /* SONYPI_EVENT_BLUETOOTH_ON */
- 30, /* SONYPI_EVENT_BLUETOOTH_OFF */
- 31, /* SONYPI_EVENT_HELP_PRESSED */
- 32, /* SONYPI_EVENT_FNKEY_ONLY */
- 33, /* SONYPI_EVENT_JOGDIAL_FAST_DOWN */
- 34, /* SONYPI_EVENT_JOGDIAL_FAST_UP */
- 35, /* SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */
- 36, /* SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */
- 37, /* SONYPI_EVENT_JOGDIAL_VFAST_DOWN */
- 38, /* SONYPI_EVENT_JOGDIAL_VFAST_UP */
- 39, /* SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */
- 40, /* SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */
- 41, /* SONYPI_EVENT_ZOOM_PRESSED */
- 42, /* SONYPI_EVENT_THUMBPHRASE_PRESSED */
- 43, /* SONYPI_EVENT_MEYE_FACE */
- 44, /* SONYPI_EVENT_MEYE_OPPOSITE */
- 45, /* SONYPI_EVENT_MEMORYSTICK_INSERT */
- 46, /* SONYPI_EVENT_MEMORYSTICK_EJECT */
- -1, /* SONYPI_EVENT_ANYBUTTON_RELEASED */
- -1, /* SONYPI_EVENT_BATTERY_INSERT */
- -1, /* SONYPI_EVENT_BATTERY_REMOVE */
- -1, /* SONYPI_EVENT_FNKEY_RELEASED */
- 47, /* SONYPI_EVENT_WIRELESS_ON */
- 48, /* SONYPI_EVENT_WIRELESS_OFF */
+ -1, /* 0 no event */
+ -1, /* 1 SONYPI_EVENT_JOGDIAL_DOWN */
+ -1, /* 2 SONYPI_EVENT_JOGDIAL_UP */
+ -1, /* 3 SONYPI_EVENT_JOGDIAL_DOWN_PRESSED */
+ -1, /* 4 SONYPI_EVENT_JOGDIAL_UP_PRESSED */
+ -1, /* 5 SONYPI_EVENT_JOGDIAL_PRESSED */
+ -1, /* 6 SONYPI_EVENT_JOGDIAL_RELEASED */
+ 0, /* 7 SONYPI_EVENT_CAPTURE_PRESSED */
+ 1, /* 8 SONYPI_EVENT_CAPTURE_RELEASED */
+ 2, /* 9 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */
+ 3, /* 10 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */
+ 4, /* 11 SONYPI_EVENT_FNKEY_ESC */
+ 5, /* 12 SONYPI_EVENT_FNKEY_F1 */
+ 6, /* 13 SONYPI_EVENT_FNKEY_F2 */
+ 7, /* 14 SONYPI_EVENT_FNKEY_F3 */
+ 8, /* 15 SONYPI_EVENT_FNKEY_F4 */
+ 9, /* 16 SONYPI_EVENT_FNKEY_F5 */
+ 10, /* 17 SONYPI_EVENT_FNKEY_F6 */
+ 11, /* 18 SONYPI_EVENT_FNKEY_F7 */
+ 12, /* 19 SONYPI_EVENT_FNKEY_F8 */
+ 13, /* 20 SONYPI_EVENT_FNKEY_F9 */
+ 14, /* 21 SONYPI_EVENT_FNKEY_F10 */
+ 15, /* 22 SONYPI_EVENT_FNKEY_F11 */
+ 16, /* 23 SONYPI_EVENT_FNKEY_F12 */
+ 17, /* 24 SONYPI_EVENT_FNKEY_1 */
+ 18, /* 25 SONYPI_EVENT_FNKEY_2 */
+ 19, /* 26 SONYPI_EVENT_FNKEY_D */
+ 20, /* 27 SONYPI_EVENT_FNKEY_E */
+ 21, /* 28 SONYPI_EVENT_FNKEY_F */
+ 22, /* 29 SONYPI_EVENT_FNKEY_S */
+ 23, /* 30 SONYPI_EVENT_FNKEY_B */
+ 24, /* 31 SONYPI_EVENT_BLUETOOTH_PRESSED */
+ 25, /* 32 SONYPI_EVENT_PKEY_P1 */
+ 26, /* 33 SONYPI_EVENT_PKEY_P2 */
+ 27, /* 34 SONYPI_EVENT_PKEY_P3 */
+ 28, /* 35 SONYPI_EVENT_BACK_PRESSED */
+ -1, /* 36 SONYPI_EVENT_LID_CLOSED */
+ -1, /* 37 SONYPI_EVENT_LID_OPENED */
+ 29, /* 38 SONYPI_EVENT_BLUETOOTH_ON */
+ 30, /* 39 SONYPI_EVENT_BLUETOOTH_OFF */
+ 31, /* 40 SONYPI_EVENT_HELP_PRESSED */
+ 32, /* 41 SONYPI_EVENT_FNKEY_ONLY */
+ 33, /* 42 SONYPI_EVENT_JOGDIAL_FAST_DOWN */
+ 34, /* 43 SONYPI_EVENT_JOGDIAL_FAST_UP */
+ 35, /* 44 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */
+ 36, /* 45 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */
+ 37, /* 46 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */
+ 38, /* 47 SONYPI_EVENT_JOGDIAL_VFAST_UP */
+ 39, /* 48 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */
+ 40, /* 49 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */
+ 41, /* 50 SONYPI_EVENT_ZOOM_PRESSED */
+ 42, /* 51 SONYPI_EVENT_THUMBPHRASE_PRESSED */
+ 43, /* 52 SONYPI_EVENT_MEYE_FACE */
+ 44, /* 53 SONYPI_EVENT_MEYE_OPPOSITE */
+ 45, /* 54 SONYPI_EVENT_MEMORYSTICK_INSERT */
+ 46, /* 55 SONYPI_EVENT_MEMORYSTICK_EJECT */
+ -1, /* 56 SONYPI_EVENT_ANYBUTTON_RELEASED */
+ -1, /* 57 SONYPI_EVENT_BATTERY_INSERT */
+ -1, /* 58 SONYPI_EVENT_BATTERY_REMOVE */
+ -1, /* 59 SONYPI_EVENT_FNKEY_RELEASED */
+ 47, /* 60 SONYPI_EVENT_WIRELESS_ON */
+ 48, /* 61 SONYPI_EVENT_WIRELESS_OFF */
+ 49, /* 62 SONYPI_EVENT_ZOOM_IN_PRESSED */
+ 50, /* 63 SONYPI_EVENT_ZOOM_OUT_PRESSED */
};
static int sony_laptop_input_keycode_map[] = {
@@ -260,6 +262,8 @@ static int sony_laptop_input_keycode_map[] = {
KEY_RESERVED, /* 46 SONYPI_EVENT_MEMORYSTICK_EJECT */
KEY_WLAN, /* 47 SONYPI_EVENT_WIRELESS_ON */
KEY_WLAN, /* 48 SONYPI_EVENT_WIRELESS_OFF */
+ KEY_ZOOMIN, /* 49 SONYPI_EVENT_ZOOM_IN_PRESSED */
+ KEY_ZOOMOUT /* 50 SONYPI_EVENT_ZOOM_OUT_PRESSED */
};
/* release buttons after a short delay if pressed */
@@ -311,7 +315,7 @@ static void sony_laptop_report_input_event(u8 event)
break;
default:
- if (event > ARRAY_SIZE (sony_laptop_input_keycode_map)) {
+ if (event > ARRAY_SIZE(sony_laptop_input_index)) {
dprintk("sony_laptop_report_input_event, event not known: %d\n", event);
break;
}
@@ -875,6 +879,15 @@ static const struct dmi_system_id sony_nc_ids[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-C"),
},
},
+ {
+ .ident = "Sony Vaio N Series",
+ .callback = sony_nc_C_enable,
+ .driver_data = sony_C_events,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-N"),
+ },
+ },
{ }
};
@@ -1169,10 +1182,12 @@ static struct acpi_driver sony_nc_driver = {
#define SONYPI_DEVICE_TYPE1 0x00000001
#define SONYPI_DEVICE_TYPE2 0x00000002
#define SONYPI_DEVICE_TYPE3 0x00000004
+#define SONYPI_DEVICE_TYPE4 0x00000008
#define SONYPI_TYPE1_OFFSET 0x04
#define SONYPI_TYPE2_OFFSET 0x12
#define SONYPI_TYPE3_OFFSET 0x12
+#define SONYPI_TYPE4_OFFSET 0x12
struct sony_pic_ioport {
struct acpi_resource_io io1;
@@ -1185,18 +1200,33 @@ struct sony_pic_irq {
struct list_head list;
};
+struct sonypi_eventtypes {
+ u8 data;
+ unsigned long mask;
+ struct sonypi_event *events;
+};
+
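+/*
+ * Per-model control block (descriptive note, inferred from the code below):
+ * evport_offset selects the port offset used to read the event data byte,
+ * event_types points at the model's decoding tables, and handle_irq
+ * (optional) lets a model issue follow-up SPIC commands for events that
+ * need a second irq before they can be decoded.
+ */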
+struct device_ctrl {
+ int model;
+ int (*handle_irq)(const u8, const u8);
+ u16 evport_offset;
+ u8 has_camera;
+ u8 has_bluetooth;
+ u8 has_wwan;
+ struct sonypi_eventtypes *event_types;
+};
+
struct sony_pic_dev {
- int model;
- u16 evport_offset;
- u8 camera_power;
- u8 bluetooth_power;
- u8 wwan_power;
+ struct device_ctrl *control;
struct acpi_device *acpi_dev;
struct sony_pic_irq *cur_irq;
struct sony_pic_ioport *cur_ioport;
struct list_head interrupts;
struct list_head ioports;
struct mutex lock;
+ u8 camera_power;
+ u8 bluetooth_power;
+ u8 wwan_power;
};
static struct sony_pic_dev spic_dev = {
@@ -1253,6 +1283,7 @@ static struct sonypi_event sonypi_joggerev[] = {
static struct sonypi_event sonypi_captureev[] = {
{ 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED },
{ 0x07, SONYPI_EVENT_CAPTURE_PRESSED },
+ { 0x40, SONYPI_EVENT_CAPTURE_PRESSED },
{ 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED },
{ 0, 0 }
};
@@ -1289,7 +1320,6 @@ static struct sonypi_event sonypi_pkeyev[] = {
{ 0x01, SONYPI_EVENT_PKEY_P1 },
{ 0x02, SONYPI_EVENT_PKEY_P2 },
{ 0x04, SONYPI_EVENT_PKEY_P3 },
- { 0x5c, SONYPI_EVENT_PKEY_P1 },
{ 0, 0 }
};
@@ -1331,6 +1361,8 @@ static struct sonypi_event sonypi_lidev[] = {
/* The set of possible zoom events */
static struct sonypi_event sonypi_zoomev[] = {
{ 0x39, SONYPI_EVENT_ZOOM_PRESSED },
+ { 0x10, SONYPI_EVENT_ZOOM_IN_PRESSED },
+ { 0x20, SONYPI_EVENT_ZOOM_OUT_PRESSED },
{ 0, 0 }
};
@@ -1361,76 +1393,58 @@ static struct sonypi_event sonypi_batteryev[] = {
{ 0, 0 }
};
-static struct sonypi_eventtypes {
- int model;
- u8 data;
- unsigned long mask;
- struct sonypi_event * events;
-} sony_pic_eventtypes[] = {
- { SONYPI_DEVICE_TYPE1, 0, 0xffffffff, sonypi_releaseev },
- { SONYPI_DEVICE_TYPE1, 0x70, SONYPI_MEYE_MASK, sonypi_meyeev },
- { SONYPI_DEVICE_TYPE1, 0x30, SONYPI_LID_MASK, sonypi_lidev },
- { SONYPI_DEVICE_TYPE1, 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev },
- { SONYPI_DEVICE_TYPE1, 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev },
- { SONYPI_DEVICE_TYPE1, 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
- { SONYPI_DEVICE_TYPE1, 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
- { SONYPI_DEVICE_TYPE1, 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev },
- { SONYPI_DEVICE_TYPE1, 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
- { SONYPI_DEVICE_TYPE1, 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev },
-
- { SONYPI_DEVICE_TYPE2, 0, 0xffffffff, sonypi_releaseev },
- { SONYPI_DEVICE_TYPE2, 0x38, SONYPI_LID_MASK, sonypi_lidev },
- { SONYPI_DEVICE_TYPE2, 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev },
- { SONYPI_DEVICE_TYPE2, 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev },
- { SONYPI_DEVICE_TYPE2, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
- { SONYPI_DEVICE_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
- { SONYPI_DEVICE_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
- { SONYPI_DEVICE_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev },
- { SONYPI_DEVICE_TYPE2, 0x21, SONYPI_HELP_MASK, sonypi_helpev },
- { SONYPI_DEVICE_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
- { SONYPI_DEVICE_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
- { SONYPI_DEVICE_TYPE2, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
- { SONYPI_DEVICE_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
- { SONYPI_DEVICE_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
-
- { SONYPI_DEVICE_TYPE3, 0, 0xffffffff, sonypi_releaseev },
- { SONYPI_DEVICE_TYPE3, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
- { SONYPI_DEVICE_TYPE3, 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
- { SONYPI_DEVICE_TYPE3, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
- { SONYPI_DEVICE_TYPE3, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
- { SONYPI_DEVICE_TYPE3, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
- { 0 }
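+/*
+ * Per-model event tables (note inferred from struct sonypi_eventtypes):
+ * each entry gives the data_mask value to match, the event group mask,
+ * and the table translating raw event bytes into SONYPI_EVENT_* codes.
+ */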
+static struct sonypi_eventtypes type1_events[] = {
+ { 0, 0xffffffff, sonypi_releaseev },
+ { 0x70, SONYPI_MEYE_MASK, sonypi_meyeev },
+ { 0x30, SONYPI_LID_MASK, sonypi_lidev },
+ { 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev },
+ { 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev },
+ { 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
+ { 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev },
+ { 0 },
+};
+static struct sonypi_eventtypes type2_events[] = {
+ { 0, 0xffffffff, sonypi_releaseev },
+ { 0x38, SONYPI_LID_MASK, sonypi_lidev },
+ { 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev },
+ { 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev },
+ { 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
+ { 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0x11, SONYPI_BACK_MASK, sonypi_backev },
+ { 0x21, SONYPI_HELP_MASK, sonypi_helpev },
+ { 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
+ { 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
+ { 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+ { 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0 },
+};
+static struct sonypi_eventtypes type3_events[] = {
+ { 0, 0xffffffff, sonypi_releaseev },
+ { 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
+ { 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+ { 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0 },
+};
+static struct sonypi_eventtypes type4_events[] = {
+ { 0, 0xffffffff, sonypi_releaseev },
+ { 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
+ { 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+ { 0x05, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0x05, SONYPI_ZOOM_MASK, sonypi_zoomev },
+ { 0x05, SONYPI_CAPTURE_MASK, sonypi_captureev },
+ { 0 },
};
-static int sony_pic_detect_device_type(void)
-{
- struct pci_dev *pcidev;
- int model = 0;
-
- if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82371AB_3, NULL)))
- model = SONYPI_DEVICE_TYPE1;
-
- else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_ICH6_1, NULL)))
- model = SONYPI_DEVICE_TYPE3;
-
- else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_ICH7_1, NULL)))
- model = SONYPI_DEVICE_TYPE3;
-
- else
- model = SONYPI_DEVICE_TYPE2;
-
- if (pcidev)
- pci_dev_put(pcidev);
-
- printk(KERN_INFO DRV_PFX "detected Type%d model\n",
- model == SONYPI_DEVICE_TYPE1 ? 1 :
- model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
- return model;
-}
-
+/* low level spic calls */
#define ITERATIONS_LONG 10000
#define ITERATIONS_SHORT 10
#define wait_on_command(command, iterations) { \
@@ -1451,7 +1465,7 @@ static u8 sony_pic_call1(u8 dev)
outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
v1 = inb_p(spic_dev.cur_ioport->io1.minimum + 4);
v2 = inb_p(spic_dev.cur_ioport->io1.minimum);
- dprintk("sony_pic_call1: 0x%.4x\n", (v2 << 8) | v1);
+ dprintk("sony_pic_call1(0x%.2x): 0x%.4x\n", dev, (v2 << 8) | v1);
return v2;
}
@@ -1466,7 +1480,7 @@ static u8 sony_pic_call2(u8 dev, u8 fn)
ITERATIONS_LONG);
outb(fn, spic_dev.cur_ioport->io1.minimum);
v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
- dprintk("sony_pic_call2: 0x%.4x\n", v1);
+ dprintk("sony_pic_call2(0x%.2x - 0x%.2x): 0x%.4x\n", dev, fn, v1);
return v1;
}
@@ -1481,10 +1495,105 @@ static u8 sony_pic_call3(u8 dev, u8 fn, u8 v)
wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
outb(v, spic_dev.cur_ioport->io1.minimum);
v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
- dprintk("sony_pic_call3: 0x%.4x\n", v1);
+ dprintk("sony_pic_call3(0x%.2x - 0x%.2x - 0x%.2x): 0x%.4x\n",
+ dev, fn, v, v1);
return v1;
}
+/*
+ * minidrivers for SPIC models
+ */
+static int type4_handle_irq(const u8 data_mask, const u8 ev)
+{
+ /*
+ * On some Type4 models, 0x31 means we have to take some extra action
+ * and wait for the next irq; the device will then generate a new irq
+ * and we can read fresh data from it:
+ * - 0x5c and 0x5f requires 0xA0
+ * - 0x61 requires 0xB3
+ */
+ if (data_mask == 0x31) {
+ if (ev == 0x5c || ev == 0x5f)
+ sony_pic_call1(0xA0);
+ else if (ev == 0x61)
+ sony_pic_call1(0xB3);
+ return 0;
+ }
+ return 1;
+}
+
+static struct device_ctrl spic_types[] = {
+ {
+ .model = SONYPI_DEVICE_TYPE1,
+ .handle_irq = NULL,
+ .evport_offset = SONYPI_TYPE1_OFFSET,
+ .event_types = type1_events,
+ },
+ {
+ .model = SONYPI_DEVICE_TYPE2,
+ .handle_irq = NULL,
+ .evport_offset = SONYPI_TYPE2_OFFSET,
+ .event_types = type2_events,
+ },
+ {
+ .model = SONYPI_DEVICE_TYPE3,
+ .handle_irq = NULL,
+ .evport_offset = SONYPI_TYPE3_OFFSET,
+ .event_types = type3_events,
+ },
+ {
+ .model = SONYPI_DEVICE_TYPE4,
+ .handle_irq = type4_handle_irq,
+ .evport_offset = SONYPI_TYPE4_OFFSET,
+ .event_types = type4_events,
+ },
+};
+
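+/*
+ * Pick the SPIC model from the chipset (summary of the probes below):
+ * PIIX4 (82371AB) means Type1, ICH6 means Type3, ICH7/ICH8 mean Type4,
+ * and anything else falls back to Type2.
+ */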
+static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
+{
+ struct pci_dev *pcidev;
+
+ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
+ if (pcidev) {
+ dev->control = &spic_types[0];
+ goto out;
+ }
+
+ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_ICH6_1, NULL);
+ if (pcidev) {
+ dev->control = &spic_types[2];
+ goto out;
+ }
+
+ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_ICH7_1, NULL);
+ if (pcidev) {
+ dev->control = &spic_types[3];
+ goto out;
+ }
+
+ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_ICH8_4, NULL);
+ if (pcidev) {
+ dev->control = &spic_types[3];
+ goto out;
+ }
+
+ /* default */
+ dev->control = &spic_types[1];
+
+out:
+ if (pcidev)
+ pci_dev_put(pcidev);
+
+ printk(KERN_INFO DRV_PFX "detected Type%d model\n",
+ dev->control->model == SONYPI_DEVICE_TYPE1 ? 1 :
+ dev->control->model == SONYPI_DEVICE_TYPE2 ? 2 :
+ dev->control->model == SONYPI_DEVICE_TYPE3 ? 3 : 4);
+}
+
/* camera tests and poweron/poweroff */
#define SONYPI_CAMERA_PICTURE 5
#define SONYPI_CAMERA_CONTROL 0x10
@@ -2253,7 +2362,7 @@ static int sony_pic_enable(struct acpi_device *device,
buffer.pointer = resource;
/* setup Type 1 resources */
- if (spic_dev.model == SONYPI_DEVICE_TYPE1) {
+ if (spic_dev.control->model == SONYPI_DEVICE_TYPE1) {
/* setup io resources */
resource->res1.type = ACPI_RESOURCE_TYPE_IO;
@@ -2335,39 +2444,49 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id)
if (dev->cur_ioport->io2.minimum)
data_mask = inb_p(dev->cur_ioport->io2.minimum);
else
- data_mask = inb_p(dev->cur_ioport->io1.minimum + dev->evport_offset);
+ data_mask = inb_p(dev->cur_ioport->io1.minimum +
+ dev->control->evport_offset);
dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
- ev, data_mask, dev->cur_ioport->io1.minimum, dev->evport_offset);
+ ev, data_mask, dev->cur_ioport->io1.minimum,
+ dev->control->evport_offset);
if (ev == 0x00 || ev == 0xff)
return IRQ_HANDLED;
- for (i = 0; sony_pic_eventtypes[i].model; i++) {
-
- if (spic_dev.model != sony_pic_eventtypes[i].model)
- continue;
+ for (i = 0; dev->control->event_types[i].mask; i++) {
- if ((data_mask & sony_pic_eventtypes[i].data) !=
- sony_pic_eventtypes[i].data)
+ if ((data_mask & dev->control->event_types[i].data) !=
+ dev->control->event_types[i].data)
continue;
- if (!(mask & sony_pic_eventtypes[i].mask))
+ if (!(mask & dev->control->event_types[i].mask))
continue;
- for (j = 0; sony_pic_eventtypes[i].events[j].event; j++) {
- if (ev == sony_pic_eventtypes[i].events[j].data) {
+ for (j = 0; dev->control->event_types[i].events[j].event; j++) {
+ if (ev == dev->control->event_types[i].events[j].data) {
device_event =
- sony_pic_eventtypes[i].events[j].event;
+ dev->control->
+ event_types[i].events[j].event;
goto found;
}
}
}
+	/* Still not able to decode the event, try to pass
+ * it over to the minidriver
+ */
+ if (dev->control->handle_irq &&
+ dev->control->handle_irq(data_mask, ev) == 0)
+ return IRQ_HANDLED;
+
+ dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
+ ev, data_mask, dev->cur_ioport->io1.minimum,
+ dev->control->evport_offset);
return IRQ_HANDLED;
found:
sony_laptop_report_input_event(device_event);
- acpi_bus_generate_proc_event(spic_dev.acpi_dev, 1, device_event);
+ acpi_bus_generate_proc_event(dev->acpi_dev, 1, device_event);
sonypi_compat_report_event(device_event);
return IRQ_HANDLED;
@@ -2429,23 +2548,9 @@ static int sony_pic_add(struct acpi_device *device)
spic_dev.acpi_dev = device;
strcpy(acpi_device_class(device), "sony/hotkey");
- spic_dev.model = sony_pic_detect_device_type();
+ sony_pic_detect_device_type(&spic_dev);
mutex_init(&spic_dev.lock);
- /* model specific characteristics */
- switch(spic_dev.model) {
- case SONYPI_DEVICE_TYPE1:
- spic_dev.evport_offset = SONYPI_TYPE1_OFFSET;
- break;
- case SONYPI_DEVICE_TYPE3:
- spic_dev.evport_offset = SONYPI_TYPE3_OFFSET;
- break;
- case SONYPI_DEVICE_TYPE2:
- default:
- spic_dev.evport_offset = SONYPI_TYPE2_OFFSET;
- break;
- }
-
/* read _PRS resources */
result = sony_pic_possible_resources(device);
if (result) {
diff --git a/drivers/misc/tc1100-wmi.c b/drivers/misc/tc1100-wmi.c
new file mode 100644
index 000000000000..f25e4c974dcf
--- /dev/null
+++ b/drivers/misc/tc1100-wmi.c
@@ -0,0 +1,290 @@
+/*
+ * HP Compaq TC1100 Tablet WMI Extras Driver
+ *
+ * Copyright (C) 2007 Carlos Corbacho <carlos@strangeworlds.co.uk>
+ * Copyright (C) 2004 Jamey Hicks <jamey.hicks@hp.com>
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <acpi/acpi.h>
+#include <acpi/actypes.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/platform_device.h>
+
+#define GUID "C364AC71-36DB-495A-8494-B439D472A505"
+
+#define TC1100_INSTANCE_WIRELESS 1
+#define TC1100_INSTANCE_JOGDIAL 2
+
+#define TC1100_LOGPREFIX "tc1100-wmi: "
+#define TC1100_INFO KERN_INFO TC1100_LOGPREFIX
+
+MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho");
+MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("wmi:C364AC71-36DB-495A-8494-B439D472A505");
+
+static int tc1100_probe(struct platform_device *device);
+static int tc1100_remove(struct platform_device *device);
+static int tc1100_suspend(struct platform_device *device, pm_message_t state);
+static int tc1100_resume(struct platform_device *device);
+
+static struct platform_driver tc1100_driver = {
+ .driver = {
+ .name = "tc1100-wmi",
+ .owner = THIS_MODULE,
+ },
+ .probe = tc1100_probe,
+ .remove = tc1100_remove,
+ .suspend = tc1100_suspend,
+ .resume = tc1100_resume,
+};
+
+static struct platform_device *tc1100_device;
+
+struct tc1100_data {
+ u32 wireless;
+ u32 jogdial;
+};
+
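+/* wireless/jogdial state saved in tc1100_suspend() and restored on resume */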
+static struct tc1100_data suspend_data;
+
+/* --------------------------------------------------------------------------
+ Device Management
+ -------------------------------------------------------------------------- */
+
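+/*
+ * Note on the raw WMI values handled below: the wireless instance reads
+ * back 3 when the radio is on and is set with 1 (on) / 2 (off); the
+ * jogdial instance reads back 1 but is enabled by writing 0.
+ */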
+static int get_state(u32 *out, u8 instance)
+{
+ u32 tmp;
+ acpi_status status;
+ struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+
+ if (!out)
+ return -EINVAL;
+
+ if (instance > 2)
+ return -ENODEV;
+
+ status = wmi_query_block(GUID, instance, &result);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ obj = (union acpi_object *) result.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER &&
+ obj->buffer.length == sizeof(u32)) {
+ tmp = *((u32 *) obj->buffer.pointer);
+ } else {
+ tmp = 0;
+ }
+
+ if (result.length > 0 && result.pointer)
+ kfree(result.pointer);
+
+ switch (instance) {
+ case TC1100_INSTANCE_WIRELESS:
+ *out = (tmp == 3) ? 1 : 0;
+ return 0;
+ case TC1100_INSTANCE_JOGDIAL:
+ *out = (tmp == 1) ? 1 : 0;
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
+static int set_state(u32 *in, u8 instance)
+{
+ u32 value;
+ acpi_status status;
+ struct acpi_buffer input;
+
+ if (!in)
+ return -EINVAL;
+
+ if (instance > 2)
+ return -ENODEV;
+
+ switch (instance) {
+ case TC1100_INSTANCE_WIRELESS:
+ value = (*in) ? 1 : 2;
+ break;
+ case TC1100_INSTANCE_JOGDIAL:
+ value = (*in) ? 0 : 1;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ input.length = sizeof(u32);
+ input.pointer = &value;
+
+ status = wmi_set_block(GUID, instance, &input);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return 0;
+}
+
+/* --------------------------------------------------------------------------
+ FS Interface (/sys)
+ -------------------------------------------------------------------------- */
+
+/*
+ * Read/write bool sysfs macro
+ */
+#define show_set_bool(value, instance) \
+static ssize_t \
+show_bool_##value(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ u32 result; \
+ acpi_status status = get_state(&result, instance); \
+ if (ACPI_SUCCESS(status)) \
+ return sprintf(buf, "%d\n", result); \
+ return sprintf(buf, "Read error\n"); \
+} \
+\
+static ssize_t \
+set_bool_##value(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ u32 tmp = simple_strtoul(buf, NULL, 10); \
+ acpi_status status = set_state(&tmp, instance); \
+ if (ACPI_FAILURE(status)) \
+ return -EINVAL; \
+ return count; \
+} \
+static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
+ show_bool_##value, set_bool_##value);
+
+show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
+show_set_bool(jogdial, TC1100_INSTANCE_JOGDIAL);
+
+static void remove_fs(void)
+{
+ device_remove_file(&tc1100_device->dev, &dev_attr_wireless);
+ device_remove_file(&tc1100_device->dev, &dev_attr_jogdial);
+}
+
+static int add_fs(void)
+{
+ int ret;
+
+ ret = device_create_file(&tc1100_device->dev, &dev_attr_wireless);
+ if (ret)
+ goto add_sysfs_error;
+
+ ret = device_create_file(&tc1100_device->dev, &dev_attr_jogdial);
+ if (ret)
+ goto add_sysfs_error;
+
+ return ret;
+
+add_sysfs_error:
+ remove_fs();
+ return ret;
+}
+
+/* --------------------------------------------------------------------------
+ Driver Model
+ -------------------------------------------------------------------------- */
+
+static int tc1100_probe(struct platform_device *device)
+{
+ int result = 0;
+
+ result = add_fs();
+ return result;
+}
+
+
+static int tc1100_remove(struct platform_device *device)
+{
+ remove_fs();
+ return 0;
+}
+
+static int tc1100_suspend(struct platform_device *dev, pm_message_t state)
+{
+ int ret;
+
+ ret = get_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
+ if (ret)
+ return ret;
+
+ ret = get_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int tc1100_resume(struct platform_device *dev)
+{
+ int ret;
+
+ ret = set_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
+ if (ret)
+ return ret;
+
+ ret = set_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int __init tc1100_init(void)
+{
+ int result = 0;
+
+ if (!wmi_has_guid(GUID))
+ return -ENODEV;
+
+ result = platform_driver_register(&tc1100_driver);
+ if (result)
+ return result;
+
+ tc1100_device = platform_device_alloc("tc1100-wmi", -1);
+ platform_device_add(tc1100_device);
+
+ printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras loaded\n");
+
+ return result;
+}
+
+static void __exit tc1100_exit(void)
+{
+ platform_device_del(tc1100_device);
+ platform_driver_unregister(&tc1100_driver);
+
+ printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras unloaded\n");
+}
+
+module_init(tc1100_init);
+module_exit(tc1100_exit);
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
index cf56647a6ca4..7ba1acad5402 100644
--- a/drivers/misc/thinkpad_acpi.c
+++ b/drivers/misc/thinkpad_acpi.c
@@ -3,7 +3,7 @@
*
*
* Copyright (C) 2004-2005 Borislav Deianov <borislav@users.sf.net>
- * Copyright (C) 2006-2007 Henrique de Moraes Holschuh <hmh@hmh.eng.br>
+ * Copyright (C) 2006-2008 Henrique de Moraes Holschuh <hmh@hmh.eng.br>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,11 +21,13 @@
* 02110-1301, USA.
*/
-#define IBM_VERSION "0.17"
-#define TPACPI_SYSFS_VERSION 0x020000
+#define TPACPI_VERSION "0.19"
+#define TPACPI_SYSFS_VERSION 0x020200
/*
* Changelog:
+ * 2007-10-20 changelog trimmed down
+ *
* 2007-03-27 0.14 renamed to thinkpad_acpi and moved to
* drivers/misc.
*
@@ -33,89 +35,219 @@
* changelog now lives in git commit history, and will
* not be updated further in-file.
*
- * 2005-08-17 0.12 fix compilation on 2.6.13-rc kernels
* 2005-03-17 0.11 support for 600e, 770x
* thanks to Jamie Lentin <lentinj@dial.pipex.com>
- * support for 770e, G41
- * G40 and G41 don't have a thinklight
- * temperatures no longer experimental
- * experimental brightness control
- * experimental volume control
- * experimental fan enable/disable
- * 2005-01-16 0.10 fix module loading on R30, R31
- * 2005-01-16 0.9 support for 570, R30, R31
- * ultrabay support on A22p, A3x
- * limit arg for cmos, led, beep, drop experimental status
- * more capable led control on A21e, A22p, T20-22, X20
- * experimental temperatures and fan speed
- * experimental embedded controller register dump
- * mark more functions as __init, drop incorrect __exit
- * use MODULE_VERSION
+ *
+ * 2005-01-16 0.9 use MODULE_VERSION
* thanks to Henrik Brix Andersen <brix@gentoo.org>
* fix parameter passing on module loading
* thanks to Rusty Russell <rusty@rustcorp.com.au>
* thanks to Jim Radford <radford@blackbean.org>
* 2004-11-08 0.8 fix init error case, don't return from a macro
* thanks to Chris Wright <chrisw@osdl.org>
- * 2004-10-23 0.7 fix module loading on A21e, A22p, T20, T21, X20
- * fix led control on A21e
- * 2004-10-19 0.6 use acpi_bus_register_driver() to claim HKEY device
- * 2004-10-18 0.5 thinklight support on A21e, G40, R32, T20, T21, X20
- * proc file format changed
- * video_switch command
- * experimental cmos control
- * experimental led control
- * experimental acpi sounds
- * 2004-09-16 0.4 support for module parameters
- * hotkey mask can be prefixed by 0x
- * video output switching
- * video expansion control
- * ultrabay eject support
- * removed lcd brightness/on/off control, didn't work
- * 2004-08-17 0.3 support for R40
- * lcd off, brightness control
- * thinklight on/off
- * 2004-08-14 0.2 support for T series, X20
- * bluetooth enable/disable
- * hotkey events disabled by default
- * removed fan control, currently useless
- * 2004-08-09 0.1 initial release, support for X series
*/
-#include "thinkpad_acpi.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/delay.h>
+
+#include <linux/nvram.h>
+#include <linux/proc_fs.h>
+#include <linux/sysfs.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/platform_device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/input.h>
+#include <asm/uaccess.h>
+
+#include <linux/dmi.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+
+#include <acpi/acpi_drivers.h>
+#include <acpi/acnamesp.h>
+
+#include <linux/pci_ids.h>
+
+
+/* ThinkPad CMOS commands */
+#define TP_CMOS_VOLUME_DOWN 0
+#define TP_CMOS_VOLUME_UP 1
+#define TP_CMOS_VOLUME_MUTE 2
+#define TP_CMOS_BRIGHTNESS_UP 4
+#define TP_CMOS_BRIGHTNESS_DOWN 5
+
+/* NVRAM Addresses */
+enum tp_nvram_addr {
+ TP_NVRAM_ADDR_HK2 = 0x57,
+ TP_NVRAM_ADDR_THINKLIGHT = 0x58,
+ TP_NVRAM_ADDR_VIDEO = 0x59,
+ TP_NVRAM_ADDR_BRIGHTNESS = 0x5e,
+ TP_NVRAM_ADDR_MIXER = 0x60,
+};
-MODULE_AUTHOR("Borislav Deianov, Henrique de Moraes Holschuh");
-MODULE_DESCRIPTION(IBM_DESC);
-MODULE_VERSION(IBM_VERSION);
-MODULE_LICENSE("GPL");
+/* NVRAM bit masks */
+enum {
+ TP_NVRAM_MASK_HKT_THINKPAD = 0x08,
+ TP_NVRAM_MASK_HKT_ZOOM = 0x20,
+ TP_NVRAM_MASK_HKT_DISPLAY = 0x40,
+ TP_NVRAM_MASK_HKT_HIBERNATE = 0x80,
+ TP_NVRAM_MASK_THINKLIGHT = 0x10,
+ TP_NVRAM_MASK_HKT_DISPEXPND = 0x30,
+ TP_NVRAM_MASK_HKT_BRIGHTNESS = 0x20,
+ TP_NVRAM_MASK_LEVEL_BRIGHTNESS = 0x0f,
+ TP_NVRAM_POS_LEVEL_BRIGHTNESS = 0,
+ TP_NVRAM_MASK_MUTE = 0x40,
+ TP_NVRAM_MASK_HKT_VOLUME = 0x80,
+ TP_NVRAM_MASK_LEVEL_VOLUME = 0x0f,
+ TP_NVRAM_POS_LEVEL_VOLUME = 0,
+};
-/* Please remove this in year 2009 */
-MODULE_ALIAS("ibm_acpi");
+/* ACPI HIDs */
+#define TPACPI_ACPI_HKEY_HID "IBM0068"
-/*
- * DMI matching for module autoloading
- *
- * See http://thinkwiki.org/wiki/List_of_DMI_IDs
- * See http://thinkwiki.org/wiki/BIOS_Upgrade_Downloads
- *
- * Only models listed in thinkwiki will be supported, so add yours
- * if it is not there yet.
+/* Input IDs */
+#define TPACPI_HKEY_INPUT_PRODUCT 0x5054 /* "TP" */
+#define TPACPI_HKEY_INPUT_VERSION 0x4101
+
+
+/****************************************************************************
+ * Main driver
*/
-#define IBM_BIOS_MODULE_ALIAS(__type) \
- MODULE_ALIAS("dmi:bvnIBM:bvr" __type "ET??WW")
-/* Non-ancient thinkpads */
-MODULE_ALIAS("dmi:bvnIBM:*:svnIBM:*:pvrThinkPad*:rvnIBM:*");
-MODULE_ALIAS("dmi:bvnLENOVO:*:svnLENOVO:*:pvrThinkPad*:rvnLENOVO:*");
+#define TPACPI_NAME "thinkpad"
+#define TPACPI_DESC "ThinkPad ACPI Extras"
+#define TPACPI_FILE TPACPI_NAME "_acpi"
+#define TPACPI_URL "http://ibm-acpi.sf.net/"
+#define TPACPI_MAIL "ibm-acpi-devel@lists.sourceforge.net"
+
+#define TPACPI_PROC_DIR "ibm"
+#define TPACPI_ACPI_EVENT_PREFIX "ibm"
+#define TPACPI_DRVR_NAME TPACPI_FILE
+#define TPACPI_HWMON_DRVR_NAME TPACPI_NAME "_hwmon"
+
+#define TPACPI_MAX_ACPI_ARGS 3
+
+/* Debugging */
+#define TPACPI_LOG TPACPI_FILE ": "
+#define TPACPI_ERR KERN_ERR TPACPI_LOG
+#define TPACPI_NOTICE KERN_NOTICE TPACPI_LOG
+#define TPACPI_INFO KERN_INFO TPACPI_LOG
+#define TPACPI_DEBUG KERN_DEBUG TPACPI_LOG
+
+#define TPACPI_DBG_ALL 0xffff
+#define TPACPI_DBG_INIT 0x0001
+#define TPACPI_DBG_EXIT 0x0002
+#define dbg_printk(a_dbg_level, format, arg...) \
+ do { if (dbg_level & a_dbg_level) \
+ printk(TPACPI_DEBUG "%s: " format, __func__ , ## arg); \
+ } while (0)
+#ifdef CONFIG_THINKPAD_ACPI_DEBUG
+#define vdbg_printk(a_dbg_level, format, arg...) \
+ dbg_printk(a_dbg_level, format, ## arg)
+static const char *str_supported(int is_supported);
+#else
+#define vdbg_printk(a_dbg_level, format, arg...)
+#endif
-/* Ancient thinkpad BIOSes have to be identified by
- * BIOS type or model number, and there are far less
- * BIOS types than model numbers... */
-IBM_BIOS_MODULE_ALIAS("I[B,D,H,I,M,N,O,T,W,V,Y,Z]");
-IBM_BIOS_MODULE_ALIAS("1[0,3,6,8,A-G,I,K,M-P,S,T]");
-IBM_BIOS_MODULE_ALIAS("K[U,X-Z]");
+#define onoff(status, bit) ((status) & (1 << (bit)) ? "on" : "off")
+#define enabled(status, bit) ((status) & (1 << (bit)) ? "enabled" : "disabled")
+#define strlencmp(a, b) (strncmp((a), (b), strlen(b)))
+
+
+/****************************************************************************
+ * Driver-wide structs and misc. variables
+ */
+
+struct ibm_struct;
+
+struct tp_acpi_drv_struct {
+ const struct acpi_device_id *hid;
+ struct acpi_driver *driver;
-#define __unused __attribute__ ((unused))
+ void (*notify) (struct ibm_struct *, u32);
+ acpi_handle *handle;
+ u32 type;
+ struct acpi_device *device;
+};
+
+struct ibm_struct {
+ char *name;
+
+ int (*read) (char *);
+ int (*write) (char *);
+ void (*exit) (void);
+ void (*resume) (void);
+ void (*suspend) (pm_message_t state);
+
+ struct list_head all_drivers;
+
+ struct tp_acpi_drv_struct *acpi;
+
+ struct {
+ u8 acpi_driver_registered:1;
+ u8 acpi_notify_installed:1;
+ u8 proc_created:1;
+ u8 init_called:1;
+ u8 experimental:1;
+ } flags;
+};
+
+struct ibm_init_struct {
+ char param[32];
+
+ int (*init) (struct ibm_init_struct *);
+ struct ibm_struct *data;
+};
+
+static struct {
+#ifdef CONFIG_THINKPAD_ACPI_BAY
+ u32 bay_status:1;
+ u32 bay_eject:1;
+ u32 bay_status2:1;
+ u32 bay_eject2:1;
+#endif
+ u32 bluetooth:1;
+ u32 hotkey:1;
+ u32 hotkey_mask:1;
+ u32 hotkey_wlsw:1;
+ u32 light:1;
+ u32 light_status:1;
+ u32 bright_16levels:1;
+ u32 wan:1;
+ u32 fan_ctrl_status_undef:1;
+ u32 input_device_registered:1;
+ u32 platform_drv_registered:1;
+ u32 platform_drv_attrs_registered:1;
+ u32 sensors_pdrv_registered:1;
+ u32 sensors_pdrv_attrs_registered:1;
+ u32 sensors_pdev_attrs_registered:1;
+ u32 hotkey_poll_active:1;
+} tp_features;
+
+struct thinkpad_id_data {
+ unsigned int vendor; /* ThinkPad vendor:
+ * PCI_VENDOR_ID_IBM/PCI_VENDOR_ID_LENOVO */
+
+ char *bios_version_str; /* Something like 1ZET51WW (1.03z) */
+ char *ec_version_str; /* Something like 1ZHT51WW-1.04a */
+
+ u16 bios_model; /* Big Endian, TP-1Y = 0x5931, 0 = unknown */
+ u16 ec_model;
+
+ char *model_str;
+};
+static struct thinkpad_id_data thinkpad_id;
static enum {
TPACPI_LIFE_INIT = 0,
@@ -123,6 +255,9 @@ static enum {
TPACPI_LIFE_EXITING,
} tpacpi_lifecycle;
+static int experimental;
+static u32 dbg_level;
+
/****************************************************************************
****************************************************************************
*
@@ -137,13 +272,13 @@ static enum {
static acpi_handle root_handle;
-#define IBM_HANDLE(object, parent, paths...) \
+#define TPACPI_HANDLE(object, parent, paths...) \
static acpi_handle object##_handle; \
static acpi_handle *object##_parent = &parent##_handle; \
static char *object##_path; \
static char *object##_paths[] = { paths }
-IBM_HANDLE(ec, root, "\\_SB.PCI0.ISA.EC0", /* 240, 240x */
+TPACPI_HANDLE(ec, root, "\\_SB.PCI0.ISA.EC0", /* 240, 240x */
"\\_SB.PCI.ISA.EC", /* 570 */
"\\_SB.PCI0.ISA0.EC0", /* 600e/x, 770e, 770x */
"\\_SB.PCI0.ISA.EC", /* A21e, A2xm/p, T20-22, X20-21 */
@@ -152,20 +287,16 @@ IBM_HANDLE(ec, root, "\\_SB.PCI0.ISA.EC0", /* 240, 240x */
"\\_SB.PCI0.LPC.EC", /* all others */
);
-IBM_HANDLE(ecrd, ec, "ECRD"); /* 570 */
-IBM_HANDLE(ecwr, ec, "ECWR"); /* 570 */
-
-
-/*************************************************************************
- * Misc ACPI handles
- */
+TPACPI_HANDLE(ecrd, ec, "ECRD"); /* 570 */
+TPACPI_HANDLE(ecwr, ec, "ECWR"); /* 570 */
-IBM_HANDLE(cmos, root, "\\UCMS", /* R50, R50e, R50p, R51, T4x, X31, X40 */
+TPACPI_HANDLE(cmos, root, "\\UCMS", /* R50, R50e, R50p, R51, */
+ /* T4x, X31, X40 */
"\\CMOS", /* A3x, G4x, R32, T23, T30, X22-24, X30 */
"\\CMS", /* R40, R40e */
); /* all others */
-IBM_HANDLE(hkey, ec, "\\_SB.HKEY", /* 600e/x, 770e, 770x */
+TPACPI_HANDLE(hkey, ec, "\\_SB.HKEY", /* 600e/x, 770e, 770x */
"^HKEY", /* R30, R31 */
"HKEY", /* all others */
); /* 570 */
@@ -180,7 +311,7 @@ static int acpi_evalf(acpi_handle handle,
{
char *fmt0 = fmt;
struct acpi_object_list params;
- union acpi_object in_objs[IBM_MAX_ACPI_ARGS];
+ union acpi_object in_objs[TPACPI_MAX_ACPI_ARGS];
struct acpi_buffer result, *resultp;
union acpi_object out_obj;
acpi_status status;
@@ -190,7 +321,7 @@ static int acpi_evalf(acpi_handle handle,
int quiet;
if (!*fmt) {
- printk(IBM_ERR "acpi_evalf() called with empty format\n");
+ printk(TPACPI_ERR "acpi_evalf() called with empty format\n");
return 0;
}
@@ -215,7 +346,7 @@ static int acpi_evalf(acpi_handle handle,
break;
/* add more types as needed */
default:
- printk(IBM_ERR "acpi_evalf() called "
+ printk(TPACPI_ERR "acpi_evalf() called "
"with invalid format character '%c'\n", c);
return 0;
}
@@ -242,29 +373,19 @@ static int acpi_evalf(acpi_handle handle,
break;
/* add more types as needed */
default:
- printk(IBM_ERR "acpi_evalf() called "
+ printk(TPACPI_ERR "acpi_evalf() called "
"with invalid format character '%c'\n", res_type);
return 0;
}
if (!success && !quiet)
- printk(IBM_ERR "acpi_evalf(%s, %s, ...) failed: %d\n",
+ printk(TPACPI_ERR "acpi_evalf(%s, %s, ...) failed: %d\n",
method, fmt0, status);
return success;
}
-static void __unused acpi_print_int(acpi_handle handle, char *method)
-{
- int i;
-
- if (acpi_evalf(handle, &i, method, "d"))
- printk(IBM_INFO "%s = 0x%x\n", method, i);
- else
- printk(IBM_ERR "error calling %s\n", method);
-}
-
-static int acpi_ec_read(int i, u8 * p)
+static int acpi_ec_read(int i, u8 *p)
{
int v;
@@ -293,6 +414,7 @@ static int acpi_ec_write(int i, u8 v)
return 1;
}
+#if defined(CONFIG_THINKPAD_ACPI_DOCK) || defined(CONFIG_THINKPAD_ACPI_BAY)
static int _sta(acpi_handle handle)
{
int status;
@@ -302,6 +424,7 @@ static int _sta(acpi_handle handle)
return status;
}
+#endif
static int issue_thinkpad_cmos_command(int cmos_cmd)
{
@@ -318,6 +441,10 @@ static int issue_thinkpad_cmos_command(int cmos_cmd)
* ACPI device model
*/
+#define TPACPI_ACPIHANDLE_INIT(object) \
+ drv_acpi_handle_init(#object, &object##_handle, *object##_parent, \
+ object##_paths, ARRAY_SIZE(object##_paths), &object##_path)
+
static void drv_acpi_handle_init(char *name,
acpi_handle *handle, acpi_handle parent,
char **paths, int num_paths, char **path)
@@ -372,25 +499,27 @@ static int __init setup_acpi_notify(struct ibm_struct *ibm)
rc = acpi_bus_get_device(*ibm->acpi->handle, &ibm->acpi->device);
if (rc < 0) {
- printk(IBM_ERR "acpi_bus_get_device(%s) failed: %d\n",
+ printk(TPACPI_ERR "acpi_bus_get_device(%s) failed: %d\n",
ibm->name, rc);
return -ENODEV;
}
acpi_driver_data(ibm->acpi->device) = ibm;
sprintf(acpi_device_class(ibm->acpi->device), "%s/%s",
- IBM_ACPI_EVENT_PREFIX,
+ TPACPI_ACPI_EVENT_PREFIX,
ibm->name);
status = acpi_install_notify_handler(*ibm->acpi->handle,
ibm->acpi->type, dispatch_acpi_notify, ibm);
if (ACPI_FAILURE(status)) {
if (status == AE_ALREADY_EXISTS) {
- printk(IBM_NOTICE "another device driver is already handling %s events\n",
- ibm->name);
+ printk(TPACPI_NOTICE
+ "another device driver is already "
+ "handling %s events\n", ibm->name);
} else {
- printk(IBM_ERR "acpi_install_notify_handler(%s) failed: %d\n",
- ibm->name, status);
+ printk(TPACPI_ERR
+ "acpi_install_notify_handler(%s) failed: %d\n",
+ ibm->name, status);
}
return -ENODEV;
}
@@ -414,18 +543,18 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
ibm->acpi->driver = kzalloc(sizeof(struct acpi_driver), GFP_KERNEL);
if (!ibm->acpi->driver) {
- printk(IBM_ERR "kzalloc(ibm->driver) failed\n");
+ printk(TPACPI_ERR "kzalloc(ibm->driver) failed\n");
return -ENOMEM;
}
- sprintf(ibm->acpi->driver->name, "%s_%s", IBM_NAME, ibm->name);
+ sprintf(ibm->acpi->driver->name, "%s_%s", TPACPI_NAME, ibm->name);
ibm->acpi->driver->ids = ibm->acpi->hid;
ibm->acpi->driver->ops.add = &tpacpi_device_add;
rc = acpi_bus_register_driver(ibm->acpi->driver);
if (rc < 0) {
- printk(IBM_ERR "acpi_bus_register_driver(%s) failed: %d\n",
+ printk(TPACPI_ERR "acpi_bus_register_driver(%s) failed: %d\n",
ibm->name, rc);
kfree(ibm->acpi->driver);
ibm->acpi->driver = NULL;
@@ -470,7 +599,7 @@ static int dispatch_procfs_read(char *page, char **start, off_t off,
}
static int dispatch_procfs_write(struct file *file,
- const char __user * userbuf,
+ const char __user *userbuf,
unsigned long count, void *data)
{
struct ibm_struct *ibm = data;
@@ -530,7 +659,22 @@ static struct platform_device *tpacpi_sensors_pdev;
static struct device *tpacpi_hwmon;
static struct input_dev *tpacpi_inputdev;
static struct mutex tpacpi_inputdev_send_mutex;
+static LIST_HEAD(tpacpi_all_drivers);
+
+static int tpacpi_suspend_handler(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ibm_struct *ibm, *itmp;
+
+ list_for_each_entry_safe(ibm, itmp,
+ &tpacpi_all_drivers,
+ all_drivers) {
+ if (ibm->suspend)
+ (ibm->suspend)(state);
+ }
+ return 0;
+}
static int tpacpi_resume_handler(struct platform_device *pdev)
{
@@ -548,107 +692,36 @@ static int tpacpi_resume_handler(struct platform_device *pdev)
static struct platform_driver tpacpi_pdriver = {
.driver = {
- .name = IBM_DRVR_NAME,
+ .name = TPACPI_DRVR_NAME,
.owner = THIS_MODULE,
},
+ .suspend = tpacpi_suspend_handler,
.resume = tpacpi_resume_handler,
};
static struct platform_driver tpacpi_hwmon_pdriver = {
.driver = {
- .name = IBM_HWMON_DRVR_NAME,
+ .name = TPACPI_HWMON_DRVR_NAME,
.owner = THIS_MODULE,
},
};
/*************************************************************************
- * thinkpad-acpi driver attributes
+ * sysfs support helpers
*/
-/* interface_version --------------------------------------------------- */
-static ssize_t tpacpi_driver_interface_version_show(
- struct device_driver *drv,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", TPACPI_SYSFS_VERSION);
-}
-
-static DRIVER_ATTR(interface_version, S_IRUGO,
- tpacpi_driver_interface_version_show, NULL);
-
-/* debug_level --------------------------------------------------------- */
-static ssize_t tpacpi_driver_debug_show(struct device_driver *drv,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "0x%04x\n", dbg_level);
-}
-
-static ssize_t tpacpi_driver_debug_store(struct device_driver *drv,
- const char *buf, size_t count)
-{
- unsigned long t;
-
- if (parse_strtoul(buf, 0xffff, &t))
- return -EINVAL;
-
- dbg_level = t;
-
- return count;
-}
-
-static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
- tpacpi_driver_debug_show, tpacpi_driver_debug_store);
-
-/* version ------------------------------------------------------------- */
-static ssize_t tpacpi_driver_version_show(struct device_driver *drv,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s v%s\n", IBM_DESC, IBM_VERSION);
-}
-
-static DRIVER_ATTR(version, S_IRUGO,
- tpacpi_driver_version_show, NULL);
-
-/* --------------------------------------------------------------------- */
-
-static struct driver_attribute* tpacpi_driver_attributes[] = {
- &driver_attr_debug_level, &driver_attr_version,
- &driver_attr_interface_version,
+struct attribute_set {
+ unsigned int members, max_members;
+ struct attribute_group group;
};
-static int __init tpacpi_create_driver_attributes(struct device_driver *drv)
-{
- int i, res;
-
- i = 0;
- res = 0;
- while (!res && i < ARRAY_SIZE(tpacpi_driver_attributes)) {
- res = driver_create_file(drv, tpacpi_driver_attributes[i]);
- i++;
- }
-
- return res;
-}
-
-static void tpacpi_remove_driver_attributes(struct device_driver *drv)
-{
- int i;
-
- for(i = 0; i < ARRAY_SIZE(tpacpi_driver_attributes); i++)
- driver_remove_file(drv, tpacpi_driver_attributes[i]);
-}
-
-/*************************************************************************
- * sysfs support helpers
- */
-
struct attribute_set_obj {
struct attribute_set s;
struct attribute *a;
} __attribute__((packed));
static struct attribute_set *create_attr_set(unsigned int max_members,
- const char* name)
+ const char *name)
{
struct attribute_set_obj *sobj;
@@ -668,8 +741,11 @@ static struct attribute_set *create_attr_set(unsigned int max_members,
return &sobj->s;
}
+#define destroy_attr_set(_set) \
+ kfree(_set);
+
/* not multi-threaded safe, use it in a single thread per set */
-static int add_to_attr_set(struct attribute_set* s, struct attribute *attr)
+static int add_to_attr_set(struct attribute_set *s, struct attribute *attr)
{
if (!s || !attr)
return -EINVAL;
@@ -683,7 +759,7 @@ static int add_to_attr_set(struct attribute_set* s, struct attribute *attr)
return 0;
}
-static int add_many_to_attr_set(struct attribute_set* s,
+static int add_many_to_attr_set(struct attribute_set *s,
struct attribute **attr,
unsigned int count)
{
@@ -698,12 +774,15 @@ static int add_many_to_attr_set(struct attribute_set* s,
return 0;
}
-static void delete_attr_set(struct attribute_set* s, struct kobject *kobj)
+static void delete_attr_set(struct attribute_set *s, struct kobject *kobj)
{
sysfs_remove_group(kobj, &s->group);
destroy_attr_set(s);
}
+#define register_attr_set_with_sysfs(_attr_set, _kobj) \
+ sysfs_create_group(_kobj, &_attr_set->group)
+
static int parse_strtoul(const char *buf,
unsigned long max, unsigned long *value)
{
@@ -720,6 +799,84 @@ static int parse_strtoul(const char *buf,
return 0;
}
+/*************************************************************************
+ * thinkpad-acpi driver attributes
+ */
+
+/* interface_version --------------------------------------------------- */
+static ssize_t tpacpi_driver_interface_version_show(
+ struct device_driver *drv,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", TPACPI_SYSFS_VERSION);
+}
+
+static DRIVER_ATTR(interface_version, S_IRUGO,
+ tpacpi_driver_interface_version_show, NULL);
+
+/* debug_level --------------------------------------------------------- */
+static ssize_t tpacpi_driver_debug_show(struct device_driver *drv,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n", dbg_level);
+}
+
+static ssize_t tpacpi_driver_debug_store(struct device_driver *drv,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+
+ if (parse_strtoul(buf, 0xffff, &t))
+ return -EINVAL;
+
+ dbg_level = t;
+
+ return count;
+}
+
+static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
+ tpacpi_driver_debug_show, tpacpi_driver_debug_store);
+
+/* version ------------------------------------------------------------- */
+static ssize_t tpacpi_driver_version_show(struct device_driver *drv,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s v%s\n",
+ TPACPI_DESC, TPACPI_VERSION);
+}
+
+static DRIVER_ATTR(version, S_IRUGO,
+ tpacpi_driver_version_show, NULL);
+
+/* --------------------------------------------------------------------- */
+
+static struct driver_attribute *tpacpi_driver_attributes[] = {
+ &driver_attr_debug_level, &driver_attr_version,
+ &driver_attr_interface_version,
+};
+
+static int __init tpacpi_create_driver_attributes(struct device_driver *drv)
+{
+ int i, res;
+
+ i = 0;
+ res = 0;
+ while (!res && i < ARRAY_SIZE(tpacpi_driver_attributes)) {
+ res = driver_create_file(drv, tpacpi_driver_attributes[i]);
+ i++;
+ }
+
+ return res;
+}
+
+static void tpacpi_remove_driver_attributes(struct device_driver *drv)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tpacpi_driver_attributes); i++)
+ driver_remove_file(drv, tpacpi_driver_attributes[i]);
+}
+
/****************************************************************************
****************************************************************************
*
@@ -734,17 +891,17 @@ static int parse_strtoul(const char *buf,
static int __init thinkpad_acpi_driver_init(struct ibm_init_struct *iibm)
{
- printk(IBM_INFO "%s v%s\n", IBM_DESC, IBM_VERSION);
- printk(IBM_INFO "%s\n", IBM_URL);
+ printk(TPACPI_INFO "%s v%s\n", TPACPI_DESC, TPACPI_VERSION);
+ printk(TPACPI_INFO "%s\n", TPACPI_URL);
- printk(IBM_INFO "ThinkPad BIOS %s, EC %s\n",
+ printk(TPACPI_INFO "ThinkPad BIOS %s, EC %s\n",
(thinkpad_id.bios_version_str) ?
thinkpad_id.bios_version_str : "unknown",
(thinkpad_id.ec_version_str) ?
thinkpad_id.ec_version_str : "unknown");
if (thinkpad_id.vendor && thinkpad_id.model_str)
- printk(IBM_INFO "%s %s\n",
+ printk(TPACPI_INFO "%s %s\n",
(thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ?
"IBM" : ((thinkpad_id.vendor ==
PCI_VENDOR_ID_LENOVO) ?
@@ -758,8 +915,8 @@ static int thinkpad_acpi_driver_read(char *p)
{
int len = 0;
- len += sprintf(p + len, "driver:\t\t%s\n", IBM_DESC);
- len += sprintf(p + len, "version:\t%s\n", IBM_VERSION);
+ len += sprintf(p + len, "driver:\t\t%s\n", TPACPI_DESC);
+ len += sprintf(p + len, "version:\t%s\n", TPACPI_VERSION);
return len;
}
@@ -773,15 +930,129 @@ static struct ibm_struct thinkpad_acpi_driver_data = {
* Hotkey subdriver
*/
+enum { /* hot key scan codes (derived from ACPI DSDT) */
+ TP_ACPI_HOTKEYSCAN_FNF1 = 0,
+ TP_ACPI_HOTKEYSCAN_FNF2,
+ TP_ACPI_HOTKEYSCAN_FNF3,
+ TP_ACPI_HOTKEYSCAN_FNF4,
+ TP_ACPI_HOTKEYSCAN_FNF5,
+ TP_ACPI_HOTKEYSCAN_FNF6,
+ TP_ACPI_HOTKEYSCAN_FNF7,
+ TP_ACPI_HOTKEYSCAN_FNF8,
+ TP_ACPI_HOTKEYSCAN_FNF9,
+ TP_ACPI_HOTKEYSCAN_FNF10,
+ TP_ACPI_HOTKEYSCAN_FNF11,
+ TP_ACPI_HOTKEYSCAN_FNF12,
+ TP_ACPI_HOTKEYSCAN_FNBACKSPACE,
+ TP_ACPI_HOTKEYSCAN_FNINSERT,
+ TP_ACPI_HOTKEYSCAN_FNDELETE,
+ TP_ACPI_HOTKEYSCAN_FNHOME,
+ TP_ACPI_HOTKEYSCAN_FNEND,
+ TP_ACPI_HOTKEYSCAN_FNPAGEUP,
+ TP_ACPI_HOTKEYSCAN_FNPAGEDOWN,
+ TP_ACPI_HOTKEYSCAN_FNSPACE,
+ TP_ACPI_HOTKEYSCAN_VOLUMEUP,
+ TP_ACPI_HOTKEYSCAN_VOLUMEDOWN,
+ TP_ACPI_HOTKEYSCAN_MUTE,
+ TP_ACPI_HOTKEYSCAN_THINKPAD,
+};
+
+enum { /* Keys available through NVRAM polling */
+ TPACPI_HKEY_NVRAM_KNOWN_MASK = 0x00fb88c0U,
+ TPACPI_HKEY_NVRAM_GOOD_MASK = 0x00fb8000U,
+};
+
+enum { /* Positions of some of the keys in hotkey masks */
+ TP_ACPI_HKEY_DISPSWTCH_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNF7,
+ TP_ACPI_HKEY_DISPXPAND_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNF8,
+ TP_ACPI_HKEY_HIBERNATE_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNF12,
+ TP_ACPI_HKEY_BRGHTUP_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNHOME,
+ TP_ACPI_HKEY_BRGHTDWN_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNEND,
+ TP_ACPI_HKEY_THNKLGHT_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNPAGEUP,
+ TP_ACPI_HKEY_ZOOM_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNSPACE,
+ TP_ACPI_HKEY_VOLUP_MASK = 1 << TP_ACPI_HOTKEYSCAN_VOLUMEUP,
+ TP_ACPI_HKEY_VOLDWN_MASK = 1 << TP_ACPI_HOTKEYSCAN_VOLUMEDOWN,
+ TP_ACPI_HKEY_MUTE_MASK = 1 << TP_ACPI_HOTKEYSCAN_MUTE,
+ TP_ACPI_HKEY_THINKPAD_MASK = 1 << TP_ACPI_HOTKEYSCAN_THINKPAD,
+};
+
+enum { /* NVRAM to ACPI HKEY group map */
+ TP_NVRAM_HKEY_GROUP_HK2 = TP_ACPI_HKEY_THINKPAD_MASK |
+ TP_ACPI_HKEY_ZOOM_MASK |
+ TP_ACPI_HKEY_DISPSWTCH_MASK |
+ TP_ACPI_HKEY_HIBERNATE_MASK,
+ TP_NVRAM_HKEY_GROUP_BRIGHTNESS = TP_ACPI_HKEY_BRGHTUP_MASK |
+ TP_ACPI_HKEY_BRGHTDWN_MASK,
+ TP_NVRAM_HKEY_GROUP_VOLUME = TP_ACPI_HKEY_VOLUP_MASK |
+ TP_ACPI_HKEY_VOLDWN_MASK |
+ TP_ACPI_HKEY_MUTE_MASK,
+};
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+struct tp_nvram_state {
+ u16 thinkpad_toggle:1;
+ u16 zoom_toggle:1;
+ u16 display_toggle:1;
+ u16 thinklight_toggle:1;
+ u16 hibernate_toggle:1;
+ u16 displayexp_toggle:1;
+ u16 display_state:1;
+ u16 brightness_toggle:1;
+ u16 volume_toggle:1;
+ u16 mute:1;
+
+ u8 brightness_level;
+ u8 volume_level;
+};
+
+static struct task_struct *tpacpi_hotkey_task;
+static u32 hotkey_source_mask; /* bit mask 0=ACPI,1=NVRAM */
+static int hotkey_poll_freq = 10; /* Hz */
+static struct mutex hotkey_thread_mutex;
+static struct mutex hotkey_thread_data_mutex;
+static unsigned int hotkey_config_change;
+
+#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+#define hotkey_source_mask 0U
+
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+static struct mutex hotkey_mutex;
+
+static enum { /* Reasons for waking up */
+ TP_ACPI_WAKEUP_NONE = 0, /* None or unknown */
+ TP_ACPI_WAKEUP_BAYEJ, /* Bay ejection request */
+ TP_ACPI_WAKEUP_UNDOCK, /* Undock request */
+} hotkey_wakeup_reason;
+
+static int hotkey_autosleep_ack;
+
static int hotkey_orig_status;
static u32 hotkey_orig_mask;
static u32 hotkey_all_mask;
static u32 hotkey_reserved_mask;
+static u32 hotkey_mask;
+
+static unsigned int hotkey_report_mode;
static u16 *hotkey_keycode_map;
static struct attribute_set *hotkey_dev_attributes;
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+#define HOTKEY_CONFIG_CRITICAL_START \
+ do { \
+ mutex_lock(&hotkey_thread_data_mutex); \
+ hotkey_config_change++; \
+ } while (0);
+#define HOTKEY_CONFIG_CRITICAL_END \
+ mutex_unlock(&hotkey_thread_data_mutex);
+#else
+#define HOTKEY_CONFIG_CRITICAL_START
+#define HOTKEY_CONFIG_CRITICAL_END
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
static int hotkey_get_wlsw(int *status)
{
if (!acpi_evalf(hkey_handle, status, "WLSW", "d"))
@@ -789,15 +1060,400 @@ static int hotkey_get_wlsw(int *status)
return 0;
}
+/*
+ * Call with hotkey_mutex held
+ */
+static int hotkey_mask_get(void)
+{
+ u32 m = 0;
+
+ if (tp_features.hotkey_mask) {
+ if (!acpi_evalf(hkey_handle, &m, "DHKN", "d"))
+ return -EIO;
+ }
+ hotkey_mask = m | (hotkey_source_mask & hotkey_mask);
+
+ return 0;
+}
+
+/*
+ * Call with hotkey_mutex held
+ */
+static int hotkey_mask_set(u32 mask)
+{
+ int i;
+ int rc = 0;
+
+ if (tp_features.hotkey_mask) {
+ HOTKEY_CONFIG_CRITICAL_START
+ for (i = 0; i < 32; i++) {
+ u32 m = 1 << i;
+ /* enable in firmware mask only keys not in NVRAM
+ * mode, but enable the key in the cached hotkey_mask
+ * regardless of mode, or the key will end up
+ * disabled by hotkey_mask_get() */
+ if (!acpi_evalf(hkey_handle,
+ NULL, "MHKM", "vdd", i + 1,
+ !!((mask & ~hotkey_source_mask) & m))) {
+ rc = -EIO;
+ break;
+ } else {
+ hotkey_mask = (hotkey_mask & ~m) | (mask & m);
+ }
+ }
+ HOTKEY_CONFIG_CRITICAL_END
+
+ /* hotkey_mask_get must be called unconditionally below */
+ if (!hotkey_mask_get() && !rc &&
+ (hotkey_mask & ~hotkey_source_mask) !=
+ (mask & ~hotkey_source_mask)) {
+ printk(TPACPI_NOTICE
+ "requested hot key mask 0x%08x, but "
+ "firmware forced it to 0x%08x\n",
+ mask, hotkey_mask);
+ }
+ } else {
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ HOTKEY_CONFIG_CRITICAL_START
+ hotkey_mask = mask & hotkey_source_mask;
+ HOTKEY_CONFIG_CRITICAL_END
+ hotkey_mask_get();
+ if (hotkey_mask != mask) {
+ printk(TPACPI_NOTICE
+ "requested hot key mask 0x%08x, "
+ "forced to 0x%08x (NVRAM poll mask is "
+ "0x%08x): no firmware mask support\n",
+ mask, hotkey_mask, hotkey_source_mask);
+ }
+#else
+ hotkey_mask_get();
+ rc = -ENXIO;
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+ }
+
+ return rc;
+}
+
+static int hotkey_status_get(int *status)
+{
+ if (!acpi_evalf(hkey_handle, status, "DHKC", "d"))
+ return -EIO;
+
+ return 0;
+}
+
+static int hotkey_status_set(int status)
+{
+ if (!acpi_evalf(hkey_handle, NULL, "MHKC", "vd", status))
+ return -EIO;
+
+ return 0;
+}
+
+static void tpacpi_input_send_radiosw(void)
+{
+ int wlsw;
+
+ mutex_lock(&tpacpi_inputdev_send_mutex);
+
+ if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw)) {
+ input_report_switch(tpacpi_inputdev,
+ SW_RADIO, !!wlsw);
+ input_sync(tpacpi_inputdev);
+ }
+
+ mutex_unlock(&tpacpi_inputdev_send_mutex);
+}
+
+static void tpacpi_input_send_key(unsigned int scancode)
+{
+ unsigned int keycode;
+
+ keycode = hotkey_keycode_map[scancode];
+
+ if (keycode != KEY_RESERVED) {
+ mutex_lock(&tpacpi_inputdev_send_mutex);
+
+ input_report_key(tpacpi_inputdev, keycode, 1);
+ if (keycode == KEY_UNKNOWN)
+ input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
+ scancode);
+ input_sync(tpacpi_inputdev);
+
+ input_report_key(tpacpi_inputdev, keycode, 0);
+ if (keycode == KEY_UNKNOWN)
+ input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
+ scancode);
+ input_sync(tpacpi_inputdev);
+
+ mutex_unlock(&tpacpi_inputdev_send_mutex);
+ }
+}
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+static struct tp_acpi_drv_struct ibm_hotkey_acpidriver;
+
+static void tpacpi_hotkey_send_key(unsigned int scancode)
+{
+ tpacpi_input_send_key(scancode);
+ if (hotkey_report_mode < 2) {
+ acpi_bus_generate_proc_event(ibm_hotkey_acpidriver.device,
+ 0x80, 0x1001 + scancode);
+ }
+}
+
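+/*
+ * NVRAM polling (note on the helpers below): snapshot the hot-key related
+ * NVRAM bytes and compare two consecutive snapshots to synthesize key
+ * events for keys whose state changes are only visible in NVRAM rather
+ * than through the ACPI HKEY firmware mask.
+ */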
+static void hotkey_read_nvram(struct tp_nvram_state *n, u32 m)
+{
+ u8 d;
+
+ if (m & TP_NVRAM_HKEY_GROUP_HK2) {
+ d = nvram_read_byte(TP_NVRAM_ADDR_HK2);
+ n->thinkpad_toggle = !!(d & TP_NVRAM_MASK_HKT_THINKPAD);
+ n->zoom_toggle = !!(d & TP_NVRAM_MASK_HKT_ZOOM);
+ n->display_toggle = !!(d & TP_NVRAM_MASK_HKT_DISPLAY);
+ n->hibernate_toggle = !!(d & TP_NVRAM_MASK_HKT_HIBERNATE);
+ }
+ if (m & TP_ACPI_HKEY_THNKLGHT_MASK) {
+ d = nvram_read_byte(TP_NVRAM_ADDR_THINKLIGHT);
+ n->thinklight_toggle = !!(d & TP_NVRAM_MASK_THINKLIGHT);
+ }
+ if (m & TP_ACPI_HKEY_DISPXPAND_MASK) {
+ d = nvram_read_byte(TP_NVRAM_ADDR_VIDEO);
+ n->displayexp_toggle =
+ !!(d & TP_NVRAM_MASK_HKT_DISPEXPND);
+ }
+ if (m & TP_NVRAM_HKEY_GROUP_BRIGHTNESS) {
+ d = nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS);
+ n->brightness_level = (d & TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
+ >> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
+ n->brightness_toggle =
+ !!(d & TP_NVRAM_MASK_HKT_BRIGHTNESS);
+ }
+ if (m & TP_NVRAM_HKEY_GROUP_VOLUME) {
+ d = nvram_read_byte(TP_NVRAM_ADDR_MIXER);
+ n->volume_level = (d & TP_NVRAM_MASK_LEVEL_VOLUME)
+ >> TP_NVRAM_POS_LEVEL_VOLUME;
+ n->mute = !!(d & TP_NVRAM_MASK_MUTE);
+ n->volume_toggle = !!(d & TP_NVRAM_MASK_HKT_VOLUME);
+ }
+}
+
+#define TPACPI_COMPARE_KEY(__scancode, __member) \
+ do { \
+ if ((mask & (1 << __scancode)) && \
+ oldn->__member != newn->__member) \
+ tpacpi_hotkey_send_key(__scancode); \
+ } while (0)
+
+#define TPACPI_MAY_SEND_KEY(__scancode) \
+ do { if (mask & (1 << __scancode)) \
+ tpacpi_hotkey_send_key(__scancode); } while (0)
+
+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+ struct tp_nvram_state *newn,
+ u32 mask)
+{
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF12, hibernate_toggle);
+
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNPAGEUP, thinklight_toggle);
+
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF8, displayexp_toggle);
+
+ /* handle volume */
+ if (oldn->volume_toggle != newn->volume_toggle) {
+ if (oldn->mute != newn->mute) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
+ }
+ if (oldn->volume_level > newn->volume_level) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
+ } else if (oldn->volume_level < newn->volume_level) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+ } else if (oldn->mute == newn->mute) {
+ /* repeated key presses that didn't change state */
+ if (newn->mute) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
+ } else if (newn->volume_level != 0) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+ } else {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
+ }
+ }
+ }
+
+ /* handle brightness */
+ if (oldn->brightness_toggle != newn->brightness_toggle) {
+ if (oldn->brightness_level < newn->brightness_level) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+ } else if (oldn->brightness_level > newn->brightness_level) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
+ } else {
+ /* repeated key presses that didn't change state */
+ if (newn->brightness_level != 0) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+ } else {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
+ }
+ }
+ }
+}
+
+#undef TPACPI_COMPARE_KEY
+#undef TPACPI_MAY_SEND_KEY
+
+static int hotkey_kthread(void *data)
+{
+ struct tp_nvram_state s[2];
+ u32 mask;
+ unsigned int si, so;
+ unsigned long t;
+ unsigned int change_detector, must_reset;
+
+ mutex_lock(&hotkey_thread_mutex);
+
+ if (tpacpi_lifecycle == TPACPI_LIFE_EXITING)
+ goto exit;
+
+ set_freezable();
+
+ so = 0;
+ si = 1;
+ t = 0;
+
+ /* Initial state for compares */
+ mutex_lock(&hotkey_thread_data_mutex);
+ change_detector = hotkey_config_change;
+ mask = hotkey_source_mask & hotkey_mask;
+ mutex_unlock(&hotkey_thread_data_mutex);
+ hotkey_read_nvram(&s[so], mask);
+
+ while (!kthread_should_stop() && hotkey_poll_freq) {
+ if (t == 0)
+ t = 1000/hotkey_poll_freq;
+ t = msleep_interruptible(t);
+ if (unlikely(kthread_should_stop()))
+ break;
+ must_reset = try_to_freeze();
+ if (t > 0 && !must_reset)
+ continue;
+
+ mutex_lock(&hotkey_thread_data_mutex);
+ if (must_reset || hotkey_config_change != change_detector) {
+ /* forget old state on thaw or config change */
+ si = so;
+ t = 0;
+ change_detector = hotkey_config_change;
+ }
+ mask = hotkey_source_mask & hotkey_mask;
+ mutex_unlock(&hotkey_thread_data_mutex);
+
+ if (likely(mask)) {
+ hotkey_read_nvram(&s[si], mask);
+ if (likely(si != so)) {
+ hotkey_compare_and_issue_event(&s[so], &s[si],
+ mask);
+ }
+ }
+
+ so = si;
+ si ^= 1;
+ }
+
+exit:
+ mutex_unlock(&hotkey_thread_mutex);
+ return 0;
+}
+
+static void hotkey_poll_stop_sync(void)
+{
+ if (tpacpi_hotkey_task) {
+ if (frozen(tpacpi_hotkey_task) ||
+ freezing(tpacpi_hotkey_task))
+ thaw_process(tpacpi_hotkey_task);
+
+ kthread_stop(tpacpi_hotkey_task);
+ tpacpi_hotkey_task = NULL;
+ mutex_lock(&hotkey_thread_mutex);
+ /* at this point, the thread did exit */
+ mutex_unlock(&hotkey_thread_mutex);
+ }
+}
+
+/* call with hotkey_mutex held */
+static void hotkey_poll_setup(int may_warn)
+{
+ if ((hotkey_source_mask & hotkey_mask) != 0 &&
+ hotkey_poll_freq > 0 &&
+ (tpacpi_inputdev->users > 0 || hotkey_report_mode < 2)) {
+ if (!tpacpi_hotkey_task) {
+ tpacpi_hotkey_task = kthread_run(hotkey_kthread,
+ NULL,
+ TPACPI_FILE "d");
+ if (IS_ERR(tpacpi_hotkey_task)) {
+ tpacpi_hotkey_task = NULL;
+ printk(TPACPI_ERR
+ "could not create kernel thread "
+ "for hotkey polling\n");
+ }
+ }
+ } else {
+ hotkey_poll_stop_sync();
+ if (may_warn &&
+ hotkey_source_mask != 0 && hotkey_poll_freq == 0) {
+ printk(TPACPI_NOTICE
+ "hot keys 0x%08x require polling, "
+ "which is currently disabled\n",
+ hotkey_source_mask);
+ }
+ }
+}
+
+static void hotkey_poll_setup_safe(int may_warn)
+{
+ mutex_lock(&hotkey_mutex);
+ hotkey_poll_setup(may_warn);
+ mutex_unlock(&hotkey_mutex);
+}
+
+static int hotkey_inputdev_open(struct input_dev *dev)
+{
+ switch (tpacpi_lifecycle) {
+ case TPACPI_LIFE_INIT:
+ /*
+ * hotkey_init will call hotkey_poll_setup_safe
+ * at the appropriate moment
+ */
+ return 0;
+ case TPACPI_LIFE_EXITING:
+ return -EBUSY;
+ case TPACPI_LIFE_RUNNING:
+ hotkey_poll_setup_safe(0);
+ return 0;
+ }
+
+ /* Should only happen if tpacpi_lifecycle is corrupt */
+ BUG();
+ return -EBUSY;
+}
+
+static void hotkey_inputdev_close(struct input_dev *dev)
+{
+ /* disable hotkey polling when possible */
+ if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING)
+ hotkey_poll_setup_safe(0);
+}
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
/* sysfs hotkey enable ------------------------------------------------- */
static ssize_t hotkey_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int res, status;
- u32 mask;
- res = hotkey_get(&status, &mask);
+ res = hotkey_status_get(&status);
if (res)
return res;
@@ -809,15 +1465,12 @@ static ssize_t hotkey_enable_store(struct device *dev,
const char *buf, size_t count)
{
unsigned long t;
- int res, status;
- u32 mask;
+ int res;
if (parse_strtoul(buf, 1, &t))
return -EINVAL;
- res = hotkey_get(&status, &mask);
- if (!res)
- res = hotkey_set(t, mask);
+ res = hotkey_status_set(t);
return (res) ? res : count;
}
@@ -831,14 +1484,15 @@ static ssize_t hotkey_mask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int res, status;
- u32 mask;
+ int res;
- res = hotkey_get(&status, &mask);
- if (res)
- return res;
+ if (mutex_lock_interruptible(&hotkey_mutex))
+ return -ERESTARTSYS;
+ res = hotkey_mask_get();
+ mutex_unlock(&hotkey_mutex);
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", mask);
+ return (res)?
+ res : snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_mask);
}
static ssize_t hotkey_mask_store(struct device *dev,
@@ -846,15 +1500,21 @@ static ssize_t hotkey_mask_store(struct device *dev,
const char *buf, size_t count)
{
unsigned long t;
- int res, status;
- u32 mask;
+ int res;
if (parse_strtoul(buf, 0xffffffffUL, &t))
return -EINVAL;
- res = hotkey_get(&status, &mask);
- if (!res)
- hotkey_set(status, t);
+ if (mutex_lock_interruptible(&hotkey_mutex))
+ return -ERESTARTSYS;
+
+ res = hotkey_mask_set(t);
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ hotkey_poll_setup(1);
+#endif
+
+ mutex_unlock(&hotkey_mutex);
return (res) ? res : count;
}
@@ -890,7 +1550,8 @@ static ssize_t hotkey_all_mask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_all_mask);
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n",
+ hotkey_all_mask | hotkey_source_mask);
}
static struct device_attribute dev_attr_hotkey_all_mask =
@@ -902,14 +1563,87 @@ static ssize_t hotkey_recommended_mask_show(struct device *dev,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "0x%08x\n",
- hotkey_all_mask & ~hotkey_reserved_mask);
+ (hotkey_all_mask | hotkey_source_mask)
+ & ~hotkey_reserved_mask);
}
static struct device_attribute dev_attr_hotkey_recommended_mask =
__ATTR(hotkey_recommended_mask, S_IRUGO,
hotkey_recommended_mask_show, NULL);
-/* sysfs hotkey radio_sw ----------------------------------------------- */
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+
+/* sysfs hotkey hotkey_source_mask ------------------------------------- */
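+/* Bits set in hotkey_source_mask mark hot keys that are serviced by
+ * NVRAM polling instead of firmware HKEY events; only bits also
+ * present in hotkey_mask are actually polled (see hotkey_kthread). */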
+static ssize_t hotkey_source_mask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_source_mask);
+}
+
+static ssize_t hotkey_source_mask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+
+ if (parse_strtoul(buf, 0xffffffffUL, &t) ||
+ ((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0))
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&hotkey_mutex))
+ return -ERESTARTSYS;
+
+ HOTKEY_CONFIG_CRITICAL_START
+ hotkey_source_mask = t;
+ HOTKEY_CONFIG_CRITICAL_END
+
+ hotkey_poll_setup(1);
+
+ mutex_unlock(&hotkey_mutex);
+
+ return count;
+}
+
+static struct device_attribute dev_attr_hotkey_source_mask =
+ __ATTR(hotkey_source_mask, S_IWUSR | S_IRUGO,
+ hotkey_source_mask_show, hotkey_source_mask_store);
+
+/* sysfs hotkey hotkey_poll_freq --------------------------------------- */
+static ssize_t hotkey_poll_freq_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_poll_freq);
+}
+
+static ssize_t hotkey_poll_freq_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+
+ if (parse_strtoul(buf, 25, &t))
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&hotkey_mutex))
+ return -ERESTARTSYS;
+
+ hotkey_poll_freq = t;
+
+ hotkey_poll_setup(1);
+ mutex_unlock(&hotkey_mutex);
+
+ return count;
+}
+
+static struct device_attribute dev_attr_hotkey_poll_freq =
+ __ATTR(hotkey_poll_freq, S_IWUSR | S_IRUGO,
+ hotkey_poll_freq_show, hotkey_poll_freq_store);
+
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+/* sysfs hotkey radio_sw (pollable) ------------------------------------ */
static ssize_t hotkey_radio_sw_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -925,6 +1659,13 @@ static ssize_t hotkey_radio_sw_show(struct device *dev,
static struct device_attribute dev_attr_hotkey_radio_sw =
__ATTR(hotkey_radio_sw, S_IRUGO, hotkey_radio_sw_show, NULL);
+static void hotkey_radio_sw_notify_change(void)
+{
+ if (tp_features.hotkey_wlsw)
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+ "hotkey_radio_sw");
+}
+
/* sysfs hotkey report_mode -------------------------------------------- */
static ssize_t hotkey_report_mode_show(struct device *dev,
struct device_attribute *attr,
@@ -937,43 +1678,132 @@ static ssize_t hotkey_report_mode_show(struct device *dev,
static struct device_attribute dev_attr_hotkey_report_mode =
__ATTR(hotkey_report_mode, S_IRUGO, hotkey_report_mode_show, NULL);
+/* sysfs wakeup reason (pollable) -------------------------------------- */
+static ssize_t hotkey_wakeup_reason_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason);
+}
+
+static struct device_attribute dev_attr_hotkey_wakeup_reason =
+ __ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL);
+
+void hotkey_wakeup_reason_notify_change(void)
+{
+ if (tp_features.hotkey_mask)
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+ "wakeup_reason");
+}
+
+/* sysfs wakeup hotunplug_complete (pollable) -------------------------- */
+static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack);
+}
+
+static struct device_attribute dev_attr_hotkey_wakeup_hotunplug_complete =
+ __ATTR(wakeup_hotunplug_complete, S_IRUGO,
+ hotkey_wakeup_hotunplug_complete_show, NULL);
+
+void hotkey_wakeup_hotunplug_complete_notify_change(void)
+{
+ if (tp_features.hotkey_mask)
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+ "wakeup_hotunplug_complete");
+}
+
/* --------------------------------------------------------------------- */
static struct attribute *hotkey_attributes[] __initdata = {
&dev_attr_hotkey_enable.attr,
+ &dev_attr_hotkey_bios_enabled.attr,
&dev_attr_hotkey_report_mode.attr,
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ &dev_attr_hotkey_mask.attr,
+ &dev_attr_hotkey_all_mask.attr,
+ &dev_attr_hotkey_recommended_mask.attr,
+ &dev_attr_hotkey_source_mask.attr,
+ &dev_attr_hotkey_poll_freq.attr,
+#endif
};
static struct attribute *hotkey_mask_attributes[] __initdata = {
- &dev_attr_hotkey_mask.attr,
- &dev_attr_hotkey_bios_enabled.attr,
&dev_attr_hotkey_bios_mask.attr,
+#ifndef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ &dev_attr_hotkey_mask.attr,
&dev_attr_hotkey_all_mask.attr,
&dev_attr_hotkey_recommended_mask.attr,
+#endif
+ &dev_attr_hotkey_wakeup_reason.attr,
+ &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
};
static int __init hotkey_init(struct ibm_init_struct *iibm)
{
-
+ /* Requirements for changing the default keymaps:
+ *
+ * 1. Many of the keys are mapped to KEY_RESERVED for very
+ * good reasons. Do not change them unless you have deep
+ * knowledge of the IBM and Lenovo ThinkPad firmware for
+ * the various ThinkPad models. The driver behaves
+ * differently for KEY_RESERVED: such keys have their
+ * hot key mask *unset* in mask_recommended, and also
+ * in the initial hot key mask programmed into the
+ * firmware at driver load time, which means the firm-
+ * ware may react very differently if you change them to
+ * something else;
+ *
+ * 2. You must be subscribed to the linux-thinkpad and
+ * ibm-acpi-devel mailing lists, and you should read the
+ * list archives since 2007 if you want to change the
+ * keymaps. This requirement exists so that you will
+ * know the past history of problems with the thinkpad-
+ * acpi driver keymaps, and also that you will be
+ * listening to any bug reports;
+ *
+ * 3. Do not send thinkpad-acpi specific patches directly
+ * for merging, *ever*. Send them to the linux-acpi
+ * mailing list for comments. Merging is to be done only
+ * through acpi-test and the ACPI maintainer.
+ *
+ * If the above is too much to ask, don't change the keymap.
+ * Ask the thinkpad-acpi maintainer to do it, instead.
+ */
static u16 ibm_keycode_map[] __initdata = {
/* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
KEY_FN_F1, KEY_FN_F2, KEY_COFFEE, KEY_SLEEP,
KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
- /* Scan codes 0x0C to 0x0F: Other ACPI HKEY hot keys */
+
+ /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
KEY_UNKNOWN, /* 0x0C: FN+BACKSPACE */
KEY_UNKNOWN, /* 0x0D: FN+INSERT */
KEY_UNKNOWN, /* 0x0E: FN+DELETE */
+
+ /* brightness: firmware always reacts to them, unless
+ * X.org did some tricks in the radeon BIOS scratch
+ * registers of *some* models */
KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
- /* Scan codes 0x10 to 0x1F: Extended ACPI HKEY hot keys */
KEY_RESERVED, /* 0x10: FN+END (brightness down) */
+
+ /* Thinklight: firmware always reacts to it */
KEY_RESERVED, /* 0x11: FN+PGUP (thinklight toggle) */
+
KEY_UNKNOWN, /* 0x12: FN+PGDOWN */
KEY_ZOOM, /* 0x13: FN+SPACE (zoom) */
+
+ /* Volume: firmware always reacts to it and reprograms
+ * the built-in *extra* mixer. Never map it to control
+ * another mixer by default. */
KEY_RESERVED, /* 0x14: VOLUME UP */
KEY_RESERVED, /* 0x15: VOLUME DOWN */
KEY_RESERVED, /* 0x16: MUTE */
+
KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */
+
/* (assignments unknown, please report if found) */
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
@@ -983,20 +1813,37 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
KEY_FN_F1, KEY_COFFEE, KEY_BATTERY, KEY_SLEEP,
KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
- /* Scan codes 0x0C to 0x0F: Other ACPI HKEY hot keys */
+
+ /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
KEY_UNKNOWN, /* 0x0C: FN+BACKSPACE */
KEY_UNKNOWN, /* 0x0D: FN+INSERT */
KEY_UNKNOWN, /* 0x0E: FN+DELETE */
+
KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
- /* Scan codes 0x10 to 0x1F: Extended ACPI HKEY hot keys */
KEY_RESERVED, /* 0x10: FN+END (brightness down) */
+
KEY_RESERVED, /* 0x11: FN+PGUP (thinklight toggle) */
+
KEY_UNKNOWN, /* 0x12: FN+PGDOWN */
KEY_ZOOM, /* 0x13: FN+SPACE (zoom) */
+
+ /* Volume: z60/z61, T60 (BIOS version?): firmware always
+ * reacts to it and reprograms the built-in *extra* mixer.
+ * Never map it to control another mixer by default.
+ *
+ * T60?, T61, R60?, R61: firmware and EC try to send
+ * these over the regular keyboard, so these are no-ops,
+ * but there are still weird bugs re. MUTE, so do not
+ * change unless you get test reports from all Lenovo
+ * models. May cause the BIOS to interfere with the
+ * HDA mixer.
+ */
KEY_RESERVED, /* 0x14: VOLUME UP */
KEY_RESERVED, /* 0x15: VOLUME DOWN */
KEY_RESERVED, /* 0x16: MUTE */
+
KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */
+
/* (assignments unknown, please report if found) */
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
@@ -1013,10 +1860,17 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "initializing hotkey subdriver\n");
BUG_ON(!tpacpi_inputdev);
+ BUG_ON(tpacpi_inputdev->open != NULL ||
+ tpacpi_inputdev->close != NULL);
- IBM_ACPIHANDLE_INIT(hkey);
+ TPACPI_ACPIHANDLE_INIT(hkey);
mutex_init(&hotkey_mutex);
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ mutex_init(&hotkey_thread_mutex);
+ mutex_init(&hotkey_thread_data_mutex);
+#endif
+
/* hotkey not supported on 570 */
tp_features.hotkey = hkey_handle != NULL;
@@ -1024,7 +1878,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
str_supported(tp_features.hotkey));
if (tp_features.hotkey) {
- hotkey_dev_attributes = create_attr_set(8, NULL);
+ hotkey_dev_attributes = create_attr_set(12, NULL);
if (!hotkey_dev_attributes)
return -ENOMEM;
res = add_many_to_attr_set(hotkey_dev_attributes,
@@ -1038,15 +1892,15 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
for HKEY interface version 0x100 */
if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
if ((hkeyv >> 8) != 1) {
- printk(IBM_ERR "unknown version of the "
+ printk(TPACPI_ERR "unknown version of the "
"HKEY interface: 0x%x\n", hkeyv);
- printk(IBM_ERR "please report this to %s\n",
- IBM_MAIL);
+ printk(TPACPI_ERR "please report this to %s\n",
+ TPACPI_MAIL);
} else {
/*
* MHKV 0x100 in A31, R40, R40e,
* T4x, X31, and later
- * */
+ */
tp_features.hotkey_mask = 1;
}
}
@@ -1057,25 +1911,46 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (tp_features.hotkey_mask) {
if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
"MHKA", "qd")) {
- printk(IBM_ERR
+ printk(TPACPI_ERR
"missing MHKA handler, "
"please report this to %s\n",
- IBM_MAIL);
- hotkey_all_mask = 0x080cU; /* FN+F12, FN+F4, FN+F3 */
+ TPACPI_MAIL);
+ /* FN+F12, FN+F4, FN+F3 */
+ hotkey_all_mask = 0x080cU;
}
}
- res = hotkey_get(&hotkey_orig_status, &hotkey_orig_mask);
+ /* hotkey_source_mask *must* be zero for
+ * the first hotkey_mask_get */
+ res = hotkey_status_get(&hotkey_orig_status);
if (!res && tp_features.hotkey_mask) {
- res = add_many_to_attr_set(hotkey_dev_attributes,
- hotkey_mask_attributes,
- ARRAY_SIZE(hotkey_mask_attributes));
+ res = hotkey_mask_get();
+ hotkey_orig_mask = hotkey_mask;
+ if (!res) {
+ res = add_many_to_attr_set(
+ hotkey_dev_attributes,
+ hotkey_mask_attributes,
+ ARRAY_SIZE(hotkey_mask_attributes));
+ }
+ }
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ if (tp_features.hotkey_mask) {
+ hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
+ & ~hotkey_all_mask;
+ } else {
+ hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK;
}
+ vdbg_printk(TPACPI_DBG_INIT,
+ "hotkey source mask 0x%08x, polling freq %d\n",
+ hotkey_source_mask, hotkey_poll_freq);
+#endif
+
/* Not all thinkpads have a hardware radio switch */
if (!res && acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
tp_features.hotkey_wlsw = 1;
- printk(IBM_INFO
+ printk(TPACPI_INFO
"radio switch found; radios are %s\n",
enabled(status, 0));
res = add_to_attr_set(hotkey_dev_attributes,
@@ -1094,7 +1969,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE,
GFP_KERNEL);
if (!hotkey_keycode_map) {
- printk(IBM_ERR "failed to allocate memory for key map\n");
+ printk(TPACPI_ERR
+ "failed to allocate memory for key map\n");
return -ENOMEM;
}
@@ -1133,15 +2009,26 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
dbg_printk(TPACPI_DBG_INIT,
"enabling hot key handling\n");
- res = hotkey_set(1, (hotkey_all_mask & ~hotkey_reserved_mask)
- | hotkey_orig_mask);
+ res = hotkey_status_set(1);
if (res)
return res;
+ res = hotkey_mask_set(((hotkey_all_mask | hotkey_source_mask)
+ & ~hotkey_reserved_mask)
+ | hotkey_orig_mask);
+ if (res < 0 && res != -ENXIO)
+ return res;
dbg_printk(TPACPI_DBG_INIT,
"legacy hot key reporting over procfs %s\n",
(hotkey_report_mode < 2) ?
"enabled" : "disabled");
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ tpacpi_inputdev->open = &hotkey_inputdev_open;
+ tpacpi_inputdev->close = &hotkey_inputdev_close;
+
+ hotkey_poll_setup_safe(1);
+#endif
}
return (tp_features.hotkey)? 0 : 1;
@@ -1149,13 +2036,19 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
static void hotkey_exit(void)
{
- int res;
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ hotkey_poll_stop_sync();
+#endif
if (tp_features.hotkey) {
- dbg_printk(TPACPI_DBG_EXIT, "restoring original hotkey mask\n");
- res = hotkey_set(hotkey_orig_status, hotkey_orig_mask);
- if (res)
- printk(IBM_ERR "failed to restore hotkey to BIOS defaults\n");
+ dbg_printk(TPACPI_DBG_EXIT,
+ "restoring original hot key mask\n");
+ /* no short-circuit boolean operator below! */
+ if ((hotkey_mask_set(hotkey_orig_mask) |
+ hotkey_status_set(hotkey_orig_status)) != 0)
+ printk(TPACPI_ERR
+ "failed to restore hot key mask "
+ "to BIOS defaults\n");
}
if (hotkey_dev_attributes) {
@@ -1164,62 +2057,28 @@ static void hotkey_exit(void)
}
}
-static void tpacpi_input_send_key(unsigned int scancode,
- unsigned int keycode)
-{
- if (keycode != KEY_RESERVED) {
- mutex_lock(&tpacpi_inputdev_send_mutex);
-
- input_report_key(tpacpi_inputdev, keycode, 1);
- if (keycode == KEY_UNKNOWN)
- input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
- scancode);
- input_sync(tpacpi_inputdev);
-
- input_report_key(tpacpi_inputdev, keycode, 0);
- if (keycode == KEY_UNKNOWN)
- input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
- scancode);
- input_sync(tpacpi_inputdev);
-
- mutex_unlock(&tpacpi_inputdev_send_mutex);
- }
-}
-
-static void tpacpi_input_send_radiosw(void)
-{
- int wlsw;
-
- mutex_lock(&tpacpi_inputdev_send_mutex);
-
- if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw)) {
- input_report_switch(tpacpi_inputdev,
- SW_RADIO, !!wlsw);
- input_sync(tpacpi_inputdev);
- }
-
- mutex_unlock(&tpacpi_inputdev_send_mutex);
-}
-
static void hotkey_notify(struct ibm_struct *ibm, u32 event)
{
u32 hkey;
- unsigned int keycode, scancode;
+ unsigned int scancode;
int send_acpi_ev;
int ignore_acpi_ev;
+ int unk_ev;
if (event != 0x80) {
- printk(IBM_ERR "unknown HKEY notification event %d\n", event);
+ printk(TPACPI_ERR
+ "unknown HKEY notification event %d\n", event);
/* forward it to userspace, maybe it knows how to handle it */
- acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
- ibm->acpi->device->dev.bus_id,
- event, 0);
+ acpi_bus_generate_netlink_event(
+ ibm->acpi->device->pnp.device_class,
+ ibm->acpi->device->dev.bus_id,
+ event, 0);
return;
}
while (1) {
if (!acpi_evalf(hkey_handle, &hkey, "MHKP", "d")) {
- printk(IBM_ERR "failed to retrieve HKEY event\n");
+ printk(TPACPI_ERR "failed to retrieve HKEY event\n");
return;
}
@@ -1228,8 +2087,9 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
return;
}
- send_acpi_ev = 0;
+ send_acpi_ev = 1;
ignore_acpi_ev = 0;
+ unk_ev = 0;
switch (hkey >> 12) {
case 1:
@@ -1237,104 +2097,139 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
scancode = hkey & 0xfff;
if (scancode > 0 && scancode < 0x21) {
scancode--;
- keycode = hotkey_keycode_map[scancode];
- tpacpi_input_send_key(scancode, keycode);
+ if (!(hotkey_source_mask & (1 << scancode))) {
+ tpacpi_input_send_key(scancode);
+ send_acpi_ev = 0;
+ } else {
+ ignore_acpi_ev = 1;
+ }
} else {
- printk(IBM_ERR
- "hotkey 0x%04x out of range for keyboard map\n",
- hkey);
- send_acpi_ev = 1;
+ unk_ev = 1;
}
break;
- case 5:
- /* 0x5000-0x5FFF: LID */
- /* we don't handle it through this path, just
- * eat up known LID events */
- if (hkey != 0x5001 && hkey != 0x5002) {
- printk(IBM_ERR
- "unknown LID-related HKEY event: 0x%04x\n",
- hkey);
- send_acpi_ev = 1;
+ case 2:
+ /* Wakeup reason */
+ switch (hkey) {
+ case 0x2304: /* suspend, undock */
+ case 0x2404: /* hibernation, undock */
+ hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK;
+ ignore_acpi_ev = 1;
+ break;
+ case 0x2305: /* suspend, bay eject */
+ case 0x2405: /* hibernation, bay eject */
+ hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ;
+ ignore_acpi_ev = 1;
+ break;
+ default:
+ unk_ev = 1;
+ }
+ if (hotkey_wakeup_reason != TP_ACPI_WAKEUP_NONE) {
+ printk(TPACPI_INFO
+ "woke up due to a hot-unplug "
+ "request...\n");
+ hotkey_wakeup_reason_notify_change();
+ }
+ break;
+ case 3:
+ /* bay-related wakeups */
+ if (hkey == 0x3003) {
+ hotkey_autosleep_ack = 1;
+ printk(TPACPI_INFO
+ "bay ejected\n");
+ hotkey_wakeup_hotunplug_complete_notify_change();
} else {
+ unk_ev = 1;
+ }
+ break;
+ case 4:
+ /* dock-related wakeups */
+ if (hkey == 0x4003) {
+ hotkey_autosleep_ack = 1;
+ printk(TPACPI_INFO
+ "undocked\n");
+ hotkey_wakeup_hotunplug_complete_notify_change();
+ } else {
+ unk_ev = 1;
+ }
+ break;
+ case 5:
+ /* 0x5000-0x5FFF: human interface helpers */
+ switch (hkey) {
+ case 0x5010: /* Lenovo new BIOS: brightness changed */
+ case 0x5009: /* X61t: swivel up (tablet mode) */
+ case 0x500a: /* X61t: swivel down (normal mode) */
+ case 0x500b: /* X61t: tablet pen inserted into bay */
+ case 0x500c: /* X61t: tablet pen removed from bay */
+ break;
+ case 0x5001:
+ case 0x5002:
+ /* LID switch events. Do not propagate */
ignore_acpi_ev = 1;
+ break;
+ default:
+ unk_ev = 1;
}
break;
case 7:
/* 0x7000-0x7FFF: misc */
if (tp_features.hotkey_wlsw && hkey == 0x7000) {
tpacpi_input_send_radiosw();
+ hotkey_radio_sw_notify_change();
+ send_acpi_ev = 0;
break;
}
/* fallthrough to default */
default:
- /* case 2: dock-related */
- /* 0x2305 - T43 waking up due to bay lever eject while aslept */
- /* case 3: ultra-bay related. maybe bay in dock? */
- /* 0x3003 - T43 after wake up by bay lever eject (0x2305) */
- printk(IBM_NOTICE "unhandled HKEY event 0x%04x\n", hkey);
- send_acpi_ev = 1;
+ unk_ev = 1;
+ }
+ if (unk_ev) {
+ printk(TPACPI_NOTICE
+ "unhandled HKEY event 0x%04x\n", hkey);
}
/* Legacy events */
- if (!ignore_acpi_ev && (send_acpi_ev || hotkey_report_mode < 2)) {
- acpi_bus_generate_proc_event(ibm->acpi->device, event, hkey);
+ if (!ignore_acpi_ev &&
+ (send_acpi_ev || hotkey_report_mode < 2)) {
+ acpi_bus_generate_proc_event(ibm->acpi->device,
+ event, hkey);
}
/* netlink events */
if (!ignore_acpi_ev && send_acpi_ev) {
- acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
- ibm->acpi->device->dev.bus_id,
- event, hkey);
+ acpi_bus_generate_netlink_event(
+ ibm->acpi->device->pnp.device_class,
+ ibm->acpi->device->dev.bus_id,
+ event, hkey);
}
}
}
-static void hotkey_resume(void)
+static void hotkey_suspend(pm_message_t state)
{
- tpacpi_input_send_radiosw();
+ /* Do these on suspend; the events arrive during early resume! */
+ hotkey_wakeup_reason = TP_ACPI_WAKEUP_NONE;
+ hotkey_autosleep_ack = 0;
}
-/*
- * Call with hotkey_mutex held
- */
-static int hotkey_get(int *status, u32 *mask)
-{
- if (!acpi_evalf(hkey_handle, status, "DHKC", "d"))
- return -EIO;
-
- if (tp_features.hotkey_mask)
- if (!acpi_evalf(hkey_handle, mask, "DHKN", "d"))
- return -EIO;
-
- return 0;
-}
-
-/*
- * Call with hotkey_mutex held
- */
-static int hotkey_set(int status, u32 mask)
+static void hotkey_resume(void)
{
- int i;
-
- if (!acpi_evalf(hkey_handle, NULL, "MHKC", "vd", status))
- return -EIO;
-
- if (tp_features.hotkey_mask)
- for (i = 0; i < 32; i++) {
- int bit = ((1 << i) & mask) != 0;
- if (!acpi_evalf(hkey_handle,
- NULL, "MHKM", "vdd", i + 1, bit))
- return -EIO;
- }
-
- return 0;
+ if (hotkey_mask_get())
+ printk(TPACPI_ERR
+ "error while trying to read hot key mask "
+ "from firmware\n");
+ tpacpi_input_send_radiosw();
+ hotkey_radio_sw_notify_change();
+ hotkey_wakeup_reason_notify_change();
+ hotkey_wakeup_hotunplug_complete_notify_change();
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ hotkey_poll_setup_safe(0);
+#endif
}
/* procfs -------------------------------------------------------------- */
static int hotkey_read(char *p)
{
int res, status;
- u32 mask;
int len = 0;
if (!tp_features.hotkey) {
@@ -1344,14 +2239,16 @@ static int hotkey_read(char *p)
if (mutex_lock_interruptible(&hotkey_mutex))
return -ERESTARTSYS;
- res = hotkey_get(&status, &mask);
+ res = hotkey_status_get(&status);
+ if (!res)
+ res = hotkey_mask_get();
mutex_unlock(&hotkey_mutex);
if (res)
return res;
len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0));
if (tp_features.hotkey_mask) {
- len += sprintf(p + len, "mask:\t\t0x%08x\n", mask);
+ len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_mask);
len += sprintf(p + len,
"commands:\tenable, disable, reset, <mask>\n");
} else {
@@ -1367,7 +2264,6 @@ static int hotkey_write(char *buf)
int res, status;
u32 mask;
char *cmd;
- int do_cmd = 0;
if (!tp_features.hotkey)
return -ENODEV;
@@ -1375,9 +2271,8 @@ static int hotkey_write(char *buf)
if (mutex_lock_interruptible(&hotkey_mutex))
return -ERESTARTSYS;
- res = hotkey_get(&status, &mask);
- if (res)
- goto errexit;
+ status = -1;
+ mask = hotkey_mask;
res = 0;
while ((cmd = next_cmd(&buf))) {
@@ -1396,11 +2291,12 @@ static int hotkey_write(char *buf)
res = -EINVAL;
goto errexit;
}
- do_cmd = 1;
}
+ if (status != -1)
+ res = hotkey_status_set(status);
- if (do_cmd)
- res = hotkey_set(status, mask);
+ if (!res && mask != hotkey_mask)
+ res = hotkey_mask_set(mask);
errexit:
mutex_unlock(&hotkey_mutex);
@@ -1408,7 +2304,7 @@ errexit:
}
static const struct acpi_device_id ibm_htk_device_ids[] = {
- {IBM_HKEY_HID, 0},
+ {TPACPI_ACPI_HKEY_HID, 0},
{"", 0},
};
@@ -1425,6 +2321,7 @@ static struct ibm_struct hotkey_driver_data = {
.write = hotkey_write,
.exit = hotkey_exit,
.resume = hotkey_resume,
+ .suspend = hotkey_suspend,
.acpi = &ibm_hotkey_acpidriver,
};
@@ -1432,6 +2329,16 @@ static struct ibm_struct hotkey_driver_data = {
* Bluetooth subdriver
*/
+enum {
+ /* ACPI GBDC/SBDC bits */
+ TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */
+ TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */
+ TP_ACPI_BLUETOOTH_UNK = 0x04, /* unknown function */
+};
+
+static int bluetooth_get_radiosw(void);
+static int bluetooth_set_radiosw(int radio_on);
+
/* sysfs bluetooth enable ---------------------------------------------- */
static ssize_t bluetooth_enable_show(struct device *dev,
struct device_attribute *attr,
@@ -1483,7 +2390,7 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "initializing bluetooth subdriver\n");
- IBM_ACPIHANDLE_INIT(hkey);
+ TPACPI_ACPIHANDLE_INIT(hkey);
/* bluetooth not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
G4x, R30, R31, R40e, R50e, T20-22, X20-21 */
@@ -1596,6 +2503,16 @@ static struct ibm_struct bluetooth_driver_data = {
* Wan subdriver
*/
+enum {
+ /* ACPI GWAN/SWAN bits */
+ TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */
+ TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */
+ TP_ACPI_WANCARD_UNK = 0x04, /* unknown function */
+};
+
+static int wan_get_radiosw(void);
+static int wan_set_radiosw(int radio_on);
+
/* sysfs wan enable ---------------------------------------------------- */
static ssize_t wan_enable_show(struct device *dev,
struct device_attribute *attr,
@@ -1647,7 +2564,7 @@ static int __init wan_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "initializing wan subdriver\n");
- IBM_ACPIHANDLE_INIT(hkey);
+ TPACPI_ACPIHANDLE_INIT(hkey);
tp_features.wan = hkey_handle &&
acpi_evalf(hkey_handle, &status, "GWAN", "qd");
@@ -1759,17 +2676,41 @@ static struct ibm_struct wan_driver_data = {
* Video subdriver
*/
+enum video_access_mode {
+ TPACPI_VIDEO_NONE = 0,
+ TPACPI_VIDEO_570, /* 570 */
+ TPACPI_VIDEO_770, /* 600e/x, 770e, 770x */
+ TPACPI_VIDEO_NEW, /* all others */
+};
+
+enum { /* video status flags, based on VIDEO_570 */
+ TP_ACPI_VIDEO_S_LCD = 0x01, /* LCD output enabled */
+ TP_ACPI_VIDEO_S_CRT = 0x02, /* CRT output enabled */
+ TP_ACPI_VIDEO_S_DVI = 0x08, /* DVI output enabled */
+};
+
+enum { /* TPACPI_VIDEO_570 constants */
+ TP_ACPI_VIDEO_570_PHSCMD = 0x87, /* unknown magic constant :( */
+ TP_ACPI_VIDEO_570_PHSMASK = 0x03, /* PHS bits that map to
+ * video_status_flags */
+ TP_ACPI_VIDEO_570_PHS2CMD = 0x8b, /* unknown magic constant :( */
+ TP_ACPI_VIDEO_570_PHS2SET = 0x80, /* unknown magic constant :( */
+};
+
static enum video_access_mode video_supported;
static int video_orig_autosw;
-IBM_HANDLE(vid, root, "\\_SB.PCI.AGP.VGA", /* 570 */
+static int video_autosw_get(void);
+static int video_autosw_set(int enable);
+
+TPACPI_HANDLE(vid, root, "\\_SB.PCI.AGP.VGA", /* 570 */
"\\_SB.PCI0.AGP0.VID0", /* 600e/x, 770x */
"\\_SB.PCI0.VID0", /* 770e */
"\\_SB.PCI0.VID", /* A21e, G4x, R50e, X30, X40 */
"\\_SB.PCI0.AGP.VID", /* all others */
); /* R30, R31 */
-IBM_HANDLE(vid2, root, "\\_SB.PCI0.AGPB.VID"); /* G41 */
+TPACPI_HANDLE(vid2, root, "\\_SB.PCI0.AGPB.VID"); /* G41 */
static int __init video_init(struct ibm_init_struct *iibm)
{
@@ -1777,8 +2718,8 @@ static int __init video_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "initializing video subdriver\n");
- IBM_ACPIHANDLE_INIT(vid);
- IBM_ACPIHANDLE_INIT(vid2);
+ TPACPI_ACPIHANDLE_INIT(vid);
+ TPACPI_ACPIHANDLE_INIT(vid2);
if (vid2_handle && acpi_evalf(NULL, &ivga, "\\IVGA", "d") && ivga)
/* G41, assume IVGA doesn't change */
@@ -1809,7 +2750,7 @@ static void video_exit(void)
dbg_printk(TPACPI_DBG_EXIT,
"restoring original video autoswitch mode\n");
if (video_autosw_set(video_orig_autosw))
- printk(IBM_ERR "error while trying to restore original "
+ printk(TPACPI_ERR "error while trying to restore original "
"video autoswitch mode\n");
}
@@ -1882,13 +2823,14 @@ static int video_outputsw_set(int status)
res = acpi_evalf(vid_handle, NULL,
"ASWT", "vdd", status * 0x100, 0);
if (!autosw && video_autosw_set(autosw)) {
- printk(IBM_ERR "video auto-switch left enabled due to error\n");
+ printk(TPACPI_ERR
+ "video auto-switch left enabled due to error\n");
return -EIO;
}
break;
case TPACPI_VIDEO_NEW:
res = acpi_evalf(NULL, NULL, "\\VUPS", "vd", 0x80) &&
- acpi_evalf(NULL, NULL, "\\VSDS", "vdd", status, 1);
+ acpi_evalf(NULL, NULL, "\\VSDS", "vdd", status, 1);
break;
default:
return -ENOSYS;
@@ -1951,7 +2893,8 @@ static int video_outputsw_cycle(void)
return -ENOSYS;
}
if (!autosw && video_autosw_set(autosw)) {
- printk(IBM_ERR "video auto-switch left enabled due to error\n");
+ printk(TPACPI_ERR
+ "video auto-switch left enabled due to error\n");
return -EIO;
}
@@ -2080,16 +3023,16 @@ static struct ibm_struct video_driver_data = {
* Light (thinklight) subdriver
*/
-IBM_HANDLE(lght, root, "\\LGHT"); /* A21e, A2xm/p, T20-22, X20-21 */
-IBM_HANDLE(ledb, ec, "LEDB"); /* G4x */
+TPACPI_HANDLE(lght, root, "\\LGHT"); /* A21e, A2xm/p, T20-22, X20-21 */
+TPACPI_HANDLE(ledb, ec, "LEDB"); /* G4x */
static int __init light_init(struct ibm_init_struct *iibm)
{
vdbg_printk(TPACPI_DBG_INIT, "initializing light subdriver\n");
- IBM_ACPIHANDLE_INIT(ledb);
- IBM_ACPIHANDLE_INIT(lght);
- IBM_ACPIHANDLE_INIT(cmos);
+ TPACPI_ACPIHANDLE_INIT(ledb);
+ TPACPI_ACPIHANDLE_INIT(lght);
+ TPACPI_ACPIHANDLE_INIT(cmos);
/* light not supported on 570, 600e/x, 770e, 770x, G4x, R30, R31 */
tp_features.light = (cmos_handle || lght_handle) && !ledb_handle;
@@ -2167,14 +3110,18 @@ static struct ibm_struct light_driver_data = {
#ifdef CONFIG_THINKPAD_ACPI_DOCK
-IBM_HANDLE(dock, root, "\\_SB.GDCK", /* X30, X31, X40 */
+static void dock_notify(struct ibm_struct *ibm, u32 event);
+static int dock_read(char *p);
+static int dock_write(char *buf);
+
+TPACPI_HANDLE(dock, root, "\\_SB.GDCK", /* X30, X31, X40 */
"\\_SB.PCI0.DOCK", /* 600e/x,770e,770x,A2xm/p,T20-22,X20-21 */
"\\_SB.PCI0.PCI1.DOCK", /* all others */
"\\_SB.PCI.ISA.SLCE", /* 570 */
); /* A21e,G4x,R30,R31,R32,R40,R40e,R50e */
/* don't list other alternatives as we install a notify handler on the 570 */
-IBM_HANDLE(pci, root, "\\_SB.PCI"); /* 570 */
+TPACPI_HANDLE(pci, root, "\\_SB.PCI"); /* 570 */
static const struct acpi_device_id ibm_pci_device_ids[] = {
{PCI_ROOT_HID_STRING, 0},
@@ -2217,7 +3164,7 @@ static int __init dock_init(struct ibm_init_struct *iibm)
{
vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver\n");
- IBM_ACPIHANDLE_INIT(dock);
+ TPACPI_ACPIHANDLE_INIT(dock);
vdbg_printk(TPACPI_DBG_INIT, "dock is %s\n",
str_supported(dock_handle != NULL));
@@ -2233,7 +3180,7 @@ static int __init dock_init2(struct ibm_init_struct *iibm)
if (dock_driver_data[0].flags.acpi_driver_registered &&
dock_driver_data[0].flags.acpi_notify_installed) {
- IBM_ACPIHANDLE_INIT(pci);
+ TPACPI_ACPIHANDLE_INIT(pci);
dock2_needed = (pci_handle != NULL);
vdbg_printk(TPACPI_DBG_INIT,
"dock PCI handler for the TP 570 is %s\n",
@@ -2265,7 +3212,7 @@ static void dock_notify(struct ibm_struct *ibm, u32 event)
else if (event == 0 && docked)
data = 3; /* dock */
else {
- printk(IBM_ERR "unknown dock event %d, status %d\n",
+ printk(TPACPI_ERR "unknown dock event %d, status %d\n",
event, _sta(dock_handle));
data = 0; /* unknown */
}
@@ -2321,18 +3268,19 @@ static int dock_write(char *buf)
*/
#ifdef CONFIG_THINKPAD_ACPI_BAY
-IBM_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST", /* 570 */
+
+TPACPI_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST", /* 570 */
"\\_SB.PCI0.IDE0.IDES.IDSM", /* 600e/x, 770e, 770x */
"\\_SB.PCI0.SATA.SCND.MSTR", /* T60, X60, Z60 */
"\\_SB.PCI0.IDE0.SCND.MSTR", /* all others */
); /* A21e, R30, R31 */
-IBM_HANDLE(bay_ej, bay, "_EJ3", /* 600e/x, A2xm/p, A3x */
+TPACPI_HANDLE(bay_ej, bay, "_EJ3", /* 600e/x, A2xm/p, A3x */
"_EJ0", /* all others */
); /* 570,A21e,G4x,R30,R31,R32,R40e,R50e */
-IBM_HANDLE(bay2, root, "\\_SB.PCI0.IDE0.PRIM.SLAV", /* A3x, R32 */
+TPACPI_HANDLE(bay2, root, "\\_SB.PCI0.IDE0.PRIM.SLAV", /* A3x, R32 */
"\\_SB.PCI0.IDE0.IDEP.IDPS", /* 600e/x, 770e, 770x */
); /* all others */
-IBM_HANDLE(bay2_ej, bay2, "_EJ3", /* 600e/x, 770e, A3x */
+TPACPI_HANDLE(bay2_ej, bay2, "_EJ3", /* 600e/x, 770e, A3x */
"_EJ0", /* 770x */
); /* all others */
@@ -2340,12 +3288,12 @@ static int __init bay_init(struct ibm_init_struct *iibm)
{
vdbg_printk(TPACPI_DBG_INIT, "initializing bay subdriver\n");
- IBM_ACPIHANDLE_INIT(bay);
+ TPACPI_ACPIHANDLE_INIT(bay);
if (bay_handle)
- IBM_ACPIHANDLE_INIT(bay_ej);
- IBM_ACPIHANDLE_INIT(bay2);
+ TPACPI_ACPIHANDLE_INIT(bay_ej);
+ TPACPI_ACPIHANDLE_INIT(bay2);
if (bay2_handle)
- IBM_ACPIHANDLE_INIT(bay2_ej);
+ TPACPI_ACPIHANDLE_INIT(bay2_ej);
tp_features.bay_status = bay_handle &&
acpi_evalf(bay_handle, NULL, "_STA", "qv");
@@ -2474,7 +3422,7 @@ static int __init cmos_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT,
"initializing cmos commands subdriver\n");
- IBM_ACPIHANDLE_INIT(cmos);
+ TPACPI_ACPIHANDLE_INIT(cmos);
vdbg_printk(TPACPI_DBG_INIT, "cmos commands are %s\n",
str_supported(cmos_handle != NULL));
@@ -2538,10 +3486,24 @@ static struct ibm_struct cmos_driver_data = {
* LED subdriver
*/
+enum led_access_mode {
+ TPACPI_LED_NONE = 0,
+ TPACPI_LED_570, /* 570 */
+ TPACPI_LED_OLD, /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */
+ TPACPI_LED_NEW, /* all others */
+};
+
+enum { /* For TPACPI_LED_OLD */
+ TPACPI_LED_EC_HLCL = 0x0c, /* EC reg to get led to power on */
+ TPACPI_LED_EC_HLBL = 0x0d, /* EC reg to blink a lit led */
+ TPACPI_LED_EC_HLMS = 0x0e, /* EC reg to select led to command */
+};
+
static enum led_access_mode led_supported;
-IBM_HANDLE(led, ec, "SLED", /* 570 */
- "SYSL", /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */
+TPACPI_HANDLE(led, ec, "SLED", /* 570 */
+ "SYSL", /* 600e/x, 770e, 770x, A21e, A2xm/p, */
+ /* T20-22, X20-21 */
"LED", /* all others */
); /* R30, R31 */
@@ -2549,7 +3511,7 @@ static int __init led_init(struct ibm_init_struct *iibm)
{
vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n");
- IBM_ACPIHANDLE_INIT(led);
+ TPACPI_ACPIHANDLE_INIT(led);
if (!led_handle)
/* led not supported on R30, R31 */
@@ -2638,13 +3600,11 @@ static int led_write(char *buf)
led = 1 << led;
ret = ec_write(TPACPI_LED_EC_HLMS, led);
if (ret >= 0)
- ret =
- ec_write(TPACPI_LED_EC_HLBL,
- led * led_exp_hlbl[ind]);
+ ret = ec_write(TPACPI_LED_EC_HLBL,
+ led * led_exp_hlbl[ind]);
if (ret >= 0)
- ret =
- ec_write(TPACPI_LED_EC_HLCL,
- led * led_exp_hlcl[ind]);
+ ret = ec_write(TPACPI_LED_EC_HLCL,
+ led * led_exp_hlcl[ind]);
if (ret < 0)
return ret;
} else {
@@ -2668,13 +3628,13 @@ static struct ibm_struct led_driver_data = {
* Beep subdriver
*/
-IBM_HANDLE(beep, ec, "BEEP"); /* all except R30, R31 */
+TPACPI_HANDLE(beep, ec, "BEEP"); /* all except R30, R31 */
static int __init beep_init(struct ibm_init_struct *iibm)
{
vdbg_printk(TPACPI_DBG_INIT, "initializing beep subdriver\n");
- IBM_ACPIHANDLE_INIT(beep);
+ TPACPI_ACPIHANDLE_INIT(beep);
vdbg_printk(TPACPI_DBG_INIT, "beep is %s\n",
str_supported(beep_handle != NULL));
@@ -2727,8 +3687,109 @@ static struct ibm_struct beep_driver_data = {
* Thermal subdriver
*/
+enum thermal_access_mode {
+ TPACPI_THERMAL_NONE = 0, /* No thermal support */
+ TPACPI_THERMAL_ACPI_TMP07, /* Use ACPI TMP0-7 */
+ TPACPI_THERMAL_ACPI_UPDT, /* Use ACPI TMP0-7 with UPDT */
+ TPACPI_THERMAL_TPEC_8, /* Use ACPI EC regs, 8 sensors */
+ TPACPI_THERMAL_TPEC_16, /* Use ACPI EC regs, 16 sensors */
+};
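+
+/* Of the modes above, ACPI_TMP07 evaluates the EC's TMP0..TMP7 methods
+ * directly, ACPI_UPDT calls UPDT first and converts the result from
+ * deci-Kelvin, and the TPEC modes read the temperatures straight from
+ * EC registers (0x78..0x7f, plus 0xc0..0xc7 for 16 sensors). */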
+
+enum { /* TPACPI_THERMAL_TPEC_* */
+ TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */
+ TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */
+ TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */
+};
+
+#define TPACPI_MAX_THERMAL_SENSORS 16 /* Max thermal sensors supported */
+struct ibm_thermal_sensors_struct {
+ s32 temp[TPACPI_MAX_THERMAL_SENSORS];
+};
+
static enum thermal_access_mode thermal_read_mode;
+/* idx is zero-based */
+static int thermal_get_sensor(int idx, s32 *value)
+{
+ int t;
+ s8 tmp;
+ char tmpi[5];
+
+ t = TP_EC_THERMAL_TMP0;
+
+ switch (thermal_read_mode) {
+#if TPACPI_MAX_THERMAL_SENSORS >= 16
+ case TPACPI_THERMAL_TPEC_16:
+ if (idx >= 8 && idx <= 15) {
+ t = TP_EC_THERMAL_TMP8;
+ idx -= 8;
+ }
+ /* fallthrough */
+#endif
+ case TPACPI_THERMAL_TPEC_8:
+ if (idx <= 7) {
+ if (!acpi_ec_read(t + idx, &tmp))
+ return -EIO;
+ *value = tmp * 1000;
+ return 0;
+ }
+ break;
+
+ case TPACPI_THERMAL_ACPI_UPDT:
+ if (idx <= 7) {
+ snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
+ if (!acpi_evalf(ec_handle, NULL, "UPDT", "v"))
+ return -EIO;
+ if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
+ return -EIO;
+ *value = (t - 2732) * 100;
+ return 0;
+ }
+ break;
+
+ case TPACPI_THERMAL_ACPI_TMP07:
+ if (idx <= 7) {
+ snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
+ if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
+ return -EIO;
+ if (t > 127 || t < -127)
+ t = TP_EC_THERMAL_TMP_NA;
+ *value = t * 1000;
+ return 0;
+ }
+ break;
+
+ case TPACPI_THERMAL_NONE:
+ default:
+ return -ENOSYS;
+ }
+
+ return -EINVAL;
+}
+
+static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s)
+{
+ int res, i;
+ int n;
+
+ n = 8;
+ i = 0;
+
+ if (!s)
+ return -EINVAL;
+
+ if (thermal_read_mode == TPACPI_THERMAL_TPEC_16)
+ n = 16;
+
+ for (i = 0 ; i < n; i++) {
+ res = thermal_get_sensor(i, &s->temp[i]);
+ if (res)
+ return res;
+ }
+
+ return n;
+}
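+/* Both helpers above report temperatures in millidegrees Celsius;
+ * thermal_get_sensors() fills s->temp[] and returns the number of
+ * sensors read, which depends on the active thermal_read_mode. */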
+
/* sysfs temp##_input -------------------------------------------------- */
static ssize_t thermal_temp_input_show(struct device *dev,
@@ -2751,7 +3812,8 @@ static ssize_t thermal_temp_input_show(struct device *dev,
}
#define THERMAL_SENSOR_ATTR_TEMP(_idxA, _idxB) \
- SENSOR_ATTR(temp##_idxA##_input, S_IRUGO, thermal_temp_input_show, NULL, _idxB)
+ SENSOR_ATTR(temp##_idxA##_input, S_IRUGO, \
+ thermal_temp_input_show, NULL, _idxB)
static struct sensor_device_attribute sensor_dev_attr_thermal_temp_input[] = {
THERMAL_SENSOR_ATTR_TEMP(1, 0),
@@ -2845,12 +3907,13 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
if (ta1 == 0) {
/* This is sheer paranoia, but we handle it anyway */
if (acpi_tmp7) {
- printk(IBM_ERR
+ printk(TPACPI_ERR
"ThinkPad ACPI EC access misbehaving, "
- "falling back to ACPI TMPx access mode\n");
+ "falling back to ACPI TMPx access "
+ "mode\n");
thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
} else {
- printk(IBM_ERR
+ printk(TPACPI_ERR
"ThinkPad ACPI EC access misbehaving, "
"disabling thermal sensors access\n");
thermal_read_mode = TPACPI_THERMAL_NONE;
@@ -2877,7 +3940,7 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
str_supported(thermal_read_mode != TPACPI_THERMAL_NONE),
thermal_read_mode);
- switch(thermal_read_mode) {
+ switch (thermal_read_mode) {
case TPACPI_THERMAL_TPEC_16:
res = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
&thermal_temp_input16_group);
@@ -2902,7 +3965,7 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
static void thermal_exit(void)
{
- switch(thermal_read_mode) {
+ switch (thermal_read_mode) {
case TPACPI_THERMAL_TPEC_16:
sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
&thermal_temp_input16_group);
@@ -2919,88 +3982,6 @@ static void thermal_exit(void)
}
}
-/* idx is zero-based */
-static int thermal_get_sensor(int idx, s32 *value)
-{
- int t;
- s8 tmp;
- char tmpi[5];
-
- t = TP_EC_THERMAL_TMP0;
-
- switch (thermal_read_mode) {
-#if TPACPI_MAX_THERMAL_SENSORS >= 16
- case TPACPI_THERMAL_TPEC_16:
- if (idx >= 8 && idx <= 15) {
- t = TP_EC_THERMAL_TMP8;
- idx -= 8;
- }
- /* fallthrough */
-#endif
- case TPACPI_THERMAL_TPEC_8:
- if (idx <= 7) {
- if (!acpi_ec_read(t + idx, &tmp))
- return -EIO;
- *value = tmp * 1000;
- return 0;
- }
- break;
-
- case TPACPI_THERMAL_ACPI_UPDT:
- if (idx <= 7) {
- snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
- if (!acpi_evalf(ec_handle, NULL, "UPDT", "v"))
- return -EIO;
- if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
- return -EIO;
- *value = (t - 2732) * 100;
- return 0;
- }
- break;
-
- case TPACPI_THERMAL_ACPI_TMP07:
- if (idx <= 7) {
- snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
- if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
- return -EIO;
- if (t > 127 || t < -127)
- t = TP_EC_THERMAL_TMP_NA;
- *value = t * 1000;
- return 0;
- }
- break;
-
- case TPACPI_THERMAL_NONE:
- default:
- return -ENOSYS;
- }
-
- return -EINVAL;
-}
-
-static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s)
-{
- int res, i;
- int n;
-
- n = 8;
- i = 0;
-
- if (!s)
- return -EINVAL;
-
- if (thermal_read_mode == TPACPI_THERMAL_TPEC_16)
- n = 16;
-
- for(i = 0 ; i < n; i++) {
- res = thermal_get_sensor(i, &s->temp[i]);
- if (res)
- return res;
- }
-
- return n;
-}
-
static int thermal_read(char *p)
{
int len = 0;
@@ -3103,14 +4084,110 @@ static struct ibm_struct ecdump_driver_data = {
* Backlight/brightness subdriver
*/
+#define TPACPI_BACKLIGHT_DEV_NAME "thinkpad_screen"
+
static struct backlight_device *ibm_backlight_device;
+static int brightness_offset = 0x31;
+static int brightness_mode;
+static unsigned int brightness_enable = 2; /* 2 = auto, 0 = no, 1 = yes */
+
+static struct mutex brightness_mutex;
+
+/*
+ * ThinkPads can read brightness from two places: EC 0x31, or
+ * CMOS NVRAM byte 0x5E, bits 0-3.
+ */
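+/*
+ * Which source is consulted is chosen by brightness_mode: bit 0 reads
+ * the EC register, bit 1 reads CMOS NVRAM, and mode 3 reads both and
+ * fails with -EIO when the two values disagree.
+ */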
+static int brightness_get(struct backlight_device *bd)
+{
+ u8 lec = 0, lcmos = 0, level = 0;
+
+ if (brightness_mode & 1) {
+ if (!acpi_ec_read(brightness_offset, &lec))
+ return -EIO;
+ lec &= (tp_features.bright_16levels)? 0x0f : 0x07;
+ level = lec;
+ }
+ if (brightness_mode & 2) {
+ lcmos = (nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS)
+ & TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
+ >> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
+ lcmos &= (tp_features.bright_16levels)? 0x0f : 0x07;
+ level = lcmos;
+ }
+
+ if (brightness_mode == 3 && lec != lcmos) {
+ printk(TPACPI_ERR
+ "CMOS NVRAM (%u) and EC (%u) do not agree "
+ "on display brightness level\n",
+ (unsigned int) lcmos,
+ (unsigned int) lec);
+ return -EIO;
+ }
+
+ return level;
+}
+
+/* May return EINTR which can always be mapped to ERESTARTSYS */
+static int brightness_set(int value)
+{
+ int cmos_cmd, inc, i, res;
+ int current_value;
+
+ if (value > ((tp_features.bright_16levels)? 15 : 7))
+ return -EINVAL;
+
+ res = mutex_lock_interruptible(&brightness_mutex);
+ if (res < 0)
+ return res;
+
+ current_value = brightness_get(NULL);
+ if (current_value < 0) {
+ res = current_value;
+ goto errout;
+ }
+
+ cmos_cmd = value > current_value ?
+ TP_CMOS_BRIGHTNESS_UP :
+ TP_CMOS_BRIGHTNESS_DOWN;
+ inc = (value > current_value)? 1 : -1;
+
+ res = 0;
+ for (i = current_value; i != value; i += inc) {
+ if ((brightness_mode & 2) &&
+ issue_thinkpad_cmos_command(cmos_cmd)) {
+ res = -EIO;
+ goto errout;
+ }
+ if ((brightness_mode & 1) &&
+ !acpi_ec_write(brightness_offset, i + inc)) {
+ res = -EIO;
+ goto errout;
+ }
+ }
+
+errout:
+ mutex_unlock(&brightness_mutex);
+ return res;
+}
+
+/* sysfs backlight class ----------------------------------------------- */
+
+static int brightness_update_status(struct backlight_device *bd)
+{
+ /* it is the backlight class's job (caller) to handle
+ * EINTR and other errors properly */
+ return brightness_set(
+ (bd->props.fb_blank == FB_BLANK_UNBLANK &&
+ bd->props.power == FB_BLANK_UNBLANK) ?
+ bd->props.brightness : 0);
+}
static struct backlight_ops ibm_backlight_data = {
- .get_brightness = brightness_get,
- .update_status = brightness_update_status,
+ .get_brightness = brightness_get,
+ .update_status = brightness_update_status,
};
-static struct mutex brightness_mutex;
+/* --------------------------------------------------------------------- */
static int __init tpacpi_query_bcll_levels(acpi_handle handle)
{
@@ -3121,8 +4198,8 @@ static int __init tpacpi_query_bcll_levels(acpi_handle handle)
if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) {
obj = (union acpi_object *)buffer.pointer;
if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
- printk(IBM_ERR "Unknown BCLL data, "
- "please report this to %s\n", IBM_MAIL);
+ printk(TPACPI_ERR "Unknown BCLL data, "
+ "please report this to %s\n", TPACPI_MAIL);
rc = 0;
} else {
rc = obj->package.count;
@@ -3160,14 +4237,15 @@ static int __init brightness_check_levels(void)
void *found_node = NULL;
if (!vid_handle) {
- IBM_ACPIHANDLE_INIT(vid);
+ TPACPI_ACPIHANDLE_INIT(vid);
}
if (!vid_handle)
return 0;
/* Search for a BCLL package with 16 levels */
status = acpi_walk_namespace(ACPI_TYPE_PACKAGE, vid_handle, 3,
- brightness_find_bcll, NULL, &found_node);
+ brightness_find_bcll, NULL,
+ &found_node);
return (ACPI_SUCCESS(status) && found_node != NULL);
}
@@ -3193,14 +4271,14 @@ static int __init brightness_check_std_acpi_support(void)
void *found_node = NULL;
if (!vid_handle) {
- IBM_ACPIHANDLE_INIT(vid);
+ TPACPI_ACPIHANDLE_INIT(vid);
}
if (!vid_handle)
return 0;
/* Search for a _BCL method, but don't execute it */
status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3,
- brightness_find_bcl, NULL, &found_node);
+ brightness_find_bcl, NULL, &found_node);
return (ACPI_SUCCESS(status) && found_node != NULL);
}
@@ -3215,12 +4293,14 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
if (!brightness_enable) {
dbg_printk(TPACPI_DBG_INIT,
- "brightness support disabled by module parameter\n");
+ "brightness support disabled by "
+ "module parameter\n");
return 1;
} else if (brightness_enable > 1) {
if (brightness_check_std_acpi_support()) {
- printk(IBM_NOTICE
- "standard ACPI backlight interface available, not loading native one...\n");
+ printk(TPACPI_NOTICE
+ "standard ACPI backlight interface "
+ "available, not loading native one...\n");
return 1;
}
}
@@ -3247,13 +4327,14 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
return 1;
if (tp_features.bright_16levels)
- printk(IBM_INFO "detected a 16-level brightness capable ThinkPad\n");
+ printk(TPACPI_INFO
+ "detected a 16-level brightness capable ThinkPad\n");
ibm_backlight_device = backlight_device_register(
TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL,
&ibm_backlight_data);
if (IS_ERR(ibm_backlight_device)) {
- printk(IBM_ERR "Could not register backlight device\n");
+ printk(TPACPI_ERR "Could not register backlight device\n");
return PTR_ERR(ibm_backlight_device);
}
vdbg_printk(TPACPI_DBG_INIT, "brightness is supported\n");
@@ -3276,99 +4357,13 @@ static void brightness_exit(void)
}
}
-static int brightness_update_status(struct backlight_device *bd)
-{
- /* it is the backlight class's job (caller) to handle
- * EINTR and other errors properly */
- return brightness_set(
- (bd->props.fb_blank == FB_BLANK_UNBLANK &&
- bd->props.power == FB_BLANK_UNBLANK) ?
- bd->props.brightness : 0);
-}
-
-/*
- * ThinkPads can read brightness from two places: EC 0x31, or
- * CMOS NVRAM byte 0x5E, bits 0-3.
- */
-static int brightness_get(struct backlight_device *bd)
-{
- u8 lec = 0, lcmos = 0, level = 0;
-
- if (brightness_mode & 1) {
- if (!acpi_ec_read(brightness_offset, &lec))
- return -EIO;
- lec &= (tp_features.bright_16levels)? 0x0f : 0x07;
- level = lec;
- };
- if (brightness_mode & 2) {
- lcmos = (nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS)
- & TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
- >> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
- lcmos &= (tp_features.bright_16levels)? 0x0f : 0x07;
- level = lcmos;
- }
-
- if (brightness_mode == 3 && lec != lcmos) {
- printk(IBM_ERR
- "CMOS NVRAM (%u) and EC (%u) do not agree "
- "on display brightness level\n",
- (unsigned int) lcmos,
- (unsigned int) lec);
- return -EIO;
- }
-
- return level;
-}
-
-/* May return EINTR which can always be mapped to ERESTARTSYS */
-static int brightness_set(int value)
-{
- int cmos_cmd, inc, i, res;
- int current_value;
-
- if (value > ((tp_features.bright_16levels)? 15 : 7))
- return -EINVAL;
-
- res = mutex_lock_interruptible(&brightness_mutex);
- if (res < 0)
- return res;
-
- current_value = brightness_get(NULL);
- if (current_value < 0) {
- res = current_value;
- goto errout;
- }
-
- cmos_cmd = value > current_value ?
- TP_CMOS_BRIGHTNESS_UP :
- TP_CMOS_BRIGHTNESS_DOWN;
- inc = (value > current_value)? 1 : -1;
-
- res = 0;
- for (i = current_value; i != value; i += inc) {
- if ((brightness_mode & 2) &&
- issue_thinkpad_cmos_command(cmos_cmd)) {
- res = -EIO;
- goto errout;
- }
- if ((brightness_mode & 1) &&
- !acpi_ec_write(brightness_offset, i + inc)) {
- res = -EIO;
- goto errout;;
- }
- }
-
-errout:
- mutex_unlock(&brightness_mutex);
- return res;
-}
-
static int brightness_read(char *p)
{
int len = 0;
int level;
- if ((level = brightness_get(NULL)) < 0) {
+ level = brightness_get(NULL);
+ if (level < 0) {
len += sprintf(p + len, "level:\t\tunreadable\n");
} else {
len += sprintf(p + len, "level:\t\t%d\n", level);
@@ -3425,6 +4420,8 @@ static struct ibm_struct brightness_driver_data = {
* Volume subdriver
*/
+static int volume_offset = 0x30;
+
static int volume_read(char *p)
{
int len = 0;
@@ -3474,8 +4471,11 @@ static int volume_write(char *buf)
} else
return -EINVAL;
- if (new_level != level) { /* mute doesn't change */
- cmos_cmd = new_level > level ? TP_CMOS_VOLUME_UP : TP_CMOS_VOLUME_DOWN;
+ if (new_level != level) {
+ /* mute doesn't change */
+
+ cmos_cmd = (new_level > level) ?
+ TP_CMOS_VOLUME_UP : TP_CMOS_VOLUME_DOWN;
inc = new_level > level ? 1 : -1;
if (mute && (issue_thinkpad_cmos_command(cmos_cmd) ||
@@ -3487,14 +4487,18 @@ static int volume_write(char *buf)
!acpi_ec_write(volume_offset, i + inc))
return -EIO;
- if (mute && (issue_thinkpad_cmos_command(TP_CMOS_VOLUME_MUTE) ||
- !acpi_ec_write(volume_offset,
- new_level + mute)))
+ if (mute &&
+ (issue_thinkpad_cmos_command(TP_CMOS_VOLUME_MUTE) ||
+ !acpi_ec_write(volume_offset, new_level + mute))) {
return -EIO;
+ }
}
- if (new_mute != mute) { /* level doesn't change */
- cmos_cmd = new_mute ? TP_CMOS_VOLUME_MUTE : TP_CMOS_VOLUME_UP;
+ if (new_mute != mute) {
+ /* level doesn't change */
+
+ cmos_cmd = (new_mute) ?
+ TP_CMOS_VOLUME_MUTE : TP_CMOS_VOLUME_UP;
if (issue_thinkpad_cmos_command(cmos_cmd) ||
!acpi_ec_write(volume_offset, level + new_mute))
@@ -3616,26 +4620,377 @@ static struct ibm_struct volume_driver_data = {
* but the ACPI tables just mention level 7.
*/
+enum { /* Fan control constants */
+ fan_status_offset = 0x2f, /* EC register 0x2f */
+ fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM)
+ * 0x84 must be read before 0x85 */
+
+ TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */
+ TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */
+
+ TPACPI_FAN_LAST_LEVEL = 0x100, /* Use cached last-seen fan level */
+};
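+/* As the comments above indicate, EC register 0x2f packs the manual
+ * fan level in its low bits, with TP_EC_FAN_FULLSPEED and
+ * TP_EC_FAN_AUTO overriding it when set, while registers 0x84/0x85
+ * hold the tachometer reading in RPM as an LSB/MSB pair. */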
+
+enum fan_status_access_mode {
+ TPACPI_FAN_NONE = 0, /* No fan status or control */
+ TPACPI_FAN_RD_ACPI_GFAN, /* Use ACPI GFAN */
+ TPACPI_FAN_RD_TPEC, /* Use ACPI EC regs 0x2f, 0x84-0x85 */
+};
+
+enum fan_control_access_mode {
+ TPACPI_FAN_WR_NONE = 0, /* No fan control */
+ TPACPI_FAN_WR_ACPI_SFAN, /* Use ACPI SFAN */
+ TPACPI_FAN_WR_TPEC, /* Use ACPI EC reg 0x2f */
+ TPACPI_FAN_WR_ACPI_FANS, /* Use ACPI FANS and EC reg 0x2f */
+};
+
+enum fan_control_commands {
+ TPACPI_FAN_CMD_SPEED = 0x0001, /* speed command */
+ TPACPI_FAN_CMD_LEVEL = 0x0002, /* level command */
+ TPACPI_FAN_CMD_ENABLE = 0x0004, /* enable/disable cmd,
+ * and also watchdog cmd */
+};
+
+static int fan_control_allowed;
+
static enum fan_status_access_mode fan_status_access_mode;
static enum fan_control_access_mode fan_control_access_mode;
static enum fan_control_commands fan_control_commands;
static u8 fan_control_initial_status;
static u8 fan_control_desired_level;
+static int fan_watchdog_maxinterval;
+
+static struct mutex fan_mutex;
static void fan_watchdog_fire(struct work_struct *ignored);
-static int fan_watchdog_maxinterval;
static DECLARE_DELAYED_WORK(fan_watchdog_task, fan_watchdog_fire);
-IBM_HANDLE(fans, ec, "FANS"); /* X31, X40, X41 */
-IBM_HANDLE(gfan, ec, "GFAN", /* 570 */
+TPACPI_HANDLE(fans, ec, "FANS"); /* X31, X40, X41 */
+TPACPI_HANDLE(gfan, ec, "GFAN", /* 570 */
"\\FSPD", /* 600e/x, 770e, 770x */
); /* all others */
-IBM_HANDLE(sfan, ec, "SFAN", /* 570 */
+TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */
"JFNS", /* 770x-JL */
); /* all others */
/*
+ * Call with fan_mutex held
+ */
+static void fan_update_desired_level(u8 status)
+{
+ if ((status &
+ (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) == 0) {
+ if (status > 7)
+ fan_control_desired_level = 7;
+ else
+ fan_control_desired_level = status;
+ }
+}
+
+static int fan_get_status(u8 *status)
+{
+ u8 s;
+
+ /* TODO:
+ * Add TPACPI_FAN_RD_ACPI_FANS ? */
+
+ switch (fan_status_access_mode) {
+ case TPACPI_FAN_RD_ACPI_GFAN:
+ /* 570, 600e/x, 770e, 770x */
+
+ if (unlikely(!acpi_evalf(gfan_handle, &s, NULL, "d")))
+ return -EIO;
+
+ if (likely(status))
+ *status = s & 0x07;
+
+ break;
+
+ case TPACPI_FAN_RD_TPEC:
+ /* all except 570, 600e/x, 770e, 770x */
+ if (unlikely(!acpi_ec_read(fan_status_offset, &s)))
+ return -EIO;
+
+ if (likely(status))
+ *status = s;
+
+ break;
+
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int fan_get_status_safe(u8 *status)
+{
+ int rc;
+ u8 s;
+
+ if (mutex_lock_interruptible(&fan_mutex))
+ return -ERESTARTSYS;
+ rc = fan_get_status(&s);
+ if (!rc)
+ fan_update_desired_level(s);
+ mutex_unlock(&fan_mutex);
+
+ if (status)
+ *status = s;
+
+ return rc;
+}
+
+static int fan_get_speed(unsigned int *speed)
+{
+ u8 hi, lo;
+
+ switch (fan_status_access_mode) {
+ case TPACPI_FAN_RD_TPEC:
+ /* all except 570, 600e/x, 770e, 770x */
+ if (unlikely(!acpi_ec_read(fan_rpm_offset, &lo) ||
+ !acpi_ec_read(fan_rpm_offset + 1, &hi)))
+ return -EIO;
+
+ if (likely(speed))
+ *speed = (hi << 8) | lo;
+
+ break;
+
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int fan_set_level(int level)
+{
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_SFAN:
+ if (level >= 0 && level <= 7) {
+ if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", level))
+ return -EIO;
+ } else
+ return -EINVAL;
+ break;
+
+ case TPACPI_FAN_WR_ACPI_FANS:
+ case TPACPI_FAN_WR_TPEC:
+ if ((level != TP_EC_FAN_AUTO) &&
+ (level != TP_EC_FAN_FULLSPEED) &&
+ ((level < 0) || (level > 7)))
+ return -EINVAL;
+
+ /* safety net should the EC not support AUTO
+ * or FULLSPEED mode bits and just ignore them */
+ if (level & TP_EC_FAN_FULLSPEED)
+ level |= 7; /* safety min speed 7 */
+ else if (level & TP_EC_FAN_AUTO)
+ level |= 4; /* safety min speed 4 */
+
+ if (!acpi_ec_write(fan_status_offset, level))
+ return -EIO;
+ else
+ tp_features.fan_ctrl_status_undef = 0;
+ break;
+
+ default:
+ return -ENXIO;
+ }
+ return 0;
+}
+
+static int fan_set_level_safe(int level)
+{
+ int rc;
+
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ if (mutex_lock_interruptible(&fan_mutex))
+ return -ERESTARTSYS;
+
+ if (level == TPACPI_FAN_LAST_LEVEL)
+ level = fan_control_desired_level;
+
+ rc = fan_set_level(level);
+ if (!rc)
+ fan_update_desired_level(level);
+
+ mutex_unlock(&fan_mutex);
+ return rc;
+}
+
+static int fan_set_enable(void)
+{
+ u8 s;
+ int rc;
+
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ if (mutex_lock_interruptible(&fan_mutex))
+ return -ERESTARTSYS;
+
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_FANS:
+ case TPACPI_FAN_WR_TPEC:
+ rc = fan_get_status(&s);
+ if (rc < 0)
+ break;
+
+ /* Don't go out of emergency fan mode */
+ if (s != 7) {
+ s &= 0x07;
+ s |= TP_EC_FAN_AUTO | 4; /* min fan speed 4 */
+ }
+
+ if (!acpi_ec_write(fan_status_offset, s))
+ rc = -EIO;
+ else {
+ tp_features.fan_ctrl_status_undef = 0;
+ rc = 0;
+ }
+ break;
+
+ case TPACPI_FAN_WR_ACPI_SFAN:
+ rc = fan_get_status(&s);
+ if (rc < 0)
+ break;
+
+ s &= 0x07;
+
+ /* Set fan to at least level 4 */
+ s |= 4;
+
+ if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", s))
+ rc = -EIO;
+ else
+ rc = 0;
+ break;
+
+ default:
+ rc = -ENXIO;
+ }
+
+ mutex_unlock(&fan_mutex);
+ return rc;
+}
+
+static int fan_set_disable(void)
+{
+ int rc;
+
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ if (mutex_lock_interruptible(&fan_mutex))
+ return -ERESTARTSYS;
+
+ rc = 0;
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_FANS:
+ case TPACPI_FAN_WR_TPEC:
+ if (!acpi_ec_write(fan_status_offset, 0x00))
+ rc = -EIO;
+ else {
+ fan_control_desired_level = 0;
+ tp_features.fan_ctrl_status_undef = 0;
+ }
+ break;
+
+ case TPACPI_FAN_WR_ACPI_SFAN:
+ if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", 0x00))
+ rc = -EIO;
+ else
+ fan_control_desired_level = 0;
+ break;
+
+ default:
+ rc = -ENXIO;
+ }
+
+
+ mutex_unlock(&fan_mutex);
+ return rc;
+}
+
+static int fan_set_speed(int speed)
+{
+ int rc;
+
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ if (mutex_lock_interruptible(&fan_mutex))
+ return -ERESTARTSYS;
+
+ rc = 0;
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_FANS:
+ if (speed >= 0 && speed <= 65535) {
+ if (!acpi_evalf(fans_handle, NULL, NULL, "vddd",
+ speed, speed, speed))
+ rc = -EIO;
+ } else
+ rc = -EINVAL;
+ break;
+
+ default:
+ rc = -ENXIO;
+ }
+
+ mutex_unlock(&fan_mutex);
+ return rc;
+}
+
+static void fan_watchdog_reset(void)
+{
+ static int fan_watchdog_active;
+
+ if (fan_control_access_mode == TPACPI_FAN_WR_NONE)
+ return;
+
+ if (fan_watchdog_active)
+ cancel_delayed_work(&fan_watchdog_task);
+
+ if (fan_watchdog_maxinterval > 0 &&
+ tpacpi_lifecycle != TPACPI_LIFE_EXITING) {
+ fan_watchdog_active = 1;
+ if (!schedule_delayed_work(&fan_watchdog_task,
+ msecs_to_jiffies(fan_watchdog_maxinterval
+ * 1000))) {
+ printk(TPACPI_ERR
+ "failed to schedule the fan watchdog, "
+ "watchdog will not trigger\n");
+ }
+ } else
+ fan_watchdog_active = 0;
+}
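+/*
+ * fan_watchdog_maxinterval is expressed in seconds (0 disables it,
+ * 1-120 per the procfs "watchdog" command), hence the
+ * msecs_to_jiffies(interval * 1000) conversion above.
+ */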
+
+static void fan_watchdog_fire(struct work_struct *ignored)
+{
+ int rc;
+
+ if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
+ return;
+
+ printk(TPACPI_NOTICE "fan watchdog: enabling fan\n");
+ rc = fan_set_enable();
+ if (rc < 0) {
+ printk(TPACPI_ERR "fan watchdog: error %d while enabling fan, "
+ "will try again later...\n", -rc);
+ /* reschedule for later */
+ fan_watchdog_reset();
+ }
+}
+
+/*
* SYSFS fan layout: hwmon compatible (device)
*
* pwm*_enable:
@@ -3868,9 +5223,9 @@ static int __init fan_init(struct ibm_init_struct *iibm)
tp_features.fan_ctrl_status_undef = 0;
fan_control_desired_level = 7;
- IBM_ACPIHANDLE_INIT(fans);
- IBM_ACPIHANDLE_INIT(gfan);
- IBM_ACPIHANDLE_INIT(sfan);
+ TPACPI_ACPIHANDLE_INIT(fans);
+ TPACPI_ACPIHANDLE_INIT(gfan);
+ TPACPI_ACPIHANDLE_INIT(sfan);
if (gfan_handle) {
/* 570, 600e/x, 770e, 770x */
@@ -3896,16 +5251,16 @@ static int __init fan_init(struct ibm_init_struct *iibm)
case 0x3837: /* TP-78 */
case 0x3637: /* TP-76 */
case 0x3037: /* TP-70 */
- printk(IBM_NOTICE
- "fan_init: initial fan status is "
- "unknown, assuming it is in auto "
- "mode\n");
+ printk(TPACPI_NOTICE
+ "fan_init: initial fan status "
+ "is unknown, assuming it is "
+ "in auto mode\n");
tp_features.fan_ctrl_status_undef = 1;
;;
}
}
} else {
- printk(IBM_ERR
+ printk(TPACPI_ERR
"ThinkPad ACPI EC access misbehaving, "
"fan status and control unavailable\n");
return 1;
@@ -3970,333 +5325,20 @@ static int __init fan_init(struct ibm_init_struct *iibm)
return 1;
}
-/*
- * Call with fan_mutex held
- */
-static void fan_update_desired_level(u8 status)
-{
- if ((status &
- (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) == 0) {
- if (status > 7)
- fan_control_desired_level = 7;
- else
- fan_control_desired_level = status;
- }
-}
-
-static int fan_get_status(u8 *status)
-{
- u8 s;
-
- /* TODO:
- * Add TPACPI_FAN_RD_ACPI_FANS ? */
-
- switch (fan_status_access_mode) {
- case TPACPI_FAN_RD_ACPI_GFAN:
- /* 570, 600e/x, 770e, 770x */
-
- if (unlikely(!acpi_evalf(gfan_handle, &s, NULL, "d")))
- return -EIO;
-
- if (likely(status))
- *status = s & 0x07;
-
- break;
-
- case TPACPI_FAN_RD_TPEC:
- /* all except 570, 600e/x, 770e, 770x */
- if (unlikely(!acpi_ec_read(fan_status_offset, &s)))
- return -EIO;
-
- if (likely(status))
- *status = s;
-
- break;
-
- default:
- return -ENXIO;
- }
-
- return 0;
-}
-
-static int fan_get_status_safe(u8 *status)
-{
- int rc;
- u8 s;
-
- if (mutex_lock_interruptible(&fan_mutex))
- return -ERESTARTSYS;
- rc = fan_get_status(&s);
- if (!rc)
- fan_update_desired_level(s);
- mutex_unlock(&fan_mutex);
-
- if (status)
- *status = s;
-
- return rc;
-}
-
static void fan_exit(void)
{
- vdbg_printk(TPACPI_DBG_EXIT, "cancelling any pending fan watchdog tasks\n");
+ vdbg_printk(TPACPI_DBG_EXIT,
+ "cancelling any pending fan watchdog tasks\n");
/* FIXME: can we really do this unconditionally? */
sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, &fan_attr_group);
- driver_remove_file(&tpacpi_hwmon_pdriver.driver, &driver_attr_fan_watchdog);
+ driver_remove_file(&tpacpi_hwmon_pdriver.driver,
+ &driver_attr_fan_watchdog);
cancel_delayed_work(&fan_watchdog_task);
flush_scheduled_work();
}
-static int fan_get_speed(unsigned int *speed)
-{
- u8 hi, lo;
-
- switch (fan_status_access_mode) {
- case TPACPI_FAN_RD_TPEC:
- /* all except 570, 600e/x, 770e, 770x */
- if (unlikely(!acpi_ec_read(fan_rpm_offset, &lo) ||
- !acpi_ec_read(fan_rpm_offset + 1, &hi)))
- return -EIO;
-
- if (likely(speed))
- *speed = (hi << 8) | lo;
-
- break;
-
- default:
- return -ENXIO;
- }
-
- return 0;
-}
-
-static void fan_watchdog_fire(struct work_struct *ignored)
-{
- int rc;
-
- if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
- return;
-
- printk(IBM_NOTICE "fan watchdog: enabling fan\n");
- rc = fan_set_enable();
- if (rc < 0) {
- printk(IBM_ERR "fan watchdog: error %d while enabling fan, "
- "will try again later...\n", -rc);
- /* reschedule for later */
- fan_watchdog_reset();
- }
-}
-
-static void fan_watchdog_reset(void)
-{
- static int fan_watchdog_active;
-
- if (fan_control_access_mode == TPACPI_FAN_WR_NONE)
- return;
-
- if (fan_watchdog_active)
- cancel_delayed_work(&fan_watchdog_task);
-
- if (fan_watchdog_maxinterval > 0 &&
- tpacpi_lifecycle != TPACPI_LIFE_EXITING) {
- fan_watchdog_active = 1;
- if (!schedule_delayed_work(&fan_watchdog_task,
- msecs_to_jiffies(fan_watchdog_maxinterval
- * 1000))) {
- printk(IBM_ERR "failed to schedule the fan watchdog, "
- "watchdog will not trigger\n");
- }
- } else
- fan_watchdog_active = 0;
-}
-
-static int fan_set_level(int level)
-{
- if (!fan_control_allowed)
- return -EPERM;
-
- switch (fan_control_access_mode) {
- case TPACPI_FAN_WR_ACPI_SFAN:
- if (level >= 0 && level <= 7) {
- if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", level))
- return -EIO;
- } else
- return -EINVAL;
- break;
-
- case TPACPI_FAN_WR_ACPI_FANS:
- case TPACPI_FAN_WR_TPEC:
- if ((level != TP_EC_FAN_AUTO) &&
- (level != TP_EC_FAN_FULLSPEED) &&
- ((level < 0) || (level > 7)))
- return -EINVAL;
-
- /* safety net should the EC not support AUTO
- * or FULLSPEED mode bits and just ignore them */
- if (level & TP_EC_FAN_FULLSPEED)
- level |= 7; /* safety min speed 7 */
- else if (level & TP_EC_FAN_FULLSPEED)
- level |= 4; /* safety min speed 4 */
-
- if (!acpi_ec_write(fan_status_offset, level))
- return -EIO;
- else
- tp_features.fan_ctrl_status_undef = 0;
- break;
-
- default:
- return -ENXIO;
- }
- return 0;
-}
-
-static int fan_set_level_safe(int level)
-{
- int rc;
-
- if (!fan_control_allowed)
- return -EPERM;
-
- if (mutex_lock_interruptible(&fan_mutex))
- return -ERESTARTSYS;
-
- if (level == TPACPI_FAN_LAST_LEVEL)
- level = fan_control_desired_level;
-
- rc = fan_set_level(level);
- if (!rc)
- fan_update_desired_level(level);
-
- mutex_unlock(&fan_mutex);
- return rc;
-}
-
-static int fan_set_enable(void)
-{
- u8 s;
- int rc;
-
- if (!fan_control_allowed)
- return -EPERM;
-
- if (mutex_lock_interruptible(&fan_mutex))
- return -ERESTARTSYS;
-
- switch (fan_control_access_mode) {
- case TPACPI_FAN_WR_ACPI_FANS:
- case TPACPI_FAN_WR_TPEC:
- rc = fan_get_status(&s);
- if (rc < 0)
- break;
-
- /* Don't go out of emergency fan mode */
- if (s != 7) {
- s &= 0x07;
- s |= TP_EC_FAN_AUTO | 4; /* min fan speed 4 */
- }
-
- if (!acpi_ec_write(fan_status_offset, s))
- rc = -EIO;
- else {
- tp_features.fan_ctrl_status_undef = 0;
- rc = 0;
- }
- break;
-
- case TPACPI_FAN_WR_ACPI_SFAN:
- rc = fan_get_status(&s);
- if (rc < 0)
- break;
-
- s &= 0x07;
-
- /* Set fan to at least level 4 */
- s |= 4;
-
- if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", s))
- rc= -EIO;
- else
- rc = 0;
- break;
-
- default:
- rc = -ENXIO;
- }
-
- mutex_unlock(&fan_mutex);
- return rc;
-}
-
-static int fan_set_disable(void)
-{
- int rc;
-
- if (!fan_control_allowed)
- return -EPERM;
-
- if (mutex_lock_interruptible(&fan_mutex))
- return -ERESTARTSYS;
-
- rc = 0;
- switch (fan_control_access_mode) {
- case TPACPI_FAN_WR_ACPI_FANS:
- case TPACPI_FAN_WR_TPEC:
- if (!acpi_ec_write(fan_status_offset, 0x00))
- rc = -EIO;
- else {
- fan_control_desired_level = 0;
- tp_features.fan_ctrl_status_undef = 0;
- }
- break;
-
- case TPACPI_FAN_WR_ACPI_SFAN:
- if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", 0x00))
- rc = -EIO;
- else
- fan_control_desired_level = 0;
- break;
-
- default:
- rc = -ENXIO;
- }
-
-
- mutex_unlock(&fan_mutex);
- return rc;
-}
-
-static int fan_set_speed(int speed)
-{
- int rc;
-
- if (!fan_control_allowed)
- return -EPERM;
-
- if (mutex_lock_interruptible(&fan_mutex))
- return -ERESTARTSYS;
-
- rc = 0;
- switch (fan_control_access_mode) {
- case TPACPI_FAN_WR_ACPI_FANS:
- if (speed >= 0 && speed <= 65535) {
- if (!acpi_evalf(fans_handle, NULL, NULL, "vddd",
- speed, speed, speed))
- rc = -EIO;
- } else
- rc = -EINVAL;
- break;
-
- default:
- rc = -ENXIO;
- }
-
- mutex_unlock(&fan_mutex);
- return rc;
-}
-
static int fan_read(char *p)
{
int len = 0;
@@ -4307,7 +5349,8 @@ static int fan_read(char *p)
switch (fan_status_access_mode) {
case TPACPI_FAN_RD_ACPI_GFAN:
/* 570, 600e/x, 770e, 770x */
- if ((rc = fan_get_status_safe(&status)) < 0)
+ rc = fan_get_status_safe(&status);
+ if (rc < 0)
return rc;
len += sprintf(p + len, "status:\t\t%s\n"
@@ -4317,7 +5360,8 @@ static int fan_read(char *p)
case TPACPI_FAN_RD_TPEC:
/* all except 570, 600e/x, 770e, 770x */
- if ((rc = fan_get_status_safe(&status)) < 0)
+ rc = fan_get_status_safe(&status);
+ if (rc < 0)
return rc;
if (unlikely(tp_features.fan_ctrl_status_undef)) {
@@ -4332,7 +5376,8 @@ static int fan_read(char *p)
len += sprintf(p + len, "status:\t\t%s\n",
(status != 0) ? "enabled" : "disabled");
- if ((rc = fan_get_speed(&speed)) < 0)
+ rc = fan_get_speed(&speed);
+ if (rc < 0)
return rc;
len += sprintf(p + len, "speed:\t\t%d\n", speed);
@@ -4368,8 +5413,8 @@ static int fan_read(char *p)
if (fan_control_commands & TPACPI_FAN_CMD_ENABLE)
len += sprintf(p + len, "commands:\tenable, disable\n"
- "commands:\twatchdog <timeout> (<timeout> is 0 (off), "
- "1-120 (seconds))\n");
+ "commands:\twatchdog <timeout> (<timeout> "
+ "is 0 (off), 1-120 (seconds))\n");
if (fan_control_commands & TPACPI_FAN_CMD_SPEED)
len += sprintf(p + len, "commands:\tspeed <speed>"
@@ -4385,13 +5430,14 @@ static int fan_write_cmd_level(const char *cmd, int *rc)
if (strlencmp(cmd, "level auto") == 0)
level = TP_EC_FAN_AUTO;
else if ((strlencmp(cmd, "level disengaged") == 0) |
- (strlencmp(cmd, "level full-speed") == 0))
+ (strlencmp(cmd, "level full-speed") == 0))
level = TP_EC_FAN_FULLSPEED;
else if (sscanf(cmd, "level %d", &level) != 1)
return 0;
- if ((*rc = fan_set_level_safe(level)) == -ENXIO)
- printk(IBM_ERR "level command accepted for unsupported "
+ *rc = fan_set_level_safe(level);
+ if (*rc == -ENXIO)
+ printk(TPACPI_ERR "level command accepted for unsupported "
"access mode %d", fan_control_access_mode);
return 1;
@@ -4402,8 +5448,9 @@ static int fan_write_cmd_enable(const char *cmd, int *rc)
if (strlencmp(cmd, "enable") != 0)
return 0;
- if ((*rc = fan_set_enable()) == -ENXIO)
- printk(IBM_ERR "enable command accepted for unsupported "
+ *rc = fan_set_enable();
+ if (*rc == -ENXIO)
+ printk(TPACPI_ERR "enable command accepted for unsupported "
"access mode %d", fan_control_access_mode);
return 1;
@@ -4414,8 +5461,9 @@ static int fan_write_cmd_disable(const char *cmd, int *rc)
if (strlencmp(cmd, "disable") != 0)
return 0;
- if ((*rc = fan_set_disable()) == -ENXIO)
- printk(IBM_ERR "disable command accepted for unsupported "
+ *rc = fan_set_disable();
+ if (*rc == -ENXIO)
+ printk(TPACPI_ERR "disable command accepted for unsupported "
"access mode %d", fan_control_access_mode);
return 1;
@@ -4431,8 +5479,9 @@ static int fan_write_cmd_speed(const char *cmd, int *rc)
if (sscanf(cmd, "speed %d", &speed) != 1)
return 0;
- if ((*rc = fan_set_speed(speed)) == -ENXIO)
- printk(IBM_ERR "speed command accepted for unsupported "
+ *rc = fan_set_speed(speed);
+ if (*rc == -ENXIO)
+ printk(TPACPI_ERR "speed command accepted for unsupported "
"access mode %d", fan_control_access_mode);
return 1;
@@ -4496,7 +5545,7 @@ static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", IBM_NAME);
+ return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME);
}
static struct device_attribute dev_attr_thinkpad_acpi_pdev_name =
@@ -4507,14 +5556,12 @@ static struct device_attribute dev_attr_thinkpad_acpi_pdev_name =
/* /proc support */
static struct proc_dir_entry *proc_dir;
-/* Subdriver registry */
-static LIST_HEAD(tpacpi_all_drivers);
-
-
/*
 * Module and infrastructure probe, init and exit handling
*/
+static int force_load;
+
#ifdef CONFIG_THINKPAD_ACPI_DEBUG
static const char * __init str_supported(int is_supported)
{
@@ -4524,6 +5571,48 @@ static const char * __init str_supported(int is_supported)
}
#endif /* CONFIG_THINKPAD_ACPI_DEBUG */
+static void ibm_exit(struct ibm_struct *ibm)
+{
+ dbg_printk(TPACPI_DBG_EXIT, "removing %s\n", ibm->name);
+
+ list_del_init(&ibm->all_drivers);
+
+ if (ibm->flags.acpi_notify_installed) {
+ dbg_printk(TPACPI_DBG_EXIT,
+ "%s: acpi_remove_notify_handler\n", ibm->name);
+ BUG_ON(!ibm->acpi);
+ acpi_remove_notify_handler(*ibm->acpi->handle,
+ ibm->acpi->type,
+ dispatch_acpi_notify);
+ ibm->flags.acpi_notify_installed = 0;
+ }
+
+ if (ibm->flags.proc_created) {
+ dbg_printk(TPACPI_DBG_EXIT,
+ "%s: remove_proc_entry\n", ibm->name);
+ remove_proc_entry(ibm->name, proc_dir);
+ ibm->flags.proc_created = 0;
+ }
+
+ if (ibm->flags.acpi_driver_registered) {
+ dbg_printk(TPACPI_DBG_EXIT,
+ "%s: acpi_bus_unregister_driver\n", ibm->name);
+ BUG_ON(!ibm->acpi);
+ acpi_bus_unregister_driver(ibm->acpi->driver);
+ kfree(ibm->acpi->driver);
+ ibm->acpi->driver = NULL;
+ ibm->flags.acpi_driver_registered = 0;
+ }
+
+ if (ibm->flags.init_called && ibm->exit) {
+ ibm->exit();
+ ibm->flags.init_called = 0;
+ }
+
+ dbg_printk(TPACPI_DBG_INIT, "finished removing %s\n", ibm->name);
+}
+
static int __init ibm_init(struct ibm_init_struct *iibm)
{
int ret;
@@ -4560,7 +5649,7 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
if (ibm->acpi->notify) {
ret = setup_acpi_notify(ibm);
if (ret == -ENODEV) {
- printk(IBM_NOTICE "disabling subdriver %s\n",
+ printk(TPACPI_NOTICE "disabling subdriver %s\n",
ibm->name);
ret = 0;
goto err_out;
@@ -4578,7 +5667,7 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
S_IFREG | S_IRUGO | S_IWUSR,
proc_dir);
if (!entry) {
- printk(IBM_ERR "unable to create proc entry %s\n",
+ printk(TPACPI_ERR "unable to create proc entry %s\n",
ibm->name);
ret = -ENODEV;
goto err_out;
@@ -4604,48 +5693,6 @@ err_out:
return (ret < 0)? ret : 0;
}
-static void ibm_exit(struct ibm_struct *ibm)
-{
- dbg_printk(TPACPI_DBG_EXIT, "removing %s\n", ibm->name);
-
- list_del_init(&ibm->all_drivers);
-
- if (ibm->flags.acpi_notify_installed) {
- dbg_printk(TPACPI_DBG_EXIT,
- "%s: acpi_remove_notify_handler\n", ibm->name);
- BUG_ON(!ibm->acpi);
- acpi_remove_notify_handler(*ibm->acpi->handle,
- ibm->acpi->type,
- dispatch_acpi_notify);
- ibm->flags.acpi_notify_installed = 0;
- ibm->flags.acpi_notify_installed = 0;
- }
-
- if (ibm->flags.proc_created) {
- dbg_printk(TPACPI_DBG_EXIT,
- "%s: remove_proc_entry\n", ibm->name);
- remove_proc_entry(ibm->name, proc_dir);
- ibm->flags.proc_created = 0;
- }
-
- if (ibm->flags.acpi_driver_registered) {
- dbg_printk(TPACPI_DBG_EXIT,
- "%s: acpi_bus_unregister_driver\n", ibm->name);
- BUG_ON(!ibm->acpi);
- acpi_bus_unregister_driver(ibm->acpi->driver);
- kfree(ibm->acpi->driver);
- ibm->acpi->driver = NULL;
- ibm->flags.acpi_driver_registered = 0;
- }
-
- if (ibm->flags.init_called && ibm->exit) {
- ibm->exit();
- ibm->flags.init_called = 0;
- }
-
- dbg_printk(TPACPI_DBG_INIT, "finished removing %s\n", ibm->name);
-}
-
/* Probing */
static void __init get_thinkpad_model_data(struct thinkpad_id_data *tp)
@@ -4715,10 +5762,10 @@ static int __init probe_for_thinkpad(void)
is_thinkpad = (thinkpad_id.model_str != NULL);
/* ec is required because many other handles are relative to it */
- IBM_ACPIHANDLE_INIT(ec);
+ TPACPI_ACPIHANDLE_INIT(ec);
if (!ec_handle) {
if (is_thinkpad)
- printk(IBM_ERR
+ printk(TPACPI_ERR
"Not yet supported ThinkPad detected!\n");
return -ENODEV;
}
@@ -4839,47 +5886,110 @@ static int __init set_ibm_param(const char *val, struct kernel_param *kp)
return -EINVAL;
}
-static int experimental;
module_param(experimental, int, 0);
+MODULE_PARM_DESC(experimental,
+ "Enables experimental features when non-zero");
-static u32 dbg_level;
module_param_named(debug, dbg_level, uint, 0);
+MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
-static int force_load;
module_param(force_load, bool, 0);
+MODULE_PARM_DESC(force_load,
+ "Attempts to load the driver even on a "
+ "mis-identified ThinkPad when true");
-static int fan_control_allowed;
module_param_named(fan_control, fan_control_allowed, bool, 0);
+MODULE_PARM_DESC(fan_control,
+	"Enables setting fan parameters when true");
-static int brightness_mode;
module_param_named(brightness_mode, brightness_mode, int, 0);
+MODULE_PARM_DESC(brightness_mode,
+ "Selects brightness control strategy: "
+ "0=auto, 1=EC, 2=CMOS, 3=both");
-static unsigned int brightness_enable = 2; /* 2 = auto, 0 = no, 1 = yes */
module_param(brightness_enable, uint, 0);
+MODULE_PARM_DESC(brightness_enable,
+ "Enables backlight control when 1, disables when 0");
-static unsigned int hotkey_report_mode;
module_param(hotkey_report_mode, uint, 0);
-
-#define IBM_PARAM(feature) \
- module_param_call(feature, set_ibm_param, NULL, NULL, 0)
-
-IBM_PARAM(hotkey);
-IBM_PARAM(bluetooth);
-IBM_PARAM(video);
-IBM_PARAM(light);
+MODULE_PARM_DESC(hotkey_report_mode,
+ "used for backwards compatibility with userspace, "
+ "see documentation");
+
+#define TPACPI_PARAM(feature) \
+ module_param_call(feature, set_ibm_param, NULL, NULL, 0); \
+	MODULE_PARM_DESC(feature, "Simulates thinkpad-acpi procfs command " \
+ "at module load, see documentation")
+
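+/*
+ * Example: loading the module with fan=enable is dispatched through
+ * set_ibm_param() and simulates writing "enable" to the fan procfs
+ * entry, as the parameter description above says.
+ */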
+TPACPI_PARAM(hotkey);
+TPACPI_PARAM(bluetooth);
+TPACPI_PARAM(video);
+TPACPI_PARAM(light);
#ifdef CONFIG_THINKPAD_ACPI_DOCK
-IBM_PARAM(dock);
+TPACPI_PARAM(dock);
#endif
#ifdef CONFIG_THINKPAD_ACPI_BAY
-IBM_PARAM(bay);
+TPACPI_PARAM(bay);
#endif /* CONFIG_THINKPAD_ACPI_BAY */
-IBM_PARAM(cmos);
-IBM_PARAM(led);
-IBM_PARAM(beep);
-IBM_PARAM(ecdump);
-IBM_PARAM(brightness);
-IBM_PARAM(volume);
-IBM_PARAM(fan);
+TPACPI_PARAM(cmos);
+TPACPI_PARAM(led);
+TPACPI_PARAM(beep);
+TPACPI_PARAM(ecdump);
+TPACPI_PARAM(brightness);
+TPACPI_PARAM(volume);
+TPACPI_PARAM(fan);
+
+static void thinkpad_acpi_module_exit(void)
+{
+ struct ibm_struct *ibm, *itmp;
+
+ tpacpi_lifecycle = TPACPI_LIFE_EXITING;
+
+ list_for_each_entry_safe_reverse(ibm, itmp,
+ &tpacpi_all_drivers,
+ all_drivers) {
+ ibm_exit(ibm);
+ }
+
+ dbg_printk(TPACPI_DBG_INIT, "finished subdriver exit path...\n");
+
+ if (tpacpi_inputdev) {
+ if (tp_features.input_device_registered)
+ input_unregister_device(tpacpi_inputdev);
+ else
+ input_free_device(tpacpi_inputdev);
+ }
+
+ if (tpacpi_hwmon)
+ hwmon_device_unregister(tpacpi_hwmon);
+
+ if (tp_features.sensors_pdev_attrs_registered)
+ device_remove_file(&tpacpi_sensors_pdev->dev,
+ &dev_attr_thinkpad_acpi_pdev_name);
+ if (tpacpi_sensors_pdev)
+ platform_device_unregister(tpacpi_sensors_pdev);
+ if (tpacpi_pdev)
+ platform_device_unregister(tpacpi_pdev);
+
+ if (tp_features.sensors_pdrv_attrs_registered)
+ tpacpi_remove_driver_attributes(&tpacpi_hwmon_pdriver.driver);
+ if (tp_features.platform_drv_attrs_registered)
+ tpacpi_remove_driver_attributes(&tpacpi_pdriver.driver);
+
+ if (tp_features.sensors_pdrv_registered)
+ platform_driver_unregister(&tpacpi_hwmon_pdriver);
+
+ if (tp_features.platform_drv_registered)
+ platform_driver_unregister(&tpacpi_pdriver);
+
+ if (proc_dir)
+ remove_proc_entry(TPACPI_PROC_DIR, acpi_root_dir);
+
+ kfree(thinkpad_id.bios_version_str);
+ kfree(thinkpad_id.ec_version_str);
+ kfree(thinkpad_id.model_str);
+}
+
static int __init thinkpad_acpi_module_init(void)
{
@@ -4902,12 +6012,13 @@ static int __init thinkpad_acpi_module_init(void)
/* Driver initialization */
- IBM_ACPIHANDLE_INIT(ecrd);
- IBM_ACPIHANDLE_INIT(ecwr);
+ TPACPI_ACPIHANDLE_INIT(ecrd);
+ TPACPI_ACPIHANDLE_INIT(ecwr);
- proc_dir = proc_mkdir(IBM_PROC_DIR, acpi_root_dir);
+ proc_dir = proc_mkdir(TPACPI_PROC_DIR, acpi_root_dir);
if (!proc_dir) {
- printk(IBM_ERR "unable to create proc dir " IBM_PROC_DIR);
+ printk(TPACPI_ERR
+ "unable to create proc dir " TPACPI_PROC_DIR);
thinkpad_acpi_module_exit();
return -ENODEV;
}
@@ -4915,7 +6026,8 @@ static int __init thinkpad_acpi_module_init(void)
ret = platform_driver_register(&tpacpi_pdriver);
if (ret) {
- printk(IBM_ERR "unable to register main platform driver\n");
+ printk(TPACPI_ERR
+ "unable to register main platform driver\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -4923,7 +6035,8 @@ static int __init thinkpad_acpi_module_init(void)
ret = platform_driver_register(&tpacpi_hwmon_pdriver);
if (ret) {
- printk(IBM_ERR "unable to register hwmon platform driver\n");
+ printk(TPACPI_ERR
+ "unable to register hwmon platform driver\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -4932,10 +6045,12 @@ static int __init thinkpad_acpi_module_init(void)
ret = tpacpi_create_driver_attributes(&tpacpi_pdriver.driver);
if (!ret) {
tp_features.platform_drv_attrs_registered = 1;
- ret = tpacpi_create_driver_attributes(&tpacpi_hwmon_pdriver.driver);
+ ret = tpacpi_create_driver_attributes(
+ &tpacpi_hwmon_pdriver.driver);
}
if (ret) {
- printk(IBM_ERR "unable to create sysfs driver attributes\n");
+ printk(TPACPI_ERR
+ "unable to create sysfs driver attributes\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -4943,30 +6058,31 @@ static int __init thinkpad_acpi_module_init(void)
/* Device initialization */
- tpacpi_pdev = platform_device_register_simple(IBM_DRVR_NAME, -1,
+ tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, -1,
NULL, 0);
if (IS_ERR(tpacpi_pdev)) {
ret = PTR_ERR(tpacpi_pdev);
tpacpi_pdev = NULL;
- printk(IBM_ERR "unable to register platform device\n");
+ printk(TPACPI_ERR "unable to register platform device\n");
thinkpad_acpi_module_exit();
return ret;
}
tpacpi_sensors_pdev = platform_device_register_simple(
- IBM_HWMON_DRVR_NAME,
- -1, NULL, 0);
+ TPACPI_HWMON_DRVR_NAME,
+ -1, NULL, 0);
if (IS_ERR(tpacpi_sensors_pdev)) {
ret = PTR_ERR(tpacpi_sensors_pdev);
tpacpi_sensors_pdev = NULL;
- printk(IBM_ERR "unable to register hwmon platform device\n");
+ printk(TPACPI_ERR
+ "unable to register hwmon platform device\n");
thinkpad_acpi_module_exit();
return ret;
}
ret = device_create_file(&tpacpi_sensors_pdev->dev,
&dev_attr_thinkpad_acpi_pdev_name);
if (ret) {
- printk(IBM_ERR
- "unable to create sysfs hwmon device attributes\n");
+ printk(TPACPI_ERR
+ "unable to create sysfs hwmon device attributes\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -4975,20 +6091,20 @@ static int __init thinkpad_acpi_module_init(void)
if (IS_ERR(tpacpi_hwmon)) {
ret = PTR_ERR(tpacpi_hwmon);
tpacpi_hwmon = NULL;
- printk(IBM_ERR "unable to register hwmon device\n");
+ printk(TPACPI_ERR "unable to register hwmon device\n");
thinkpad_acpi_module_exit();
return ret;
}
mutex_init(&tpacpi_inputdev_send_mutex);
tpacpi_inputdev = input_allocate_device();
if (!tpacpi_inputdev) {
- printk(IBM_ERR "unable to allocate input device\n");
+ printk(TPACPI_ERR "unable to allocate input device\n");
thinkpad_acpi_module_exit();
return -ENOMEM;
} else {
/* Prepare input device, but don't register */
tpacpi_inputdev->name = "ThinkPad Extra Buttons";
- tpacpi_inputdev->phys = IBM_DRVR_NAME "/input0";
+ tpacpi_inputdev->phys = TPACPI_DRVR_NAME "/input0";
tpacpi_inputdev->id.bustype = BUS_HOST;
tpacpi_inputdev->id.vendor = (thinkpad_id.vendor) ?
thinkpad_id.vendor :
@@ -5007,7 +6123,7 @@ static int __init thinkpad_acpi_module_init(void)
}
ret = input_register_device(tpacpi_inputdev);
if (ret < 0) {
- printk(IBM_ERR "unable to register input device\n");
+ printk(TPACPI_ERR "unable to register input device\n");
thinkpad_acpi_module_exit();
return ret;
} else {
@@ -5018,56 +6134,36 @@ static int __init thinkpad_acpi_module_init(void)
return 0;
}
-static void thinkpad_acpi_module_exit(void)
-{
- struct ibm_struct *ibm, *itmp;
-
- tpacpi_lifecycle = TPACPI_LIFE_EXITING;
-
- list_for_each_entry_safe_reverse(ibm, itmp,
- &tpacpi_all_drivers,
- all_drivers) {
- ibm_exit(ibm);
- }
-
- dbg_printk(TPACPI_DBG_INIT, "finished subdriver exit path...\n");
-
- if (tpacpi_inputdev) {
- if (tp_features.input_device_registered)
- input_unregister_device(tpacpi_inputdev);
- else
- input_free_device(tpacpi_inputdev);
- }
-
- if (tpacpi_hwmon)
- hwmon_device_unregister(tpacpi_hwmon);
-
- if (tp_features.sensors_pdev_attrs_registered)
- device_remove_file(&tpacpi_sensors_pdev->dev,
- &dev_attr_thinkpad_acpi_pdev_name);
- if (tpacpi_sensors_pdev)
- platform_device_unregister(tpacpi_sensors_pdev);
- if (tpacpi_pdev)
- platform_device_unregister(tpacpi_pdev);
-
- if (tp_features.sensors_pdrv_attrs_registered)
- tpacpi_remove_driver_attributes(&tpacpi_hwmon_pdriver.driver);
- if (tp_features.platform_drv_attrs_registered)
- tpacpi_remove_driver_attributes(&tpacpi_pdriver.driver);
+/* Please remove this in year 2009 */
+MODULE_ALIAS("ibm_acpi");
- if (tp_features.sensors_pdrv_registered)
- platform_driver_unregister(&tpacpi_hwmon_pdriver);
+/*
+ * DMI matching for module autoloading
+ *
+ * See http://thinkwiki.org/wiki/List_of_DMI_IDs
+ * See http://thinkwiki.org/wiki/BIOS_Upgrade_Downloads
+ *
+ * Only models listed in thinkwiki will be supported, so add yours
+ * if it is not there yet.
+ */
+#define IBM_BIOS_MODULE_ALIAS(__type) \
+ MODULE_ALIAS("dmi:bvnIBM:bvr" __type "ET??WW")
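+/*
+ * For example, with a type string of "1Y" the macro expands to
+ * MODULE_ALIAS("dmi:bvnIBM:bvr1YET??WW"); each "?" matches a single
+ * character of the BIOS version string (e.g. "1YET51WW").
+ */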
- if (tp_features.platform_drv_registered)
- platform_driver_unregister(&tpacpi_pdriver);
+/* Non-ancient thinkpads */
+MODULE_ALIAS("dmi:bvnIBM:*:svnIBM:*:pvrThinkPad*:rvnIBM:*");
+MODULE_ALIAS("dmi:bvnLENOVO:*:svnLENOVO:*:pvrThinkPad*:rvnLENOVO:*");
- if (proc_dir)
- remove_proc_entry(IBM_PROC_DIR, acpi_root_dir);
+/* Ancient thinkpad BIOSes have to be identified by
+ * BIOS type or model number, and there are far fewer
+ * BIOS types than model numbers... */
+IBM_BIOS_MODULE_ALIAS("I[B,D,H,I,M,N,O,T,W,V,Y,Z]");
+IBM_BIOS_MODULE_ALIAS("1[0,3,6,8,A-G,I,K,M-P,S,T]");
+IBM_BIOS_MODULE_ALIAS("K[U,X-Z]");
- kfree(thinkpad_id.bios_version_str);
- kfree(thinkpad_id.ec_version_str);
- kfree(thinkpad_id.model_str);
-}
+MODULE_AUTHOR("Borislav Deianov, Henrique de Moraes Holschuh");
+MODULE_DESCRIPTION(TPACPI_DESC);
+MODULE_VERSION(TPACPI_VERSION);
+MODULE_LICENSE("GPL");
module_init(thinkpad_acpi_module_init);
module_exit(thinkpad_acpi_module_exit);
diff --git a/drivers/misc/thinkpad_acpi.h b/drivers/misc/thinkpad_acpi.h
deleted file mode 100644
index 8fba2bbe345e..000000000000
--- a/drivers/misc/thinkpad_acpi.h
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- * thinkpad_acpi.h - ThinkPad ACPI Extras
- *
- *
- * Copyright (C) 2004-2005 Borislav Deianov <borislav@users.sf.net>
- * Copyright (C) 2006-2007 Henrique de Moraes Holschuh <hmh@hmh.eng.br>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#ifndef __THINKPAD_ACPI_H__
-#define __THINKPAD_ACPI_H__
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-
-#include <linux/nvram.h>
-#include <linux/proc_fs.h>
-#include <linux/sysfs.h>
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/platform_device.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/input.h>
-#include <asm/uaccess.h>
-
-#include <linux/dmi.h>
-#include <linux/jiffies.h>
-#include <linux/workqueue.h>
-
-#include <acpi/acpi_drivers.h>
-#include <acpi/acnamesp.h>
-
-#include <linux/pci_ids.h>
-
-/****************************************************************************
- * Main driver
- */
-
-#define IBM_NAME "thinkpad"
-#define IBM_DESC "ThinkPad ACPI Extras"
-#define IBM_FILE IBM_NAME "_acpi"
-#define IBM_URL "http://ibm-acpi.sf.net/"
-#define IBM_MAIL "ibm-acpi-devel@lists.sourceforge.net"
-
-#define IBM_PROC_DIR "ibm"
-#define IBM_ACPI_EVENT_PREFIX "ibm"
-#define IBM_DRVR_NAME IBM_FILE
-#define IBM_HWMON_DRVR_NAME IBM_NAME "_hwmon"
-
-#define IBM_LOG IBM_FILE ": "
-#define IBM_ERR KERN_ERR IBM_LOG
-#define IBM_NOTICE KERN_NOTICE IBM_LOG
-#define IBM_INFO KERN_INFO IBM_LOG
-#define IBM_DEBUG KERN_DEBUG IBM_LOG
-
-#define IBM_MAX_ACPI_ARGS 3
-
-/* ThinkPad CMOS commands */
-#define TP_CMOS_VOLUME_DOWN 0
-#define TP_CMOS_VOLUME_UP 1
-#define TP_CMOS_VOLUME_MUTE 2
-#define TP_CMOS_BRIGHTNESS_UP 4
-#define TP_CMOS_BRIGHTNESS_DOWN 5
-
-/* ThinkPad CMOS NVRAM constants */
-#define TP_NVRAM_ADDR_BRIGHTNESS 0x5e
-#define TP_NVRAM_MASK_LEVEL_BRIGHTNESS 0x0f
-#define TP_NVRAM_POS_LEVEL_BRIGHTNESS 0
-
-#define onoff(status,bit) ((status) & (1 << (bit)) ? "on" : "off")
-#define enabled(status,bit) ((status) & (1 << (bit)) ? "enabled" : "disabled")
-#define strlencmp(a,b) (strncmp((a), (b), strlen(b)))
-
-/* Debugging */
-#define TPACPI_DBG_ALL 0xffff
-#define TPACPI_DBG_ALL 0xffff
-#define TPACPI_DBG_INIT 0x0001
-#define TPACPI_DBG_EXIT 0x0002
-#define dbg_printk(a_dbg_level, format, arg...) \
- do { if (dbg_level & a_dbg_level) \
- printk(IBM_DEBUG "%s: " format, __func__ , ## arg); } while (0)
-#ifdef CONFIG_THINKPAD_ACPI_DEBUG
-#define vdbg_printk(a_dbg_level, format, arg...) \
- dbg_printk(a_dbg_level, format, ## arg)
-static const char *str_supported(int is_supported);
-#else
-#define vdbg_printk(a_dbg_level, format, arg...)
-#endif
-
-/* Input IDs */
-#define TPACPI_HKEY_INPUT_VENDOR PCI_VENDOR_ID_IBM
-#define TPACPI_HKEY_INPUT_PRODUCT 0x5054 /* "TP" */
-#define TPACPI_HKEY_INPUT_VERSION 0x4101
-
-/* ACPI HIDs */
-#define IBM_HKEY_HID "IBM0068"
-
-/* ACPI helpers */
-static int __must_check acpi_evalf(acpi_handle handle,
- void *res, char *method, char *fmt, ...);
-static int __must_check acpi_ec_read(int i, u8 * p);
-static int __must_check acpi_ec_write(int i, u8 v);
-static int __must_check _sta(acpi_handle handle);
-
-/* ACPI handles */
-static acpi_handle root_handle; /* root namespace */
-static acpi_handle ec_handle; /* EC */
-static acpi_handle ecrd_handle, ecwr_handle; /* 570 EC access */
-static acpi_handle cmos_handle, hkey_handle; /* basic thinkpad handles */
-
-static void drv_acpi_handle_init(char *name,
- acpi_handle *handle, acpi_handle parent,
- char **paths, int num_paths, char **path);
-#define IBM_ACPIHANDLE_INIT(object) \
- drv_acpi_handle_init(#object, &object##_handle, *object##_parent, \
- object##_paths, ARRAY_SIZE(object##_paths), &object##_path)
-
-/* ThinkPad ACPI helpers */
-static int issue_thinkpad_cmos_command(int cmos_cmd);
-
-/* procfs support */
-static struct proc_dir_entry *proc_dir;
-
-/* procfs helpers */
-static int dispatch_procfs_read(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-static int dispatch_procfs_write(struct file *file,
- const char __user * userbuf,
- unsigned long count, void *data);
-static char *next_cmd(char **cmds);
-
-/* sysfs support */
-struct attribute_set {
- unsigned int members, max_members;
- struct attribute_group group;
-};
-
-static struct attribute_set *create_attr_set(unsigned int max_members,
- const char* name);
-#define destroy_attr_set(_set) \
- kfree(_set);
-static int add_to_attr_set(struct attribute_set* s, struct attribute *attr);
-static int add_many_to_attr_set(struct attribute_set* s,
- struct attribute **attr,
- unsigned int count);
-#define register_attr_set_with_sysfs(_attr_set, _kobj) \
- sysfs_create_group(_kobj, &_attr_set->group)
-static void delete_attr_set(struct attribute_set* s, struct kobject *kobj);
-
-static int parse_strtoul(const char *buf, unsigned long max,
- unsigned long *value);
-
-/* Device model */
-static struct platform_device *tpacpi_pdev;
-static struct platform_device *tpacpi_sensors_pdev;
-static struct device *tpacpi_hwmon;
-static struct platform_driver tpacpi_pdriver;
-static struct input_dev *tpacpi_inputdev;
-static int tpacpi_create_driver_attributes(struct device_driver *drv);
-static void tpacpi_remove_driver_attributes(struct device_driver *drv);
-
-/* Module */
-static int experimental;
-static u32 dbg_level;
-static int force_load;
-static unsigned int hotkey_report_mode;
-
-static int thinkpad_acpi_module_init(void);
-static void thinkpad_acpi_module_exit(void);
-
-
-/****************************************************************************
- * Subdrivers
- */
-
-struct ibm_struct;
-
-struct tp_acpi_drv_struct {
- const struct acpi_device_id *hid;
- struct acpi_driver *driver;
-
- void (*notify) (struct ibm_struct *, u32);
- acpi_handle *handle;
- u32 type;
- struct acpi_device *device;
-};
-
-struct ibm_struct {
- char *name;
-
- int (*read) (char *);
- int (*write) (char *);
- void (*exit) (void);
- void (*resume) (void);
-
- struct list_head all_drivers;
-
- struct tp_acpi_drv_struct *acpi;
-
- struct {
- u8 acpi_driver_registered:1;
- u8 acpi_notify_installed:1;
- u8 proc_created:1;
- u8 init_called:1;
- u8 experimental:1;
- } flags;
-};
-
-struct ibm_init_struct {
- char param[32];
-
- int (*init) (struct ibm_init_struct *);
- struct ibm_struct *data;
-};
-
-static struct {
-#ifdef CONFIG_THINKPAD_ACPI_BAY
- u32 bay_status:1;
- u32 bay_eject:1;
- u32 bay_status2:1;
- u32 bay_eject2:1;
-#endif
- u32 bluetooth:1;
- u32 hotkey:1;
- u32 hotkey_mask:1;
- u32 hotkey_wlsw:1;
- u32 light:1;
- u32 light_status:1;
- u32 bright_16levels:1;
- u32 wan:1;
- u32 fan_ctrl_status_undef:1;
- u32 input_device_registered:1;
- u32 platform_drv_registered:1;
- u32 platform_drv_attrs_registered:1;
- u32 sensors_pdrv_registered:1;
- u32 sensors_pdrv_attrs_registered:1;
- u32 sensors_pdev_attrs_registered:1;
-} tp_features;
-
-struct thinkpad_id_data {
- unsigned int vendor; /* ThinkPad vendor:
- * PCI_VENDOR_ID_IBM/PCI_VENDOR_ID_LENOVO */
-
- char *bios_version_str; /* Something like 1ZET51WW (1.03z) */
- char *ec_version_str; /* Something like 1ZHT51WW-1.04a */
-
- u16 bios_model; /* Big Endian, TP-1Y = 0x5931, 0 = unknown */
- u16 ec_model;
-
- char *model_str;
-};
-
-static struct thinkpad_id_data thinkpad_id;
-
-static struct list_head tpacpi_all_drivers;
-
-static struct ibm_init_struct ibms_init[];
-static int set_ibm_param(const char *val, struct kernel_param *kp);
-static int ibm_init(struct ibm_init_struct *iibm);
-static void ibm_exit(struct ibm_struct *ibm);
-
-
-/*
- * procfs master subdriver
- */
-static int thinkpad_acpi_driver_init(struct ibm_init_struct *iibm);
-static int thinkpad_acpi_driver_read(char *p);
-
-
-/*
- * Bay subdriver
- */
-
-#ifdef CONFIG_THINKPAD_ACPI_BAY
-static acpi_handle bay_handle, bay_ej_handle;
-static acpi_handle bay2_handle, bay2_ej_handle;
-
-static int bay_init(struct ibm_init_struct *iibm);
-static void bay_notify(struct ibm_struct *ibm, u32 event);
-static int bay_read(char *p);
-static int bay_write(char *buf);
-#endif /* CONFIG_THINKPAD_ACPI_BAY */
-
-
-/*
- * Beep subdriver
- */
-
-static acpi_handle beep_handle;
-
-static int beep_read(char *p);
-static int beep_write(char *buf);
-
-
-/*
- * Bluetooth subdriver
- */
-
-enum {
- /* ACPI GBDC/SBDC bits */
- TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */
- TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */
- TP_ACPI_BLUETOOTH_UNK = 0x04, /* unknown function */
-};
-
-static int bluetooth_init(struct ibm_init_struct *iibm);
-static int bluetooth_get_radiosw(void);
-static int bluetooth_set_radiosw(int radio_on);
-static int bluetooth_read(char *p);
-static int bluetooth_write(char *buf);
-
-
-/*
- * Brightness (backlight) subdriver
- */
-
-#define TPACPI_BACKLIGHT_DEV_NAME "thinkpad_screen"
-
-static struct backlight_device *ibm_backlight_device;
-static int brightness_offset = 0x31;
-static int brightness_mode;
-static unsigned int brightness_enable; /* 0 = no, 1 = yes, 2 = auto */
-
-static int brightness_init(struct ibm_init_struct *iibm);
-static void brightness_exit(void);
-static int brightness_get(struct backlight_device *bd);
-static int brightness_set(int value);
-static int brightness_update_status(struct backlight_device *bd);
-static int brightness_read(char *p);
-static int brightness_write(char *buf);
-
-
-/*
- * CMOS subdriver
- */
-
-static int cmos_read(char *p);
-static int cmos_write(char *buf);
-
-
-/*
- * Dock subdriver
- */
-
-#ifdef CONFIG_THINKPAD_ACPI_DOCK
-static acpi_handle pci_handle;
-static acpi_handle dock_handle;
-
-static void dock_notify(struct ibm_struct *ibm, u32 event);
-static int dock_read(char *p);
-static int dock_write(char *buf);
-#endif /* CONFIG_THINKPAD_ACPI_DOCK */
-
-
-/*
- * EC dump subdriver
- */
-
-static int ecdump_read(char *p) ;
-static int ecdump_write(char *buf);
-
-
-/*
- * Fan subdriver
- */
-
-enum { /* Fan control constants */
- fan_status_offset = 0x2f, /* EC register 0x2f */
- fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM)
- * 0x84 must be read before 0x85 */
-
- TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */
- TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */
-
- TPACPI_FAN_LAST_LEVEL = 0x100, /* Use cached last-seen fan level */
-};
-
-enum fan_status_access_mode {
- TPACPI_FAN_NONE = 0, /* No fan status or control */
- TPACPI_FAN_RD_ACPI_GFAN, /* Use ACPI GFAN */
- TPACPI_FAN_RD_TPEC, /* Use ACPI EC regs 0x2f, 0x84-0x85 */
-};
-
-enum fan_control_access_mode {
- TPACPI_FAN_WR_NONE = 0, /* No fan control */
- TPACPI_FAN_WR_ACPI_SFAN, /* Use ACPI SFAN */
- TPACPI_FAN_WR_TPEC, /* Use ACPI EC reg 0x2f */
- TPACPI_FAN_WR_ACPI_FANS, /* Use ACPI FANS and EC reg 0x2f */
-};
-
-enum fan_control_commands {
- TPACPI_FAN_CMD_SPEED = 0x0001, /* speed command */
- TPACPI_FAN_CMD_LEVEL = 0x0002, /* level command */
- TPACPI_FAN_CMD_ENABLE = 0x0004, /* enable/disable cmd,
- * and also watchdog cmd */
-};
-
-static int fan_control_allowed;
-
-static enum fan_status_access_mode fan_status_access_mode;
-static enum fan_control_access_mode fan_control_access_mode;
-static enum fan_control_commands fan_control_commands;
-static u8 fan_control_initial_status;
-static u8 fan_control_desired_level;
-static int fan_watchdog_maxinterval;
-
-static struct mutex fan_mutex;
-
-static acpi_handle fans_handle, gfan_handle, sfan_handle;
-
-static int fan_init(struct ibm_init_struct *iibm);
-static void fan_exit(void);
-static int fan_get_status(u8 *status);
-static int fan_get_status_safe(u8 *status);
-static int fan_get_speed(unsigned int *speed);
-static void fan_update_desired_level(u8 status);
-static void fan_watchdog_fire(struct work_struct *ignored);
-static void fan_watchdog_reset(void);
-static int fan_set_level(int level);
-static int fan_set_level_safe(int level);
-static int fan_set_enable(void);
-static int fan_set_disable(void);
-static int fan_set_speed(int speed);
-static int fan_read(char *p);
-static int fan_write(char *buf);
-static int fan_write_cmd_level(const char *cmd, int *rc);
-static int fan_write_cmd_enable(const char *cmd, int *rc);
-static int fan_write_cmd_disable(const char *cmd, int *rc);
-static int fan_write_cmd_speed(const char *cmd, int *rc);
-static int fan_write_cmd_watchdog(const char *cmd, int *rc);
-
-
-/*
- * Hotkey subdriver
- */
-
-static int hotkey_orig_status;
-static u32 hotkey_orig_mask;
-
-static struct mutex hotkey_mutex;
-
-static int hotkey_init(struct ibm_init_struct *iibm);
-static void hotkey_exit(void);
-static int hotkey_get(int *status, u32 *mask);
-static int hotkey_set(int status, u32 mask);
-static void hotkey_notify(struct ibm_struct *ibm, u32 event);
-static int hotkey_read(char *p);
-static int hotkey_write(char *buf);
-
-
-/*
- * LED subdriver
- */
-
-enum led_access_mode {
- TPACPI_LED_NONE = 0,
- TPACPI_LED_570, /* 570 */
- TPACPI_LED_OLD, /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */
- TPACPI_LED_NEW, /* all others */
-};
-
-enum { /* For TPACPI_LED_OLD */
- TPACPI_LED_EC_HLCL = 0x0c, /* EC reg to get led to power on */
- TPACPI_LED_EC_HLBL = 0x0d, /* EC reg to blink a lit led */
- TPACPI_LED_EC_HLMS = 0x0e, /* EC reg to select led to command */
-};
-
-static enum led_access_mode led_supported;
-static acpi_handle led_handle;
-
-static int led_init(struct ibm_init_struct *iibm);
-static int led_read(char *p);
-static int led_write(char *buf);
-
-/*
- * Light (thinklight) subdriver
- */
-
-static acpi_handle lght_handle, ledb_handle;
-
-static int light_init(struct ibm_init_struct *iibm);
-static int light_read(char *p);
-static int light_write(char *buf);
-
-
-/*
- * Thermal subdriver
- */
-
-enum thermal_access_mode {
- TPACPI_THERMAL_NONE = 0, /* No thermal support */
- TPACPI_THERMAL_ACPI_TMP07, /* Use ACPI TMP0-7 */
- TPACPI_THERMAL_ACPI_UPDT, /* Use ACPI TMP0-7 with UPDT */
- TPACPI_THERMAL_TPEC_8, /* Use ACPI EC regs, 8 sensors */
- TPACPI_THERMAL_TPEC_16, /* Use ACPI EC regs, 16 sensors */
-};
-
-enum { /* TPACPI_THERMAL_TPEC_* */
- TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */
- TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */
- TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */
-};
-
-#define TPACPI_MAX_THERMAL_SENSORS 16 /* Max thermal sensors supported */
-struct ibm_thermal_sensors_struct {
- s32 temp[TPACPI_MAX_THERMAL_SENSORS];
-};
-
-static enum thermal_access_mode thermal_read_mode;
-
-static int thermal_init(struct ibm_init_struct *iibm);
-static int thermal_get_sensor(int idx, s32 *value);
-static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s);
-static int thermal_read(char *p);
-
-
-/*
- * Video subdriver
- */
-
-enum video_access_mode {
- TPACPI_VIDEO_NONE = 0,
- TPACPI_VIDEO_570, /* 570 */
- TPACPI_VIDEO_770, /* 600e/x, 770e, 770x */
- TPACPI_VIDEO_NEW, /* all others */
-};
-
-enum { /* video status flags, based on VIDEO_570 */
- TP_ACPI_VIDEO_S_LCD = 0x01, /* LCD output enabled */
- TP_ACPI_VIDEO_S_CRT = 0x02, /* CRT output enabled */
- TP_ACPI_VIDEO_S_DVI = 0x08, /* DVI output enabled */
-};
-
-enum { /* TPACPI_VIDEO_570 constants */
- TP_ACPI_VIDEO_570_PHSCMD = 0x87, /* unknown magic constant :( */
- TP_ACPI_VIDEO_570_PHSMASK = 0x03, /* PHS bits that map to
- * video_status_flags */
- TP_ACPI_VIDEO_570_PHS2CMD = 0x8b, /* unknown magic constant :( */
- TP_ACPI_VIDEO_570_PHS2SET = 0x80, /* unknown magic constant :( */
-};
-
-static enum video_access_mode video_supported;
-static int video_orig_autosw;
-static acpi_handle vid_handle, vid2_handle;
-
-static int video_init(struct ibm_init_struct *iibm);
-static void video_exit(void);
-static int video_outputsw_get(void);
-static int video_outputsw_set(int status);
-static int video_autosw_get(void);
-static int video_autosw_set(int enable);
-static int video_outputsw_cycle(void);
-static int video_expand_toggle(void);
-static int video_read(char *p);
-static int video_write(char *buf);
-
-
-/*
- * Volume subdriver
- */
-
-static int volume_offset = 0x30;
-
-static int volume_read(char *p);
-static int volume_write(char *buf);
-
-
-/*
- * Wan subdriver
- */
-
-enum {
- /* ACPI GWAN/SWAN bits */
- TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */
- TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */
- TP_ACPI_WANCARD_UNK = 0x04, /* unknown function */
-};
-
-static int wan_init(struct ibm_init_struct *iibm);
-static int wan_get_radiosw(void);
-static int wan_set_radiosw(int radio_on);
-static int wan_read(char *p);
-static int wan_write(char *buf);
-
-
-#endif /* __THINKPAD_ACPI_H */
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index be4b9948c762..eeaaa9dce6ef 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -4,7 +4,7 @@
* block2mtd.c - create an mtd from a block device
*
* Copyright (C) 2001,2002 Simon Evans <spse@secret.org.uk>
- * Copyright (C) 2004-2006 Jörn Engel <joern@wh.fh-wedel.de>
+ * Copyright (C) 2004-2006 Joern Engel <joern@wh.fh-wedel.de>
*
* Licence: GPL
*/
@@ -485,5 +485,5 @@ module_init(block2mtd_init);
module_exit(block2mtd_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Simon Evans <spse@secret.org.uk> and others");
+MODULE_AUTHOR("Joern Engel <joern@lazybastard.org>");
MODULE_DESCRIPTION("Emulate an MTD using a block device");
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index e9ce241b7fe5..846989f292e3 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -376,7 +376,7 @@ static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
* hardware restriction. */
if (doc->mfr) {
if (doc->mfr == mfr && doc->id == id)
- return 1; /* This is another the same the first */
+ return 1; /* This is the same as the first */
else
printk(KERN_WARNING
"Flash chip at floor %d, chip %d is different:\n",
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 56cc1ca7ffd5..180298b92a7a 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -2,7 +2,7 @@
* $Id: phram.c,v 1.16 2005/11/07 11:14:25 gleixner Exp $
*
* Copyright (c) ???? Jochen Schäuble <psionic@psionic.de>
- * Copyright (c) 2003-2004 Jörn Engel <joern@wh.fh-wedel.de>
+ * Copyright (c) 2003-2004 Joern Engel <joern@wh.fh-wedel.de>
*
* Usage:
*
@@ -299,5 +299,5 @@ module_init(init_phram);
module_exit(cleanup_phram);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jörn Engel <joern@wh.fh-wedel.de>");
+MODULE_AUTHOR("Joern Engel <joern@wh.fh-wedel.de>");
MODULE_DESCRIPTION("MTD driver for physical RAM");
diff --git a/drivers/mtd/maps/mtx-1_flash.c b/drivers/mtd/maps/mtx-1_flash.c
index d884f2be28f6..2a8fde9b92f0 100644
--- a/drivers/mtd/maps/mtx-1_flash.c
+++ b/drivers/mtd/maps/mtx-1_flash.c
@@ -4,7 +4,7 @@
* $Id: mtx-1_flash.c,v 1.2 2005/11/07 11:14:27 gleixner Exp $
*
* (C) 2005 Bruno Randolf <bruno.randolf@4g-systems.biz>
- * (C) 2005 Jörn Engel <joern@wohnheim.fh-wedel.de>
+ * (C) 2005 Joern Engel <joern@wohnheim.fh-wedel.de>
*
*/
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
index e3744eb8eccb..dd38011ee0b7 100644
--- a/drivers/mtd/nand/autcpu12.c
+++ b/drivers/mtd/nand/autcpu12.c
@@ -20,7 +20,7 @@
*
* 02-12-2002 TG Cleanup of module params
*
- * 02-20-2002 TG adjusted for different rd/wr adress support
+ * 02-20-2002 TG adjusted for different rd/wr address support
* added support for read device ready/busy line
* added page_cache
*
@@ -144,7 +144,7 @@ static int __init autcpu12_init(void)
goto out;
}
- /* map physical adress */
+ /* map physical address */
autcpu12_fio_base = ioremap(AUTCPU12_PHYS_SMC, SZ_1K);
if (!autcpu12_fio_base) {
printk("Ioremap autcpu12 SmartMedia Card failed\n");
@@ -227,7 +227,7 @@ static void __exit autcpu12_cleanup(void)
/* Release resources, unregister device */
nand_release(autcpu12_mtd);
- /* unmap physical adress */
+ /* unmap physical address */
iounmap(autcpu12_fio_base);
/* Free the MTD device structure */
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 7d6ac6a7d9a7..747042ab094a 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -4,7 +4,7 @@
* http://blackfin.uclinux.org/
* Bryan Wu <bryan.wu@analog.com>
*
- * Blackfin BF5xx on-chip NAND flash controler driver
+ * Blackfin BF5xx on-chip NAND flash controller driver
*
* Derived from drivers/mtd/nand/s3c2410.c
* Copyright (c) 2007 Ben Dooks <ben@simtec.co.uk>
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 89deff007116..19e1594421a4 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -337,7 +337,7 @@ static void __exit cs553x_cleanup(void)
nand_release(cs553x_mtd[i]);
cs553x_mtd[i] = NULL;
- /* unmap physical adress */
+ /* unmap physical address */
iounmap(mmio_base);
/* Free the MTD device structure */
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
index 0146cdc48039..ba67bbec20d3 100644
--- a/drivers/mtd/nand/edb7312.c
+++ b/drivers/mtd/nand/edb7312.c
@@ -125,7 +125,7 @@ static int __init ep7312_init(void)
return -ENOMEM;
}
- /* map physical adress */
+ /* map physical address */
ep7312_fio_base = ioremap(ep7312_fio_pbase, SZ_1K);
if (!ep7312_fio_base) {
printk("ioremap EDB7312 NAND flash failed\n");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 971d58c391f1..7acb1a0e7409 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -89,7 +89,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops);
/*
- * For devices which display every fart in the system on a seperate LED. Is
+ * For devices which display every fart in the system on a separate LED. Is
* compiled away when LED support is disabled.
*/
DEFINE_LED_TRIGGER(nand_led_trigger);
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 10490b48d9f7..bb885d1fcab5 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -210,7 +210,7 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I
#define STATE_CMD_RESET 0x0000000C /* reset */
#define STATE_CMD_MASK 0x0000000F /* command states mask */
-/* After an addres is input, the simulator goes to one of these states */
+/* After an address is input, the simulator goes to one of these states */
#define STATE_ADDR_PAGE 0x00000010 /* full (row, column) address is accepted */
#define STATE_ADDR_SEC 0x00000020 /* sector address was accepted */
#define STATE_ADDR_ZERO 0x00000030 /* one byte zero address was accepted */
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index d31cb7b3feeb..9260ad947524 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -8,7 +8,7 @@
*
* Changelog:
* 21-Sep-2004 BJD Initial version
- * 23-Sep-2004 BJD Mulitple device support
+ * 23-Sep-2004 BJD Multiple device support
* 28-Sep-2004 BJD Fixed ECC placement for Hardware mode
* 12-Oct-2004 BJD Fixed errors in use of platform data
* 18-Feb-2005 BJD Fix sparse errors
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 51c7288ab49a..033f8800b1e6 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -165,7 +165,7 @@ static int __init sharpsl_nand_init(void)
return -ENOMEM;
}
- /* map physical adress */
+ /* map physical address */
sharpsl_io_base = ioremap(sharpsl_phys_base, 0x1000);
if (!sharpsl_io_base) {
printk("ioremap to access Sharp SL NAND chip failed\n");
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index 067262ee8df0..0513cbc8834d 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -429,7 +429,7 @@ static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_b
}
}
-/* calc_chain_lenght: Walk through a Virtual Unit Chain and estimate chain length */
+/* calc_chain_length: Walk through a Virtual Unit Chain and estimate chain length */
static int calc_chain_length(struct NFTLrecord *nftl, unsigned int first_block)
{
unsigned int length = 0, block = first_block;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 389980f0e59e..9cc25fd80b60 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -814,8 +814,8 @@ config ULTRA32
will be called smc-ultra32.
config BFIN_MAC
- tristate "Blackfin 536/537 on-chip mac support"
- depends on NET_ETHERNET && (BF537 || BF536) && (!BF537_PORT_H)
+ tristate "Blackfin 527/536/537 on-chip mac support"
+ depends on NET_ETHERNET && (BF527 || BF537 || BF536) && (!BF537_PORT_H)
select CRC32
select MII
select PHYLIB
@@ -828,7 +828,7 @@ config BFIN_MAC
config BFIN_MAC_USE_L1
bool "Use L1 memory for rx/tx packets"
- depends on BFIN_MAC && BF537
+ depends on BFIN_MAC && (BF527 || BF537)
default y
help
To get maximum network performance, you should use L1 memory as rx/tx buffers.
@@ -855,7 +855,8 @@ config BFIN_RX_DESC_NUM
config BFIN_MAC_RMII
bool "RMII PHY Interface (EXPERIMENTAL)"
depends on BFIN_MAC && EXPERIMENTAL
- default n
+ default y if BFIN527_EZKIT
+ default n if BFIN537_STAMP
help
Use Reduced PHY MII Interface
@@ -919,8 +920,7 @@ config ENC28J60
---help---
Support for the Microchip EN28J60 ethernet chip.
- To compile this driver as a module, choose M here and read
- <file:Documentation/networking/net-modules.txt>. The module will be
+ To compile this driver as a module, choose M here. The module will be
called enc28j60.
config ENC28J60_WRITEVERIFY
@@ -1199,7 +1199,7 @@ config NE2_MCA
config IBMLANA
tristate "IBM LAN Adapter/A support"
- depends on MCA && MCA_LEGACY
+ depends on MCA
---help---
This is a Micro Channel Ethernet adapter. You need to set
CONFIG_MCA to use this driver. It is both available as an in-kernel
@@ -1737,10 +1737,8 @@ config SC92031
config CPMAC
tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
- depends on NET_ETHERNET && EXPERIMENTAL && AR7
+ depends on NET_ETHERNET && EXPERIMENTAL && AR7 && BROKEN
select PHYLIB
- select FIXED_PHY
- select FIXED_MII_100_FDX
help
TI AR7 CPMAC Ethernet support
@@ -2040,8 +2038,7 @@ config IGB
More specific information on configuring the driver is in
<file:Documentation/networking/e1000.txt>.
- To compile this driver as a module, choose M here and read
- <file:Documentation/networking/net-modules.txt>. The module
+ To compile this driver as a module, choose M here. The module
will be called igb.
source "drivers/net/ixp2000/Kconfig"
@@ -3113,6 +3110,7 @@ config VIRTIO_NET
tristate "Virtio network driver (EXPERIMENTAL)"
depends on EXPERIMENTAL && VIRTIO
---help---
- This is the virtual network driver for lguest. Say Y or M.
+ This is the virtual network driver for virtio. It can be used with
+ lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
endif # NETDEVICES
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index 25b114a4e2b1..0ae0d83e5d22 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -384,7 +384,7 @@ static void reset_phy(struct net_device *dev)
/* Wait until PHY reset is complete */
do {
read_phy(lp->phy_address, MII_BMCR, &bmcr);
- } while (!(bmcr && BMCR_RESET));
+ } while (!(bmcr & BMCR_RESET));
disable_mdi();
spin_unlock_irq(&lp->lock);
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 7495a9ee8f4b..194949afacd0 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -137,11 +137,12 @@ static int ax_initial_check(struct net_device *dev)
static void ax_reset_8390(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
unsigned long reset_start_time = jiffies;
void __iomem *addr = (void __iomem *)dev->base_addr;
if (ei_debug > 1)
- printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
+ dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies);
ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
@@ -151,7 +152,7 @@ static void ax_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
if (jiffies - reset_start_time > 2*HZ/100) {
- printk(KERN_WARNING "%s: %s did not complete.\n",
+ dev_warn(&ax->dev->dev, "%s: %s did not complete.\n",
__FUNCTION__, dev->name);
break;
}
@@ -165,13 +166,15 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page)
{
struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem;
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk(KERN_EMERG "%s: DMAing conflict in %s [DMAstat:%d][irqlock:%d].\n",
+ dev_err(&ax->dev->dev, "%s: DMAing conflict in %s "
+ "[DMAstat:%d][irqlock:%d].\n",
dev->name, __FUNCTION__,
- ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -204,13 +207,16 @@ static void ax_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset)
{
struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem;
char *buf = skb->data;
if (ei_status.dmaing) {
- printk(KERN_EMERG "%s: DMAing conflict in ax_block_input "
+ dev_err(&ax->dev->dev,
+ "%s: DMAing conflict in %s "
"[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ dev->name, __FUNCTION__,
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -239,6 +245,7 @@ static void ax_block_output(struct net_device *dev, int count,
const unsigned char *buf, const int start_page)
{
struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem;
unsigned long dma_start;
@@ -251,7 +258,7 @@ static void ax_block_output(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk(KERN_EMERG "%s: DMAing conflict in %s."
+ dev_err(&ax->dev->dev, "%s: DMAing conflict in %s."
"[DMAstat:%d][irqlock:%d]\n",
dev->name, __FUNCTION__,
ei_status.dmaing, ei_status.irqlock);
@@ -281,7 +288,8 @@ static void ax_block_output(struct net_device *dev, int count,
while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
- printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ dev_warn(&ax->dev->dev,
+ "%s: timeout waiting for Tx RDC.\n", dev->name);
ax_reset_8390(dev);
ax_NS8390_init(dev,1);
break;
@@ -424,10 +432,11 @@ static void
ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
{
struct ei_device *ei = (struct ei_device *) netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
unsigned long flags;
- printk(KERN_DEBUG "%s: %p, %04x, %04x %04x\n",
- __FUNCTION__, dev, phy_addr, reg, value);
+ dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
+ __FUNCTION__, dev, phy_addr, reg, value);
spin_lock_irqsave(&ei->page_lock, flags);
@@ -750,14 +759,11 @@ static int ax_init_dev(struct net_device *dev, int first_init)
ax_NS8390_init(dev, 0);
if (first_init) {
- printk("AX88796: %dbit, irq %d, %lx, MAC: ",
- ei_status.word16 ? 16:8, dev->irq, dev->base_addr);
-
- for (i = 0; i < ETHER_ADDR_LEN; i++)
- printk("%2.2x%c", dev->dev_addr[i],
- (i < (ETHER_ADDR_LEN-1) ? ':' : ' '));
+ DECLARE_MAC_BUF(mac);
- printk("\n");
+ dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %s\n",
+ ei_status.word16 ? 16:8, dev->irq, dev->base_addr,
+ print_mac(mac, dev->dev_addr));
}
ret = register_netdev(dev);
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index eb971755a3ff..c993a32b3f50 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -1,34 +1,11 @@
/*
- * File: drivers/net/bfin_mac.c
- * Based on:
- * Maintainer:
- * Bryan Wu <bryan.wu@analog.com>
+ * Blackfin On-Chip MAC Driver
*
- * Original author:
- * Luke Yang <luke.yang@analog.com>
+ * Copyright 2004-2007 Analog Devices Inc.
*
- * Created:
- * Description:
+ * Enter bugs at http://blackfin.uclinux.org/
*
- * Modified:
- * Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software ; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation ; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY ; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program ; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Licensed under the GPL-2 or later.
*/
#include <linux/init.h>
@@ -65,7 +42,7 @@
#define DRV_NAME "bfin_mac"
#define DRV_VERSION "1.1"
#define DRV_AUTHOR "Bryan Wu, Luke Yang"
-#define DRV_DESC "Blackfin BF53[67] on-chip Ethernet MAC driver"
+#define DRV_DESC "Blackfin BF53[67] BF527 on-chip Ethernet MAC driver"
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_LICENSE("GPL");
@@ -296,7 +273,7 @@ static void mdio_poll(void)
/* poll the STABUSY bit */
while ((bfin_read_EMAC_STAADD()) & STABUSY) {
- mdelay(10);
+ udelay(1);
if (timeout_cnt-- < 0) {
printk(KERN_ERR DRV_NAME
": wait MDC/MDIO transaction to complete timeout\n");
@@ -412,20 +389,26 @@ static void bf537_adjust_link(struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
}
+/* MDC = 2.5 MHz */
+#define MDC_CLK 2500000
+
static int mii_probe(struct net_device *dev)
{
struct bf537mac_local *lp = netdev_priv(dev);
struct phy_device *phydev = NULL;
unsigned short sysctl;
int i;
+ u32 sclk, mdc_div;
/* Enable PHY output early */
if (!(bfin_read_VR_CTL() & PHYCLKOE))
bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
- /* MDC = 2.5 MHz */
+ sclk = get_sclk();
+ mdc_div = ((sclk / MDC_CLK) / 2) - 1;
+
sysctl = bfin_read_EMAC_SYSCTL();
- sysctl |= SET_MDCDIV(24);
+ sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
bfin_write_EMAC_SYSCTL(sysctl);
/* search for connect PHY device */
@@ -477,8 +460,10 @@ static int mii_probe(struct net_device *dev)
lp->phydev = phydev;
printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n",
- DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq);
+ "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
+ "@sclk=%dMHz)\n",
+ DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq,
+ MDC_CLK, mdc_div, sclk/1000000);
return 0;
}
@@ -551,7 +536,7 @@ static void adjust_tx_list(void)
*/
if (current_tx_ptr->next->next == tx_list_head) {
while (tx_list_head->status.status_word == 0) {
- mdelay(10);
+ mdelay(1);
if (tx_list_head->status.status_word != 0
|| !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) {
goto adjust_head;
@@ -666,6 +651,12 @@ static void bf537mac_rx(struct net_device *dev)
current_rx_ptr->skb = new_skb;
current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
+ /* Invalidate the data cache over the skb->data range when the cache
+ * is write-back. This prevents dirty cache lines from overwriting the new DMA data
+ */
+ blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
+ (unsigned long)new_skb->end);
+
len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
skb_put(skb, len);
blackfin_dcache_invalidate_range((unsigned long)skb->head,
@@ -767,7 +758,7 @@ static void bf537mac_enable(void)
#if defined(CONFIG_BFIN_MAC_RMII)
opmode |= RMII; /* For now, only 100 MBit is supported */
-#ifdef CONFIG_BF_REV_0_2
+#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2
opmode |= TE;
#endif
#endif
@@ -792,6 +783,39 @@ static void bf537mac_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+static void bf537mac_multicast_hash(struct net_device *dev)
+{
+ u32 emac_hashhi, emac_hashlo;
+ struct dev_mc_list *dmi = dev->mc_list;
+ char *addrs;
+ int i;
+ u32 crc;
+
+ emac_hashhi = emac_hashlo = 0;
+
+ for (i = 0; i < dev->mc_count; i++) {
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ /* skip non-multicast addresses */
+ if (!(*addrs & 1))
+ continue;
+
+ crc = ether_crc(ETH_ALEN, addrs);
+ crc >>= 26;
+
+ if (crc & 0x20)
+ emac_hashhi |= 1 << (crc & 0x1f);
+ else
+ emac_hashlo |= 1 << (crc & 0x1f);
+ }
+
+ bfin_write_EMAC_HASHHI(emac_hashhi);
+ bfin_write_EMAC_HASHLO(emac_hashlo);
+
+ return;
+}
+
/*
* This routine will, depending on the values passed to it,
* either make it accept multicast packets, go into
@@ -807,11 +831,17 @@ static void bf537mac_set_multicast_list(struct net_device *dev)
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= RAF;
bfin_write_EMAC_OPMODE(sysctl);
- } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ } else if (dev->flags & IFF_ALLMULTI) {
/* accept all multicast */
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= PAM;
bfin_write_EMAC_OPMODE(sysctl);
+ } else if (dev->mc_count) {
+ /* set up multicast hash table */
+ sysctl = bfin_read_EMAC_OPMODE();
+ sysctl |= HM;
+ bfin_write_EMAC_OPMODE(sysctl);
+ bf537mac_multicast_hash(dev);
} else {
/* clear promisc or multicast mode */
sysctl = bfin_read_EMAC_OPMODE();
@@ -860,10 +890,10 @@ static int bf537mac_open(struct net_device *dev)
return retval;
phy_start(lp->phydev);
+ phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
setup_system_regs(dev);
bf537mac_disable();
bf537mac_enable();
-
pr_debug("hardware init finished\n");
netif_start_queue(dev);
netif_carrier_on(dev);
@@ -886,6 +916,7 @@ static int bf537mac_close(struct net_device *dev)
netif_carrier_off(dev);
phy_stop(lp->phydev);
+ phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);
/* clear everything */
bf537mac_shutdown(dev);
@@ -970,7 +1001,7 @@ static int __init bf537mac_probe(struct net_device *dev)
/* register irq handler */
if (request_irq
(IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED,
- "BFIN537_MAC_RX", dev)) {
+ "EMAC_RX", dev)) {
printk(KERN_WARNING DRV_NAME
": Unable to attach BlackFin MAC RX interrupt\n");
return -EBUSY;
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index 5970ea7142cd..f774d5a36942 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -1,34 +1,11 @@
/*
- * File: drivers/net/bfin_mac.c
- * Based on:
- * Maintainer:
- * Bryan Wu <bryan.wu@analog.com>
+ * Blackfin On-Chip MAC Driver
*
- * Original author:
- * Luke Yang <luke.yang@analog.com>
+ * Copyright 2004-2007 Analog Devices Inc.
*
- * Created:
- * Description:
+ * Enter bugs at http://blackfin.uclinux.org/
*
- * Modified:
- * Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software ; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation ; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY ; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program ; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Licensed under the GPL-2 or later.
*/
#define BFIN_MAC_CSUM_OFFLOAD
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2039f7838f2d..0942d82f7cbf 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1464,10 +1464,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
dev_set_allmulti(slave_dev, 1);
}
+ netif_tx_lock_bh(bond_dev);
/* upload master's mc_list to new slave */
for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
dev_mc_add (slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
}
+ netif_tx_unlock_bh(bond_dev);
}
if (bond->params.mode == BOND_MODE_8023AD) {
@@ -1821,7 +1823,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
}
/* flush master's mc_list from slave */
+ netif_tx_lock_bh(bond_dev);
bond_mc_list_flush(bond_dev, slave_dev);
+ netif_tx_unlock_bh(bond_dev);
}
netdev_set_master(slave_dev, NULL);
@@ -1942,7 +1946,9 @@ static int bond_release_all(struct net_device *bond_dev)
}
/* flush master's mc_list from slave */
+ netif_tx_lock_bh(bond_dev);
bond_mc_list_flush(bond_dev, slave_dev);
+ netif_tx_unlock_bh(bond_dev);
}
netdev_set_master(slave_dev, NULL);
@@ -2795,14 +2801,11 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
}
if (do_failover) {
- rtnl_lock();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- rtnl_unlock();
-
}
re_arm:
@@ -2859,8 +2862,6 @@ void bond_activebackup_arp_mon(struct work_struct *work)
slave->link = BOND_LINK_UP;
- rtnl_lock();
-
write_lock_bh(&bond->curr_slave_lock);
if ((!bond->curr_active_slave) &&
@@ -2896,7 +2897,6 @@ void bond_activebackup_arp_mon(struct work_struct *work)
}
write_unlock_bh(&bond->curr_slave_lock);
- rtnl_unlock();
}
} else {
read_lock(&bond->curr_slave_lock);
@@ -2966,7 +2966,6 @@ void bond_activebackup_arp_mon(struct work_struct *work)
bond->dev->name,
slave->dev->name);
- rtnl_lock();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
@@ -2974,8 +2973,6 @@ void bond_activebackup_arp_mon(struct work_struct *work)
write_unlock_bh(&bond->curr_slave_lock);
- rtnl_unlock();
-
bond->current_arp_slave = slave;
if (slave) {
@@ -2993,13 +2990,10 @@ void bond_activebackup_arp_mon(struct work_struct *work)
bond->primary_slave->dev->name);
/* primary is up so switch to it */
- rtnl_lock();
write_lock_bh(&bond->curr_slave_lock);
bond_change_active_slave(bond, bond->primary_slave);
write_unlock_bh(&bond->curr_slave_lock);
- rtnl_unlock();
-
slave = bond->primary_slave;
slave->jiffies = jiffies;
} else {
@@ -3769,42 +3763,45 @@ static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
{
struct bonding *bond = bond_dev->priv;
struct net_device_stats *stats = &(bond->stats), *sstats;
+ struct net_device_stats local_stats;
struct slave *slave;
int i;
- memset(stats, 0, sizeof(struct net_device_stats));
+ memset(&local_stats, 0, sizeof(struct net_device_stats));
read_lock_bh(&bond->lock);
bond_for_each_slave(bond, slave, i) {
sstats = slave->dev->get_stats(slave->dev);
- stats->rx_packets += sstats->rx_packets;
- stats->rx_bytes += sstats->rx_bytes;
- stats->rx_errors += sstats->rx_errors;
- stats->rx_dropped += sstats->rx_dropped;
+ local_stats.rx_packets += sstats->rx_packets;
+ local_stats.rx_bytes += sstats->rx_bytes;
+ local_stats.rx_errors += sstats->rx_errors;
+ local_stats.rx_dropped += sstats->rx_dropped;
- stats->tx_packets += sstats->tx_packets;
- stats->tx_bytes += sstats->tx_bytes;
- stats->tx_errors += sstats->tx_errors;
- stats->tx_dropped += sstats->tx_dropped;
+ local_stats.tx_packets += sstats->tx_packets;
+ local_stats.tx_bytes += sstats->tx_bytes;
+ local_stats.tx_errors += sstats->tx_errors;
+ local_stats.tx_dropped += sstats->tx_dropped;
- stats->multicast += sstats->multicast;
- stats->collisions += sstats->collisions;
+ local_stats.multicast += sstats->multicast;
+ local_stats.collisions += sstats->collisions;
- stats->rx_length_errors += sstats->rx_length_errors;
- stats->rx_over_errors += sstats->rx_over_errors;
- stats->rx_crc_errors += sstats->rx_crc_errors;
- stats->rx_frame_errors += sstats->rx_frame_errors;
- stats->rx_fifo_errors += sstats->rx_fifo_errors;
- stats->rx_missed_errors += sstats->rx_missed_errors;
+ local_stats.rx_length_errors += sstats->rx_length_errors;
+ local_stats.rx_over_errors += sstats->rx_over_errors;
+ local_stats.rx_crc_errors += sstats->rx_crc_errors;
+ local_stats.rx_frame_errors += sstats->rx_frame_errors;
+ local_stats.rx_fifo_errors += sstats->rx_fifo_errors;
+ local_stats.rx_missed_errors += sstats->rx_missed_errors;
- stats->tx_aborted_errors += sstats->tx_aborted_errors;
- stats->tx_carrier_errors += sstats->tx_carrier_errors;
- stats->tx_fifo_errors += sstats->tx_fifo_errors;
- stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
- stats->tx_window_errors += sstats->tx_window_errors;
+ local_stats.tx_aborted_errors += sstats->tx_aborted_errors;
+ local_stats.tx_carrier_errors += sstats->tx_carrier_errors;
+ local_stats.tx_fifo_errors += sstats->tx_fifo_errors;
+ local_stats.tx_heartbeat_errors += sstats->tx_heartbeat_errors;
+ local_stats.tx_window_errors += sstats->tx_window_errors;
}
+ memcpy(stats, &local_stats, sizeof(struct net_device_stats));
+
read_unlock_bh(&bond->lock);
return stats;
@@ -3937,8 +3934,6 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
struct bonding *bond = bond_dev->priv;
struct dev_mc_list *dmi;
- write_lock_bh(&bond->lock);
-
/*
* Do promisc before checking multicast_mode
*/
@@ -3959,6 +3954,8 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
bond_set_allmulti(bond, -1);
}
+ read_lock(&bond->lock);
+
bond->flags = bond_dev->flags;
/* looking for addresses to add to slaves' mc list */
@@ -3979,7 +3976,7 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
bond_mc_list_destroy(bond);
bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC);
- write_unlock_bh(&bond->lock);
+ read_unlock(&bond->lock);
}
/*
@@ -4526,7 +4523,9 @@ static void bond_free_all(void)
struct net_device *bond_dev = bond->dev;
bond_work_cancel_all(bond);
+ netif_tx_lock_bh(bond_dev);
bond_mc_list_destroy(bond);
+ netif_tx_unlock_bh(bond_dev);
/* Release the bonded slaves */
bond_release_all(bond_dev);
bond_deinit(bond_dev);
@@ -4549,14 +4548,19 @@ static void bond_free_all(void)
int bond_parse_parm(const char *buf, struct bond_parm_tbl *tbl)
{
int mode = -1, i, rv;
- char modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, };
+ char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, };
- rv = sscanf(buf, "%d", &mode);
- if (!rv) {
+ for (p = (char *)buf; *p; p++)
+ if (!(isdigit(*p) || isspace(*p)))
+ break;
+
+ if (*p)
rv = sscanf(buf, "%20s", modestr);
- if (!rv)
- return -1;
- }
+ else
+ rv = sscanf(buf, "%d", &mode);
+
+ if (!rv)
+ return -1;
for (i = 0; tbl[i].modename; i++) {
if (mode == tbl[i].mode)
@@ -4883,14 +4887,16 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
down_write(&bonding_rwsem);
/* Check to see if the bond already exists. */
- list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list)
- if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) {
- printk(KERN_ERR DRV_NAME
+ if (name) {
+ list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list)
+ if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) {
+ printk(KERN_ERR DRV_NAME
": cannot add bond %s; it already exists\n",
- name);
- res = -EPERM;
- goto out_rtnl;
- }
+ name);
+ res = -EPERM;
+ goto out_rtnl;
+ }
+ }
bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
ether_setup);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 6d83be49899a..67ccad69d445 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -22,8 +22,8 @@
#include "bond_3ad.h"
#include "bond_alb.h"
-#define DRV_VERSION "3.2.3"
-#define DRV_RELDATE "December 6, 2007"
+#define DRV_VERSION "3.2.4"
+#define DRV_RELDATE "January 28, 2008"
#define DRV_NAME "bonding"
#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 6ccebb830ff9..c85194f2cd2d 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -845,15 +845,6 @@ static void cpmac_adjust_link(struct net_device *dev)
spin_unlock(&priv->lock);
}
-static int cpmac_link_update(struct net_device *dev,
- struct fixed_phy_status *status)
-{
- status->link = 1;
- status->speed = 100;
- status->duplex = 1;
- return 0;
-}
-
static int cpmac_open(struct net_device *dev)
{
int i, size, res;
@@ -996,11 +987,11 @@ static int external_switch;
static int __devinit cpmac_probe(struct platform_device *pdev)
{
int rc, phy_id, i;
+ int mdio_bus_id = cpmac_mii.id;
struct resource *mem;
struct cpmac_priv *priv;
struct net_device *dev;
struct plat_cpmac_data *pdata;
- struct fixed_info *fixed_phy;
DECLARE_MAC_BUF(mac);
pdata = pdev->dev.platform_data;
@@ -1014,9 +1005,23 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
}
if (phy_id == PHY_MAX_ADDR) {
- if (external_switch || dumb_switch)
+ if (external_switch || dumb_switch) {
+ struct fixed_phy_status status = {};
+
+ mdio_bus_id = 0;
+
+ /*
+ * FIXME: this should be in the platform code!
+ * Since there is not platform code at all (that is,
+ * no mainline users of that driver), place it here
+ * for now.
+ */
phy_id = 0;
- else {
+ status.link = 1;
+ status.duplex = 1;
+ status.speed = 100;
+ fixed_phy_add(PHY_POLL, phy_id, &status);
+ } else {
printk(KERN_ERR "cpmac: no PHY present\n");
return -ENODEV;
}
@@ -1060,32 +1065,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug_level, 0xff);
memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
- if (phy_id == 31) {
- snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, cpmac_mii.id,
- phy_id);
- } else {
- /* Let's try to get a free fixed phy... */
- for (i = 0; i < MAX_PHY_AMNT; i++) {
- fixed_phy = fixed_mdio_get_phydev(i);
- if (!fixed_phy)
- continue;
- if (!fixed_phy->phydev->attached_dev) {
- strncpy(priv->phy_name,
- fixed_phy->phydev->dev.bus_id,
- BUS_ID_SIZE);
- fixed_mdio_set_link_update(fixed_phy->phydev,
- &cpmac_link_update);
- goto phy_found;
- }
- }
- if (netif_msg_drv(priv))
- printk(KERN_ERR "%s: Could not find fixed PHY\n",
- dev->name);
- rc = -ENODEV;
- goto fail;
- }
+ snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
-phy_found:
priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phy)) {
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index d48c396bdabb..901c824bfe6d 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1070,9 +1070,7 @@ void *cxgb_alloc_mem(unsigned long size)
*/
void cxgb_free_mem(void *addr)
{
- unsigned long p = (unsigned long)addr;
-
- if (p >= VMALLOC_START && p < VMALLOC_END)
+ if (is_vmalloc_addr(addr))
vfree(addr);
else
kfree(addr);
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c
index 84c1ffa8e2d3..4c4d6e877ea6 100644
--- a/drivers/net/cxgb3/mc5.c
+++ b/drivers/net/cxgb3/mc5.c
@@ -452,7 +452,7 @@ void t3_mc5_intr_handler(struct mc5 *mc5)
t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
}
-void __devinit t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
+void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
{
#define K * 1024
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index cb684d30831f..9ca8c66abd16 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2836,7 +2836,7 @@ void t3_sge_init(struct adapter *adap, struct sge_params *p)
* defaults for the assorted SGE parameters, which admins can change until
* they are used to initialize the SGE.
*/
-void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
+void t3_sge_prep(struct adapter *adap, struct sge_params *p)
{
int i;
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 7469935877bd..a99496a431c4 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -2675,7 +2675,7 @@ void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}
-static void __devinit init_mtus(unsigned short mtus[])
+static void init_mtus(unsigned short mtus[])
{
/*
* See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
@@ -2703,7 +2703,7 @@ static void __devinit init_mtus(unsigned short mtus[])
/*
* Initial congestion control parameters.
*/
-static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
+static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
a[9] = 2;
@@ -3354,8 +3354,7 @@ out_err:
* Determines a card's PCI mode and associated parameters, such as speed
* and width.
*/
-static void __devinit get_pci_mode(struct adapter *adapter,
- struct pci_params *p)
+static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
static unsigned short speed_map[] = { 33, 66, 100, 133 };
u32 pci_mode, pcie_cap;
@@ -3395,8 +3394,7 @@ static void __devinit get_pci_mode(struct adapter *adapter,
* capabilities and default speed/duplex/flow-control/autonegotiation
* settings.
*/
-static void __devinit init_link_config(struct link_config *lc,
- unsigned int caps)
+static void init_link_config(struct link_config *lc, unsigned int caps)
{
lc->supported = caps;
lc->requested_speed = lc->speed = SPEED_INVALID;
@@ -3419,7 +3417,7 @@ static void __devinit init_link_config(struct link_config *lc,
* Calculates the size of an MC7 memory in bytes from the value of its
* configuration register.
*/
-static unsigned int __devinit mc7_calc_size(u32 cfg)
+static unsigned int mc7_calc_size(u32 cfg)
{
unsigned int width = G_WIDTH(cfg);
unsigned int banks = !!(cfg & F_BKS) + 1;
@@ -3430,8 +3428,8 @@ static unsigned int __devinit mc7_calc_size(u32 cfg)
return MBs << 20;
}
-static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
- unsigned int base_addr, const char *name)
+static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
+ unsigned int base_addr, const char *name)
{
u32 cfg;
@@ -3517,7 +3515,7 @@ static int t3_reset_adapter(struct adapter *adapter)
return 0;
}
-static int __devinit init_parity(struct adapter *adap)
+static int init_parity(struct adapter *adap)
{
int i, err, addr;
@@ -3552,8 +3550,8 @@ static int __devinit init_parity(struct adapter *adap)
* for some adapter tunables, take PHYs out of reset, and initialize the MDIO
* interface.
*/
-int __devinit t3_prep_adapter(struct adapter *adapter,
- const struct adapter_info *ai, int reset)
+int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
+ int reset)
{
int ret;
unsigned int i, j = 0;
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 51cf577035be..36ba6dc96acc 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -94,7 +94,7 @@
* enabled. 82557 pads with 7Eh, while the later controllers pad
* with 00h.
*
- * IV. Recieve
+ * IV. Receive
*
* The Receive Frame Area (RFA) comprises a ring of Receive Frame
* Descriptors (RFD) + data buffer, thus forming the simplified mode
@@ -120,7 +120,7 @@
* and Rx indication and re-allocation happen in the same context,
* therefore no locking is required. A software-generated interrupt
* is generated from the watchdog to recover from a failed allocation
- * senario where all Rx resources have been indicated and none re-
+ * scenario where all Rx resources have been indicated and none re-
* placed.
*
* V. Miscellaneous
@@ -954,7 +954,7 @@ static void e100_get_defaults(struct nic *nic)
/* Quadwords to DMA into FIFO before starting frame transmit */
nic->tx_threshold = 0xE0;
- /* no interrupt for every tx completion, delay = 256us if not 557*/
+ /* no interrupt for every tx completion, delay = 256us if not 557 */
nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
@@ -1497,7 +1497,7 @@ static void e100_update_stats(struct nic *nic)
&s->complete;
/* Device's stats reporting may take several microseconds to
- * complete, so where always waiting for results of the
+ * complete, so we're always waiting for results of the
* previous command. */
if(*complete == cpu_to_le32(cuc_dump_reset_complete)) {
@@ -1958,7 +1958,7 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
if(restart_required) {
// ack the rnr?
- writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
+ iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
e100_start_receiver(nic, nic->rx_to_clean);
if(work_done)
(*work_done)++;
@@ -2774,7 +2774,7 @@ static void __devexit e100_remove(struct pci_dev *pdev)
struct nic *nic = netdev_priv(netdev);
unregister_netdev(netdev);
e100_free(nic);
- iounmap(nic->csr);
+ pci_iounmap(pdev, nic->csr);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -2858,17 +2858,17 @@ static void e100_shutdown(struct pci_dev *pdev)
/**
* e100_io_error_detected - called when PCI error is detected.
* @pdev: Pointer to PCI device
- * @state: The current pci conneection state
+ * @state: The current pci connection state
*/
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev);
- /* Similar to calling e100_down(), but avoids adpater I/O. */
+ /* Similar to calling e100_down(), but avoids adapter I/O. */
netdev->stop(netdev);
- /* Detach; put netif into state similar to hotplug unplug. */
+ /* Detach; put netif into a state similar to hotplug unplug. */
napi_enable(&nic->napi);
netif_device_detach(netdev);
pci_disable_device(pdev);
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 8c87940a9ce8..7c5b05a82f0e 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -853,7 +853,7 @@ e1000_reset(struct e1000_adapter *adapter)
/**
* Dump the eeprom for users having checksum issues
**/
-void e1000_dump_eeprom(struct e1000_adapter *adapter)
+static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ethtool_eeprom eeprom;
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index f2175ea46b83..6232c3e96689 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -63,6 +63,7 @@
#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
/* Extended Device Control */
#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 6d9c27fd0b53..f77a7427d3a0 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -690,8 +690,8 @@ err_setup:
return err;
}
-bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data,
- int reg, int offset, u32 mask, u32 write)
+static bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data,
+ int reg, int offset, u32 mask, u32 write)
{
int i;
u32 read;
@@ -1632,7 +1632,8 @@ static void e1000_get_wol(struct net_device *netdev,
return;
wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC;
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY | WAKE_ARP;
/* apply any specific unsupported masks here */
if (adapter->flags & FLAG_NO_WAKE_UCAST) {
@@ -1651,6 +1652,10 @@ static void e1000_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & E1000_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol & E1000_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
+ if (adapter->wol & E1000_WUFC_ARP)
+ wol->wolopts |= WAKE_ARP;
}
static int e1000_set_wol(struct net_device *netdev,
@@ -1658,7 +1663,7 @@ static int e1000_set_wol(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & WAKE_MAGICSECURE)
return -EOPNOTSUPP;
if (!(adapter->flags & FLAG_HAS_WOL))
@@ -1675,6 +1680,10 @@ static int e1000_set_wol(struct net_device *netdev,
adapter->wol |= E1000_WUFC_BC;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= E1000_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= E1000_WUFC_LNKC;
+ if (wol->wolopts & WAKE_ARP)
+ adapter->wol |= E1000_WUFC_ARP;
return 0;
}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 0a2cb7960c9e..f58f017ee47a 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -945,11 +945,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
int irq_flags = IRQF_SHARED;
int err;
- err = pci_enable_msi(adapter->pdev);
- if (err) {
- ndev_warn(netdev,
- "Unable to allocate MSI interrupt Error: %d\n", err);
- } else {
+ if (!pci_enable_msi(adapter->pdev)) {
adapter->flags |= FLAG_MSI_ENABLED;
handler = e1000_intr_msi;
irq_flags = 0;
@@ -958,10 +954,12 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
netdev);
if (err) {
+ ndev_err(netdev,
+ "Unable to allocate %s interrupt (return: %d)\n",
+ adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx",
+ err);
if (adapter->flags & FLAG_MSI_ENABLED)
pci_disable_msi(adapter->pdev);
- ndev_err(netdev,
- "Unable to allocate interrupt Error: %d\n", err);
}
return err;
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index d5459a8056b1..2eb82aba4a8b 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -9,7 +9,7 @@
* Many modifications, and currently maintained, by
* Philip Blundell <philb@gnu.org>
* Added the Compaq LTE Alan Cox <alan@redhat.com>
- * Added MCA support Adam Fritzler <mid@auk.cx>
+ * Added MCA support Adam Fritzler
*
* Note - this driver is experimental still - it has problems on faster
* machines. Someone needs to sit down and go through it line by line with
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 5f82a4647eee..88fb53eba715 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -458,4 +458,7 @@ void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
+extern u64 ehea_driver_flags;
+extern struct work_struct ehea_rereg_mr_task;
+
#endif /* __EHEA_H__ */
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 679f40ee9572..d76885223366 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -40,7 +40,7 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return ret;
if (netif_carrier_ok(dev)) {
- switch(port->port_speed) {
+ switch (port->port_speed) {
case EHEA_SPEED_10M: cmd->speed = SPEED_10; break;
case EHEA_SPEED_100M: cmd->speed = SPEED_100; break;
case EHEA_SPEED_1G: cmd->speed = SPEED_1000; break;
@@ -78,7 +78,7 @@ static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
goto doit;
}
- switch(cmd->speed) {
+ switch (cmd->speed) {
case SPEED_10:
if (cmd->duplex == DUPLEX_FULL)
sp = H_SPEED_10M_F;
diff --git a/drivers/net/ehea/ehea_hw.h b/drivers/net/ehea/ehea_hw.h
index 1af7ca499ec5..567981b4b2cc 100644
--- a/drivers/net/ehea/ehea_hw.h
+++ b/drivers/net/ehea/ehea_hw.h
@@ -29,10 +29,10 @@
#ifndef __EHEA_HW_H__
#define __EHEA_HW_H__
-#define QPX_SQA_VALUE EHEA_BMASK_IBM(48,63)
-#define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48,63)
-#define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48,63)
-#define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48,63)
+#define QPX_SQA_VALUE EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48, 63)
#define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x)
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 869e1604b16e..c051c7e09b9a 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -6,9 +6,9 @@
* (C) Copyright IBM Corp. 2006
*
* Authors:
- * Christoph Raisch <raisch@de.ibm.com>
- * Jan-Bernd Themann <themann@de.ibm.com>
- * Thomas Klein <tklein@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -54,11 +54,11 @@ static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
-static int use_mcs = 0;
-static int use_lro = 0;
+static int use_mcs;
+static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
-static int prop_carrier_state = 0;
+static int prop_carrier_state;
module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
@@ -94,9 +94,9 @@ MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
"Default = 0");
-static int port_name_cnt = 0;
+static int port_name_cnt;
static LIST_HEAD(adapter_list);
-u64 ehea_driver_flags = 0;
+u64 ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;
struct semaphore dlpar_mem_lock;
@@ -121,12 +121,13 @@ static struct of_platform_driver ehea_driver = {
.remove = ehea_remove,
};
-void ehea_dump(void *adr, int len, char *msg) {
+void ehea_dump(void *adr, int len, char *msg)
+{
int x;
unsigned char *deb = adr;
for (x = 0; x < len; x += 16) {
printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
- deb, x, *((u64*)&deb[0]), *((u64*)&deb[8]));
+ deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
deb += 16;
}
}
@@ -518,7 +519,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
last_wqe_index = wqe_index;
rmb();
if (!ehea_check_cqe(cqe, &rq)) {
- if (rq == 1) { /* LL RQ1 */
+ if (rq == 1) {
+ /* LL RQ1 */
skb = get_skb_by_index_ll(skb_arr_rq1,
skb_arr_rq1_len,
wqe_index);
@@ -531,10 +533,11 @@ static int ehea_proc_rwqes(struct net_device *dev,
if (!skb)
break;
}
- skb_copy_to_linear_data(skb, ((char*)cqe) + 64,
+ skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe->num_bytes_transfered - 4);
ehea_fill_skb(dev, skb, cqe);
- } else if (rq == 2) { /* RQ2 */
+ } else if (rq == 2) {
+ /* RQ2 */
skb = get_skb_by_index(skb_arr_rq2,
skb_arr_rq2_len, cqe);
if (unlikely(!skb)) {
@@ -544,7 +547,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
}
ehea_fill_skb(dev, skb, cqe);
processed_rq2++;
- } else { /* RQ3 */
+ } else {
+ /* RQ3 */
skb = get_skb_by_index(skb_arr_rq3,
skb_arr_rq3_len, cqe);
if (unlikely(!skb)) {
@@ -592,7 +596,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
unsigned long flags;
cqe = ehea_poll_cq(send_cq);
- while(cqe && (quota > 0)) {
+ while (cqe && (quota > 0)) {
ehea_inc_cq(send_cq);
cqe_counter++;
@@ -643,7 +647,8 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
static int ehea_poll(struct napi_struct *napi, int budget)
{
- struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, napi);
+ struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
+ napi);
struct net_device *dev = pr->port->netdev;
struct ehea_cqe *cqe;
struct ehea_cqe *cqe_skb = NULL;
@@ -743,8 +748,9 @@ int ehea_sense_port_attr(struct ehea_port *port)
u64 hret;
struct hcp_ehea_port_cb0 *cb0;
- cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC); /* May be called via */
- if (!cb0) { /* ehea_neq_tasklet() */
+ /* may be called via ehea_neq_tasklet() */
+ cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!cb0) {
ehea_error("no mem for cb0");
ret = -ENOMEM;
goto out;
@@ -762,7 +768,7 @@ int ehea_sense_port_attr(struct ehea_port *port)
/* MAC address */
port->mac_addr = cb0->port_mac_addr << 16;
- if (!is_valid_ether_addr((u8*)&port->mac_addr)) {
+ if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
ret = -EADDRNOTAVAIL;
goto out_free;
}
@@ -994,7 +1000,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
static void ehea_neq_tasklet(unsigned long data)
{
- struct ehea_adapter *adapter = (struct ehea_adapter*)data;
+ struct ehea_adapter *adapter = (struct ehea_adapter *)data;
struct ehea_eqe *eqe;
u64 event_mask;
@@ -1204,7 +1210,7 @@ int ehea_rem_smrs(struct ehea_port_res *pr)
static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
- int arr_size = sizeof(void*) * max_q_entries;
+ int arr_size = sizeof(void *) * max_q_entries;
q_skba->arr = vmalloc(arr_size);
if (!q_skba->arr)
@@ -1489,7 +1495,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
nfrags = skb_shinfo(skb)->nr_frags;
sg1entry = &swqe->u.immdata_desc.sg_entry;
- sg_list = (struct ehea_vsgentry*)&swqe->u.immdata_desc.sg_list;
+ sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
swqe->descriptors = 0;
sg1entry_contains_frag_data = 0;
@@ -1542,7 +1548,7 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
reg_type, port->mac_addr, 0, hcallid);
if (hret != H_SUCCESS) {
ehea_error("%sregistering bc address failed (tagged)",
- hcallid == H_REG_BCMC ? "" : "de");
+ hcallid == H_REG_BCMC ? "" : "de");
ret = -EIO;
goto out_herr;
}
@@ -1732,7 +1738,7 @@ static void ehea_allmulti(struct net_device *dev, int enable)
}
}
-static void ehea_add_multicast_entry(struct ehea_port* port, u8* mc_mac_addr)
+static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
struct ehea_mc_list *ehea_mcl_entry;
u64 hret;
@@ -1791,11 +1797,10 @@ static void ehea_set_multicast_list(struct net_device *dev)
goto out;
}
- for (i = 0, k_mcl_entry = dev->mc_list;
- i < dev->mc_count;
- i++, k_mcl_entry = k_mcl_entry->next) {
+ for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++,
+ k_mcl_entry = k_mcl_entry->next)
ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
- }
+
}
out:
return;
@@ -1925,12 +1930,12 @@ static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
if ((skb->protocol == htons(ETH_P_IP)) &&
(ip_hdr(skb)->protocol == IPPROTO_TCP)) {
- tcp = (struct tcphdr*)(skb_network_header(skb) + (ip_hdr(skb)->ihl * 4));
+ tcp = (struct tcphdr *)(skb_network_header(skb) +
+ (ip_hdr(skb)->ihl * 4));
tmp = (tcp->source + (tcp->dest << 16)) % 31;
tmp += ip_hdr(skb)->daddr % 31;
return tmp % num_qps;
- }
- else
+ } else
return 0;
}
@@ -2122,7 +2127,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
u64 hret;
u16 dummy16 = 0;
u64 dummy64 = 0;
- struct hcp_modify_qp_cb0* cb0;
+ struct hcp_modify_qp_cb0 *cb0;
cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb0) {
@@ -2248,7 +2253,7 @@ static int ehea_clean_all_portres(struct ehea_port *port)
int ret = 0;
int i;
- for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
ret |= ehea_clean_portres(port, &port->port_res[i]);
ret |= ehea_destroy_eq(port->qp_eq);
@@ -2300,7 +2305,7 @@ static int ehea_up(struct net_device *dev)
goto out_clean_pr;
}
- for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
if (ret) {
ehea_error("activate_qp failed");
@@ -2308,7 +2313,7 @@ static int ehea_up(struct net_device *dev)
}
}
- for(i = 0; i < port->num_def_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
ret = ehea_fill_port_res(&port->port_res[i]);
if (ret) {
ehea_error("out_free_irqs");
@@ -2425,7 +2430,7 @@ int ehea_stop_qps(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
- struct hcp_modify_qp_cb0* cb0;
+ struct hcp_modify_qp_cb0 *cb0;
int ret = -EIO;
int dret;
int i;
@@ -2490,7 +2495,7 @@ out:
return ret;
}
-void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res * pr)
+void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
struct ehea_qp qp = *orig_qp;
struct ehea_qp_init_attr *init_attr = &qp.init_attr;
@@ -2530,7 +2535,7 @@ int ehea_restart_qps(struct net_device *dev)
int ret = 0;
int i;
- struct hcp_modify_qp_cb0* cb0;
+ struct hcp_modify_qp_cb0 *cb0;
u64 hret;
u64 dummy64 = 0;
u16 dummy16 = 0;
@@ -2804,34 +2809,6 @@ static void __devinit logical_port_release(struct device *dev)
of_node_put(port->ofdev.node);
}
-static int ehea_driver_sysfs_add(struct device *dev,
- struct device_driver *driver)
-{
- int ret;
-
- ret = sysfs_create_link(&driver->kobj, &dev->kobj,
- kobject_name(&dev->kobj));
- if (ret == 0) {
- ret = sysfs_create_link(&dev->kobj, &driver->kobj,
- "driver");
- if (ret)
- sysfs_remove_link(&driver->kobj,
- kobject_name(&dev->kobj));
- }
- return ret;
-}
-
-static void ehea_driver_sysfs_remove(struct device *dev,
- struct device_driver *driver)
-{
- struct device_driver *drv = driver;
-
- if (drv) {
- sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj));
- sysfs_remove_link(&dev->kobj, "driver");
- }
-}
-
static struct device *ehea_register_port(struct ehea_port *port,
struct device_node *dn)
{
@@ -2856,16 +2833,8 @@ static struct device *ehea_register_port(struct ehea_port *port,
goto out_unreg_of_dev;
}
- ret = ehea_driver_sysfs_add(&port->ofdev.dev, &ehea_driver.driver);
- if (ret) {
- ehea_error("failed to register sysfs driver link");
- goto out_rem_dev_file;
- }
-
return &port->ofdev.dev;
-out_rem_dev_file:
- device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
out_unreg_of_dev:
of_device_unregister(&port->ofdev);
out:
@@ -2874,7 +2843,6 @@ out:
static void ehea_unregister_port(struct ehea_port *port)
{
- ehea_driver_sysfs_remove(&port->ofdev.dev, &ehea_driver.driver);
device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
of_device_unregister(&port->ofdev);
}
@@ -3109,7 +3077,7 @@ static ssize_t ehea_probe_port(struct device *dev,
of_node_put(eth_dn);
if (port) {
- for (i=0; i < EHEA_MAX_PORTS; i++)
+ for (i = 0; i < EHEA_MAX_PORTS; i++)
if (!adapter->port[i]) {
adapter->port[i] = port;
break;
@@ -3144,7 +3112,7 @@ static ssize_t ehea_remove_port(struct device *dev,
ehea_shutdown_single_port(port);
- for (i=0; i < EHEA_MAX_PORTS; i++)
+ for (i = 0; i < EHEA_MAX_PORTS; i++)
if (adapter->port[i] == port) {
adapter->port[i] = NULL;
break;
@@ -3313,7 +3281,7 @@ static int ehea_reboot_notifier(struct notifier_block *nb,
}
static struct notifier_block ehea_reboot_nb = {
- .notifier_call = ehea_reboot_notifier,
+ .notifier_call = ehea_reboot_notifier,
};
static int check_module_parm(void)
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 95c4a7f9cc88..156eb6320b4e 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -6,9 +6,9 @@
* (C) Copyright IBM Corp. 2006
*
* Authors:
- * Christoph Raisch <raisch@de.ibm.com>
- * Jan-Bernd Themann <themann@de.ibm.com>
- * Thomas Klein <tklein@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -38,11 +38,11 @@ static inline u16 get_order_of_qentries(u16 queue_entries)
}
/* Defines for H_CALL H_ALLOC_RESOURCE */
-#define H_ALL_RES_TYPE_QP 1
-#define H_ALL_RES_TYPE_CQ 2
-#define H_ALL_RES_TYPE_EQ 3
-#define H_ALL_RES_TYPE_MR 5
-#define H_ALL_RES_TYPE_MW 6
+#define H_ALL_RES_TYPE_QP 1
+#define H_ALL_RES_TYPE_CQ 2
+#define H_ALL_RES_TYPE_EQ 3
+#define H_ALL_RES_TYPE_MR 5
+#define H_ALL_RES_TYPE_MW 6
static long ehea_plpar_hcall_norets(unsigned long opcode,
unsigned long arg1,
@@ -137,77 +137,77 @@ u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
const u64 qp_handle, const u64 sel_mask, void *cb_addr)
{
return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
- adapter_handle, /* R4 */
- qp_category, /* R5 */
- qp_handle, /* R6 */
- sel_mask, /* R7 */
+ adapter_handle, /* R4 */
+ qp_category, /* R5 */
+ qp_handle, /* R6 */
+ sel_mask, /* R7 */
virt_to_abs(cb_addr), /* R8 */
0, 0);
}
/* input param R5 */
-#define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11)
-#define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12)
-#define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15)
-#define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16)
-#define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17)
-#define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19)
-#define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21)
-#define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23)
-#define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55)
-#define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63)
+#define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11)
+#define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12)
+#define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15)
+#define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16)
+#define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17)
+#define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19)
+#define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21)
+#define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23)
+#define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55)
+#define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63)
/* input param R9 */
-#define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32,63)
+#define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32, 63)
/* input param R10 */
-#define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7)
-#define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15)
-#define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23)
-#define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31)
+#define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7)
+#define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15)
+#define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23)
+#define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31)
/* Max Send Scatter Gather Elements */
-#define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39)
-#define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47)
+#define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39)
+#define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47)
/* Max Receive SG Elements RQ1 */
-#define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55)
-#define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63)
+#define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55)
+#define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63)
/* input param R11 */
-#define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7)
+#define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7)
/* max swqe immediate data length */
-#define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63)
+#define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63)
/* input param R12 */
-#define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15)
+#define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15)
/* Threshold RQ2 */
-#define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31)
+#define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31)
/* Threshold RQ3 */
/* output param R6 */
-#define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15)
-#define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31)
-#define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47)
-#define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63)
+#define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15)
+#define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31)
+#define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47)
+#define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63)
/* output param, R7 */
-#define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7)
-#define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15)
-#define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23)
-#define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31)
+#define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7)
+#define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15)
+#define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23)
+#define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31)
#define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39)
/* output param R8,R9 */
-#define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63)
-#define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63)
/* output param R11,R12 */
-#define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63)
-#define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63)
u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
struct ehea_qp_init_attr *init_attr, const u32 pd,
@@ -334,28 +334,28 @@ u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
}
/* Defines for H_CALL H_ALLOC_RESOURCE */
-#define H_ALL_RES_TYPE_QP 1
-#define H_ALL_RES_TYPE_CQ 2
-#define H_ALL_RES_TYPE_EQ 3
-#define H_ALL_RES_TYPE_MR 5
-#define H_ALL_RES_TYPE_MW 6
+#define H_ALL_RES_TYPE_QP 1
+#define H_ALL_RES_TYPE_CQ 2
+#define H_ALL_RES_TYPE_EQ 3
+#define H_ALL_RES_TYPE_MR 5
+#define H_ALL_RES_TYPE_MW 6
/* input param R5 */
-#define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0)
+#define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0)
#define H_ALL_RES_EQ_NON_NEQ_ISN EHEA_BMASK_IBM(6, 7)
#define H_ALL_RES_EQ_INH_EQE_GEN EHEA_BMASK_IBM(16, 16)
-#define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63)
+#define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63)
/* input param R6 */
-#define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63)
/* output param R6 */
-#define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63)
/* output param R7 */
-#define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63)
/* output param R8 */
-#define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63)
/* output param R9 */
#define H_ALL_RES_EQ_ACT_EQ_IST_C EHEA_BMASK_IBM(30, 31)
@@ -453,12 +453,12 @@ u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
hret = ehea_plpar_hcall9(H_REGISTER_SMR,
outs,
- adapter_handle , /* R4 */
- orig_mr_handle, /* R5 */
- vaddr_in, /* R6 */
- (((u64)access_ctrl) << 32ULL), /* R7 */
- pd, /* R8 */
- 0, 0, 0, 0); /* R9-R12 */
+ adapter_handle , /* R4 */
+ orig_mr_handle, /* R5 */
+ vaddr_in, /* R6 */
+ (((u64)access_ctrl) << 32ULL), /* R7 */
+ pd, /* R8 */
+ 0, 0, 0, 0); /* R9-R12 */
mr->handle = outs[0];
mr->lkey = (u32)outs[2];
@@ -471,11 +471,11 @@ u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
u64 outs[PLPAR_HCALL9_BUFSIZE];
return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
- outs,
+ outs,
adapter_handle, /* R4 */
H_DISABLE_GET_EHEA_WQE_P, /* R5 */
qp_handle, /* R6 */
- 0, 0, 0, 0, 0, 0); /* R7-R12 */
+ 0, 0, 0, 0, 0, 0); /* R7-R12 */
}
u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
@@ -483,9 +483,9 @@ u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
{
return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle, /* R4 */
- res_handle, /* R5 */
+ res_handle, /* R5 */
force_bit,
- 0, 0, 0, 0); /* R7-R10 */
+ 0, 0, 0, 0); /* R7-R10 */
}
u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
@@ -493,13 +493,13 @@ u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
const u32 pd, u64 *mr_handle, u32 *lkey)
{
u64 hret;
- u64 outs[PLPAR_HCALL9_BUFSIZE];
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
outs,
adapter_handle, /* R4 */
5, /* R5 */
- vaddr, /* R6 */
+ vaddr, /* R6 */
length, /* R7 */
(((u64) access_ctrl) << 32ULL), /* R8 */
pd, /* R9 */
@@ -619,8 +619,8 @@ u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle,
void *rblock)
{
return ehea_plpar_hcall_norets(H_ERROR_DATA,
- adapter_handle, /* R4 */
- ressource_handle, /* R5 */
- virt_to_abs(rblock), /* R6 */
- 0, 0, 0, 0); /* R7-R12 */
+ adapter_handle, /* R4 */
+ ressource_handle, /* R5 */
+ virt_to_abs(rblock), /* R6 */
+ 0, 0, 0, 0); /* R7-R12 */
}
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h
index faa191d23b86..f3628c803567 100644
--- a/drivers/net/ehea/ehea_phyp.h
+++ b/drivers/net/ehea/ehea_phyp.h
@@ -93,7 +93,7 @@ static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel,
static inline void hcp_epas_dtor(struct h_epas *epas)
{
if (epas->kernel.addr)
- iounmap((void __iomem*)((u64)epas->kernel.addr & PAGE_MASK));
+ iounmap((void __iomem *)((u64)epas->kernel.addr & PAGE_MASK));
epas->user.addr = 0;
epas->kernel.addr = 0;
@@ -388,23 +388,23 @@ u64 ehea_h_modify_ehea_qp(const u64 adapter_handle,
const u64 qp_handle,
const u64 sel_mask,
void *cb_addr,
- u64 * inv_attr_id,
- u64 * proc_mask, u16 * out_swr, u16 * out_rwr);
+ u64 *inv_attr_id,
+ u64 *proc_mask, u16 *out_swr, u16 *out_rwr);
u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
- struct ehea_eq_attr *eq_attr, u64 * eq_handle);
+ struct ehea_eq_attr *eq_attr, u64 *eq_handle);
u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
struct ehea_cq_attr *cq_attr,
- u64 * cq_handle, struct h_epas *epas);
+ u64 *cq_handle, struct h_epas *epas);
u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
struct ehea_qp_init_attr *init_attr,
const u32 pd,
- u64 * qp_handle, struct h_epas *h_epas);
+ u64 *qp_handle, struct h_epas *h_epas);
-#define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48,55)
-#define H_REG_RPAGE_QT EHEA_BMASK_IBM(62,63)
+#define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48, 55)
+#define H_REG_RPAGE_QT EHEA_BMASK_IBM(62, 63)
u64 ehea_h_register_rpage(const u64 adapter_handle,
const u8 pagesize,
@@ -426,7 +426,7 @@ u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
const u64 length, const u32 access_ctrl,
- const u32 pd, u64 * mr_handle, u32 * lkey);
+ const u32 pd, u64 *mr_handle, u32 *lkey);
u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
const u8 pagesize, const u8 queue_type,
@@ -439,8 +439,8 @@ u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr);
/* output param R5 */
-#define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40,47)
-#define H_MEHEAPORT_PN EHEA_BMASK_IBM(48,63)
+#define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40, 47)
+#define H_MEHEAPORT_PN EHEA_BMASK_IBM(48, 63)
u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
const u8 cb_cat, const u64 select_mask,
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 83b76432b41a..d522e905f460 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -33,8 +33,6 @@
struct ehea_busmap ehea_bmap = { 0, 0, NULL };
-extern u64 ehea_driver_flags;
-extern struct work_struct ehea_rereg_mr_task;
static void *hw_qpageit_get_inc(struct hw_queue *queue)
@@ -65,7 +63,7 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
}
queue->queue_length = nr_of_pages * pagesize;
- queue->queue_pages = kmalloc(nr_of_pages * sizeof(void*), GFP_KERNEL);
+ queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
if (!queue->queue_pages) {
ehea_error("no mem for queue_pages");
return -ENOMEM;
@@ -78,11 +76,11 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
*/
i = 0;
while (i < nr_of_pages) {
- u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
+ u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
if (!kpage)
goto out_nomem;
for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
- (queue->queue_pages)[i] = (struct ehea_page*)kpage;
+ (queue->queue_pages)[i] = (struct ehea_page *)kpage;
kpage += pagesize;
i++;
}
@@ -235,8 +233,8 @@ int ehea_destroy_cq(struct ehea_cq *cq)
return 0;
hcp_epas_dtor(&cq->epas);
-
- if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) {
+ hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
+ if (hret == H_R_STATE) {
ehea_error_data(cq->adapter, cq->fw_handle);
hret = ehea_destroy_cq_res(cq, FORCE_FREE);
}
@@ -301,13 +299,13 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
if (i == (eq->attr.nr_pages - 1)) {
/* last page */
vpage = hw_qpageit_get_inc(&eq->hw_queue);
- if ((hret != H_SUCCESS) || (vpage)) {
+ if ((hret != H_SUCCESS) || (vpage))
goto out_kill_hwq;
- }
+
} else {
- if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+ if ((hret != H_PAGE_REGISTERED) || (!vpage))
goto out_kill_hwq;
- }
+
}
}
@@ -331,7 +329,7 @@ struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
unsigned long flags;
spin_lock_irqsave(&eq->spinlock, flags);
- eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+ eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
spin_unlock_irqrestore(&eq->spinlock, flags);
return eqe;
@@ -364,7 +362,8 @@ int ehea_destroy_eq(struct ehea_eq *eq)
hcp_epas_dtor(&eq->epas);
- if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) {
+ hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
+ if (hret == H_R_STATE) {
ehea_error_data(eq->adapter, eq->fw_handle);
hret = ehea_destroy_eq_res(eq, FORCE_FREE);
}
@@ -546,7 +545,8 @@ int ehea_destroy_qp(struct ehea_qp *qp)
hcp_epas_dtor(&qp->epas);
- if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
+ hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
+ if (hret == H_R_STATE) {
ehea_error_data(qp->adapter, qp->fw_handle);
hret = ehea_destroy_qp_res(qp, FORCE_FREE);
}
@@ -559,7 +559,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
return 0;
}
-int ehea_create_busmap( void )
+int ehea_create_busmap(void)
{
u64 vaddr = EHEA_BUSMAP_START;
unsigned long high_section_index = 0;
@@ -595,7 +595,7 @@ int ehea_create_busmap( void )
return 0;
}
-void ehea_destroy_busmap( void )
+void ehea_destroy_busmap(void)
{
vfree(ehea_bmap.vaddr);
}
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index bc62d389c166..0bb6f92fa2f8 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -41,8 +41,8 @@
#define EHEA_SECTSIZE (1UL << 24)
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
-#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
-#error eHEA module can't work if kernel sectionsize < ehea sectionsize
+#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
+#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif
/* Some abbreviations used here:
@@ -188,8 +188,8 @@ struct ehea_eqe {
u64 entry;
};
-#define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52,63)
-#define ERROR_DATA_TYPE EHEA_BMASK_IBM(0,7)
+#define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52, 63)
+#define ERROR_DATA_TYPE EHEA_BMASK_IBM(0, 7)
static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
@@ -279,7 +279,7 @@ static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
void *retvalue = hw_qeit_get(queue);
- u32 qe = *(u8*)retvalue;
+ u32 qe = *(u8 *)retvalue;
if ((qe >> 7) == (queue->toggle_state & 1))
hw_qeit_eq_get_inc(queue);
else
@@ -364,7 +364,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
int ehea_destroy_cq(struct ehea_cq *cq);
-struct ehea_qp *ehea_create_qp(struct ehea_adapter * adapter, u32 pd,
+struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
struct ehea_qp_init_attr *init_attr);
int ehea_destroy_qp(struct ehea_qp *qp);
@@ -378,8 +378,8 @@ int ehea_rem_mr(struct ehea_mr *mr);
void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
-int ehea_create_busmap( void );
-void ehea_destroy_busmap( void );
+int ehea_create_busmap(void);
+void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);
#endif /* __EHEA_QMR_H__ */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 7667a62ac31f..d4843d014bc9 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -13,7 +13,7 @@
* Copyright (C) 2004 Andrew de Quincey (wol support)
* Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
* IRQ rate fixes, bigendian fixes, cleanups, verification)
- * Copyright (c) 2004,5,6 NVIDIA Corporation
+ * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -226,7 +226,7 @@ enum {
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c
- NvRegMacReset = 0x3c,
+ NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT 0x0F3
NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
@@ -277,7 +277,9 @@ enum {
#define NVREG_MCASTADDRA_FORCE 0x01
NvRegMulticastAddrB = 0xB4,
NvRegMulticastMaskA = 0xB8,
+#define NVREG_MCASTMASKA_NONE 0xffffffff
NvRegMulticastMaskB = 0xBC,
+#define NVREG_MCASTMASKB_NONE 0xffff
NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000
@@ -316,13 +318,13 @@ enum {
NvRegTxRingPhysAddrHigh = 0x148,
NvRegRxRingPhysAddrHigh = 0x14C,
NvRegTxPauseFrame = 0x170,
-#define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080
-#define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030
+#define NVREG_TX_PAUSEFRAME_DISABLE 0x01ff0080
+#define NVREG_TX_PAUSEFRAME_ENABLE 0x01800010
NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
-#define NVREG_MIISTAT_MASK 0x000f
-#define NVREG_MIISTAT_MASK2 0x000f
+#define NVREG_MIISTAT_MASK_RW 0x0007
+#define NVREG_MIISTAT_MASK_ALL 0x000f
NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE 0x0008
@@ -471,9 +473,9 @@ union ring_type {
#define NV_RX_AVAIL (1<<31)
#define NV_RX2_CHECKSUMMASK (0x1C000000)
-#define NV_RX2_CHECKSUMOK1 (0x10000000)
-#define NV_RX2_CHECKSUMOK2 (0x14000000)
-#define NV_RX2_CHECKSUMOK3 (0x18000000)
+#define NV_RX2_CHECKSUM_IP (0x10000000)
+#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
+#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
@@ -622,6 +624,9 @@ union ring_type {
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2
+#define NV_RESTART_TX 0x1
+#define NV_RESTART_RX 0x2
+
/* statistics */
struct nv_ethtool_str {
char name[ETH_GSTRING_LEN];
@@ -1059,7 +1064,7 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
u32 reg;
int retval;
- writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
reg = readl(base + NvRegMIIControl);
if (reg & NVREG_MIICTL_INUSE) {
@@ -1430,16 +1435,30 @@ static void nv_mac_reset(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
+ u32 temp1, temp2, temp3;
dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
+
writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
+
+ /* save registers since they will be cleared on reset */
+ temp1 = readl(base + NvRegMacAddrA);
+ temp2 = readl(base + NvRegMacAddrB);
+ temp3 = readl(base + NvRegTransmitPoll);
+
writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
pci_push(base);
udelay(NV_MAC_RESET_DELAY);
writel(0, base + NvRegMacReset);
pci_push(base);
udelay(NV_MAC_RESET_DELAY);
+
+ /* restore saved registers */
+ writel(temp1, base + NvRegMacAddrA);
+ writel(temp2, base + NvRegMacAddrB);
+ writel(temp3, base + NvRegTransmitPoll);
+
writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
}
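The nv_mac_reset() hunk above exists because asserting NvRegMacReset clears the station address (NvRegMacAddrA/B) and NvRegTransmitPoll, so the patch latches those registers before the reset and writes them back afterwards. A minimal sketch of that save/reset/restore pattern, reusing this driver's register names and the kernel's readl()/writel()/udelay() helpers; mac_reset_preserving_setup() is an illustrative name, not a function in the driver:

	static void mac_reset_preserving_setup(u8 __iomem *base)
	{
		u32 mac_a, mac_b, tx_poll;

		/* latch registers that the MAC reset will clear */
		mac_a   = readl(base + NvRegMacAddrA);
		mac_b   = readl(base + NvRegMacAddrB);
		tx_poll = readl(base + NvRegTransmitPoll);

		/* pulse the reset, waiting for the hardware each time */
		writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
		udelay(NV_MAC_RESET_DELAY);
		writel(0, base + NvRegMacReset);
		udelay(NV_MAC_RESET_DELAY);

		/* restore the values the reset wiped out */
		writel(mac_a, base + NvRegMacAddrA);
		writel(mac_b, base + NvRegMacAddrB);
		writel(tx_poll, base + NvRegTransmitPoll);
	}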
@@ -2375,14 +2394,9 @@ static int nv_rx_process(struct net_device *dev, int limit)
goto next_pkt;
}
}
- if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ {
+ if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
+ ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
- (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
- }
} else {
dev_kfree_skb(skb);
goto next_pkt;
@@ -2474,14 +2488,9 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
}
}
- if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ {
+ if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
+ ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
- (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
- }
/* got a valid packet - forward it to the network core */
skb_put(skb, len);
@@ -2703,6 +2712,9 @@ static void nv_set_multicast(struct net_device *dev)
addr[1] = alwaysOn[1];
mask[0] = alwaysOn[0] | alwaysOff[0];
mask[1] = alwaysOn[1] | alwaysOff[1];
+ } else {
+ mask[0] = NVREG_MCASTMASKA_NONE;
+ mask[1] = NVREG_MCASTMASKB_NONE;
}
}
addr[0] |= NVREG_MCASTADDRA_FORCE;
@@ -2772,6 +2784,7 @@ static int nv_update_linkspeed(struct net_device *dev)
int mii_status;
int retval = 0;
u32 control_1000, status_1000, phyreg, pause_flags, txreg;
+ u32 txrxFlags = 0;
/* BMSR_LSTATUS is latched, read it twice:
* we want the current value.
@@ -2867,6 +2880,16 @@ set_speed:
np->duplex = newdup;
np->linkspeed = newls;
+ /* The transmitter and receiver must be restarted for safe update */
+ if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
+ txrxFlags |= NV_RESTART_TX;
+ nv_stop_tx(dev);
+ }
+ if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
+ txrxFlags |= NV_RESTART_RX;
+ nv_stop_rx(dev);
+ }
+
if (np->gigabit == PHY_GIGABIT) {
phyreg = readl(base + NvRegRandomSeed);
phyreg &= ~(0x3FF00);
@@ -2955,6 +2978,11 @@ set_speed:
}
nv_update_pause(dev, pause_flags);
+ if (txrxFlags & NV_RESTART_TX)
+ nv_start_tx(dev);
+ if (txrxFlags & NV_RESTART_RX)
+ nv_start_rx(dev);
+
return retval;
}
@@ -2981,7 +3009,7 @@ static void nv_link_irq(struct net_device *dev)
u32 miistat;
miistat = readl(base + NvRegMIIStatus);
- writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
if (miistat & (NVREG_MIISTAT_LINKCHANGE))
@@ -4813,8 +4841,8 @@ static int nv_open(struct net_device *dev)
nv_mac_reset(dev);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
- writel(0, base + NvRegMulticastMaskA);
- writel(0, base + NvRegMulticastMaskB);
+ writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
+ writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
writel(0, base + NvRegPacketFilterFlags);
writel(0, base + NvRegTransmitterControl);
@@ -4856,7 +4884,7 @@ static int nv_open(struct net_device *dev)
writel(0, base + NvRegMIIMask);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
- writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
@@ -4894,7 +4922,7 @@ static int nv_open(struct net_device *dev)
nv_disable_hw_interrupts(dev, np->irqmask);
pci_push(base);
- writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
pci_push(base);
@@ -4908,8 +4936,8 @@ static int nv_open(struct net_device *dev)
spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
- writel(0, base + NvRegMulticastMaskA);
- writel(0, base + NvRegMulticastMaskB);
+ writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
+ writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
/* One manual link speed update: Interrupts are enabled, future link
* speed changes cause interrupts and are handled by nv_link_irq().
@@ -4917,7 +4945,7 @@ static int nv_open(struct net_device *dev)
{
u32 miistat;
miistat = readl(base + NvRegMIIStatus);
- writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
}
/* set linkspeed to invalid value, thus force nv_update_linkspeed
@@ -5285,7 +5313,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
phystate &= ~NVREG_ADAPTCTL_RUNNING;
writel(phystate, base + NvRegAdapterControl);
}
- writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
if (id->driver_data & DEV_HAS_MGMT_UNIT) {
/* management unit running on the mac? */
@@ -5603,35 +5631,35 @@ static struct pci_device_id pci_tbl[] = {
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{0,},
};
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index 100bf410bf5f..6a647d95e6ea 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -127,7 +127,7 @@ int gfar_mdio_reset(struct mii_bus *bus)
struct gfar_mii __iomem *regs = (void __iomem *)bus->priv;
unsigned int timeout = PHY_INIT_TIMEOUT;
- spin_lock_bh(&bus->mdio_lock);
+ mutex_lock(&bus->mdio_lock);
/* Reset the management interface */
gfar_write(&regs->miimcfg, MIIMCFG_RESET);
@@ -140,7 +140,7 @@ int gfar_mdio_reset(struct mii_bus *bus)
timeout--)
cpu_relax();
- spin_unlock_bh(&bus->mdio_lock);
+ mutex_unlock(&bus->mdio_lock);
if(timeout <= 0) {
printk(KERN_ERR "%s: The MII Bus is stuck!\n",
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 11b83dae00ac..e04bf9926441 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -262,8 +262,8 @@ static void tm_isr(struct scc_priv *priv);
static int io[MAX_NUM_DEVS] __initdata = { 0, };
-/* Beware! hw[] is also used in cleanup_module(). */
-static struct scc_hardware hw[NUM_TYPES] __initdata_or_module = HARDWARE;
+/* Beware! hw[] is also used in dmascc_exit(). */
+static struct scc_hardware hw[NUM_TYPES] = HARDWARE;
/* Global variables */
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 46e2c52c7862..95e3464068db 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -901,12 +901,12 @@ static short ibmlana_adapter_ids[] __initdata = {
0x0000
};
-static char *ibmlana_adapter_names[] __initdata = {
+static char *ibmlana_adapter_names[] __devinitdata = {
"IBM LAN Adapter/A",
NULL
};
-static int ibmlana_init_one(struct device *kdev)
+static int __devinit ibmlana_init_one(struct device *kdev)
{
struct mca_device *mdev = to_mca_device(kdev);
struct net_device *dev;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index f3c144d5d72f..d4eb8e2d8720 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -438,7 +438,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
if (adapter->msix_entries) {
err = igb_request_msix(adapter);
if (!err) {
- struct e1000_hw *hw = &adapter->hw;
/* enable IAM, auto-mask,
* DO NOT USE EIAME or IAME in legacy mode */
wr32(E1000_IAM, IMS_ENABLE_MASK);
diff --git a/drivers/net/irda/ali-ircc.h b/drivers/net/irda/ali-ircc.h
index e489c6661ee8..07876578887f 100644
--- a/drivers/net/irda/ali-ircc.h
+++ b/drivers/net/irda/ali-ircc.h
@@ -173,13 +173,13 @@ struct st_fifo {
struct frame_cb {
void *start; /* Start of frame in DMA mem */
- int len; /* Lenght of frame in DMA mem */
+ int len; /* Length of frame in DMA mem */
};
struct tx_fifo {
struct frame_cb queue[MAX_TX_WINDOW]; /* Info about frames in queue */
int ptr; /* Currently being sent */
- int len; /* Lenght of queue */
+ int len; /* Length of queue */
int free; /* Next free slot */
void *tail; /* Next free start in DMA mem */
};
diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h
index bbdc97ff83ca..29398a4f73fd 100644
--- a/drivers/net/irda/nsc-ircc.h
+++ b/drivers/net/irda/nsc-ircc.h
@@ -231,13 +231,13 @@ struct st_fifo {
struct frame_cb {
void *start; /* Start of frame in DMA mem */
- int len; /* Lenght of frame in DMA mem */
+ int len; /* Length of frame in DMA mem */
};
struct tx_fifo {
struct frame_cb queue[MAX_TX_WINDOW]; /* Info about frames in queue */
int ptr; /* Currently being sent */
- int len; /* Lenght of queue */
+ int len; /* Length of queue */
int free; /* Next free slot */
void *tail; /* Next free start in DMA mem */
};
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index 204b1b34ffc7..9d012f0dbd30 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -54,13 +54,13 @@ struct st_fifo {
struct frame_cb {
void *start; /* Start of frame in DMA mem */
- int len; /* Lenght of frame in DMA mem */
+ int len; /* Length of frame in DMA mem */
};
struct tx_fifo {
struct frame_cb queue[MAX_TX_WINDOW + 2]; /* Info about frames in queue */
int ptr; /* Currently being sent */
- int len; /* Lenght of queue */
+ int len; /* Length of queue */
int free; /* Next free slot */
void *tail; /* Next free start in DMA mem */
};
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 419861cbc65e..58d3bb622da6 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1020,7 +1020,7 @@ static const struct ethtool_ops ops = {
.get_link = veth_get_link,
};
-static struct net_device * __init veth_probe_one(int vlan,
+static struct net_device *veth_probe_one(int vlan,
struct vio_dev *vio_dev)
{
struct net_device *dev;
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index a021a6e72641..d0bf206632ca 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -136,8 +136,6 @@ struct ixgbe_ring {
u16 head;
u16 tail;
- /* To protect race between sender and clean_tx_irq */
- spinlock_t tx_lock;
struct ixgbe_queue_stats stats;
@@ -174,7 +172,6 @@ struct ixgbe_adapter {
struct vlan_group *vlgrp;
u16 bd_number;
u16 rx_buf_len;
- atomic_t irq_sem;
struct work_struct reset_task;
/* TX */
@@ -244,6 +241,7 @@ extern const char ixgbe_driver_version[];
extern int ixgbe_up(struct ixgbe_adapter *adapter);
extern void ixgbe_down(struct ixgbe_adapter *adapter);
+extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
extern void ixgbe_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 36353447716d..a119cbd8dbb8 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -103,21 +103,41 @@ static int ixgbe_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = 0;
+ bool link_up;
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_EXTERNAL;
+ if (hw->phy.media_type == ixgbe_media_type_copper) {
+ ecmd->supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_TP | SUPPORTED_Autoneg);
+
+ ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+
+ ecmd->port = PORT_TP;
+ } else {
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising = (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ }
- if (netif_carrier_ok(adapter->netdev)) {
- ecmd->speed = SPEED_10000;
+ adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up);
+ if (link_up) {
+ ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ SPEED_10000 : SPEED_1000;
ecmd->duplex = DUPLEX_FULL;
} else {
ecmd->speed = -1;
ecmd->duplex = -1;
}
- ecmd->autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -125,17 +145,17 @@ static int ixgbe_set_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
- if (ecmd->autoneg == AUTONEG_ENABLE ||
- ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
- return -EINVAL;
-
- if (netif_running(adapter->netdev)) {
- ixgbe_down(adapter);
- ixgbe_reset(adapter);
- ixgbe_up(adapter);
- } else {
- ixgbe_reset(adapter);
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ if ((ecmd->autoneg == AUTONEG_ENABLE) ||
+ (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+ return -EINVAL;
+ /* in this case we currently only support 10Gb/FULL */
+ break;
+ default:
+ break;
}
return 0;
@@ -147,7 +167,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- pause->autoneg = AUTONEG_DISABLE;
+ pause->autoneg = (hw->fc.type == ixgbe_fc_full ? 1 : 0);
if (hw->fc.type == ixgbe_fc_rx_pause) {
pause->rx_pause = 1;
@@ -165,10 +185,8 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- if (pause->autoneg == AUTONEG_ENABLE)
- return -EINVAL;
-
- if (pause->rx_pause && pause->tx_pause)
+ if ((pause->autoneg == AUTONEG_ENABLE) ||
+ (pause->rx_pause && pause->tx_pause))
hw->fc.type = ixgbe_fc_full;
else if (pause->rx_pause && !pause->tx_pause)
hw->fc.type = ixgbe_fc_rx_pause;
@@ -176,15 +194,15 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
hw->fc.type = ixgbe_fc_tx_pause;
else if (!pause->rx_pause && !pause->tx_pause)
hw->fc.type = ixgbe_fc_none;
+ else
+ return -EINVAL;
hw->fc.original_type = hw->fc.type;
- if (netif_running(adapter->netdev)) {
- ixgbe_down(adapter);
- ixgbe_up(adapter);
- } else {
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
ixgbe_reset(adapter);
- }
return 0;
}
@@ -203,12 +221,10 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
else
adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
- if (netif_running(netdev)) {
- ixgbe_down(adapter);
- ixgbe_up(adapter);
- } else {
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
ixgbe_reset(adapter);
- }
return 0;
}
@@ -662,7 +678,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
return 0;
}
- if (netif_running(adapter->netdev))
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ msleep(1);
+
+ if (netif_running(netdev))
ixgbe_down(adapter);
/*
@@ -733,6 +752,7 @@ err_setup:
if (netif_running(adapter->netdev))
ixgbe_up(adapter);
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
return err;
}
@@ -820,11 +840,8 @@ static int ixgbe_nway_reset(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (netif_running(netdev)) {
- ixgbe_down(adapter);
- ixgbe_reset(adapter);
- ixgbe_up(adapter);
- }
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
return 0;
}
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 3732dd6c4b2a..ead49e54f31b 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -87,6 +87,25 @@ MODULE_VERSION(DRV_VERSION);
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
+{
+ u32 ctrl_ext;
+
+ /* Let firmware take over control of h/w */
+ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+ ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+}
+
+static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
+{
+ u32 ctrl_ext;
+
+ /* Let firmware know the driver has taken over */
+ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+ ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+}
#ifdef DEBUG
/**
@@ -165,6 +184,15 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
return false;
}
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+ (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+
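The TXD_USE_COUNT()/DESC_NEEDED macros are only moved up here so the new queue stop/wake logic in ixgbe_clean_tx_irq() and ixgbe_maybe_stop_tx() can use them; a quick worked example makes the later thresholds easier to read. With IXGBE_MAX_TXD_PWR = 14 one descriptor carries at most 16384 bytes, so (the sizes are hypothetical, and the MAX_SKB_FRAGS figure assumes 4 KiB pages):

	/* TXD_USE_COUNT(S) = (S >> 14) + ((S & 16383) ? 1 : 0)            */
	/*   S = 20000      -> 1 + 1 = 2 descriptors                       */
	/*   S = 16384      -> 1 + 0 = 1 descriptor                        */
	/*   S = PAGE_SIZE  -> 0 + 1 = 1 descriptor (4096-byte page)       */
	/* DESC_NEEDED = 1 (skb->data) + MAX_SKB_FRAGS * 1 + 1 (context)   */
	/*             = 1 + 18 + 1 = 20 descriptors on 4 KiB pages        */

so TX_WAKE_THRESHOLD, redefined below as DESC_NEEDED * 2, comes to roughly 40 free descriptors before a stopped queue is woken.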
/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
@@ -177,18 +205,34 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned int i, eop;
bool cleaned = false;
- int count = 0;
+ unsigned int total_tx_bytes = 0, total_tx_packets = 0;
i = tx_ring->next_to_clean;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
- for (cleaned = false; !cleaned;) {
+ cleaned = false;
+ while (!cleaned) {
tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
cleaned = (i == eop);
tx_ring->stats.bytes += tx_buffer_info->length;
+ if (cleaned) {
+ struct sk_buff *skb = tx_buffer_info->skb;
+#ifdef NETIF_F_TSO
+ unsigned int segs, bytecount;
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) +
+ skb->len;
+ total_tx_packets += segs;
+ total_tx_bytes += bytecount;
+#else
+ total_tx_packets++;
+ total_tx_bytes += skb->len;
+#endif
+ }
ixgbe_unmap_and_free_tx_resource(adapter,
tx_buffer_info);
tx_desc->wb.status = 0;
@@ -204,29 +248,36 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
/* weight of a sort for tx, avoid endless transmit cleanup */
- if (count++ >= tx_ring->work_limit)
+ if (total_tx_packets >= tx_ring->work_limit)
break;
}
tx_ring->next_to_clean = i;
-#define TX_WAKE_THRESHOLD 32
- spin_lock(&tx_ring->tx_lock);
-
- if (cleaned && netif_carrier_ok(netdev) &&
- (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) &&
- !test_bit(__IXGBE_DOWN, &adapter->state))
- netif_wake_queue(netdev);
-
- spin_unlock(&tx_ring->tx_lock);
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (total_tx_packets && netif_carrier_ok(netdev) &&
+ (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (netif_queue_stopped(netdev) &&
+ !test_bit(__IXGBE_DOWN, &adapter->state)) {
+ netif_wake_queue(netdev);
+ adapter->restart_queue++;
+ }
+ }
if (adapter->detect_tx_hung)
if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
netif_stop_queue(netdev);
- if (count >= tx_ring->work_limit)
+ if (total_tx_packets >= tx_ring->work_limit)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
+ adapter->net_stats.tx_bytes += total_tx_bytes;
+ adapter->net_stats.tx_packets += total_tx_packets;
+ cleaned = total_tx_packets ? true : false;
return cleaned;
}
@@ -255,25 +306,40 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
}
}
+/**
+ * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @adapter: address of board private structure
+ * @status_err: hardware indication of status of receive
+ * @skb: skb currently being received and modified
+ **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
u32 status_err,
struct sk_buff *skb)
{
skb->ip_summed = CHECKSUM_NONE;
- /* Ignore Checksum bit is set */
+ /* Ignore Checksum bit is set, or rx csum disabled */
if ((status_err & IXGBE_RXD_STAT_IXSM) ||
- !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
return;
- /* TCP/UDP checksum error bit is set */
- if (status_err & (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE)) {
- /* let the stack verify checksum errors */
+
+ /* if IP and error */
+ if ((status_err & IXGBE_RXD_STAT_IPCS) &&
+ (status_err & IXGBE_RXDADV_ERR_IPE)) {
adapter->hw_csum_rx_error++;
return;
}
+
+ if (!(status_err & IXGBE_RXD_STAT_L4CS))
+ return;
+
+ if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
/* It must be a TCP or UDP packet with a valid checksum */
- if (status_err & (IXGBE_RXD_STAT_L4CS | IXGBE_RXD_STAT_UDPCS))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_rx_good++;
}
@@ -379,6 +445,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
u16 hdr_info, vlan_tag;
bool is_vlan, cleaned = false;
int cleaned_count = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
upper_len = 0;
@@ -458,6 +525,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
}
ixgbe_rx_checksum(adapter, staterr, skb);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
skb->protocol = eth_type_trans(skb, netdev);
ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag);
netdev->last_rx = jiffies;
@@ -486,6 +558,9 @@ next_desc:
if (cleaned_count)
ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ adapter->net_stats.rx_bytes += total_rx_bytes;
+ adapter->net_stats.rx_packets += total_rx_packets;
+
return cleaned;
}
@@ -535,7 +610,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
if (!test_bit(__IXGBE_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies);
}
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
+
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
return IRQ_HANDLED;
}
@@ -713,7 +790,6 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
/* Disable interrupts and register for poll. The flush of the
* posted write is intentionally left out. */
- atomic_inc(&adapter->irq_sem);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
__netif_rx_schedule(netdev, &adapter->napi);
}
@@ -801,7 +877,6 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
**/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
- atomic_inc(&adapter->irq_sem);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
IXGBE_WRITE_FLUSH(&adapter->hw);
synchronize_irq(adapter->pdev->irq);
@@ -813,15 +888,13 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
**/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
- if (atomic_dec_and_test(&adapter->irq_sem)) {
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
- (IXGBE_EIMS_ENABLE_MASK &
- ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
- IXGBE_EIMS_ENABLE_MASK);
- IXGBE_WRITE_FLUSH(&adapter->hw);
- }
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
+ (IXGBE_EIMS_ENABLE_MASK &
+ ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
+ IXGBE_EIMS_ENABLE_MASK);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
}
/**
@@ -1040,7 +1113,8 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
u32 ctrl;
- ixgbe_irq_disable(adapter);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_disable(adapter);
adapter->vlgrp = grp;
if (grp) {
@@ -1051,7 +1125,8 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
}
- ixgbe_irq_enable(adapter);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter);
}
static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -1066,9 +1141,13 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- ixgbe_irq_disable(adapter);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_disable(adapter);
+
vlan_group_set_device(adapter->vlgrp, vid, NULL);
- ixgbe_irq_enable(adapter);
+
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter);
/* remove VID from filter table */
ixgbe_set_vfta(&adapter->hw, vid, 0, false);
@@ -1170,6 +1249,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
u32 txdctl, rxdctl, mhadd;
int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ ixgbe_get_hw_control(adapter);
+
if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED |
IXGBE_FLAG_MSI_ENABLED)) {
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1224,6 +1305,16 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
return 0;
}
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ msleep(1);
+ ixgbe_down(adapter);
+ ixgbe_up(adapter);
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+
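ixgbe_reinit_locked() serializes full restarts behind the __IXGBE_RESETTING bit, and the ethtool, MTU and reset-task paths elsewhere in this patch switch to it instead of open-coding ixgbe_down()/ixgbe_up(). A hypothetical caller, only to show the intended shape (example_reconfigure() is not a function in this driver):

	static int example_reconfigure(struct net_device *netdev)
	{
		struct ixgbe_adapter *adapter = netdev_priv(netdev);

		/* ...update the software settings first... */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);	/* down + up, serialized */
		else
			ixgbe_reset(adapter);		/* interface is closed */
		return 0;
	}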
int ixgbe_up(struct ixgbe_adapter *adapter)
{
/* hardware has been reset, we need to reload some things */
@@ -1408,7 +1499,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
msleep(10);
napi_disable(&adapter->napi);
- atomic_set(&adapter->irq_sem, 0);
ixgbe_irq_disable(adapter);
@@ -1447,6 +1537,8 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+ ixgbe_release_hw_control(adapter);
+
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -1481,7 +1573,8 @@ static int ixgbe_clean(struct napi_struct *napi, int budget)
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
netif_rx_complete(netdev, napi);
- ixgbe_irq_enable(adapter);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter);
}
return work_done;
@@ -1506,8 +1599,7 @@ static void ixgbe_reset_task(struct work_struct *work)
adapter->tx_timeout_count++;
- ixgbe_down(adapter);
- ixgbe_up(adapter);
+ ixgbe_reinit_locked(adapter);
}
/**
@@ -1590,7 +1682,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
return -ENOMEM;
}
- atomic_set(&adapter->irq_sem, 1);
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -1634,7 +1725,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
txdr->next_to_use = 0;
txdr->next_to_clean = 0;
txdr->work_limit = txdr->count;
- spin_lock_init(&txdr->tx_lock);
return 0;
}
@@ -1828,10 +1918,8 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
- if (netif_running(netdev)) {
- ixgbe_down(adapter);
- ixgbe_up(adapter);
- }
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
return 0;
}
@@ -1852,14 +1940,8 @@ static int ixgbe_open(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int err;
- u32 ctrl_ext;
u32 num_rx_queues = adapter->num_rx_queues;
- /* Let firmware know the driver has taken over */
- ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
- ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
-
try_intr_reinit:
/* allocate transmit descriptors */
err = ixgbe_setup_all_tx_resources(adapter);
@@ -1910,6 +1992,7 @@ try_intr_reinit:
return 0;
err_up:
+ ixgbe_release_hw_control(adapter);
ixgbe_free_irq(adapter);
err_req_irq:
ixgbe_free_all_rx_resources(adapter);
@@ -1935,7 +2018,6 @@ err_setup_tx:
static int ixgbe_close(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u32 ctrl_ext;
ixgbe_down(adapter);
ixgbe_free_irq(adapter);
@@ -1943,9 +2025,7 @@ static int ixgbe_close(struct net_device *netdev)
ixgbe_free_all_tx_resources(adapter);
ixgbe_free_all_rx_resources(adapter);
- ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
- ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+ ixgbe_release_hw_control(adapter);
return 0;
}
@@ -1957,22 +2037,26 @@ static int ixgbe_close(struct net_device *netdev)
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u64 good_rx, missed_rx, bprc;
+ u64 total_mpc = 0;
+ u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
- good_rx = IXGBE_READ_REG(hw, IXGBE_GPRC);
- missed_rx = IXGBE_READ_REG(hw, IXGBE_MPC(0));
- missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(1));
- missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(2));
- missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(3));
- missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(4));
- missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(5));
- missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(6));
- missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(7));
- adapter->stats.gprc += (good_rx - missed_rx);
-
- adapter->stats.mpc[0] += missed_rx;
+ for (i = 0; i < 8; i++) {
+ /* for packet buffers not used, the register should read 0 */
+ mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+ missed_rx += mpc;
+ adapter->stats.mpc[i] += mpc;
+ total_mpc += adapter->stats.mpc[i];
+ adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ }
+ adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+ /* work around hardware counting issue */
+ adapter->stats.gprc -= missed_rx;
+
+ /* 82598 hardware only has a 32 bit counter in the high register */
adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+ adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
adapter->stats.bprc += bprc;
adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
@@ -1984,35 +2068,37 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
-
adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
- adapter->stats.lxontxc += IXGBE_READ_REG(hw, IXGBE_LXONTXC);
adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
- adapter->stats.lxofftxc += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+ lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+ adapter->stats.lxontxc += lxon;
+ lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+ adapter->stats.lxofftxc += lxoff;
adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
- adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
- adapter->stats.rnbc[0] += IXGBE_READ_REG(hw, IXGBE_RNBC(0));
+ adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+ /*
+ * 82598 errata - tx of flow control packets is included in tx counters
+ */
+ xon_off_tot = lxon + lxoff;
+ adapter->stats.gptc -= xon_off_tot;
+ adapter->stats.mptc -= xon_off_tot;
+ adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
- adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+ adapter->stats.ptc64 -= xon_off_tot;
adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
- adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
/* Fill out the OS statistics structure */
- adapter->net_stats.rx_packets = adapter->stats.gprc;
- adapter->net_stats.tx_packets = adapter->stats.gptc;
- adapter->net_stats.rx_bytes = adapter->stats.gorc;
- adapter->net_stats.tx_bytes = adapter->stats.gotc;
adapter->net_stats.multicast = adapter->stats.mprc;
/* Rx Errors */
@@ -2021,8 +2107,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
adapter->net_stats.rx_dropped = 0;
adapter->net_stats.rx_length_errors = adapter->stats.rlec;
adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
- adapter->net_stats.rx_missed_errors = adapter->stats.mpc[0];
-
+ adapter->net_stats.rx_missed_errors = total_mpc;
}
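The statistics rework above also backs transmitted flow-control frames out of the "good" TX counters, per the 82598 erratum noted in the hunk. A small worked example with made-up numbers: if one interval transmits lxon = 3 XON and lxoff = 2 XOFF frames, then xon_off_tot = 5 and the adjustments are

	gptc  -= 5;                              /* 5 fewer good packets          */
	mptc  -= 5;                              /* pause frames are multicast    */
	gotc  -= 5 * (ETH_ZLEN + ETH_FCS_LEN);   /* 5 * 64 = 320 bytes backed out */
	ptc64 -= 5;                              /* they are 64-byte frames       */

because each pause frame is a minimum-size (64-byte) multicast frame that the MAC counts in its own transmit statistics.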
/**
@@ -2076,15 +2161,6 @@ static void ixgbe_watchdog(unsigned long data)
round_jiffies(jiffies + 2 * HZ));
}
-#define IXGBE_MAX_TXD_PWR 14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
- (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
- MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-
static int ixgbe_tso(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring, struct sk_buff *skb,
u32 tx_flags, u8 *hdr_len)
@@ -2356,6 +2432,37 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
+static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbe_ring *tx_ring, int size)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ netif_stop_queue(netdev);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
+
+ /* We need to check again in a case another CPU has just
+ * made room available. */
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ return 0;
+}
+
+static int ixgbe_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbe_ring *tx_ring, int size)
+{
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
+}
+
+
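Together with the smp_mb() added to ixgbe_clean_tx_irq() earlier in this patch, these helpers replace the per-ring tx_lock with the usual lockless stop/wake handshake. A condensed sketch of the two sides, using the names from the hunks above (counters and error paths omitted, so this is not a drop-in copy of either function):

	/* sender side, in the ndo_start_xmit path */
	if (IXGBE_DESC_UNUSED(tx_ring) < size) {
		netif_stop_queue(netdev);
		smp_mb();			/* pairs with the cleaner's barrier */
		if (IXGBE_DESC_UNUSED(tx_ring) < size)
			return NETDEV_TX_BUSY;	/* still full, stay stopped */
		netif_wake_queue(netdev);	/* cleaner made room while we stopped */
	}

	/* cleaner side, in ixgbe_clean_tx_irq() */
	tx_ring->next_to_clean = i;		/* publish the reclaimed descriptors */
	smp_mb();				/* pairs with the sender's barrier */
	if (netif_queue_stopped(netdev) &&
	    IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)
		netif_wake_queue(netdev);

The paired barriers guarantee that at least one side sees the other's update, so the queue can never be left stopped while descriptors are free, which is the lost-wakeup race the old spinlock was guarding against.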
static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2363,7 +2470,6 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int len = skb->len;
unsigned int first;
unsigned int tx_flags = 0;
- unsigned long flags = 0;
u8 hdr_len;
int tso;
unsigned int mss = 0;
@@ -2389,14 +2495,10 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
for (f = 0; f < nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
- spin_lock_irqsave(&tx_ring->tx_lock, flags);
- if (IXGBE_DESC_UNUSED(tx_ring) < (count + 2)) {
+ if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
adapter->tx_busy++;
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_BUSY;
}
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
tx_flags |= IXGBE_TX_FLAGS_VLAN;
tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
@@ -2423,11 +2525,7 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
netdev->trans_start = jiffies;
- spin_lock_irqsave(&tx_ring->tx_lock, flags);
- /* Make sure there is space in the ring for the next send. */
- if (IXGBE_DESC_UNUSED(tx_ring) < DESC_NEEDED)
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
}
@@ -2697,6 +2795,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
return 0;
err_register:
+ ixgbe_release_hw_control(adapter);
err_hw_init:
err_sw_init:
err_eeprom:
@@ -2732,6 +2831,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
+ ixgbe_release_hw_control(adapter);
+
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index c429a5002dd6..0c5447dac03b 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -148,7 +148,7 @@ static void __NS8390_init(struct net_device *dev, int startp);
*
* "The author (me) didn't use spin_lock_irqsave because the slowness of the
* card means that approach caused horrible problems like losing serial data
- * at 38400 baud on some chips. Rememeber many 8390 nics on PCI were ISA
+ * at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
* chips with FPGA front ends.
*
* Ok the logic behind the 8390 is very simple:
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index e10528ed9081..81bf005ff280 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -1084,7 +1084,7 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(phydev, if_mii(rq), cmd);
}
-static int __devinit macb_probe(struct platform_device *pdev)
+static int __init macb_probe(struct platform_device *pdev)
{
struct eth_platform_data *pdata;
struct resource *regs;
@@ -1248,7 +1248,7 @@ err_out:
return err;
}
-static int __devexit macb_remove(struct platform_device *pdev)
+static int __exit macb_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct macb *bp;
@@ -1276,8 +1276,7 @@ static int __devexit macb_remove(struct platform_device *pdev)
}
static struct platform_driver macb_driver = {
- .probe = macb_probe,
- .remove = __devexit_p(macb_remove),
+ .remove = __exit_p(macb_remove),
.driver = {
.name = "macb",
},
@@ -1285,7 +1284,7 @@ static struct platform_driver macb_driver = {
static int __init macb_init(void)
{
- return platform_driver_register(&macb_driver);
+ return platform_driver_probe(&macb_driver, macb_probe);
}
static void __exit macb_exit(void)
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index aafc3ce59cbb..6d343efb2717 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -4,8 +4,6 @@
* for more details.
*/
-#define DEBUG
-
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -15,11 +13,93 @@
#include <linux/platform_device.h>
#include <asm/mips-boards/simint.h>
-#include "mipsnet.h" /* actual device IO mapping */
+#define MIPSNET_VERSION "2007-11-17"
+
+/*
+ * Net status/control block as seen by sw in the core.
+ */
+struct mipsnet_regs {
+ /*
+ * Device info for probing, reads as MIPSNET%d where %d is some
+ * form of version.
+ */
+ u64 devId; /*0x00 */
-#define MIPSNET_VERSION "2005-06-20"
+ /*
+ * read only busy flag.
+ * Set and cleared by the Net Device to indicate that an rx or a tx
+ * is in progress.
+ */
+ u32 busy; /*0x08 */
-#define mipsnet_reg_address(dev, field) (dev->base_addr + field_offset(field))
+ /*
+ * Set by the Net Device.
+ * The device will set it once data has been received.
+ * The value is the number of bytes that should be read from
+	 * rxDataBuffer. The value decreases towards 0 as the data
+	 * from rxDataBuffer is read.
+ */
+ u32 rxDataCount; /*0x0c */
+#define MIPSNET_MAX_RXTX_DATACOUNT (1 << 16)
+
+ /*
+ * Settable from the MIPS core, cleared by the Net Device.
+ * The core should set the number of bytes it wants to send,
+ * then it should write those bytes of data to txDataBuffer.
+	 * The device will clear txDataCount once the data has been processed (not
+ * necessarily sent).
+ */
+ u32 txDataCount; /*0x10 */
+
+ /*
+ * Interrupt control
+ *
+ * Used to clear the interrupted generated by this dev.
+ * Write a 1 to clear the interrupt. (except bit31).
+ *
+ * Bit0 is set if it was a tx-done interrupt.
+ * Bit1 is set when new rx-data is available.
+ * Until this bit is cleared there will be no other RXs.
+ *
+ * Bit31 is used for testing, it clears after a read.
+ * Writing 1 to this bit will cause an interrupt to be generated.
+ * To clear the test interrupt, write 0 to this register.
+ */
+ u32 interruptControl; /*0x14 */
+#define MIPSNET_INTCTL_TXDONE (1u << 0)
+#define MIPSNET_INTCTL_RXDONE (1u << 1)
+#define MIPSNET_INTCTL_TESTBIT (1u << 31)
+
+ /*
+ * Readonly core-specific interrupt info for the device to signal
+ * the core. The meaning of the contents of this field might change.
+ */
+ /* XXX: the whole memIntf interrupt scheme is messy: the device
+ * should have no control what so ever of what VPE/register set is
+	 * should have no control whatsoever over which VPE/register set is
+ * The MemIntf should only expose interrupt lines, and something in
+ * the config should be responsible for the line<->core/vpe bindings.
+ */
+ u32 interruptInfo; /*0x18 */
+
+ /*
+ * This is where the received data is read out.
+	 * There is more data to read until rxDataCount is 0.
+ * Only 1 byte at this regs offset is used.
+ */
+ u32 rxDataBuffer; /*0x1c */
+
+ /*
+ * This is where the data to transmit is written.
+ * Data should be written for the amount specified in the
+ * txDataCount register.
+ * Only 1 byte at this regs offset is used.
+ */
+ u32 txDataBuffer; /*0x20 */
+};
+
+#define regaddr(dev, field) \
+ (dev->base_addr + offsetof(struct mipsnet_regs, field))
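The register definitions previously pulled in from mipsnet.h now live in the driver as the mipsnet_regs struct above, and regaddr() derives each port offset from the struct layout with offsetof() instead of a hand-maintained offset table. Usage mirrors the calls later in this patch; a minimal sketch, where handle_rx_byte() merely stands in for whatever consumes the data:

	/* read the pending RX byte count, then drain the payload one byte at a time */
	u32 pending = inl(regaddr(dev, rxDataCount));

	while (pending--)
		handle_rx_byte(inb(regaddr(dev, rxDataBuffer)));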
static char mipsnet_string[] = "mipsnet";
@@ -29,32 +109,27 @@ static char mipsnet_string[] = "mipsnet";
static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
int len)
{
- uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount));
-
- if (available_len < len)
- return -EFAULT;
-
for (; len > 0; len--, kdata++)
- *kdata = inb(mipsnet_reg_address(dev, rxDataBuffer));
+ *kdata = inb(regaddr(dev, rxDataBuffer));
- return inl(mipsnet_reg_address(dev, rxDataCount));
+ return inl(regaddr(dev, rxDataCount));
}
-static inline ssize_t mipsnet_put_todevice(struct net_device *dev,
+static inline void mipsnet_put_todevice(struct net_device *dev,
struct sk_buff *skb)
{
int count_to_go = skb->len;
char *buf_ptr = skb->data;
- outl(skb->len, mipsnet_reg_address(dev, txDataCount));
+ outl(skb->len, regaddr(dev, txDataCount));
for (; count_to_go; buf_ptr++, count_to_go--)
- outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer));
+ outb(*buf_ptr, regaddr(dev, txDataBuffer));
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- return skb->len;
+ dev_kfree_skb(skb);
}
static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -69,18 +144,20 @@ static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count)
+static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t len)
{
struct sk_buff *skb;
- size_t len = count;
- skb = alloc_skb(len + 2, GFP_KERNEL);
+ if (!len)
+ return len;
+
+ skb = dev_alloc_skb(len + NET_IP_ALIGN);
if (!skb) {
dev->stats.rx_dropped++;
return -ENOMEM;
}
- skb_reserve(skb, 2);
+ skb_reserve(skb, NET_IP_ALIGN);
if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len))
return -EFAULT;
@@ -92,50 +169,42 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count)
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
- return count;
+ return len;
}
static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
-
- irqreturn_t retval = IRQ_NONE;
- uint64_t interruptFlags;
-
- if (irq == dev->irq) {
- retval = IRQ_HANDLED;
-
- interruptFlags =
- inl(mipsnet_reg_address(dev, interruptControl));
-
- if (interruptFlags & MIPSNET_INTCTL_TXDONE) {
- outl(MIPSNET_INTCTL_TXDONE,
- mipsnet_reg_address(dev, interruptControl));
- /* only one packet at a time, we are done. */
- netif_wake_queue(dev);
- } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) {
- mipsnet_get_fromdev(dev,
- inl(mipsnet_reg_address(dev, rxDataCount)));
- outl(MIPSNET_INTCTL_RXDONE,
- mipsnet_reg_address(dev, interruptControl));
-
- } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) {
- /*
- * TESTBIT is cleared on read.
- * And takes effect after a write with 0
- */
- outl(0, mipsnet_reg_address(dev, interruptControl));
- } else {
- /* Maybe shared IRQ, just ignore, no clearing. */
- retval = IRQ_NONE;
- }
-
- } else {
- printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
- dev->name, __FUNCTION__, irq);
- retval = IRQ_NONE;
+ u32 int_flags;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (irq != dev->irq)
+ goto out_badirq;
+
+ /* TESTBIT is cleared on read. */
+ int_flags = inl(regaddr(dev, interruptControl));
+ if (int_flags & MIPSNET_INTCTL_TESTBIT) {
+ /* TESTBIT takes effect after a write with 0. */
+ outl(0, regaddr(dev, interruptControl));
+ ret = IRQ_HANDLED;
+ } else if (int_flags & MIPSNET_INTCTL_TXDONE) {
+ /* Only one packet at a time, we are done. */
+ dev->stats.tx_packets++;
+ netif_wake_queue(dev);
+ outl(MIPSNET_INTCTL_TXDONE,
+ regaddr(dev, interruptControl));
+ ret = IRQ_HANDLED;
+ } else if (int_flags & MIPSNET_INTCTL_RXDONE) {
+ mipsnet_get_fromdev(dev, inl(regaddr(dev, rxDataCount)));
+ outl(MIPSNET_INTCTL_RXDONE, regaddr(dev, interruptControl));
+ ret = IRQ_HANDLED;
}
- return retval;
+ return ret;
+
+out_badirq:
+ printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
+ dev->name, __FUNCTION__, irq);
+ return ret;
}
static int mipsnet_open(struct net_device *dev)
@@ -144,18 +213,15 @@ static int mipsnet_open(struct net_device *dev)
err = request_irq(dev->irq, &mipsnet_interrupt,
IRQF_SHARED, dev->name, (void *) dev);
-
if (err) {
- release_region(dev->base_addr, MIPSNET_IO_EXTENT);
+ release_region(dev->base_addr, sizeof(struct mipsnet_regs));
return err;
}
netif_start_queue(dev);
/* test interrupt handler */
- outl(MIPSNET_INTCTL_TESTBIT,
- mipsnet_reg_address(dev, interruptControl));
-
+ outl(MIPSNET_INTCTL_TESTBIT, regaddr(dev, interruptControl));
return 0;
}
@@ -163,7 +229,7 @@ static int mipsnet_open(struct net_device *dev)
static int mipsnet_close(struct net_device *dev)
{
netif_stop_queue(dev);
-
+ free_irq(dev->irq, dev);
return 0;
}
@@ -194,10 +260,11 @@ static int __init mipsnet_probe(struct device *dev)
*/
netdev->base_addr = 0x4200;
netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 +
- inl(mipsnet_reg_address(netdev, interruptInfo));
+ inl(regaddr(netdev, interruptInfo));
/* Get the io region now, get irq on open() */
- if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) {
+ if (!request_region(netdev->base_addr, sizeof(struct mipsnet_regs),
+ "mipsnet")) {
err = -EBUSY;
goto out_free_netdev;
}
@@ -217,7 +284,7 @@ static int __init mipsnet_probe(struct device *dev)
return 0;
out_free_region:
- release_region(netdev->base_addr, MIPSNET_IO_EXTENT);
+ release_region(netdev->base_addr, sizeof(struct mipsnet_regs));
out_free_netdev:
free_netdev(netdev);
@@ -231,7 +298,7 @@ static int __devexit mipsnet_device_remove(struct device *device)
struct net_device *dev = dev_get_drvdata(device);
unregister_netdev(dev);
- release_region(dev->base_addr, MIPSNET_IO_EXTENT);
+ release_region(dev->base_addr, sizeof(struct mipsnet_regs));
free_netdev(dev);
dev_set_drvdata(device, NULL);
diff --git a/drivers/net/mipsnet.h b/drivers/net/mipsnet.h
deleted file mode 100644
index 0132c6714a40..000000000000
--- a/drivers/net/mipsnet.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __MIPSNET_H
-#define __MIPSNET_H
-
-/*
- * Id of this Net device, as seen by the core.
- */
-#define MIPS_NET_DEV_ID ((uint64_t) \
- ((uint64_t) 'M' << 0)| \
- ((uint64_t) 'I' << 8)| \
- ((uint64_t) 'P' << 16)| \
- ((uint64_t) 'S' << 24)| \
- ((uint64_t) 'N' << 32)| \
- ((uint64_t) 'E' << 40)| \
- ((uint64_t) 'T' << 48)| \
- ((uint64_t) '0' << 56))
-
-/*
- * Net status/control block as seen by sw in the core.
- * (Why not use bit fields? can't be bothered with cross-platform struct
- * packing.)
- */
-struct net_control_block {
- /*
- * dev info for probing
- * reads as MIPSNET%d where %d is some form of version
- */
- uint64_t devId; /* 0x00 */
-
- /*
- * read only busy flag.
- * Set and cleared by the Net Device to indicate that an rx or a tx
- * is in progress.
- */
- uint32_t busy; /* 0x08 */
-
- /*
- * Set by the Net Device.
- * The device will set it once data has been received.
- * The value is the number of bytes that should be read from
- * rxDataBuffer. The value will decrease till 0 until all the data
- * from rxDataBuffer has been read.
- */
- uint32_t rxDataCount; /* 0x0c */
-#define MIPSNET_MAX_RXTX_DATACOUNT (1<<16)
-
- /*
- * Settable from the MIPS core, cleared by the Net Device. The core
- * should set the number of bytes it wants to send, then it should
- * write those bytes of data to txDataBuffer. The device will clear
- * txDataCount has been processed (not necessarily sent).
- */
- uint32_t txDataCount; /* 0x10 */
-
- /*
- * Interrupt control
- *
- * Used to clear the interrupted generated by this dev.
- * Write a 1 to clear the interrupt. (except bit31).
- *
- * Bit0 is set if it was a tx-done interrupt.
- * Bit1 is set when new rx-data is available.
- * Until this bit is cleared there will be no other RXs.
- *
- * Bit31 is used for testing, it clears after a read.
- * Writing 1 to this bit will cause an interrupt to be generated.
- * To clear the test interrupt, write 0 to this register.
- */
- uint32_t interruptControl; /*0x14 */
-#define MIPSNET_INTCTL_TXDONE ((uint32_t)(1 << 0))
-#define MIPSNET_INTCTL_RXDONE ((uint32_t)(1 << 1))
-#define MIPSNET_INTCTL_TESTBIT ((uint32_t)(1 << 31))
-#define MIPSNET_INTCTL_ALLSOURCES (MIPSNET_INTCTL_TXDONE | \
- MIPSNET_INTCTL_RXDONE | \
- MIPSNET_INTCTL_TESTBIT)
-
- /*
- * Readonly core-specific interrupt info for the device to signal the
- * core. The meaning of the contents of this field might change.
- *
- * TODO: the whole memIntf interrupt scheme is messy: the device should
- * have no control what so ever of what VPE/register set is being
- * used. The MemIntf should only expose interrupt lines, and
- * something in the config should be responsible for the
- * line<->core/vpe bindings.
- */
- uint32_t interruptInfo; /* 0x18 */
-
- /*
- * This is where the received data is read out.
- * There is more data to read until rxDataReady is 0.
- * Only 1 byte at this regs offset is used.
- */
- uint32_t rxDataBuffer; /* 0x1c */
-
- /*
- * This is where the data to transmit is written. Data should be
- * written for the amount specified in the txDataCount register. Only
- * 1 byte at this regs offset is used.
- */
- uint32_t txDataBuffer; /* 0x20 */
-};
-
-#define MIPSNET_IO_EXTENT 0x40 /* being generous */
-
-#define field_offset(field) (offsetof(struct net_control_block, field))
-
-#endif /* __MIPSNET_H */
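As a side note on the mipsnet change above: the new regaddr() macro replaces the old hard-coded field_offset() scheme with offsetof() on struct mipsnet_regs, so the register offsets come straight from the C layout. A minimal userspace sketch of that idea (not part of the patch; the field names and the 0x4200 base come from the hunks above, everything else is illustrative):

/*
 * Illustrative only: shows how offsetof() reproduces the documented
 * register offsets for the mipsnet_regs layout introduced above.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct mipsnet_regs {
	uint64_t devId;			/* 0x00 */
	uint32_t busy;			/* 0x08 */
	uint32_t rxDataCount;		/* 0x0c */
	uint32_t txDataCount;		/* 0x10 */
	uint32_t interruptControl;	/* 0x14 */
	uint32_t interruptInfo;		/* 0x18 */
	uint32_t rxDataBuffer;		/* 0x1c */
	uint32_t txDataBuffer;		/* 0x20 */
};

/* Same idea as the new regaddr(): base address plus compile-time offset. */
#define regaddr(base, field) ((base) + offsetof(struct mipsnet_regs, field))

int main(void)
{
	unsigned long base = 0x4200;	/* base_addr used by mipsnet_probe() */

	printf("rxDataCount  -> 0x%lx\n", regaddr(base, rxDataCount));
	printf("txDataBuffer -> 0x%lx\n", regaddr(base, txDataBuffer));
	return 0;
}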
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 535a4461d88c..61dc4951d6b0 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -617,9 +617,6 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
int err;
#define QUERY_ADAPTER_OUT_SIZE 0x100
-#define QUERY_ADAPTER_VENDOR_ID_OFFSET 0x00
-#define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04
-#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08
#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
#define QUERY_ADAPTER_VSD_OFFSET 0x20
@@ -633,9 +630,6 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
if (err)
goto out;
- MLX4_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
- MLX4_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
- MLX4_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 7e1dd9e25cfb..e16dec890413 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -99,9 +99,6 @@ struct mlx4_dev_cap {
};
struct mlx4_adapter {
- u32 vendor_id;
- u32 device_id;
- u32 revision_id;
char board_id[MLX4_BOARD_ID_LEN];
u8 inta_pin;
};
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 89b3f0b7cdc0..08bfc130a33e 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#endif /* CONFIG_PCI_MSI */
-static const char mlx4_version[] __devinitdata =
+static char mlx4_version[] __devinitdata =
DRV_NAME ": Mellanox ConnectX core driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -163,7 +163,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
return 0;
}
-static int __devinit mlx4_load_fw(struct mlx4_dev *dev)
+static int mlx4_load_fw(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
@@ -197,8 +197,8 @@ err_free:
return err;
}
-static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
- int cmpt_entry_sz)
+static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
+ int cmpt_entry_sz)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
@@ -534,7 +534,6 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
}
priv->eq_table.inta_pin = adapter.inta_pin;
- dev->rev_id = adapter.revision_id;
memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
return 0;
@@ -688,7 +687,7 @@ err_uar_table_free:
return err;
}
-static void __devinit mlx4_enable_msi_x(struct mlx4_dev *dev)
+static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry entries[MLX4_NUM_EQ];
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 0c05a10bae3b..9c9e308d0917 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -122,7 +122,7 @@ static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
spin_unlock(&buddy->lock);
}
-static int __devinit mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
+static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
int i, s;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 651c2699d5e1..b528ce77c406 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1652,6 +1652,11 @@ static void eth_tx_fill_frag_descs(struct mv643xx_private *mp,
}
}
+static inline __be16 sum16_as_be(__sum16 sum)
+{
+ return (__force __be16)sum;
+}
+
/**
* eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw
*
@@ -1689,7 +1694,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- BUG_ON(skb->protocol != ETH_P_IP);
+ BUG_ON(skb->protocol != htons(ETH_P_IP));
cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
ETH_GEN_IP_V_4_CHECKSUM |
@@ -1698,10 +1703,10 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
cmd_sts |= ETH_UDP_FRAME;
- desc->l4i_chk = udp_hdr(skb)->check;
+ desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
break;
case IPPROTO_TCP:
- desc->l4i_chk = tcp_hdr(skb)->check;
+ desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
break;
default:
BUG();
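The two mv643xx_eth changes above are byte-order fixes: skb->protocol carries the EtherType in network byte order, so it must be compared against htons(ETH_P_IP), and the partial checksum is converted with ntohs() before being written into the descriptor field. A minimal userspace sketch of the comparison half (not part of the patch; ETH_P_IP is the standard 0x0800 EtherType, the rest is illustrative):

/*
 * Illustrative only: on a little-endian host the first comparison is
 * false, which is exactly what the BUG_ON() fix above guards against.
 */
#include <stdio.h>
#include <arpa/inet.h>

#define ETH_P_IP 0x0800	/* IPv4 EtherType */

int main(void)
{
	/* skb->protocol for an IPv4 frame: the EtherType in network order. */
	unsigned short protocol = htons(ETH_P_IP);

	printf("protocol == ETH_P_IP        : %s\n",
	       protocol == ETH_P_IP ? "true" : "false");
	printf("protocol == htons(ETH_P_IP) : %s\n",
	       protocol == htons(ETH_P_IP) ? "true" : "false");
	return 0;
}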
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index c329a4f5840c..0a3e60418e53 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -203,22 +203,8 @@ skbuff at an offset of "+2", 16-byte aligning the IP header.
IIId. Synchronization
Most operations are synchronized on the np->lock irq spinlock, except the
-performance critical codepaths:
-
-The rx process only runs in the interrupt handler. Access from outside
-the interrupt handler is only permitted after disable_irq().
-
-The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
-is set, then access is permitted under spin_lock_irq(&np->lock).
-
-Thus configuration functions that want to access everything must call
- disable_irq(dev->irq);
- netif_tx_lock_bh(dev);
- spin_lock_irq(&np->lock);
-
-IV. Notes
-
-NatSemi PCI network controllers are very uncommon.
+receive and transmit paths, which are synchronized using a combination of
+hardware descriptor ownership, disabling interrupts and NAPI poll scheduling.
IVb. References
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index bb88a41b7591..2e39e0285d8f 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -62,6 +62,10 @@
#define LRO_MAX_AGGR 64
+#define PE_MIN_MTU 64
+#define PE_MAX_MTU 1500
+#define PE_DEF_MTU ETH_DATA_LEN
+
#define DEFAULT_MSG_ENABLE \
(NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
@@ -82,8 +86,6 @@
& ((ring)->size - 1))
#define RING_AVAIL(ring) ((ring->size) - RING_USED(ring))
-#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
@@ -175,6 +177,24 @@ static int mac_to_intf(struct pasemi_mac *mac)
return -1;
}
+static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
+{
+ unsigned int flags;
+
+ flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
+ flags &= ~PAS_MAC_CFG_PCFG_PE;
+ write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
+}
+
+static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
+{
+ unsigned int flags;
+
+ flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
+ flags |= PAS_MAC_CFG_PCFG_PE;
+ write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
+}
+
static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{
struct pci_dev *pdev = mac->pdev;
@@ -221,6 +241,33 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
return 0;
}
+static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct pasemi_mac *mac = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ unsigned int adr0, adr1;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ adr0 = dev->dev_addr[2] << 24 |
+ dev->dev_addr[3] << 16 |
+ dev->dev_addr[4] << 8 |
+ dev->dev_addr[5];
+ adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1);
+ adr1 &= ~0xffff;
+ adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1];
+
+ pasemi_mac_intf_disable(mac);
+ write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0);
+ write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1);
+ pasemi_mac_intf_enable(mac);
+
+ return 0;
+}
+
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
void **tcph, u64 *hdr_flags, void *data)
{
@@ -453,7 +500,7 @@ static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac)
}
-static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
+static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac)
{
struct pasemi_mac_rxring *rx = rx_ring(mac);
unsigned int i;
@@ -473,7 +520,12 @@ static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
}
for (i = 0; i < RX_RING_SIZE; i++)
- RX_DESC(rx, i) = 0;
+ RX_BUFF(rx, i) = 0;
+}
+
+static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
+{
+ pasemi_mac_free_rx_buffers(mac);
dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
rx_ring(mac)->buffers, rx_ring(mac)->buf_dma);
@@ -503,14 +555,14 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
/* Entry in use? */
WARN_ON(*buff);
- skb = dev_alloc_skb(BUF_SIZE);
+ skb = dev_alloc_skb(mac->bufsz);
skb_reserve(skb, LOCAL_SKB_ALIGN);
if (unlikely(!skb))
break;
dma = pci_map_single(mac->dma_pdev, skb->data,
- BUF_SIZE - LOCAL_SKB_ALIGN,
+ mac->bufsz - LOCAL_SKB_ALIGN,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(dma))) {
@@ -520,7 +572,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
info->skb = skb;
info->dma = dma;
- *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
+ *buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma);
fill++;
}
@@ -650,7 +702,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
- pci_unmap_single(pdev, dma, BUF_SIZE-LOCAL_SKB_ALIGN,
+ pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN,
PCI_DMA_FROMDEVICE);
if (macrx & XCT_MACRX_CRC) {
@@ -874,24 +926,6 @@ static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
return IRQ_HANDLED;
}
-static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
-{
- unsigned int flags;
-
- flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
- flags &= ~PAS_MAC_CFG_PCFG_PE;
- write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
-}
-
-static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
-{
- unsigned int flags;
-
- flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
- flags |= PAS_MAC_CFG_PCFG_PE;
- write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
-}
-
static void pasemi_adjust_link(struct net_device *dev)
{
struct pasemi_mac *mac = netdev_priv(dev);
@@ -1148,11 +1182,71 @@ out_rx_resources:
#define MAX_RETRIES 5000
+static void pasemi_mac_pause_txchan(struct pasemi_mac *mac)
+{
+ unsigned int sta, retries;
+ int txch = tx_ring(mac)->chan.chno;
+
+ write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
+ PAS_DMA_TXCHAN_TCMDSTA_ST);
+
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
+ if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
+ dev_err(&mac->dma_pdev->dev,
+ "Failed to stop tx channel, tcmdsta %08x\n", sta);
+
+ write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
+}
+
+static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac)
+{
+ unsigned int sta, retries;
+ int rxch = rx_ring(mac)->chan.chno;
+
+ write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
+ PAS_DMA_RXCHAN_CCMDSTA_ST);
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
+ if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
+ dev_err(&mac->dma_pdev->dev,
+ "Failed to stop rx channel, ccmdsta 08%x\n", sta);
+ write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
+}
+
+static void pasemi_mac_pause_rxint(struct pasemi_mac *mac)
+{
+ unsigned int sta, retries;
+
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+ PAS_DMA_RXINT_RCMDSTA_ST);
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
+ if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
+ dev_err(&mac->dma_pdev->dev,
+ "Failed to stop rx interface, rcmdsta %08x\n", sta);
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
+}
+
static int pasemi_mac_close(struct net_device *dev)
{
struct pasemi_mac *mac = netdev_priv(dev);
unsigned int sta;
- int retries;
int rxch, txch;
rxch = rx_ring(mac)->chan.chno;
@@ -1190,51 +1284,10 @@ static int pasemi_mac_close(struct net_device *dev)
pasemi_mac_clean_tx(tx_ring(mac));
pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
- /* Disable interface */
- write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
- PAS_DMA_TXCHAN_TCMDSTA_ST);
- write_dma_reg( PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
- PAS_DMA_RXINT_RCMDSTA_ST);
- write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
- PAS_DMA_RXCHAN_CCMDSTA_ST);
-
- for (retries = 0; retries < MAX_RETRIES; retries++) {
- sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(rxch));
- if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
- break;
- cond_resched();
- }
-
- if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
- dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");
-
- for (retries = 0; retries < MAX_RETRIES; retries++) {
- sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
- if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
- break;
- cond_resched();
- }
-
- if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
- dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");
-
- for (retries = 0; retries < MAX_RETRIES; retries++) {
- sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
- if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
- break;
- cond_resched();
- }
-
- if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
- dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");
-
- /* Then, disable the channel. This must be done separately from
- * stopping, since you can't disable when active.
- */
-
- write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
- write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
- write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
+ pasemi_mac_pause_txchan(mac);
+ pasemi_mac_pause_rxint(mac);
+ pasemi_mac_pause_rxchan(mac);
+ pasemi_mac_intf_disable(mac);
free_irq(mac->tx->chan.irq, mac->tx);
free_irq(mac->rx->chan.irq, mac->rx);
@@ -1388,6 +1441,62 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget)
return pkts;
}
+static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct pasemi_mac *mac = netdev_priv(dev);
+ unsigned int reg;
+ unsigned int rcmdsta;
+ int running;
+
+ if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU)
+ return -EINVAL;
+
+ running = netif_running(dev);
+
+ if (running) {
+ /* Need to stop the interface, clean out all already
+ * received buffers, free all unused buffers on the RX
+ * interface ring, then finally re-fill the rx ring with
+ * the new-size buffers and restart.
+ */
+
+ napi_disable(&mac->napi);
+ netif_tx_disable(dev);
+ pasemi_mac_intf_disable(mac);
+
+ rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
+ pasemi_mac_pause_rxint(mac);
+ pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
+ pasemi_mac_free_rx_buffers(mac);
+ }
+
+ /* Change maxf, i.e. what size frames are accepted.
+ * Need room for ethernet header and CRC word
+ */
+ reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG);
+ reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M;
+ reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4);
+ write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg);
+
+ dev->mtu = new_mtu;
+ /* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
+ mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
+
+ if (running) {
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+ rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN);
+
+ rx_ring(mac)->next_to_fill = 0;
+ pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1);
+
+ napi_enable(&mac->napi);
+ netif_start_queue(dev);
+ pasemi_mac_intf_enable(mac);
+ }
+
+ return 0;
+}
+
static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -1475,6 +1584,12 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->stop = pasemi_mac_close;
dev->hard_start_xmit = pasemi_mac_start_tx;
dev->set_multicast_list = pasemi_mac_set_rx_mode;
+ dev->set_mac_address = pasemi_mac_set_mac_addr;
+ dev->mtu = PE_DEF_MTU;
+ /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
+ mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
+
+ dev->change_mtu = pasemi_mac_change_mtu;
if (err)
goto out;
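The new pasemi_mac_set_mac_addr() above splits the six address bytes across the two register words: bytes 2-5 go into ADR0 and bytes 0-1 into the low 16 bits of ADR1, with the rest of ADR1 preserved. A minimal userspace sketch of that packing (not part of the patch; the example MAC and the pretend old ADR1 value are made up):

/*
 * Illustrative only: mirrors the adr0/adr1 packing done by
 * pasemi_mac_set_mac_addr() in the hunk above.
 */
#include <stdio.h>

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned int adr0, adr1 = 0xdead0000;	/* pretend old ADR1 value */

	adr0 = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	adr1 &= ~0xffff;			/* keep the non-address bits */
	adr1 |= mac[0] << 8 | mac[1];

	printf("ADR0 = 0x%08x, ADR1 = 0x%08x\n", adr0, adr1);
	return 0;
}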
diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h
index 8bee2a664c83..99e7b9329a6f 100644
--- a/drivers/net/pasemi_mac.h
+++ b/drivers/net/pasemi_mac.h
@@ -59,6 +59,7 @@ struct pasemi_mac {
struct phy_device *phydev;
struct napi_struct napi;
+ int bufsz; /* RX ring buffer size */
u8 type;
#define MAC_TYPE_GMAC 1
#define MAC_TYPE_XAUI 2
@@ -96,6 +97,9 @@ struct pasemi_mac_buffer {
/* MAC CFG register offsets */
enum {
PAS_MAC_CFG_PCFG = 0x80,
+ PAS_MAC_CFG_MACCFG = 0x84,
+ PAS_MAC_CFG_ADR0 = 0x8c,
+ PAS_MAC_CFG_ADR1 = 0x90,
PAS_MAC_CFG_TXP = 0x98,
PAS_MAC_IPC_CHNL = 0x208,
};
@@ -130,6 +134,18 @@ enum {
#define PAS_MAC_CFG_PCFG_SPD_100M 0x00000001
#define PAS_MAC_CFG_PCFG_SPD_1G 0x00000002
#define PAS_MAC_CFG_PCFG_SPD_10G 0x00000003
+
+#define PAS_MAC_CFG_MACCFG_TXT_M 0x70000000
+#define PAS_MAC_CFG_MACCFG_TXT_S 28
+#define PAS_MAC_CFG_MACCFG_PRES_M 0x0f000000
+#define PAS_MAC_CFG_MACCFG_PRES_S 24
+#define PAS_MAC_CFG_MACCFG_MAXF_M 0x00ffff00
+#define PAS_MAC_CFG_MACCFG_MAXF_S 8
+#define PAS_MAC_CFG_MACCFG_MAXF(x) (((x) << PAS_MAC_CFG_MACCFG_MAXF_S) & \
+ PAS_MAC_CFG_MACCFG_MAXF_M)
+#define PAS_MAC_CFG_MACCFG_MINF_M 0x000000ff
+#define PAS_MAC_CFG_MACCFG_MINF_S 0
+
#define PAS_MAC_CFG_TXP_FCF 0x01000000
#define PAS_MAC_CFG_TXP_FCE 0x00800000
#define PAS_MAC_CFG_TXP_FC 0x00400000
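The new MACCFG_MAXF macros above follow the usual mask/shift pattern, and pasemi_mac_change_mtu() updates the field with a clear-then-or read-modify-write. A minimal userspace sketch of that update (not part of the patch; the starting register value is made up, ETH_HLEN is the standard 14-byte Ethernet header):

/*
 * Illustrative only: reuses the MAXF macros from the hunk above to show
 * the read-modify-write done in pasemi_mac_change_mtu().
 */
#include <stdio.h>

#define PAS_MAC_CFG_MACCFG_MAXF_M	0x00ffff00
#define PAS_MAC_CFG_MACCFG_MAXF_S	8
#define PAS_MAC_CFG_MACCFG_MAXF(x)	(((x) << PAS_MAC_CFG_MACCFG_MAXF_S) & \
					 PAS_MAC_CFG_MACCFG_MAXF_M)

int main(void)
{
	unsigned int reg = 0x12345678;	/* pretend value read from MACCFG */
	int new_mtu = 1500;
	int eth_hlen = 14;		/* ETH_HLEN */

	reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M;			/* clear old MAXF */
	reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + eth_hlen + 4);	/* set new MAXF */

	printf("MACCFG = 0x%08x (MAXF = %u)\n", reg,
	       (reg & PAS_MAC_CFG_MACCFG_MAXF_M) >> PAS_MAC_CFG_MACCFG_MAXF_S);
	return 0;
}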
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index ed402e00e730..fffc49befe04 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -541,7 +541,7 @@ static void netdrv_hw_start (struct net_device *dev);
#define NETDRV_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0)
-#if MMIO_FLUSH_AUDIT_COMPLETE
+#ifdef MMIO_FLUSH_AUDIT_COMPLETE
/* write MMIO register */
#define NETDRV_W8(reg, val8) writeb ((val8), ioaddr + (reg))
@@ -603,7 +603,7 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
- tp = dev->priv;
+ tp = netdev_priv(dev);
/* enable device (incl. PCI PM wakeup), and bus-mastering */
rc = pci_enable_device (pdev);
@@ -759,7 +759,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
return i;
}
- tp = dev->priv;
+ tp = netdev_priv(dev);
assert (ioaddr != NULL);
assert (dev != NULL);
@@ -783,7 +783,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
dev->base_addr = (unsigned long) ioaddr;
/* dev->priv/tp zeroed and aligned in alloc_etherdev */
- tp = dev->priv;
+ tp = netdev_priv(dev);
/* note: tp->chipset set in netdrv_init_board */
tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
@@ -841,7 +841,7 @@ static void __devexit netdrv_remove_one (struct pci_dev *pdev)
assert (dev != NULL);
- np = dev->priv;
+ np = netdev_priv(dev);
assert (np != NULL);
unregister_netdev (dev);
@@ -974,7 +974,7 @@ static void mdio_sync (void *mdio_addr)
static int mdio_read (struct net_device *dev, int phy_id, int location)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *mdio_addr = tp->mmio_addr + Config4;
int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
int retval = 0;
@@ -1017,7 +1017,7 @@ static int mdio_read (struct net_device *dev, int phy_id, int location)
static void mdio_write (struct net_device *dev, int phy_id, int location,
int value)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *mdio_addr = tp->mmio_addr + Config4;
int mii_cmd =
(0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
@@ -1060,7 +1060,7 @@ static void mdio_write (struct net_device *dev, int phy_id, int location,
static int netdrv_open (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
int retval;
#ifdef NETDRV_DEBUG
void *ioaddr = tp->mmio_addr;
@@ -1121,7 +1121,7 @@ static int netdrv_open (struct net_device *dev)
/* Start the hardware at open or resume. */
static void netdrv_hw_start (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
u32 i;
@@ -1191,7 +1191,7 @@ static void netdrv_hw_start (struct net_device *dev)
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void netdrv_init_ring (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
int i;
DPRINTK ("ENTER\n");
@@ -1213,7 +1213,7 @@ static void netdrv_init_ring (struct net_device *dev)
static void netdrv_timer (unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
int next_tick = 60 * HZ;
int mii_lpa;
@@ -1252,9 +1252,10 @@ static void netdrv_timer (unsigned long data)
}
-static void netdrv_tx_clear (struct netdrv_private *tp)
+static void netdrv_tx_clear (struct net_device *dev)
{
int i;
+ struct netdrv_private *tp = netdev_priv(dev);
atomic_set (&tp->cur_tx, 0);
atomic_set (&tp->dirty_tx, 0);
@@ -1278,7 +1279,7 @@ static void netdrv_tx_clear (struct netdrv_private *tp)
static void netdrv_tx_timeout (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
int i;
u8 tmp8;
@@ -1311,7 +1312,7 @@ static void netdrv_tx_timeout (struct net_device *dev)
/* Stop a shared interrupt from scavenging while we are. */
spin_lock_irqsave (&tp->lock, flags);
- netdrv_tx_clear (tp);
+ netdrv_tx_clear (dev);
spin_unlock_irqrestore (&tp->lock, flags);
@@ -1325,7 +1326,7 @@ static void netdrv_tx_timeout (struct net_device *dev)
static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
int entry;
@@ -1525,7 +1526,7 @@ static void netdrv_rx_interrupt (struct net_device *dev,
DPRINTK ("%s: netdrv_rx() status %4.4x, size %4.4x,"
" cur %4.4x.\n", dev->name, rx_status,
rx_size, cur_rx);
-#if NETDRV_DEBUG > 2
+#if defined(NETDRV_DEBUG) && (NETDRV_DEBUG > 2)
{
int i;
DPRINTK ("%s: Frame contents ", dev->name);
@@ -1648,7 +1649,7 @@ static void netdrv_weird_interrupt (struct net_device *dev,
static irqreturn_t netdrv_interrupt (int irq, void *dev_instance)
{
struct net_device *dev = (struct net_device *) dev_instance;
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
int boguscnt = max_interrupt_work;
void *ioaddr = tp->mmio_addr;
int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */
@@ -1711,7 +1712,7 @@ static irqreturn_t netdrv_interrupt (int irq, void *dev_instance)
static int netdrv_close (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
unsigned long flags;
@@ -1738,10 +1739,10 @@ static int netdrv_close (struct net_device *dev)
spin_unlock_irqrestore (&tp->lock, flags);
- synchronize_irq ();
+ synchronize_irq (dev->irq);
free_irq (dev->irq, dev);
- netdrv_tx_clear (tp);
+ netdrv_tx_clear (dev);
pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
tp->rx_ring, tp->rx_ring_dma);
@@ -1762,7 +1763,7 @@ static int netdrv_close (struct net_device *dev)
static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(rq);
unsigned long flags;
int rc = 0;
@@ -1805,7 +1806,7 @@ static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
static void netdrv_set_rx_mode (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
u32 mc_filter[2]; /* Multicast hash filter */
int i, rx_mode;
@@ -1862,7 +1863,7 @@ static void netdrv_set_rx_mode (struct net_device *dev)
static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata (pdev);
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
unsigned long flags;
@@ -1892,7 +1893,7 @@ static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state)
static int netdrv_resume (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
- struct netdrv_private *tp = dev->priv;
+ /*struct netdrv_private *tp = netdev_priv(dev);*/
if (!netif_running(dev))
return 0;
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 36a7ba3134ce..3b78a3819bb3 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -230,10 +230,11 @@ static char mii_preamble_required = 0;
static int tc574_config(struct pcmcia_device *link);
static void tc574_release(struct pcmcia_device *link);
-static void mdio_sync(kio_addr_t ioaddr, int bits);
-static int mdio_read(kio_addr_t ioaddr, int phy_id, int location);
-static void mdio_write(kio_addr_t ioaddr, int phy_id, int location, int value);
-static unsigned short read_eeprom(kio_addr_t ioaddr, int index);
+static void mdio_sync(unsigned int ioaddr, int bits);
+static int mdio_read(unsigned int ioaddr, int phy_id, int location);
+static void mdio_write(unsigned int ioaddr, int phy_id, int location,
+ int value);
+static unsigned short read_eeprom(unsigned int ioaddr, int index);
static void tc574_wait_for_completion(struct net_device *dev, int cmd);
static void tc574_reset(struct net_device *dev);
@@ -341,7 +342,7 @@ static int tc574_config(struct pcmcia_device *link)
tuple_t tuple;
__le16 buf[32];
int last_fn, last_ret, i, j;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
__be16 *phys_addr;
char *cardname;
__u32 config;
@@ -515,7 +516,7 @@ static int tc574_resume(struct pcmcia_device *link)
static void dump_status(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
EL3WINDOW(1);
printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
"%02x, tx free %04x\n", inw(ioaddr+EL3_STATUS),
@@ -544,7 +545,7 @@ static void tc574_wait_for_completion(struct net_device *dev, int cmd)
/* Read a word from the EEPROM using the regular EEPROM access register.
Assume that we are in register window zero.
*/
-static unsigned short read_eeprom(kio_addr_t ioaddr, int index)
+static unsigned short read_eeprom(unsigned int ioaddr, int index)
{
int timer;
outw(EEPROM_Read + index, ioaddr + Wn0EepromCmd);
@@ -572,9 +573,9 @@ static unsigned short read_eeprom(kio_addr_t ioaddr, int index)
/* Generate the preamble required for initial synchronization and
a few older transceivers. */
-static void mdio_sync(kio_addr_t ioaddr, int bits)
+static void mdio_sync(unsigned int ioaddr, int bits)
{
- kio_addr_t mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
/* Establish sync by sending at least 32 logic ones. */
while (-- bits >= 0) {
@@ -583,12 +584,12 @@ static void mdio_sync(kio_addr_t ioaddr, int bits)
}
}
-static int mdio_read(kio_addr_t ioaddr, int phy_id, int location)
+static int mdio_read(unsigned int ioaddr, int phy_id, int location)
{
int i;
int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
unsigned int retval = 0;
- kio_addr_t mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
if (mii_preamble_required)
mdio_sync(ioaddr, 32);
@@ -608,10 +609,10 @@ static int mdio_read(kio_addr_t ioaddr, int phy_id, int location)
return (retval>>1) & 0xffff;
}
-static void mdio_write(kio_addr_t ioaddr, int phy_id, int location, int value)
+static void mdio_write(unsigned int ioaddr, int phy_id, int location, int value)
{
int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
- kio_addr_t mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
int i;
if (mii_preamble_required)
@@ -637,7 +638,7 @@ static void tc574_reset(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
int i;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
unsigned long flags;
tc574_wait_for_completion(dev, TotalReset|0x10);
@@ -695,7 +696,7 @@ static void tc574_reset(struct net_device *dev)
mdio_write(ioaddr, lp->phys, 4, lp->advertising);
if (!auto_polarity) {
/* works for TDK 78Q2120 series MII's */
- int i = mdio_read(ioaddr, lp->phys, 16) | 0x20;
+ i = mdio_read(ioaddr, lp->phys, 16) | 0x20;
mdio_write(ioaddr, lp->phys, 16, i);
}
@@ -741,7 +742,7 @@ static int el3_open(struct net_device *dev)
static void el3_tx_timeout(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
dump_status(dev);
@@ -756,7 +757,7 @@ static void el3_tx_timeout(struct net_device *dev)
static void pop_tx_status(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i;
/* Clear the Tx status stack. */
@@ -779,7 +780,7 @@ static void pop_tx_status(struct net_device *dev)
static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
struct el3_private *lp = netdev_priv(dev);
unsigned long flags;
@@ -813,7 +814,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
unsigned status;
int work_budget = max_interrupt_work;
int handled = 0;
@@ -907,7 +908,7 @@ static void media_check(unsigned long arg)
{
struct net_device *dev = (struct net_device *) arg;
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
unsigned long flags;
unsigned short /* cable, */ media, partner;
@@ -996,7 +997,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
static void update_stats(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u8 rx, tx, up;
DEBUG(2, "%s: updating the statistics.\n", dev->name);
@@ -1033,7 +1034,7 @@ static void update_stats(struct net_device *dev)
static int el3_rx(struct net_device *dev, int worklimit)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
short rx_status;
DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
@@ -1094,7 +1095,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 *data = (u16 *)&rq->ifr_ifru;
int phy = lp->phys & 0x1f;
@@ -1148,7 +1149,7 @@ static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static void set_rx_mode(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
if (dev->flags & IFF_PROMISC)
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
@@ -1161,7 +1162,7 @@ static void set_rx_mode(struct net_device *dev)
static int el3_close(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
struct el3_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index e862d14ece79..1b1abb19c911 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -145,7 +145,7 @@ DRV_NAME ".c " DRV_VERSION " 2001/10/13 00:08:50 (David Hinds)";
static int tc589_config(struct pcmcia_device *link);
static void tc589_release(struct pcmcia_device *link);
-static u16 read_eeprom(kio_addr_t ioaddr, int index);
+static u16 read_eeprom(unsigned int ioaddr, int index);
static void tc589_reset(struct net_device *dev);
static void media_check(unsigned long arg);
static int el3_config(struct net_device *dev, struct ifmap *map);
@@ -254,7 +254,7 @@ static int tc589_config(struct pcmcia_device *link)
__le16 buf[32];
__be16 *phys_addr;
int last_fn, last_ret, i, j, multi = 0, fifo;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
DECLARE_MAC_BUF(mac);
@@ -403,7 +403,7 @@ static void tc589_wait_for_completion(struct net_device *dev, int cmd)
Read a word from the EEPROM using the regular EEPROM access register.
Assume that we are in register window zero.
*/
-static u16 read_eeprom(kio_addr_t ioaddr, int index)
+static u16 read_eeprom(unsigned int ioaddr, int index)
{
int i;
outw(EEPROM_READ + index, ioaddr + 10);
@@ -421,7 +421,7 @@ static u16 read_eeprom(kio_addr_t ioaddr, int index)
static void tc589_set_xcvr(struct net_device *dev, int if_port)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
EL3WINDOW(0);
switch (if_port) {
@@ -443,7 +443,7 @@ static void tc589_set_xcvr(struct net_device *dev, int if_port)
static void dump_status(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
EL3WINDOW(1);
printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
"%02x tx free %04x\n", inw(ioaddr+EL3_STATUS),
@@ -459,7 +459,7 @@ static void dump_status(struct net_device *dev)
/* Reset and restore all of the 3c589 registers. */
static void tc589_reset(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i;
EL3WINDOW(0);
@@ -567,7 +567,7 @@ static int el3_open(struct net_device *dev)
static void el3_tx_timeout(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
printk(KERN_WARNING "%s: Transmit timed out!\n", dev->name);
dump_status(dev);
@@ -582,7 +582,7 @@ static void el3_tx_timeout(struct net_device *dev)
static void pop_tx_status(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i;
/* Clear the Tx status stack. */
@@ -604,7 +604,7 @@ static void pop_tx_status(struct net_device *dev)
static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
struct el3_private *priv = netdev_priv(dev);
unsigned long flags;
@@ -641,7 +641,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
__u16 status;
int i = 0, handled = 1;
@@ -727,7 +727,7 @@ static void media_check(unsigned long arg)
{
struct net_device *dev = (struct net_device *)(arg);
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 media, errs;
unsigned long flags;
@@ -828,7 +828,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
static void update_stats(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
DEBUG(2, "%s: updating the statistics.\n", dev->name);
/* Turn off statistics updates while reading. */
@@ -855,7 +855,7 @@ static void update_stats(struct net_device *dev)
static int el3_rx(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int worklimit = 32;
short rx_status;
@@ -909,7 +909,7 @@ static void set_multicast_list(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 opts = SetRxFilter | RxStation | RxBroadcast;
if (!pcmcia_dev_present(link)) return;
@@ -924,7 +924,7 @@ static int el3_close(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
DEBUG(1, "%s: shutting down ethercard.\n", dev->name);
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 6d342f6c14f6..e8a63e483a2b 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -96,8 +96,8 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
static void ei_watchdog(u_long arg);
static void axnet_reset_8390(struct net_device *dev);
-static int mdio_read(kio_addr_t addr, int phy_id, int loc);
-static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value);
+static int mdio_read(unsigned int addr, int phy_id, int loc);
+static void mdio_write(unsigned int addr, int phy_id, int loc, int value);
static void get_8390_hdr(struct net_device *,
struct e8390_pkt_hdr *, int);
@@ -203,7 +203,7 @@ static void axnet_detach(struct pcmcia_device *link)
static int get_prom(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i, j;
/* This is based on drivers/net/ne.c */
@@ -473,7 +473,7 @@ static int axnet_resume(struct pcmcia_device *link)
#define MDIO_MASK 0x0f
#define MDIO_ENB_IN 0x02
-static void mdio_sync(kio_addr_t addr)
+static void mdio_sync(unsigned int addr)
{
int bits;
for (bits = 0; bits < 32; bits++) {
@@ -482,7 +482,7 @@ static void mdio_sync(kio_addr_t addr)
}
}
-static int mdio_read(kio_addr_t addr, int phy_id, int loc)
+static int mdio_read(unsigned int addr, int phy_id, int loc)
{
u_int cmd = (0xf6<<10)|(phy_id<<5)|loc;
int i, retval = 0;
@@ -501,7 +501,7 @@ static int mdio_read(kio_addr_t addr, int phy_id, int loc)
return (retval>>1) & 0xffff;
}
-static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value)
+static void mdio_write(unsigned int addr, int phy_id, int loc, int value)
{
u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
int i;
@@ -575,7 +575,7 @@ static int axnet_close(struct net_device *dev)
static void axnet_reset_8390(struct net_device *dev)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
int i;
ei_status.txing = ei_status.dmaing = 0;
@@ -610,8 +610,8 @@ static void ei_watchdog(u_long arg)
{
struct net_device *dev = (struct net_device *)(arg);
axnet_dev_t *info = PRIV(dev);
- kio_addr_t nic_base = dev->base_addr;
- kio_addr_t mii_addr = nic_base + AXNET_MII_EEP;
+ unsigned int nic_base = dev->base_addr;
+ unsigned int mii_addr = nic_base + AXNET_MII_EEP;
u_short link;
if (!netif_device_present(dev)) goto reschedule;
@@ -681,7 +681,7 @@ static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
axnet_dev_t *info = PRIV(dev);
u16 *data = (u16 *)&rq->ifr_ifru;
- kio_addr_t mii_addr = dev->base_addr + AXNET_MII_EEP;
+ unsigned int mii_addr = dev->base_addr + AXNET_MII_EEP;
switch (cmd) {
case SIOCGMIIPHY:
data[0] = info->phy_id;
@@ -703,7 +703,7 @@ static void get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr,
int ring_page)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
outb_p(ring_page, nic_base + EN0_RSARHI);
@@ -721,7 +721,7 @@ static void get_8390_hdr(struct net_device *dev,
static void block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
int xfer_count = count;
char *buf = skb->data;
@@ -744,7 +744,7 @@ static void block_input(struct net_device *dev, int count,
static void block_output(struct net_device *dev, int count,
const u_char *buf, const int start_page)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
#ifdef PCMCIA_DEBUG
if (ei_debug > 4)
@@ -991,7 +991,7 @@ static int ax_open(struct net_device *dev)
*
* Opposite of ax_open(). Only used when "ifconfig <devname> down" is done.
*/
-int ax_close(struct net_device *dev)
+static int ax_close(struct net_device *dev)
{
unsigned long flags;
@@ -1014,7 +1014,7 @@ int ax_close(struct net_device *dev)
* completed (or failed) - i.e. never posted a Tx related interrupt.
*/
-void ei_tx_timeout(struct net_device *dev)
+static void ei_tx_timeout(struct net_device *dev)
{
long e8390_base = dev->base_addr;
struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
@@ -1087,8 +1087,8 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
ei_local->irqlock = 1;
- send_length = ETH_ZLEN < length ? length : ETH_ZLEN;
-
+ send_length = max(length, ETH_ZLEN);
+
/*
* We have two Tx slots available for use. Find the first free
* slot, and then perform some sanity checks. With two Tx bufs,
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 949c6df74c97..8f328a03847b 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -298,7 +298,8 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
static int mfc_try_io_port(struct pcmcia_device *link)
{
int i, ret;
- static const kio_addr_t serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
+ static const unsigned int serial_base[5] =
+ { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
for (i = 0; i < 5; i++) {
link->io.BasePort2 = serial_base[i];
@@ -316,7 +317,7 @@ static int mfc_try_io_port(struct pcmcia_device *link)
static int ungermann_try_io_port(struct pcmcia_device *link)
{
int ret;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
/*
Ungermann-Bass Access/CARD accepts 0x300,0x320,0x340,0x360
0x380,0x3c0 only for ioport.
@@ -342,7 +343,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
cisparse_t parse;
u_short buf[32];
int i, last_fn = 0, last_ret = 0, ret;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
cardtype_t cardtype;
char *card_name = "unknown";
u_char *node_id;
@@ -610,7 +611,7 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link)
u_char __iomem *base;
int i, j;
struct net_device *dev = link->priv;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
/* Allocate a small memory window */
req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
@@ -735,7 +736,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
{
struct net_device *dev = dev_id;
local_info_t *lp = netdev_priv(dev);
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
unsigned short tx_stat, rx_stat;
ioaddr = dev->base_addr;
@@ -789,7 +790,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
static void fjn_tx_timeout(struct net_device *dev)
{
struct local_info_t *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
printk(KERN_NOTICE "%s: transmit timed out with status %04x, %s?\n",
dev->name, htons(inw(ioaddr + TX_STATUS)),
@@ -819,7 +820,7 @@ static void fjn_tx_timeout(struct net_device *dev)
static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct local_info_t *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
short length = skb->len;
if (length < ETH_ZLEN)
@@ -892,7 +893,7 @@ static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void fjn_reset(struct net_device *dev)
{
struct local_info_t *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i;
DEBUG(4, "fjn_reset(%s) called.\n",dev->name);
@@ -971,7 +972,7 @@ static void fjn_reset(struct net_device *dev)
static void fjn_rx(struct net_device *dev)
{
struct local_info_t *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int boguscount = 10; /* 5 -> 10: by agy 19940922 */
DEBUG(4, "%s: in rx_packet(), rx_status %02x.\n",
@@ -1125,7 +1126,7 @@ static int fjn_close(struct net_device *dev)
{
struct local_info_t *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
DEBUG(4, "fjn_close('%s').\n", dev->name);
@@ -1168,7 +1169,7 @@ static struct net_device_stats *fjn_get_stats(struct net_device *dev)
static void set_rx_mode(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_char mc_filter[8]; /* Multicast hash filter */
u_long flags;
int i;
@@ -1197,8 +1198,7 @@ static void set_rx_mode(struct net_device *dev)
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
struct dev_mc_list *mclist;
- int i;
-
+
memset(mc_filter, 0, sizeof(mc_filter));
for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
i++, mclist = mclist->next) {
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index a355a93b908b..cfcbea9b7e2e 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -518,7 +518,7 @@ mace_read
assuming that during normal operation, the MACE is always in
bank 0.
---------------------------------------------------------------------------- */
-static int mace_read(mace_private *lp, kio_addr_t ioaddr, int reg)
+static int mace_read(mace_private *lp, unsigned int ioaddr, int reg)
{
int data = 0xFF;
unsigned long flags;
@@ -545,7 +545,8 @@ mace_write
are assuming that during normal operation, the MACE is always in
bank 0.
---------------------------------------------------------------------------- */
-static void mace_write(mace_private *lp, kio_addr_t ioaddr, int reg, int data)
+static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
+ int data)
{
unsigned long flags;
@@ -567,7 +568,7 @@ static void mace_write(mace_private *lp, kio_addr_t ioaddr, int reg, int data)
mace_init
Resets the MACE chip.
---------------------------------------------------------------------------- */
-static int mace_init(mace_private *lp, kio_addr_t ioaddr, char *enet_addr)
+static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
{
int i;
int ct = 0;
@@ -657,7 +658,7 @@ static int nmclan_config(struct pcmcia_device *link)
tuple_t tuple;
u_char buf[64];
int i, last_ret, last_fn;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
DECLARE_MAC_BUF(mac);
DEBUG(0, "nmclan_config(0x%p)\n", link);
@@ -839,7 +840,7 @@ mace_open
---------------------------------------------------------------------------- */
static int mace_open(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
mace_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
@@ -862,7 +863,7 @@ mace_close
---------------------------------------------------------------------------- */
static int mace_close(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
mace_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
@@ -935,7 +936,7 @@ static void mace_tx_timeout(struct net_device *dev)
static int mace_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
mace_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
netif_stop_queue(dev);
@@ -996,7 +997,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
mace_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
int status;
int IntrCnt = MACE_MAX_IR_ITERATIONS;
@@ -1140,7 +1141,7 @@ mace_rx
static int mace_rx(struct net_device *dev, unsigned char RxCnt)
{
mace_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
unsigned char rx_framecnt;
unsigned short rx_status;
@@ -1302,7 +1303,7 @@ update_stats
card's SRAM fast enough. If this happens, something is
seriously wrong with the hardware.
---------------------------------------------------------------------------- */
-static void update_stats(kio_addr_t ioaddr, struct net_device *dev)
+static void update_stats(unsigned int ioaddr, struct net_device *dev)
{
mace_private *lp = netdev_priv(dev);
@@ -1448,7 +1449,7 @@ static void restore_multicast_list(struct net_device *dev)
mace_private *lp = netdev_priv(dev);
int num_addrs = lp->multicast_num_addrs;
int *ladrf = lp->multicast_ladrf;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i;
DEBUG(2, "%s: restoring Rx mode to %d addresses.\n",
@@ -1540,7 +1541,7 @@ static void set_multicast_list(struct net_device *dev)
static void restore_multicast_list(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
mace_private *lp = netdev_priv(dev);
DEBUG(2, "%s: restoring Rx mode to %d addresses.\n", dev->name,
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 9ba56aa26a1b..6323988dfa1d 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -349,7 +349,7 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
static hw_info_t *get_prom(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_char prom[32];
int i, j;
@@ -425,7 +425,7 @@ static hw_info_t *get_dl10019(struct pcmcia_device *link)
static hw_info_t *get_ax88190(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i, j;
/* Not much of a test, but the alternatives are messy */
@@ -521,7 +521,7 @@ static int pcnet_config(struct pcmcia_device *link)
int i, last_ret, last_fn, start_pg, stop_pg, cm_offset;
int has_shmem = 0;
u_short buf[64];
- hw_info_t *hw_info;
+ hw_info_t *local_hw_info;
DECLARE_MAC_BUF(mac);
DEBUG(0, "pcnet_config(0x%p)\n", link);
@@ -590,23 +590,23 @@ static int pcnet_config(struct pcmcia_device *link)
dev->if_port = 0;
}
- hw_info = get_hwinfo(link);
- if (hw_info == NULL)
- hw_info = get_prom(link);
- if (hw_info == NULL)
- hw_info = get_dl10019(link);
- if (hw_info == NULL)
- hw_info = get_ax88190(link);
- if (hw_info == NULL)
- hw_info = get_hwired(link);
-
- if (hw_info == NULL) {
+ local_hw_info = get_hwinfo(link);
+ if (local_hw_info == NULL)
+ local_hw_info = get_prom(link);
+ if (local_hw_info == NULL)
+ local_hw_info = get_dl10019(link);
+ if (local_hw_info == NULL)
+ local_hw_info = get_ax88190(link);
+ if (local_hw_info == NULL)
+ local_hw_info = get_hwired(link);
+
+ if (local_hw_info == NULL) {
printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
" address for io base %#3lx\n", dev->base_addr);
goto failed;
}
- info->flags = hw_info->flags;
+ info->flags = local_hw_info->flags;
/* Check for user overrides */
info->flags |= (delay_output) ? DELAY_OUTPUT : 0;
if ((link->manf_id == MANFID_SOCKET) &&
@@ -756,7 +756,7 @@ static int pcnet_resume(struct pcmcia_device *link)
#define MDIO_DATA_READ 0x10
#define MDIO_MASK 0x0f
-static void mdio_sync(kio_addr_t addr)
+static void mdio_sync(unsigned int addr)
{
int bits, mask = inb(addr) & MDIO_MASK;
for (bits = 0; bits < 32; bits++) {
@@ -765,7 +765,7 @@ static void mdio_sync(kio_addr_t addr)
}
}
-static int mdio_read(kio_addr_t addr, int phy_id, int loc)
+static int mdio_read(unsigned int addr, int phy_id, int loc)
{
u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
int i, retval = 0, mask = inb(addr) & MDIO_MASK;
@@ -784,7 +784,7 @@ static int mdio_read(kio_addr_t addr, int phy_id, int loc)
return (retval>>1) & 0xffff;
}
-static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value)
+static void mdio_write(unsigned int addr, int phy_id, int loc, int value)
{
u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
int i, mask = inb(addr) & MDIO_MASK;
@@ -818,10 +818,10 @@ static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value)
#define DL19FDUPLX 0x0400 /* DL10019 Full duplex mode */
-static int read_eeprom(kio_addr_t ioaddr, int location)
+static int read_eeprom(unsigned int ioaddr, int location)
{
int i, retval = 0;
- kio_addr_t ee_addr = ioaddr + DLINK_EEPROM;
+ unsigned int ee_addr = ioaddr + DLINK_EEPROM;
int read_cmd = location | (EE_READ_CMD << 8);
outb(0, ee_addr);
@@ -852,10 +852,10 @@ static int read_eeprom(kio_addr_t ioaddr, int location)
In ASIC mode, EE_ADOT is used to output the data to the ASIC.
*/
-static void write_asic(kio_addr_t ioaddr, int location, short asic_data)
+static void write_asic(unsigned int ioaddr, int location, short asic_data)
{
int i;
- kio_addr_t ee_addr = ioaddr + DLINK_EEPROM;
+ unsigned int ee_addr = ioaddr + DLINK_EEPROM;
short dataval;
int read_cmd = location | (EE_READ_CMD << 8);
@@ -897,7 +897,7 @@ static void write_asic(kio_addr_t ioaddr, int location, short asic_data)
static void set_misc_reg(struct net_device *dev)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
pcnet_dev_t *info = PRIV(dev);
u_char tmp;
@@ -936,7 +936,7 @@ static void set_misc_reg(struct net_device *dev)
static void mii_phy_probe(struct net_device *dev)
{
pcnet_dev_t *info = PRIV(dev);
- kio_addr_t mii_addr = dev->base_addr + DLINK_GPIO;
+ unsigned int mii_addr = dev->base_addr + DLINK_GPIO;
int i;
u_int tmp, phyid;
@@ -1014,7 +1014,7 @@ static int pcnet_close(struct net_device *dev)
static void pcnet_reset_8390(struct net_device *dev)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
int i;
ei_status.txing = ei_status.dmaing = 0;
@@ -1074,8 +1074,8 @@ static void ei_watchdog(u_long arg)
{
struct net_device *dev = (struct net_device *)arg;
pcnet_dev_t *info = PRIV(dev);
- kio_addr_t nic_base = dev->base_addr;
- kio_addr_t mii_addr = nic_base + DLINK_GPIO;
+ unsigned int nic_base = dev->base_addr;
+ unsigned int mii_addr = nic_base + DLINK_GPIO;
u_short link;
if (!netif_device_present(dev)) goto reschedule;
@@ -1177,7 +1177,7 @@ static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
pcnet_dev_t *info = PRIV(dev);
u16 *data = (u16 *)&rq->ifr_ifru;
- kio_addr_t mii_addr = dev->base_addr + DLINK_GPIO;
+ unsigned int mii_addr = dev->base_addr + DLINK_GPIO;
switch (cmd) {
case SIOCGMIIPHY:
data[0] = info->phy_id;
@@ -1199,7 +1199,7 @@ static void dma_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr,
int ring_page)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
if (ei_status.dmaing) {
printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
@@ -1230,7 +1230,7 @@ static void dma_get_8390_hdr(struct net_device *dev,
static void dma_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
int xfer_count = count;
char *buf = skb->data;
@@ -1285,7 +1285,7 @@ static void dma_block_input(struct net_device *dev, int count,
static void dma_block_output(struct net_device *dev, int count,
const u_char *buf, const int start_page)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
pcnet_dev_t *info = PRIV(dev);
#ifdef PCMCIA_DEBUG
int retries = 0;
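
A side note on the pcnet_config() hunk above: renaming the local hw_info pointer to local_hw_info reads like a shadowing cleanup, since pcnet_cs.c also carries hw_info definitions at file scope and a same-named local hides them. A minimal sketch of the pattern, with invented names rather than code from the driver:

    /* sketch: keep local names distinct from file-scope symbols */
    static const int hw_defaults[3] = { 10, 100, 1000 };  /* file-scope table */

    static int pick_default(int want_fast)
    {
            /*
             * A local also called hw_defaults would shadow the table above;
             * a distinct name (compare local_hw_info in pcnet_config) keeps
             * both visible and the references unambiguous.
             */
            const int *local_defaults = want_fast ? &hw_defaults[2]
                                                  : &hw_defaults[0];

            return *local_defaults;
    }
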
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index c9868e9dac4c..f18eca9831e8 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -295,7 +295,7 @@ static int s9k_config(struct net_device *dev, struct ifmap *map);
static void smc_set_xcvr(struct net_device *dev, int if_port);
static void smc_reset(struct net_device *dev);
static void media_check(u_long arg);
-static void mdio_sync(kio_addr_t addr);
+static void mdio_sync(unsigned int addr);
static int mdio_read(struct net_device *dev, int phy_id, int loc);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int value);
static int smc_link_ok(struct net_device *dev);
@@ -601,8 +601,8 @@ static void mot_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
- kio_addr_t iouart = link->io.BasePort2;
+ unsigned int ioaddr = dev->base_addr;
+ unsigned int iouart = link->io.BasePort2;
/* Set UART base address and force map with COR bit 1 */
writeb(iouart & 0xff, smc->base + MOT_UART + CISREG_IOBASE_0);
@@ -621,7 +621,7 @@ static void mot_config(struct pcmcia_device *link)
static int mot_setup(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i, wait, loop;
u_int addr;
@@ -754,7 +754,7 @@ free_cfg_mem:
static int osi_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- static const kio_addr_t com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 };
+ static const unsigned int com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 };
int i, j;
link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -900,7 +900,7 @@ static int smc91c92_resume(struct pcmcia_device *link)
static int check_sig(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int width;
u_short s;
@@ -960,7 +960,7 @@ static int smc91c92_config(struct pcmcia_device *link)
struct smc_private *smc = netdev_priv(dev);
char *name;
int i, j, rev;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
u_long mir;
DECLARE_MAC_BUF(mac);
@@ -1136,7 +1136,7 @@ static void smc91c92_release(struct pcmcia_device *link)
#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT)
#define MDIO_DATA_READ 0x02
-static void mdio_sync(kio_addr_t addr)
+static void mdio_sync(unsigned int addr)
{
int bits;
for (bits = 0; bits < 32; bits++) {
@@ -1147,7 +1147,7 @@ static void mdio_sync(kio_addr_t addr)
static int mdio_read(struct net_device *dev, int phy_id, int loc)
{
- kio_addr_t addr = dev->base_addr + MGMT;
+ unsigned int addr = dev->base_addr + MGMT;
u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
int i, retval = 0;
@@ -1167,7 +1167,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int loc)
static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
- kio_addr_t addr = dev->base_addr + MGMT;
+ unsigned int addr = dev->base_addr + MGMT;
u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
int i;
@@ -1193,7 +1193,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
#ifdef PCMCIA_DEBUG
static void smc_dump(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_short i, w, save;
save = inw(ioaddr + BANK_SELECT);
for (w = 0; w < 4; w++) {
@@ -1248,7 +1248,7 @@ static int smc_close(struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
struct pcmcia_device *link = smc->p_dev;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
DEBUG(0, "%s: smc_close(), status %4.4x.\n",
dev->name, inw(ioaddr + BANK_SELECT));
@@ -1285,7 +1285,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
{
struct smc_private *smc = netdev_priv(dev);
struct sk_buff *skb = smc->saved_skb;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_char packet_no;
if (!skb) {
@@ -1349,7 +1349,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
static void smc_tx_timeout(struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
printk(KERN_NOTICE "%s: SMC91c92 transmit timed out, "
"Tx_status %2.2x status %4.4x.\n",
@@ -1364,7 +1364,7 @@ static void smc_tx_timeout(struct net_device *dev)
static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_short num_pages;
short time_out, ir;
unsigned long flags;
@@ -1434,7 +1434,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void smc_tx_err(struct net_device * dev)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int saved_packet = inw(ioaddr + PNR_ARR) & 0xff;
int packet_no = inw(ioaddr + FIFO_PORTS) & 0x7f;
int tx_status;
@@ -1478,7 +1478,7 @@ static void smc_tx_err(struct net_device * dev)
static void smc_eph_irq(struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_short card_stats, ephs;
SMC_SELECT_BANK(0);
@@ -1513,7 +1513,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
u_short saved_bank, saved_pointer, mask, status;
unsigned int handled = 1;
char bogus_cnt = INTR_WORK; /* Work we are willing to do. */
@@ -1633,7 +1633,7 @@ irq_done:
static void smc_rx(struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int rx_status;
int packet_length; /* Caution: not frame length, rather words
to transfer from the chip. */
@@ -1738,7 +1738,7 @@ static void fill_multicast_tbl(int count, struct dev_mc_list *addrs,
static void set_rx_mode(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
struct smc_private *smc = netdev_priv(dev);
u_int multicast_table[ 2 ] = { 0, };
unsigned long flags;
@@ -1804,7 +1804,7 @@ static int s9k_config(struct net_device *dev, struct ifmap *map)
static void smc_set_xcvr(struct net_device *dev, int if_port)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_short saved_bank;
saved_bank = inw(ioaddr + BANK_SELECT);
@@ -1827,7 +1827,7 @@ static void smc_set_xcvr(struct net_device *dev, int if_port)
static void smc_reset(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
struct smc_private *smc = netdev_priv(dev);
int i;
@@ -1904,7 +1904,7 @@ static void media_check(u_long arg)
{
struct net_device *dev = (struct net_device *) arg;
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_short i, media, saved_bank;
u_short link;
unsigned long flags;
@@ -2021,7 +2021,7 @@ reschedule:
static int smc_link_ok(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
struct smc_private *smc = netdev_priv(dev);
if (smc->cfg & CFG_MII_SELECT) {
@@ -2035,7 +2035,7 @@ static int smc_link_ok(struct net_device *dev)
static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
u16 tmp;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
ecmd->supported = (SUPPORTED_TP | SUPPORTED_AUI |
SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full);
@@ -2057,7 +2057,7 @@ static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
u16 tmp;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
if (ecmd->speed != SPEED_10)
return -EINVAL;
@@ -2100,7 +2100,7 @@ static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int ret;
@@ -2118,7 +2118,7 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int ret;
@@ -2136,7 +2136,7 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
static u32 smc_get_link(struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
u32 ret;
@@ -2164,7 +2164,7 @@ static int smc_nway_reset(struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
if (smc->cfg & CFG_MII_SELECT) {
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int res;
@@ -2196,7 +2196,7 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
struct mii_ioctl_data *mii = if_mii(rq);
int rc = 0;
u16 saved_bank;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
if (!netif_running(dev))
return -EINVAL;
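
Several of the drivers touched here (pcnet_cs, smc91c92) bit-bang the MII management interface, and the constants in their mdio_read()/mdio_write() helpers, (0x06<<10) for reads and (0x05<<28) for writes, are the IEEE 802.3 clause-22 start and opcode bits packed ahead of the PHY and register addresses. A small sketch of how the read command word is assembled; the helper name is invented:

    /*
     * Clause-22 MDIO read command, shifted out MSB first over MDIO/MDC:
     *   start (01) | opcode (10 = read) | 5-bit PHY addr | 5-bit register
     */
    static unsigned int mdio_read_cmd(unsigned int phy_id, unsigned int reg)
    {
            return (0x06 << 10) | (phy_id << 5) | (reg & 0x1f);
    }

A write uses opcode 01 and appends a turnaround bit plus the 16 data bits, which is why the write command in these drivers is built as a full 32-bit word.
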
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 1f09bea6db5a..d041f831a18d 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -273,12 +273,12 @@ INT_MODULE_PARM(lockup_hack, 0); /* anti lockup hack */
static unsigned maxrx_bytes = 22000;
/* MII management prototypes */
-static void mii_idle(kio_addr_t ioaddr);
-static void mii_putbit(kio_addr_t ioaddr, unsigned data);
-static int mii_getbit(kio_addr_t ioaddr);
-static void mii_wbits(kio_addr_t ioaddr, unsigned data, int len);
-static unsigned mii_rd(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg);
-static void mii_wr(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg,
+static void mii_idle(unsigned int ioaddr);
+static void mii_putbit(unsigned int ioaddr, unsigned data);
+static int mii_getbit(unsigned int ioaddr);
+static void mii_wbits(unsigned int ioaddr, unsigned data, int len);
+static unsigned mii_rd(unsigned int ioaddr, u_char phyaddr, u_char phyreg);
+static void mii_wr(unsigned int ioaddr, u_char phyaddr, u_char phyreg,
unsigned data, int len);
/*
@@ -403,7 +403,7 @@ next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
static void
PrintRegisters(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
if (pc_debug > 1) {
int i, page;
@@ -439,7 +439,7 @@ PrintRegisters(struct net_device *dev)
* Turn around for read
*/
static void
-mii_idle(kio_addr_t ioaddr)
+mii_idle(unsigned int ioaddr)
{
PutByte(XIRCREG2_GPR2, 0x04|0); /* drive MDCK low */
udelay(1);
@@ -451,7 +451,7 @@ mii_idle(kio_addr_t ioaddr)
* Write a bit to MDI/O
*/
static void
-mii_putbit(kio_addr_t ioaddr, unsigned data)
+mii_putbit(unsigned int ioaddr, unsigned data)
{
#if 1
if (data) {
@@ -484,7 +484,7 @@ mii_putbit(kio_addr_t ioaddr, unsigned data)
* Get a bit from MDI/O
*/
static int
-mii_getbit(kio_addr_t ioaddr)
+mii_getbit(unsigned int ioaddr)
{
unsigned d;
@@ -497,7 +497,7 @@ mii_getbit(kio_addr_t ioaddr)
}
static void
-mii_wbits(kio_addr_t ioaddr, unsigned data, int len)
+mii_wbits(unsigned int ioaddr, unsigned data, int len)
{
unsigned m = 1 << (len-1);
for (; m; m >>= 1)
@@ -505,7 +505,7 @@ mii_wbits(kio_addr_t ioaddr, unsigned data, int len)
}
static unsigned
-mii_rd(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg)
+mii_rd(unsigned int ioaddr, u_char phyaddr, u_char phyreg)
{
int i;
unsigned data=0, m;
@@ -527,7 +527,8 @@ mii_rd(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg)
}
static void
-mii_wr(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg, unsigned data, int len)
+mii_wr(unsigned int ioaddr, u_char phyaddr, u_char phyreg, unsigned data,
+ int len)
{
int i;
@@ -726,7 +727,7 @@ xirc2ps_config(struct pcmcia_device * link)
local_info_t *local = netdev_priv(dev);
tuple_t tuple;
cisparse_t parse;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
int err, i;
u_char buf[64];
cistpl_lan_node_id_t *node_id = (cistpl_lan_node_id_t*)parse.funce.data;
@@ -1104,7 +1105,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
local_info_t *lp = netdev_priv(dev);
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
u_char saved_page;
unsigned bytes_rcvd;
unsigned int_status, eth_status, rx_status, tx_status;
@@ -1209,7 +1210,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
unsigned i;
u_long *p = skb_put(skb, pktlen);
register u_long a;
- kio_addr_t edpreg = ioaddr+XIRCREG_EDP-2;
+ unsigned int edpreg = ioaddr+XIRCREG_EDP-2;
for (i=0; i < len ; i += 4, p++) {
a = inl(edpreg);
__asm__("rorl $16,%0\n\t"
@@ -1346,7 +1347,7 @@ static int
do_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
local_info_t *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int okay;
unsigned freespace;
unsigned pktlen = skb->len;
@@ -1415,7 +1416,7 @@ do_get_stats(struct net_device *dev)
static void
set_addresses(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
local_info_t *lp = netdev_priv(dev);
struct dev_mc_list *dmi = dev->mc_list;
unsigned char *addr;
@@ -1459,7 +1460,7 @@ set_addresses(struct net_device *dev)
static void
set_multicast_list(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
SelectPage(0x42);
if (dev->flags & IFF_PROMISC) { /* snoop */
@@ -1543,7 +1544,7 @@ static int
do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
local_info_t *local = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 *data = (u16 *)&rq->ifr_ifru;
DEBUG(1, "%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n",
@@ -1575,7 +1576,7 @@ static void
hardreset(struct net_device *dev)
{
local_info_t *local = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
SelectPage(4);
udelay(1);
@@ -1592,7 +1593,7 @@ static void
do_reset(struct net_device *dev, int full)
{
local_info_t *local = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
unsigned value;
DEBUG(0, "%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full);
@@ -1753,7 +1754,7 @@ static int
init_mii(struct net_device *dev)
{
local_info_t *local = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
unsigned control, status, linkpartner;
int i;
@@ -1826,7 +1827,7 @@ static void
do_powerdown(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
DEBUG(0, "do_powerdown(%p)\n", dev);
@@ -1838,7 +1839,7 @@ do_powerdown(struct net_device *dev)
static int
do_stop(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
local_info_t *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 7fe03ce774b1..f4ca0591231d 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -60,6 +60,11 @@ config ICPLUS_PHY
---help---
Currently supports the IP175C PHY.
+config REALTEK_PHY
+ tristate "Drivers for Realtek PHYs"
+ ---help---
+ Supports the Realtek 821x PHY.
+
config FIXED_PHY
bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 3d6cc7b67a80..5997d6ef702b 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -12,5 +12,6 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
+obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_FIXED_PHY) += fixed.o
obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 29666c85ed55..5b80358af658 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -141,6 +141,20 @@ static struct phy_driver bcm5461_driver = {
.driver = { .owner = THIS_MODULE },
};
+static struct phy_driver bcm5482_driver = {
+ .phy_id = 0x0143bcb0,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM5482",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm54xx_ack_interrupt,
+ .config_intr = bcm54xx_config_intr,
+ .driver = { .owner = THIS_MODULE },
+};
+
static int __init broadcom_init(void)
{
int ret;
@@ -154,8 +168,13 @@ static int __init broadcom_init(void)
ret = phy_driver_register(&bcm5461_driver);
if (ret)
goto out_5461;
+ ret = phy_driver_register(&bcm5482_driver);
+ if (ret)
+ goto out_5482;
return ret;
+out_5482:
+ phy_driver_unregister(&bcm5461_driver);
out_5461:
phy_driver_unregister(&bcm5421_driver);
out_5421:
@@ -166,6 +185,7 @@ out_5411:
static void __exit broadcom_exit(void)
{
+ phy_driver_unregister(&bcm5482_driver);
phy_driver_unregister(&bcm5461_driver);
phy_driver_unregister(&bcm5421_driver);
phy_driver_unregister(&bcm5411_driver);
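
The broadcom_init() change above extends the usual cascading unwind for multiple registrations: each driver added to the chain gets an error label that unregisters everything registered before it, with the labels falling through in reverse order. A standalone sketch of that shape, using hypothetical stand-ins for phy_driver_register()/unregister():

    #include <linux/errno.h>

    static int register_a(void) { return 0; }
    static int register_b(void) { return 0; }
    static int register_c(void) { return -ENODEV; }  /* say this one fails */
    static void unregister_a(void) { }
    static void unregister_b(void) { }

    static int sketch_init(void)
    {
            int ret;

            ret = register_a();
            if (ret)
                    goto out;
            ret = register_b();
            if (ret)
                    goto out_a;
            ret = register_c();
            if (ret)
                    goto out_b;
            return 0;

    out_b:                  /* unwind in reverse order of registration */
            unregister_b();
    out_a:
            unregister_a();
    out:
            return ret;
    }
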
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index c30196d0ad16..6e9f619c491f 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -49,7 +49,7 @@ int mdiobus_register(struct mii_bus *bus)
int i;
int err = 0;
- spin_lock_init(&bus->mdio_lock);
+ mutex_init(&bus->mdio_lock);
if (NULL == bus || NULL == bus->name ||
NULL == bus->read ||
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7c9e6e349503..12fccb1c76dc 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -26,7 +26,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
@@ -72,9 +71,11 @@ int phy_read(struct phy_device *phydev, u16 regnum)
int retval;
struct mii_bus *bus = phydev->bus;
- spin_lock_bh(&bus->mdio_lock);
+ BUG_ON(in_interrupt());
+
+ mutex_lock(&bus->mdio_lock);
retval = bus->read(bus, phydev->addr, regnum);
- spin_unlock_bh(&bus->mdio_lock);
+ mutex_unlock(&bus->mdio_lock);
return retval;
}
@@ -95,9 +96,11 @@ int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
int err;
struct mii_bus *bus = phydev->bus;
- spin_lock_bh(&bus->mdio_lock);
+ BUG_ON(in_interrupt());
+
+ mutex_lock(&bus->mdio_lock);
err = bus->write(bus, phydev->addr, regnum, val);
- spin_unlock_bh(&bus->mdio_lock);
+ mutex_unlock(&bus->mdio_lock);
return err;
}
@@ -428,7 +431,7 @@ int phy_start_aneg(struct phy_device *phydev)
{
int err;
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev);
@@ -449,13 +452,14 @@ int phy_start_aneg(struct phy_device *phydev)
}
out_unlock:
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
return err;
}
EXPORT_SYMBOL(phy_start_aneg);
static void phy_change(struct work_struct *work);
+static void phy_state_machine(struct work_struct *work);
static void phy_timer(unsigned long data);
/**
@@ -476,6 +480,7 @@ void phy_start_machine(struct phy_device *phydev,
{
phydev->adjust_state = handler;
+ INIT_WORK(&phydev->state_queue, phy_state_machine);
init_timer(&phydev->phy_timer);
phydev->phy_timer.function = &phy_timer;
phydev->phy_timer.data = (unsigned long) phydev;
@@ -493,11 +498,12 @@ void phy_start_machine(struct phy_device *phydev,
void phy_stop_machine(struct phy_device *phydev)
{
del_timer_sync(&phydev->phy_timer);
+ cancel_work_sync(&phydev->state_queue);
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
if (phydev->state > PHY_UP)
phydev->state = PHY_UP;
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
phydev->adjust_state = NULL;
}
@@ -541,9 +547,9 @@ static void phy_force_reduction(struct phy_device *phydev)
*/
void phy_error(struct phy_device *phydev)
{
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
phydev->state = PHY_HALTED;
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
}
/**
@@ -705,10 +711,10 @@ static void phy_change(struct work_struct *work)
if (err)
goto phy_err;
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
phydev->state = PHY_CHANGELINK;
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
atomic_dec(&phydev->irq_disable);
enable_irq(phydev->irq);
@@ -735,7 +741,7 @@ phy_err:
*/
void phy_stop(struct phy_device *phydev)
{
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
if (PHY_HALTED == phydev->state)
goto out_unlock;
@@ -751,7 +757,7 @@ void phy_stop(struct phy_device *phydev)
phydev->state = PHY_HALTED;
out_unlock:
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
/*
* Cannot call flush_scheduled_work() here as desired because
@@ -773,7 +779,7 @@ out_unlock:
*/
void phy_start(struct phy_device *phydev)
{
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
switch (phydev->state) {
case PHY_STARTING:
@@ -787,19 +793,26 @@ void phy_start(struct phy_device *phydev)
default:
break;
}
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);
-/* PHY timer which handles the state machine */
-static void phy_timer(unsigned long data)
+/**
+ * phy_state_machine - Handle the state machine
+ * @work: work_struct that describes the work to be done
+ *
+ * Description: Scheduled by the state_queue workqueue each time
+ * phy_timer is triggered.
+ */
+static void phy_state_machine(struct work_struct *work)
{
- struct phy_device *phydev = (struct phy_device *)data;
+ struct phy_device *phydev =
+ container_of(work, struct phy_device, state_queue);
int needs_aneg = 0;
int err = 0;
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
if (phydev->adjust_state)
phydev->adjust_state(phydev->attached_dev);
@@ -965,7 +978,7 @@ static void phy_timer(unsigned long data)
break;
}
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
if (needs_aneg)
err = phy_start_aneg(phydev);
@@ -976,3 +989,14 @@ static void phy_timer(unsigned long data)
mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
}
+/* PHY timer which schedules the state machine work */
+static void phy_timer(unsigned long data)
+{
+ struct phy_device *phydev = (struct phy_device *)data;
+
+ /*
+ * PHY I/O operations can potentially sleep so we ensure that
+ * it's done from a process context
+ */
+ schedule_work(&phydev->state_queue);
+}
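
The phy.c and phy_device.c changes replace the bus and device spinlocks with mutexes, and since a mutex cannot be taken from the softirq context a kernel timer runs in, phy_timer() is reduced to schedule_work() while the state machine itself moves into a work item that runs in process context, where phy_read()/phy_write() are free to sleep behind mutex_lock(). A minimal sketch of that deferral pattern with placeholder names, not the phylib code itself:

    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/timer.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct foo {
            struct mutex            lock;       /* may sleep: no softirq use */
            struct work_struct      poll_work;
            struct timer_list       poll_timer;
    };

    static void foo_poll_work(struct work_struct *work)
    {
            struct foo *foo = container_of(work, struct foo, poll_work);

            mutex_lock(&foo->lock);     /* workqueue = process context, OK */
            /* ... poll the hardware, possibly sleeping ... */
            mutex_unlock(&foo->lock);
    }

    static void foo_poll_timer(unsigned long data)
    {
            struct foo *foo = (struct foo *)data;

            /* timers run in softirq context, so only queue the real work */
            schedule_work(&foo->poll_work);
    }

    static void foo_start(struct foo *foo)
    {
            mutex_init(&foo->lock);
            INIT_WORK(&foo->poll_work, foo_poll_work);
            init_timer(&foo->poll_timer);
            foo->poll_timer.function = foo_poll_timer;
            foo->poll_timer.data = (unsigned long)foo;
            mod_timer(&foo->poll_timer, jiffies + HZ);
    }
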
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 5b9e1751e1b4..f4c4fd85425f 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -25,7 +25,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
@@ -80,7 +79,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
dev->state = PHY_DOWN;
- spin_lock_init(&dev->lock);
+ mutex_init(&dev->lock);
return dev;
}
@@ -656,7 +655,7 @@ static int phy_probe(struct device *dev)
if (!(phydrv->flags & PHY_HAS_INTERRUPT))
phydev->irq = PHY_POLL;
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
/* Start out supporting everything. Eventually,
* a controller will attach, and may modify one
@@ -670,7 +669,7 @@ static int phy_probe(struct device *dev)
if (phydev->drv->probe)
err = phydev->drv->probe(phydev);
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
return err;
@@ -682,9 +681,9 @@ static int phy_remove(struct device *dev)
phydev = to_phy_device(dev);
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
phydev->state = PHY_DOWN;
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
if (phydev->drv->remove)
phydev->drv->remove(phydev);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
new file mode 100644
index 000000000000..a052a6744a51
--- /dev/null
+++ b/drivers/net/phy/realtek.c
@@ -0,0 +1,80 @@
+/*
+ * drivers/net/phy/realtek.c
+ *
+ * Driver for Realtek PHYs
+ *
+ * Author: Johnson Leung <r58129@freescale.com>
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/phy.h>
+
+#define RTL821x_PHYSR 0x11
+#define RTL821x_PHYSR_DUPLEX 0x2000
+#define RTL821x_PHYSR_SPEED 0xc000
+#define RTL821x_INER 0x12
+#define RTL821x_INER_INIT 0x6400
+#define RTL821x_INSR 0x13
+
+MODULE_DESCRIPTION("Realtek PHY driver");
+MODULE_AUTHOR("Johnson Leung");
+MODULE_LICENSE("GPL");
+
+static int rtl821x_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, RTL821x_INSR);
+
+ return (err < 0) ? err : 0;
+}
+
+static int rtl821x_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, RTL821x_INER,
+ RTL821x_INER_INIT);
+ else
+ err = phy_write(phydev, RTL821x_INER, 0);
+
+ return err;
+}
+
+/* RTL8211B */
+static struct phy_driver rtl821x_driver = {
+ .phy_id = 0x001cc912,
+ .name = "RTL821x Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &rtl821x_ack_interrupt,
+ .config_intr = &rtl821x_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init realtek_init(void)
+{
+ int ret;
+
+ ret = phy_driver_register(&rtl821x_driver);
+
+ return ret;
+}
+
+static void __exit realtek_exit(void)
+{
+ phy_driver_unregister(&rtl821x_driver);
+}
+
+module_init(realtek_init);
+module_exit(realtek_exit);
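
For context on the .phy_id/.phy_id_mask pair in the new realtek.c driver: phylib matches a probed device against a driver by comparing both IDs under the driver's mask, so only the bits set in 0x001fffff have to agree with 0x001cc912. A small sketch of the comparison; the helper name is made up and the real matching lives in the phylib core:

    #include <linux/types.h>

    static bool phy_id_matches(u32 dev_id, u32 drv_id, u32 drv_mask)
    {
            /* bits cleared in the mask are "don't care" for matching */
            return (dev_id & drv_mask) == (drv_id & drv_mask);
    }
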
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 1b51bb668d39..5aa0a8089694 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -2468,9 +2468,10 @@ static int __init pppol2tp_init(void)
out:
return err;
-
+#ifdef CONFIG_PROC_FS
out_unregister_pppox_proto:
unregister_pppox_proto(PX_PROTO_OL2TP);
+#endif
out_unregister_pppol2tp_proto:
proto_unregister(&pppol2tp_sk_proto);
goto out;
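
The pppol2tp_init() hunk wraps the out_unregister_pppox_proto label in the same #ifdef as the only goto that can reach it; without the guard, a build with CONFIG_PROC_FS disabled would warn about a defined-but-unused label. A compact sketch of the idiom, with a placeholder config symbol and helpers:

    static int optional_setup(void) { return 0; }
    static void undo_earlier_setup(void) { }

    static int sketch_init(void)
    {
            int err = 0;

    #ifdef CONFIG_FOO                     /* placeholder for CONFIG_PROC_FS */
            err = optional_setup();
            if (err)
                    goto out_undo;
    #endif
            return err;

    #ifdef CONFIG_FOO
    out_undo:      /* only defined when reachable: no unused-label warning */
            undo_earlier_setup();
            return err;
    #endif
    }
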
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 5fab7d7b5d74..6179a0a2032c 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -8118,7 +8118,7 @@ static void initiate_new_session(struct lro *lro, u8 *l2h,
lro->iph = ip;
lro->tcph = tcp;
lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
- lro->tcp_ack = ntohl(tcp->ack_seq);
+ lro->tcp_ack = tcp->ack_seq;
lro->sg_num = 1;
lro->total_len = ntohs(ip->tot_len);
lro->frags_len = 0;
@@ -8127,10 +8127,10 @@ static void initiate_new_session(struct lro *lro, u8 *l2h,
* already been done.
*/
if (tcp->doff == 8) {
- u32 *ptr;
- ptr = (u32 *)(tcp+1);
+ __be32 *ptr;
+ ptr = (__be32 *)(tcp+1);
lro->saw_ts = 1;
- lro->cur_tsval = *(ptr+1);
+ lro->cur_tsval = ntohl(*(ptr+1));
lro->cur_tsecr = *(ptr+2);
}
lro->in_use = 1;
@@ -8156,7 +8156,7 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
/* Update tsecr field if this session has timestamps enabled */
if (lro->saw_ts) {
- u32 *ptr = (u32 *)(tcp + 1);
+ __be32 *ptr = (__be32 *)(tcp + 1);
*(ptr+2) = lro->cur_tsecr;
}
@@ -8181,10 +8181,10 @@ static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
lro->window = tcp->window;
if (lro->saw_ts) {
- u32 *ptr;
+ __be32 *ptr;
/* Update tsecr and tsval from this packet */
- ptr = (u32 *) (tcp + 1);
- lro->cur_tsval = *(ptr + 1);
+ ptr = (__be32 *)(tcp+1);
+ lro->cur_tsval = ntohl(*(ptr+1));
lro->cur_tsecr = *(ptr + 2);
}
}
@@ -8235,11 +8235,11 @@ static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
/* Ensure timestamp value increases monotonically */
if (l_lro)
- if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
+ if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
return -1;
/* timestamp echo reply should be non-zero */
- if (*((u32 *)(ptr+6)) == 0)
+ if (*((__be32 *)(ptr+6)) == 0)
return -1;
}
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 9f6016c6f135..64b88eb48287 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -809,7 +809,7 @@ struct lro {
int in_use;
__be16 window;
u32 cur_tsval;
- u32 cur_tsecr;
+ __be32 cur_tsecr;
u8 saw_ts;
};
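
The s2io LRO hunks are endianness annotations: TCP header words arrive big-endian, so values that are only stored and echoed back to the wire (the ack sequence, cur_tsecr) keep their __be32 type, while values the driver actually compares or does arithmetic on (cur_tsval, the next-sequence check) are converted with ntohl() first. A small sketch of the distinction, with made-up field names:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct lro_sketch {
            u32     next_val;       /* host order: used in comparisons */
            __be32  echoed_val;     /* wire order: only copied back out */
    };

    static void lro_sketch_update(struct lro_sketch *s, const __be32 *opt)
    {
            /* compare in host byte order ... */
            if (ntohl(opt[1]) > s->next_val)
                    s->next_val = ntohl(opt[1]);

            /* ... but keep a value we merely echo back in wire order */
            s->echoed_val = opt[2];
    }
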
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index b570402f7fed..2e9e88be7b33 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -326,7 +326,7 @@ static const struct {
{ "SiS 191 PCI Gigabit Ethernet adapter" },
};
-static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
+static struct pci_device_id sis190_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
{ 0, },
diff --git a/drivers/net/skfp/ess.c b/drivers/net/skfp/ess.c
index 62b01328c496..889f98724610 100644
--- a/drivers/net/skfp/ess.c
+++ b/drivers/net/skfp/ess.c
@@ -598,7 +598,7 @@ static void ess_send_alc_req(struct s_smc *smc)
req->cmd.sba_cmd = REQUEST_ALLOCATION ;
/*
- * set the parameter type and parameter lenght of all used
+ * set the parameter type and parameter length of all used
* parameters
*/
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index a45205da8033..76dc8adc9441 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -398,7 +398,7 @@ static void copy_tx_mac(struct s_smc *smc, u_long td, struct fddi_mac *mac,
/* u_long td; transmit descriptor */
/* struct fddi_mac *mac; mac frame pointer */
/* unsigned off; start address within buffer memory */
-/* int len ; lenght of the frame including the FC */
+/* int len ; length of the frame including the FC */
{
int i ;
u_int *p ;
diff --git a/drivers/net/skfp/hwmtm.c b/drivers/net/skfp/hwmtm.c
index 8a430a366547..46e339315656 100644
--- a/drivers/net/skfp/hwmtm.c
+++ b/drivers/net/skfp/hwmtm.c
@@ -1185,7 +1185,7 @@ void process_receive(struct s_smc *smc)
DB_RX("frame length = %d",len,0,4) ;
/*
- * check the frame_lenght and all error flags
+ * check the frame_length and all error flags
*/
if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){
if (rfsw & RD_S_MSRABT) {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 626190eb91e7..9a6295909e43 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -623,6 +623,7 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff)
static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
/* Turn on/off phy power saving */
if (onoff)
@@ -634,7 +635,8 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff)
reg1 |= coma_mode[port];
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
- reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+ sky2_pci_read32(hw, PCI_DEV_REG1);
udelay(100);
}
@@ -855,7 +857,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
/* On chips without ram buffer, pause is controlled by MAC level */
- if (sky2_read8(hw, B2_E_0) == 0) {
+ if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
@@ -1192,7 +1194,7 @@ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
struct sk_buff *skb;
int i;
- if (sky2->hw->flags & SKY2_HW_FIFO_HANG_CHECK) {
+ if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
unsigned char *start;
/*
* Workaround for a bug in FIFO that causes hang
@@ -1385,6 +1387,7 @@ static int sky2_up(struct net_device *dev)
if (ramsize > 0) {
u32 rxspace;
+ hw->flags |= SKY2_HW_RAM_BUFFER;
pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
if (ramsize < 16)
rxspace = ramsize / 2;
@@ -1422,6 +1425,7 @@ static int sky2_up(struct net_device *dev)
imask |= portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
+ sky2_set_multicast(dev);
return 0;
err_out:
@@ -2023,7 +2027,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
synchronize_irq(hw->pdev->irq);
- if (sky2_read8(hw, B2_E_0) == 0)
+ if (!(hw->flags & SKY2_HW_RAM_BUFFER))
sky2_set_tx_stfwd(hw, port);
ctl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2436,6 +2440,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
u16 pci_err;
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
pci_err = sky2_pci_read16(hw, PCI_STATUS);
if (net_ratelimit())
dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
@@ -2443,12 +2448,14 @@ static void sky2_hw_intr(struct sky2_hw *hw)
sky2_pci_write16(hw, PCI_STATUS,
pci_err | PCI_STATUS_ERROR_BITS);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
if (status & Y2_IS_PCI_EXP) {
/* PCI-Express uncorrectable Error occurred */
u32 err;
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
0xfffffffful);
@@ -2456,6 +2463,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
if (status & Y2_HWE_L1_MASK)
@@ -2559,7 +2567,7 @@ static void sky2_watchdog(unsigned long arg)
++active;
/* For chips with Rx FIFO, check if stuck */
- if ((hw->flags & SKY2_HW_FIFO_HANG_CHECK) &&
+ if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
sky2_rx_hung(dev)) {
pr_info(PFX "%s: receiver hang detected\n",
dev->name);
@@ -2715,11 +2723,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
switch(hw->chip_id) {
case CHIP_ID_YUKON_XL:
- hw->flags = SKY2_HW_GIGABIT
- | SKY2_HW_NEWER_PHY;
- if (hw->chip_rev < 3)
- hw->flags |= SKY2_HW_FIFO_HANG_CHECK;
-
+ hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
break;
case CHIP_ID_YUKON_EC_U:
@@ -2745,7 +2749,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
return -EOPNOTSUPP;
}
- hw->flags = SKY2_HW_GIGABIT | SKY2_HW_FIFO_HANG_CHECK;
+ hw->flags = SKY2_HW_GIGABIT;
break;
case CHIP_ID_YUKON_FE:
@@ -2831,6 +2835,7 @@ static void sky2_reset(struct sky2_hw *hw)
}
sky2_power_on(hw);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
for (i = 0; i < hw->ports; i++) {
sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
@@ -3554,8 +3559,6 @@ static int sky2_set_ringparam(struct net_device *dev,
err = sky2_up(dev);
if (err)
dev_close(dev);
- else
- sky2_set_multicast(dev);
}
return err;
@@ -4389,8 +4392,6 @@ static int sky2_resume(struct pci_dev *pdev)
dev_close(dev);
goto out;
}
-
- sky2_set_multicast(dev);
}
}
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 2bced1a0898f..5ab5c1c7c5aa 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2045,7 +2045,7 @@ struct sky2_hw {
#define SKY2_HW_FIBRE_PHY 0x00000002
#define SKY2_HW_GIGABIT 0x00000004
#define SKY2_HW_NEWER_PHY 0x00000008
-#define SKY2_HW_FIFO_HANG_CHECK 0x00000010
+#define SKY2_HW_RAM_BUFFER 0x00000010
#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index fe3ac6f9ae89..0e4a88d16327 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -1075,7 +1075,7 @@ static const struct ethtool_ops bigmac_ethtool_ops = {
.get_link = bigmac_get_link,
};
-static int __init bigmac_ether_init(struct sbus_dev *qec_sdev)
+static int __devinit bigmac_ether_init(struct sbus_dev *qec_sdev)
{
struct net_device *dev;
static int version_printed;
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index ff23c6489efd..e811331d4608 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -747,7 +747,7 @@ static inline void qec_init_once(struct sunqec *qecp, struct sbus_dev *qsdev)
qecp->gregs + GLOB_RSIZE);
}
-static u8 __init qec_get_burst(struct device_node *dp)
+static u8 __devinit qec_get_burst(struct device_node *dp)
{
u8 bsizes, bsizes_more;
@@ -767,7 +767,7 @@ static u8 __init qec_get_burst(struct device_node *dp)
return bsizes;
}
-static struct sunqec * __init get_qec(struct sbus_dev *child_sdev)
+static struct sunqec * __devinit get_qec(struct sbus_dev *child_sdev)
{
struct sbus_dev *qec_sdev = child_sdev->parent;
struct sunqec *qecp;
@@ -823,7 +823,7 @@ fail:
return NULL;
}
-static int __init qec_ether_init(struct sbus_dev *sdev)
+static int __devinit qec_ether_init(struct sbus_dev *sdev)
{
static unsigned version_printed;
struct net_device *dev;
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index 4a0035f7a842..6415ce15c2ef 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -1130,7 +1130,7 @@ static struct vio_driver_ops vnet_vio_ops = {
.handshake_complete = vnet_handshake_complete,
};
-static void print_version(void)
+static void __devinit print_version(void)
{
static int version_printed;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index c99ce74a7aff..3af5b92b48c8 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -465,7 +465,7 @@ static struct pci_driver tlan_driver = {
static int __init tlan_probe(void)
{
- static int pad_allocated;
+ int rc = -ENODEV;
printk(KERN_INFO "%s", tlan_banner);
@@ -473,17 +473,22 @@ static int __init tlan_probe(void)
if (TLanPadBuffer == NULL) {
printk(KERN_ERR "TLAN: Could not allocate memory for pad buffer.\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_out;
}
memset(TLanPadBuffer, 0, TLAN_MIN_FRAME_SIZE);
- pad_allocated = 1;
TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
/* Use new style PCI probing. Now the kernel will
do most of this for us */
- pci_register_driver(&tlan_driver);
+ rc = pci_register_driver(&tlan_driver);
+
+ if (rc != 0) {
+ printk(KERN_ERR "TLAN: Could not register pci driver.\n");
+ goto err_out_pci_free;
+ }
TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
TLan_EisaProbe();
@@ -493,11 +498,17 @@ static int __init tlan_probe(void)
tlan_have_pci, tlan_have_eisa);
if (TLanDevicesInstalled == 0) {
- pci_unregister_driver(&tlan_driver);
- pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA);
- return -ENODEV;
+ rc = -ENODEV;
+ goto err_out_pci_unreg;
}
return 0;
+
+err_out_pci_unreg:
+ pci_unregister_driver(&tlan_driver);
+err_out_pci_free:
+ pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA);
+err_out:
+ return rc;
}
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index 124cfd4fbcf4..7a7de0469eae 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -10,7 +10,7 @@
* - Madge Smart 16/4 PCI Mk2
*
* Maintainer(s):
- * AF Adam Fritzler mid@auk.cx
+ * AF Adam Fritzler
*
* Modification History:
* 30-Dec-99 AF Split off from the tms380tr driver.
diff --git a/drivers/net/tokenring/abyss.h b/drivers/net/tokenring/abyss.h
index 0ee6e4f085b1..b0a473b89133 100644
--- a/drivers/net/tokenring/abyss.h
+++ b/drivers/net/tokenring/abyss.h
@@ -2,7 +2,7 @@
* abyss.h: Header for the abyss tms380tr module
*
* Authors:
- * - Adam Fritzler <mid@auk.cx>
+ * - Adam Fritzler
*/
#ifndef __LINUX_MADGETR_H
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 5a4151362fc0..c9c5a2b1ed9e 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -11,7 +11,7 @@
* - Madge Smart 16/4 Ringnode MC32 (??)
*
* Maintainer(s):
- * AF Adam Fritzler mid@auk.cx
+ * AF Adam Fritzler
*
* Modification History:
* 16-Jan-00 AF Created
diff --git a/drivers/net/tokenring/madgemc.h b/drivers/net/tokenring/madgemc.h
index 2dd822203809..fe88e272c531 100644
--- a/drivers/net/tokenring/madgemc.h
+++ b/drivers/net/tokenring/madgemc.h
@@ -2,7 +2,7 @@
* madgemc.h: Header for the madgemc tms380tr module
*
* Authors:
- * - Adam Fritzler <mid@auk.cx>
+ * - Adam Fritzler
*/
#ifndef __LINUX_MADGEMC_H
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index e7b4adc5c4e7..433c994ea9d8 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -434,7 +434,7 @@ static int __devinit olympic_init(struct net_device *dev)
}
-static int olympic_open(struct net_device *dev)
+static int __devinit olympic_open(struct net_device *dev)
{
struct olympic_private *olympic_priv=netdev_priv(dev);
u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
index ca6b65919b3d..00ea94513460 100644
--- a/drivers/net/tokenring/proteon.c
+++ b/drivers/net/tokenring/proteon.c
@@ -12,7 +12,7 @@
* - Proteon 1392, 1392+
*
* Maintainer(s):
- * AF Adam Fritzler mid@auk.cx
+ * AF Adam Fritzler
* JF Jochen Friedrich jochen@scram.de
*
* Modification History:
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
index 32e8d5a9f958..41b6999a0f33 100644
--- a/drivers/net/tokenring/skisa.c
+++ b/drivers/net/tokenring/skisa.c
@@ -13,7 +13,7 @@
* - SysKonnect TR4/16(+) ISA (SK-4190)
*
* Maintainer(s):
- * AF Adam Fritzler mid@auk.cx
+ * AF Adam Fritzler
* JF Jochen Friedrich jochen@scram.de
*
* Modification History:
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index d5fa36d36515..d07c4523c847 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -30,7 +30,7 @@
* Maintainer(s):
* JS Jay Schulist jschlst@samba.org
* CG Christoph Goos cgoos@syskonnect.de
- * AF Adam Fritzler mid@auk.cx
+ * AF Adam Fritzler
* MLP Mike Phillips phillim@amtrak.com
* JF Jochen Friedrich jochen@scram.de
*
diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h
index 7daf74e31ccd..7af76d708849 100644
--- a/drivers/net/tokenring/tms380tr.h
+++ b/drivers/net/tokenring/tms380tr.h
@@ -3,7 +3,7 @@
*
* Authors:
* - Christoph Goos <cgoos@syskonnect.de>
- * - Adam Fritzler <mid@auk.cx>
+ * - Adam Fritzler
*/
#ifndef __LINUX_TMS380TR_H
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index 1c18f782f522..5f0ee880cfff 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -14,7 +14,7 @@
* - 3Com 3C339 Token Link Velocity
*
* Maintainer(s):
- * AF Adam Fritzler mid@auk.cx
+ * AF Adam Fritzler
*
* Modification History:
* 30-Dec-99 AF Split off from the tms380tr driver.
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 8fc7274642eb..6b93d0169116 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -441,7 +441,7 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&card->lock,flags);
trigger_transmit(card);
- return -EIO;
+ return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 46339f6bcd00..038c1ef94d2e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -529,9 +529,13 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr)
if (ifr->ifr_flags & IFF_NO_PI)
tun->flags |= TUN_NO_PI;
+ else
+ tun->flags &= ~TUN_NO_PI;
if (ifr->ifr_flags & IFF_ONE_QUEUE)
tun->flags |= TUN_ONE_QUEUE;
+ else
+ tun->flags &= ~TUN_ONE_QUEUE;
file->private_data = tun;
tun->attached = 1;
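
The tun_set_iff() hunk adds the missing else branches so that re-issuing TUNSETIFF on an existing interface can clear TUN_NO_PI or TUN_ONE_QUEUE again, not just set them. The general shape, as a tiny sketch with invented parameter names:

    /* mirror a request bit into a persistent flag word in both directions */
    static void apply_mode(unsigned int *flags, unsigned int req,
                           unsigned int req_bit, unsigned int flag_bit)
    {
            if (req & req_bit)
                    *flags |= flag_bit;
            else
                    *flags &= ~flag_bit;
    }
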
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 4ffd8739f8b7..fba0811d2608 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2084,8 +2084,10 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
if (!ugeth)
return;
- if (ugeth->uccf)
+ if (ugeth->uccf) {
ucc_fast_free(ugeth->uccf);
+ ugeth->uccf = NULL;
+ }
if (ugeth->p_thread_data_tx) {
qe_muram_free(ugeth->thread_dat_tx_offset);
@@ -2305,10 +2307,6 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
- /* Create CQs for hash tables */
- INIT_LIST_HEAD(&ugeth->group_hash_q);
- INIT_LIST_HEAD(&ugeth->ind_hash_q);
-
if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
(uf_info->bd_mem_part == MEM_PART_MURAM))) {
if (netif_msg_probe(ugeth))
@@ -3668,6 +3666,23 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void ucc_netpoll(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ int irq = ugeth->ug_info->uf_info.irq;
+
+ disable_irq(irq);
+ ucc_geth_irq_handler(irq, dev);
+ enable_irq(irq);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int ucc_geth_open(struct net_device *dev)
@@ -3990,6 +4005,10 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
ugeth = netdev_priv(dev);
spin_lock_init(&ugeth->lock);
+ /* Create CQs for hash tables */
+ INIT_LIST_HEAD(&ugeth->group_hash_q);
+ INIT_LIST_HEAD(&ugeth->ind_hash_q);
+
dev_set_drvdata(device, dev);
/* Set the dev->base_addr to the gfar reg region */
@@ -4006,6 +4025,9 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
#ifdef CONFIG_UGETH_NAPI
netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT);
#endif /* CONFIG_UGETH_NAPI */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ucc_netpoll;
+#endif
dev->stop = ucc_geth_close;
// dev->change_mtu = ucc_geth_change_mtu;
dev->mtu = 1500;
@@ -4040,9 +4062,10 @@ static int ucc_geth_remove(struct of_device* ofdev)
struct net_device *dev = dev_get_drvdata(device);
struct ucc_geth_private *ugeth = netdev_priv(dev);
- dev_set_drvdata(device, NULL);
- ucc_geth_memclean(ugeth);
+ unregister_netdev(dev);
free_netdev(dev);
+ ucc_geth_memclean(ugeth);
+ dev_set_drvdata(device, NULL);
return 0;
}
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index e3ba14a19915..c69e654d539f 100644
--- a/drivers/net/ucc_geth_mii.c
+++ b/drivers/net/ucc_geth_mii.c
@@ -109,7 +109,7 @@ int uec_mdio_reset(struct mii_bus *bus)
struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv;
unsigned int timeout = PHY_INIT_TIMEOUT;
- spin_lock_bh(&bus->mdio_lock);
+ mutex_lock(&bus->mdio_lock);
/* Reset the management interface */
out_be32(&regs->miimcfg, MIIMCFG_RESET_MANAGEMENT);
@@ -121,7 +121,7 @@ int uec_mdio_reset(struct mii_bus *bus)
while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--)
cpu_relax();
- spin_unlock_bh(&bus->mdio_lock);
+ mutex_unlock(&bus->mdio_lock);
if (timeout <= 0) {
printk(KERN_ERR "%s: The MII Bus is stuck!\n", bus->name);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 33cbc306226c..7e1f00131f91 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -926,7 +926,6 @@ static int rtl8150_probe(struct usb_interface *intf,
netdev->set_multicast_list = rtl8150_set_multicast;
netdev->set_mac_address = rtl8150_set_mac_address;
netdev->get_stats = rtl8150_netdev_stats;
- netdev->mtu = RTL8150_MTU;
SET_ETHTOOL_OPS(netdev, &ops);
dev->intr_interval = 100; /* 100ms */
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 87c180b563d3..7c851b1e6daa 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -606,7 +606,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
}
#endif
-static void rhine_hw_init(struct net_device *dev, long pioaddr)
+static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
{
struct rhine_private *rp = netdev_priv(dev);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 35cd65d6b9ed..8c9fb824cbd4 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -8,7 +8,6 @@
* for 64bit hardware platforms.
*
* TODO
- * Big-endian support
* rx_copybreak/alignment
* Scatter gather
* More testing
@@ -681,7 +680,7 @@ static void velocity_rx_reset(struct velocity_info *vptr)
* Init state, all RD entries belong to the NIC
*/
for (i = 0; i < vptr->options.numrx; ++i)
- vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;
+ vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC;
writew(vptr->options.numrx, &regs->RBRDU);
writel(vptr->rd_pool_dma, &regs->RDBaseLo);
@@ -777,7 +776,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
vptr->int_mask = INT_MASK_DEF;
- writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo);
+ writel(vptr->rd_pool_dma, &regs->RDBaseLo);
writew(vptr->options.numrx - 1, &regs->RDCSize);
mac_rx_queue_run(regs);
mac_rx_queue_wake(regs);
@@ -785,7 +784,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
writew(vptr->options.numtx - 1, &regs->TDCSize);
for (i = 0; i < vptr->num_txq; i++) {
- writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i]));
+ writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]);
mac_tx_queue_run(regs, i);
}
@@ -1195,7 +1194,7 @@ static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
dirty = vptr->rd_dirty - unusable;
for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
- vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
+ vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC;
}
writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
@@ -1210,7 +1209,7 @@ static int velocity_rx_refill(struct velocity_info *vptr)
struct rx_desc *rd = vptr->rd_ring + dirty;
/* Fine for an all zero Rx desc at init time as well */
- if (rd->rdesc0.owner == OWNED_BY_NIC)
+ if (rd->rdesc0.len & OWNED_BY_NIC)
break;
if (!vptr->rd_info[dirty].skb) {
@@ -1413,7 +1412,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
if (!vptr->rd_info[rd_curr].skb)
break;
- if (rd->rdesc0.owner == OWNED_BY_NIC)
+ if (rd->rdesc0.len & OWNED_BY_NIC)
break;
rmb();
@@ -1421,7 +1420,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
/*
* Don't drop CE or RL error frame although RXOK is off
*/
- if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {
+ if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
if (velocity_receive_frame(vptr, rd_curr) < 0)
stats->rx_dropped++;
} else {
@@ -1433,7 +1432,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
stats->rx_dropped++;
}
- rd->inten = 1;
+ rd->size |= RX_INTEN;
vptr->dev->last_rx = jiffies;
@@ -1554,7 +1553,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
struct net_device_stats *stats = &vptr->stats;
struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
struct rx_desc *rd = &(vptr->rd_ring[idx]);
- int pkt_len = rd->rdesc0.len;
+ int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
struct sk_buff *skb;
if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
@@ -1637,8 +1636,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
*/
*((u32 *) & (rd->rdesc0)) = 0;
- rd->len = cpu_to_le32(vptr->rx_buf_sz);
- rd->inten = 1;
+ rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN;
rd->pa_low = cpu_to_le32(rd_info->skb_dma);
rd->pa_high = 0;
return 0;
@@ -1674,7 +1672,7 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
td = &(vptr->td_rings[qnum][idx]);
tdinfo = &(vptr->td_infos[qnum][idx]);
- if (td->tdesc0.owner == OWNED_BY_NIC)
+ if (td->tdesc0.len & OWNED_BY_NIC)
break;
if ((works++ > 15))
@@ -1874,7 +1872,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
for (i = 0; i < tdinfo->nskb_dma; i++) {
#ifdef VELOCITY_ZERO_COPY_SUPPORT
- pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE);
+ pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
#else
pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE);
#endif
@@ -2067,8 +2065,8 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
struct velocity_td_info *tdinfo;
unsigned long flags;
int index;
-
int pktlen = skb->len;
+ __le16 len = cpu_to_le16(pktlen);
#ifdef VELOCITY_ZERO_COPY_SUPPORT
if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
@@ -2083,9 +2081,8 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
td_ptr = &(vptr->td_rings[qnum][index]);
tdinfo = &(vptr->td_infos[qnum][index]);
- td_ptr->tdesc1.TCPLS = TCPLS_NORMAL;
td_ptr->tdesc1.TCR = TCR0_TIC;
- td_ptr->td_buf[0].queue = 0;
+ td_ptr->td_buf[0].size &= ~TD_QUEUE;
/*
* Pad short frames.
@@ -2093,16 +2090,16 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
if (pktlen < ETH_ZLEN) {
/* Cannot occur until ZC support */
pktlen = ETH_ZLEN;
+ len = cpu_to_le16(ETH_ZLEN);
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
tdinfo->skb = skb;
tdinfo->skb_dma[0] = tdinfo->buf_dma;
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ td_ptr->td_buf[0].size = len; /* queue is 0 anyway */
tdinfo->nskb_dma = 1;
- td_ptr->tdesc1.CMDZ = 2;
} else
#ifdef VELOCITY_ZERO_COPY_SUPPORT
if (skb_shinfo(skb)->nr_frags > 0) {
@@ -2111,36 +2108,35 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
if (nfrags > 6) {
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
tdinfo->skb_dma[0] = tdinfo->buf_dma;
- td_ptr->tdesc0.pktsize =
+ td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ td_ptr->td_buf[0].size = len; /* queue is 0 anyway */
tdinfo->nskb_dma = 1;
- td_ptr->tdesc1.CMDZ = 2;
} else {
int i = 0;
tdinfo->nskb_dma = 0;
- tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, skb->len - skb->data_len, PCI_DMA_TODEVICE);
+ tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
/* FIXME: support 48bit DMA later */
td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
td_ptr->td_buf[i].pa_high = 0;
- td_ptr->td_buf[i].bufsize = skb->len->skb->data_len;
+ td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb));
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- void *addr = ((void *) page_address(frag->page + frag->page_offset));
+ void *addr = (void *)page_address(frag->page) + frag->page_offset;
tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
td_ptr->td_buf[i + 1].pa_high = 0;
- td_ptr->td_buf[i + 1].bufsize = frag->size;
+ td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
}
tdinfo->nskb_dma = i - 1;
- td_ptr->tdesc1.CMDZ = i;
}
} else
@@ -2152,18 +2148,16 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
*/
tdinfo->skb = skb;
tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ td_ptr->td_buf[0].size = len;
tdinfo->nskb_dma = 1;
- td_ptr->tdesc1.CMDZ = 2;
}
+ td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
- td_ptr->tdesc1.pqinf.VID = vlan_tx_tag_get(skb);
- td_ptr->tdesc1.pqinf.priority = 0;
- td_ptr->tdesc1.pqinf.CFI = 0;
+ td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
td_ptr->tdesc1.TCR |= TCR0_VETAG;
}
@@ -2185,7 +2179,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
if (prev < 0)
prev = vptr->options.numtx - 1;
- td_ptr->tdesc0.owner = OWNED_BY_NIC;
+ td_ptr->tdesc0.len |= OWNED_BY_NIC;
vptr->td_used[qnum]++;
vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
@@ -2193,7 +2187,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
td_ptr = &(vptr->td_rings[qnum][prev]);
- td_ptr->td_buf[0].queue = 1;
+ td_ptr->td_buf[0].size |= TD_QUEUE;
mac_tx_queue_wake(vptr->mac_regs, qnum);
}
dev->trans_start = jiffies;
@@ -3410,7 +3404,7 @@ static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
velocity_save_context(vptr, &vptr->context);
velocity_shutdown(vptr);
velocity_set_wol(vptr);
- pci_enable_wake(pdev, 3, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
pci_set_power_state(pdev, PCI_D3hot);
} else {
velocity_save_context(vptr, &vptr->context);
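
The via-velocity hunks above drop the cpu_to_le32() wrappers around values passed to writel(): the MMIO accessors already take a CPU-order value and store it in the little-endian order the chip expects, so pre-swapping would swap the value twice on a big-endian host. Only data the NIC reads directly out of DMA descriptor memory (rd_ring, td_rings) needs to be stored pre-swapped, which is what the rest of the patch does. A condensed restatement of that rule, reusing the driver's own names (sketch only, not a drop-in hunk):

    /* MMIO registers: pass CPU-order values, writel()/writew() do the
     * byte swapping on the way out. */
    writel(vptr->rd_pool_dma, &regs->RDBaseLo);
    writew(vptr->options.numrx - 1, &regs->RDCSize);

    /* Descriptor memory: the NIC fetches these bytes over DMA itself,
     * so they are stored already little-endian. */
    rd->pa_low = cpu_to_le32(rd_info->skb_dma);
    rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN;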
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index aa9179623d90..7387be4f428d 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -70,40 +70,27 @@
* Bits in the RSR0 register
*/
-#define RSR_DETAG 0x0080
-#define RSR_SNTAG 0x0040
-#define RSR_RXER 0x0020
-#define RSR_RL 0x0010
-#define RSR_CE 0x0008
-#define RSR_FAE 0x0004
-#define RSR_CRC 0x0002
-#define RSR_VIDM 0x0001
+#define RSR_DETAG cpu_to_le16(0x0080)
+#define RSR_SNTAG cpu_to_le16(0x0040)
+#define RSR_RXER cpu_to_le16(0x0020)
+#define RSR_RL cpu_to_le16(0x0010)
+#define RSR_CE cpu_to_le16(0x0008)
+#define RSR_FAE cpu_to_le16(0x0004)
+#define RSR_CRC cpu_to_le16(0x0002)
+#define RSR_VIDM cpu_to_le16(0x0001)
/*
* Bits in the RSR1 register
*/
-#define RSR_RXOK 0x8000 // rx OK
-#define RSR_PFT 0x4000 // Perfect filtering address match
-#define RSR_MAR 0x2000 // MAC accept multicast address packet
-#define RSR_BAR 0x1000 // MAC accept broadcast address packet
-#define RSR_PHY 0x0800 // MAC accept physical address packet
-#define RSR_VTAG 0x0400 // 802.1p/1q tagging packet indicator
-#define RSR_STP 0x0200 // start of packet
-#define RSR_EDP 0x0100 // end of packet
-
-/*
- * Bits in the RSR1 register
- */
-
-#define RSR1_RXOK 0x80 // rx OK
-#define RSR1_PFT 0x40 // Perfect filtering address match
-#define RSR1_MAR 0x20 // MAC accept multicast address packet
-#define RSR1_BAR 0x10 // MAC accept broadcast address packet
-#define RSR1_PHY 0x08 // MAC accept physical address packet
-#define RSR1_VTAG 0x04 // 802.1p/1q tagging packet indicator
-#define RSR1_STP 0x02 // start of packet
-#define RSR1_EDP 0x01 // end of packet
+#define RSR_RXOK cpu_to_le16(0x8000) // rx OK
+#define RSR_PFT cpu_to_le16(0x4000) // Perfect filtering address match
+#define RSR_MAR cpu_to_le16(0x2000) // MAC accept multicast address packet
+#define RSR_BAR cpu_to_le16(0x1000) // MAC accept broadcast address packet
+#define RSR_PHY cpu_to_le16(0x0800) // MAC accept physical address packet
+#define RSR_VTAG cpu_to_le16(0x0400) // 802.1p/1q tagging packet indicator
+#define RSR_STP cpu_to_le16(0x0200) // start of packet
+#define RSR_EDP cpu_to_le16(0x0100) // end of packet
/*
* Bits in the CSM register
@@ -120,33 +107,21 @@
* Bits in the TSR0 register
*/
-#define TSR0_ABT 0x0080 // Tx abort because of excessive collision
-#define TSR0_OWT 0x0040 // Jumbo frame Tx abort
-#define TSR0_OWC 0x0020 // Out of window collision
-#define TSR0_COLS 0x0010 // experience collision in this transmit event
-#define TSR0_NCR3 0x0008 // collision retry counter[3]
-#define TSR0_NCR2 0x0004 // collision retry counter[2]
-#define TSR0_NCR1 0x0002 // collision retry counter[1]
-#define TSR0_NCR0 0x0001 // collision retry counter[0]
-#define TSR0_TERR 0x8000 //
-#define TSR0_FDX 0x4000 // current transaction is serviced by full duplex mode
-#define TSR0_GMII 0x2000 // current transaction is serviced by GMII mode
-#define TSR0_LNKFL 0x1000 // packet serviced during link down
-#define TSR0_SHDN 0x0400 // shutdown case
-#define TSR0_CRS 0x0200 // carrier sense lost
-#define TSR0_CDH 0x0100 // AQE test fail (CD heartbeat)
-
-/*
- * Bits in the TSR1 register
- */
-
-#define TSR1_TERR 0x80 //
-#define TSR1_FDX 0x40 // current transaction is serviced by full duplex mode
-#define TSR1_GMII 0x20 // current transaction is serviced by GMII mode
-#define TSR1_LNKFL 0x10 // packet serviced during link down
-#define TSR1_SHDN 0x04 // shutdown case
-#define TSR1_CRS 0x02 // carrier sense lost
-#define TSR1_CDH 0x01 // AQE test fail (CD heartbeat)
+#define TSR0_ABT cpu_to_le16(0x0080) // Tx abort because of excessive collision
+#define TSR0_OWT cpu_to_le16(0x0040) // Jumbo frame Tx abort
+#define TSR0_OWC cpu_to_le16(0x0020) // Out of window collision
+#define TSR0_COLS cpu_to_le16(0x0010) // experience collision in this transmit event
+#define TSR0_NCR3 cpu_to_le16(0x0008) // collision retry counter[3]
+#define TSR0_NCR2 cpu_to_le16(0x0004) // collision retry counter[2]
+#define TSR0_NCR1 cpu_to_le16(0x0002) // collision retry counter[1]
+#define TSR0_NCR0 cpu_to_le16(0x0001) // collision retry counter[0]
+#define TSR0_TERR cpu_to_le16(0x8000) //
+#define TSR0_FDX cpu_to_le16(0x4000) // current transaction is serviced by full duplex mode
+#define TSR0_GMII cpu_to_le16(0x2000) // current transaction is serviced by GMII mode
+#define TSR0_LNKFL cpu_to_le16(0x1000) // packet serviced during link down
+#define TSR0_SHDN cpu_to_le16(0x0400) // shutdown case
+#define TSR0_CRS cpu_to_le16(0x0200) // carrier sense lost
+#define TSR0_CDH cpu_to_le16(0x0100) // AQE test fail (CD heartbeat)
//
// Bits in the TCR0 register
@@ -197,25 +172,26 @@
*/
struct rdesc0 {
- u16 RSR; /* Receive status */
- u16 len:14; /* Received packet length */
- u16 reserved:1;
- u16 owner:1; /* Who owns this buffer ? */
+ __le16 RSR; /* Receive status */
+ __le16 len; /* bits 0--13; bit 15 - owner */
};
struct rdesc1 {
- u16 PQTAG;
+ __le16 PQTAG;
u8 CSM;
u8 IPKT;
};
+enum {
+ RX_INTEN = __constant_cpu_to_le16(0x8000)
+};
+
struct rx_desc {
struct rdesc0 rdesc0;
struct rdesc1 rdesc1;
- u32 pa_low; /* Low 32 bit PCI address */
- u16 pa_high; /* Next 16 bit PCI address (48 total) */
- u16 len:15; /* Frame size */
- u16 inten:1; /* Enable interrupt */
+ __le32 pa_low; /* Low 32 bit PCI address */
+ __le16 pa_high; /* Next 16 bit PCI address (48 total) */
+ __le16 size; /* bits 0--14 - frame size, bit 15 - enable int. */
} __attribute__ ((__packed__));
/*
@@ -223,32 +199,24 @@ struct rx_desc {
*/
struct tdesc0 {
- u16 TSR; /* Transmit status register */
- u16 pktsize:14; /* Size of frame */
- u16 reserved:1;
- u16 owner:1; /* Who owns the buffer */
+ __le16 TSR; /* Transmit status register */
+ __le16 len; /* bits 0--13 - size of frame, bit 15 - owner */
};
-struct pqinf { /* Priority queue info */
- u16 VID:12;
- u16 CFI:1;
- u16 priority:3;
-} __attribute__ ((__packed__));
-
struct tdesc1 {
- struct pqinf pqinf;
+ __le16 vlan;
u8 TCR;
- u8 TCPLS:2;
- u8 reserved:2;
- u8 CMDZ:4;
+ u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */
} __attribute__ ((__packed__));
+enum {
+ TD_QUEUE = __constant_cpu_to_le16(0x8000)
+};
+
struct td_buf {
- u32 pa_low;
- u16 pa_high;
- u16 bufsize:14;
- u16 reserved:1;
- u16 queue:1;
+ __le32 pa_low;
+ __le16 pa_high;
+ __le16 size; /* bits 0--13 - size, bit 15 - queue */
} __attribute__ ((__packed__));
struct tx_desc {
@@ -276,7 +244,7 @@ struct velocity_td_info {
enum velocity_owner {
OWNED_BY_HOST = 0,
- OWNED_BY_NIC = 1
+ OWNED_BY_NIC = __constant_cpu_to_le16(0x8000)
};
@@ -1012,45 +980,45 @@ struct mac_regs {
volatile u8 RCR;
volatile u8 TCR;
- volatile u32 CR0Set; /* 0x08 */
- volatile u32 CR0Clr; /* 0x0C */
+ volatile __le32 CR0Set; /* 0x08 */
+ volatile __le32 CR0Clr; /* 0x0C */
volatile u8 MARCAM[8]; /* 0x10 */
- volatile u32 DecBaseHi; /* 0x18 */
- volatile u16 DbfBaseHi; /* 0x1C */
- volatile u16 reserved_1E;
+ volatile __le32 DecBaseHi; /* 0x18 */
+ volatile __le16 DbfBaseHi; /* 0x1C */
+ volatile __le16 reserved_1E;
- volatile u16 ISRCTL; /* 0x20 */
+ volatile __le16 ISRCTL; /* 0x20 */
volatile u8 TXESR;
volatile u8 RXESR;
- volatile u32 ISR; /* 0x24 */
- volatile u32 IMR;
+ volatile __le32 ISR; /* 0x24 */
+ volatile __le32 IMR;
- volatile u32 TDStatusPort; /* 0x2C */
+ volatile __le32 TDStatusPort; /* 0x2C */
- volatile u16 TDCSRSet; /* 0x30 */
+ volatile __le16 TDCSRSet; /* 0x30 */
volatile u8 RDCSRSet;
volatile u8 reserved_33;
- volatile u16 TDCSRClr;
+ volatile __le16 TDCSRClr;
volatile u8 RDCSRClr;
volatile u8 reserved_37;
- volatile u32 RDBaseLo; /* 0x38 */
- volatile u16 RDIdx; /* 0x3C */
- volatile u16 reserved_3E;
+ volatile __le32 RDBaseLo; /* 0x38 */
+ volatile __le16 RDIdx; /* 0x3C */
+ volatile __le16 reserved_3E;
- volatile u32 TDBaseLo[4]; /* 0x40 */
+ volatile __le32 TDBaseLo[4]; /* 0x40 */
- volatile u16 RDCSize; /* 0x50 */
- volatile u16 TDCSize; /* 0x52 */
- volatile u16 TDIdx[4]; /* 0x54 */
- volatile u16 tx_pause_timer; /* 0x5C */
- volatile u16 RBRDU; /* 0x5E */
+ volatile __le16 RDCSize; /* 0x50 */
+ volatile __le16 TDCSize; /* 0x52 */
+ volatile __le16 TDIdx[4]; /* 0x54 */
+ volatile __le16 tx_pause_timer; /* 0x5C */
+ volatile __le16 RBRDU; /* 0x5E */
- volatile u32 FIFOTest0; /* 0x60 */
- volatile u32 FIFOTest1; /* 0x64 */
+ volatile __le32 FIFOTest0; /* 0x60 */
+ volatile __le32 FIFOTest1; /* 0x64 */
volatile u8 CAMADDR; /* 0x68 */
volatile u8 CAMCR; /* 0x69 */
@@ -1063,18 +1031,18 @@ struct mac_regs {
volatile u8 PHYSR1;
volatile u8 MIICR;
volatile u8 MIIADR;
- volatile u16 MIIDATA;
+ volatile __le16 MIIDATA;
- volatile u16 SoftTimer0; /* 0x74 */
- volatile u16 SoftTimer1;
+ volatile __le16 SoftTimer0; /* 0x74 */
+ volatile __le16 SoftTimer1;
volatile u8 CFGA; /* 0x78 */
volatile u8 CFGB;
volatile u8 CFGC;
volatile u8 CFGD;
- volatile u16 DCFG; /* 0x7C */
- volatile u16 MCFG;
+ volatile __le16 DCFG; /* 0x7C */
+ volatile __le16 MCFG;
volatile u8 TBIST; /* 0x80 */
volatile u8 RBIST;
@@ -1086,9 +1054,9 @@ struct mac_regs {
volatile u8 rev_id;
volatile u8 PORSTS;
- volatile u32 MIBData; /* 0x88 */
+ volatile __le32 MIBData; /* 0x88 */
- volatile u16 EEWrData;
+ volatile __le16 EEWrData;
volatile u8 reserved_8E;
volatile u8 BPMDWr;
@@ -1098,7 +1066,7 @@ struct mac_regs {
volatile u8 EECHKSUM; /* 0x92 */
volatile u8 EECSR;
- volatile u16 EERdData; /* 0x94 */
+ volatile __le16 EERdData; /* 0x94 */
volatile u8 EADDR;
volatile u8 EMBCMD;
@@ -1112,22 +1080,22 @@ struct mac_regs {
volatile u8 DEBUG;
volatile u8 CHIPGCR;
- volatile u16 WOLCRSet; /* 0xA0 */
+ volatile __le16 WOLCRSet; /* 0xA0 */
volatile u8 PWCFGSet;
volatile u8 WOLCFGSet;
- volatile u16 WOLCRClr; /* 0xA4 */
+ volatile __le16 WOLCRClr; /* 0xA4 */
volatile u8 PWCFGCLR;
volatile u8 WOLCFGClr;
- volatile u16 WOLSRSet; /* 0xA8 */
- volatile u16 reserved_AA;
+ volatile __le16 WOLSRSet; /* 0xA8 */
+ volatile __le16 reserved_AA;
- volatile u16 WOLSRClr; /* 0xAC */
- volatile u16 reserved_AE;
+ volatile __le16 WOLSRClr; /* 0xAC */
+ volatile __le16 reserved_AE;
- volatile u16 PatternCRC[8]; /* 0xB0 */
- volatile u32 ByteMask[4][4]; /* 0xC0 */
+ volatile __le16 PatternCRC[8]; /* 0xB0 */
+ volatile __le32 ByteMask[4][4]; /* 0xC0 */
} __attribute__ ((__packed__));
@@ -1238,12 +1206,12 @@ typedef u8 MCAM_ADDR[ETH_ALEN];
struct arp_packet {
u8 dest_mac[ETH_ALEN];
u8 src_mac[ETH_ALEN];
- u16 type;
- u16 ar_hrd;
- u16 ar_pro;
+ __be16 type;
+ __be16 ar_hrd;
+ __be16 ar_pro;
u8 ar_hln;
u8 ar_pln;
- u16 ar_op;
+ __be16 ar_op;
u8 ar_sha[ETH_ALEN];
u8 ar_sip[4];
u8 ar_tha[ETH_ALEN];
@@ -1253,7 +1221,7 @@ struct arp_packet {
struct _magic_packet {
u8 dest_mac[6];
u8 src_mac[6];
- u16 type;
+ __be16 type;
u8 MAC[16][6];
u8 password[6];
} __attribute__ ((__packed__));
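
The header rework above replaces C bitfields (owner:1, inten:1, bufsize:14, ...) with whole __le16 words plus constants that are byte-swapped once at build time (OWNED_BY_NIC, RX_INTEN, TD_QUEUE and the RSR_*/TSR0_* masks). Bitfield layout is compiler- and endian-dependent, whereas a pre-swapped mask can be tested and set on the stored little-endian word with plain & and | on any host. A stand-alone illustration of the idea (user-space C, not driver code; the open-coded cpu_to_le16() stands in for the kernel's __constant_cpu_to_le16()):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Store a 16-bit value in little-endian byte order, whatever the host is. */
    static uint16_t cpu_to_le16(uint16_t v)
    {
        unsigned char b[2] = { v & 0xff, v >> 8 };
        uint16_t le;

        memcpy(&le, b, sizeof(le));
        return le;
    }

    #define OWNED_BY_NIC cpu_to_le16(0x8000)   /* bit 15 of rdesc0.len */

    int main(void)
    {
        /* rdesc0.len as kept in descriptor memory: length in bits 0-13. */
        uint16_t len = cpu_to_le16(1514);

        len |= OWNED_BY_NIC;            /* hand the buffer to the NIC */
        printf("owned by NIC: %d\n", !!(len & OWNED_BY_NIC));

        len &= ~OWNED_BY_NIC;           /* NIC gives it back to the host */
        printf("owned by NIC: %d\n", !!(len & OWNED_BY_NIC));
        return 0;
    }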
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5413dbf3d4ac..fdc23678117b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -24,6 +24,13 @@
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
+static int napi_weight = 128;
+module_param(napi_weight, int, 0444);
+
+static int csum = 1, gso = 1;
+module_param(csum, bool, 0444);
+module_param(gso, bool, 0444);
+
/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN+ETH_DATA_LEN)
@@ -52,13 +59,14 @@ static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
}
-static bool skb_xmit_done(struct virtqueue *rvq)
+static void skb_xmit_done(struct virtqueue *svq)
{
- struct virtnet_info *vi = rvq->vdev->priv;
+ struct virtnet_info *vi = svq->vdev->priv;
- /* In case we were waiting for output buffers. */
+ /* Suppress further interrupts. */
+ svq->vq_ops->disable_cb(svq);
+ /* We were waiting for more output buffers. */
netif_wake_queue(vi->dev);
- return true;
}
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
@@ -83,28 +91,16 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
pr_debug("Needs csum!\n");
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = hdr->csum_start;
- skb->csum_offset = hdr->csum_offset;
- if (skb->csum_start > skb->len - 2
- || skb->csum_offset > skb->len - 2) {
- if (net_ratelimit())
- printk(KERN_WARNING "%s: csum=%u/%u len=%u\n",
- dev->name, skb->csum_start,
- skb->csum_offset, skb->len);
+ if (!skb_partial_csum_set(skb,hdr->csum_start,hdr->csum_offset))
goto frame_err;
- }
}
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
pr_debug("GSO!\n");
- switch (hdr->gso_type) {
+ switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
break;
- case VIRTIO_NET_HDR_GSO_TCPV4_ECN:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCP_ECN;
- break;
case VIRTIO_NET_HDR_GSO_UDP:
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
break;
@@ -118,6 +114,9 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
goto frame_err;
}
+ if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+
skb_shinfo(skb)->gso_size = hdr->gso_size;
if (skb_shinfo(skb)->gso_size == 0) {
if (net_ratelimit())
@@ -170,12 +169,14 @@ static void try_fill_recv(struct virtnet_info *vi)
vi->rvq->vq_ops->kick(vi->rvq);
}
-static bool skb_recv_done(struct virtqueue *rvq)
+static void skb_recv_done(struct virtqueue *rvq)
{
struct virtnet_info *vi = rvq->vdev->priv;
- netif_rx_schedule(vi->dev, &vi->napi);
- /* Suppress further interrupts. */
- return false;
+ /* Schedule NAPI; suppress further interrupts if successful. */
+ if (netif_rx_schedule_prep(vi->dev, &vi->napi)) {
+ rvq->vq_ops->disable_cb(rvq);
+ __netif_rx_schedule(vi->dev, &vi->napi);
+ }
}
static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -201,7 +202,7 @@ again:
/* Out of packets? */
if (received < budget) {
netif_rx_complete(vi->dev, napi);
- if (unlikely(!vi->rvq->vq_ops->restart(vi->rvq))
+ if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
&& netif_rx_reschedule(vi->dev, napi))
goto again;
}
@@ -236,8 +237,6 @@ static int start_xmit(struct sk_buff *skb, struct net_device *dev)
pr_debug("%s: xmit %p %s\n", dev->name, skb, print_mac(mac, dest));
- free_old_xmit_skbs(vi);
-
/* Encode metadata header at front. */
hdr = skb_vnet_hdr(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -250,10 +249,9 @@ static int start_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (skb_is_gso(skb)) {
+ hdr->hdr_len = skb_transport_header(skb) - skb->data;
hdr->gso_size = skb_shinfo(skb)->gso_size;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
- hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4_ECN;
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
@@ -261,19 +259,34 @@ static int start_xmit(struct sk_buff *skb, struct net_device *dev)
hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
else
BUG();
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
+ hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
} else {
hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
- hdr->gso_size = 0;
+ hdr->gso_size = hdr->hdr_len = 0;
}
vnet_hdr_to_sg(sg, skb);
num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
__skb_queue_head(&vi->send, skb);
+
+again:
+ /* Free up any pending old buffers before queueing new ones. */
+ free_old_xmit_skbs(vi);
err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
if (err) {
pr_debug("%s: virtio not prepared to send\n", dev->name);
- skb_unlink(skb, &vi->send);
netif_stop_queue(dev);
+
+ /* Activate callback for using skbs: if this fails it
+ * means some were used in the meantime. */
+ if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+ printk("Unlikely: restart svq failed\n");
+ netif_start_queue(dev);
+ goto again;
+ }
+ __skb_unlink(skb, &vi->send);
+
return NETDEV_TX_BUSY;
}
vi->svq->vq_ops->kick(vi->svq);
@@ -285,45 +298,33 @@ static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
- try_fill_recv(vi);
-
- /* If we didn't even get one input buffer, we're useless. */
- if (vi->num == 0)
- return -ENOMEM;
-
napi_enable(&vi->napi);
+
+ /* If all buffers were filled by the other side before we napi_enabled, we
+ * won't get another interrupt, so process any outstanding packets
+ * now. virtnet_poll wants to re-enable the queue, so we disable here.
+ * We synchronize against interrupts via NAPI_STATE_SCHED */
+ if (netif_rx_schedule_prep(dev, &vi->napi)) {
+ vi->rvq->vq_ops->disable_cb(vi->rvq);
+ __netif_rx_schedule(dev, &vi->napi);
+ }
return 0;
}
static int virtnet_close(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
- struct sk_buff *skb;
napi_disable(&vi->napi);
- /* networking core has neutered skb_xmit_done/skb_recv_done, so don't
- * worry about races vs. get(). */
- vi->rvq->vq_ops->shutdown(vi->rvq);
- while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
- kfree_skb(skb);
- vi->num--;
- }
- vi->svq->vq_ops->shutdown(vi->svq);
- while ((skb = __skb_dequeue(&vi->send)) != NULL)
- kfree_skb(skb);
-
- BUG_ON(vi->num != 0);
return 0;
}
static int virtnet_probe(struct virtio_device *vdev)
{
int err;
- unsigned int len;
struct net_device *dev;
struct virtnet_info *vi;
- void *token;
/* Allocate ourselves a network device with room for our info */
dev = alloc_etherdev(sizeof(struct virtnet_info));
@@ -331,7 +332,6 @@ static int virtnet_probe(struct virtio_device *vdev)
return -ENOMEM;
/* Set up network device as normal. */
- ether_setup(dev);
dev->open = virtnet_open;
dev->stop = virtnet_close;
dev->hard_start_xmit = start_xmit;
@@ -339,42 +339,37 @@ static int virtnet_probe(struct virtio_device *vdev)
SET_NETDEV_DEV(dev, &vdev->dev);
/* Do we support "hardware" checksums? */
- token = vdev->config->find(vdev, VIRTIO_CONFIG_NET_F, &len);
- if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_NO_CSUM)) {
+ if (csum && vdev->config->feature(vdev, VIRTIO_NET_F_CSUM)) {
/* This opens up the world of extra features. */
dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
- if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_TSO4))
- dev->features |= NETIF_F_TSO;
- if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_UFO))
- dev->features |= NETIF_F_UFO;
- if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_TSO4_ECN))
- dev->features |= NETIF_F_TSO_ECN;
- if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_TSO6))
- dev->features |= NETIF_F_TSO6;
+ if (gso && vdev->config->feature(vdev, VIRTIO_NET_F_GSO)) {
+ dev->features |= NETIF_F_TSO | NETIF_F_UFO
+ | NETIF_F_TSO_ECN | NETIF_F_TSO6;
+ }
}
/* Configuration may specify what MAC to use. Otherwise random. */
- token = vdev->config->find(vdev, VIRTIO_CONFIG_NET_MAC_F, &len);
- if (token) {
- dev->addr_len = len;
- vdev->config->get(vdev, token, dev->dev_addr, len);
+ if (vdev->config->feature(vdev, VIRTIO_NET_F_MAC)) {
+ vdev->config->get(vdev,
+ offsetof(struct virtio_net_config, mac),
+ dev->dev_addr, dev->addr_len);
} else
random_ether_addr(dev->dev_addr);
/* Set up our device-specific information */
vi = netdev_priv(dev);
- netif_napi_add(dev, &vi->napi, virtnet_poll, 16);
+ netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
vi->dev = dev;
vi->vdev = vdev;
/* We expect two virtqueues, receive then send. */
- vi->rvq = vdev->config->find_vq(vdev, skb_recv_done);
+ vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
if (IS_ERR(vi->rvq)) {
err = PTR_ERR(vi->rvq);
goto free;
}
- vi->svq = vdev->config->find_vq(vdev, skb_xmit_done);
+ vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done);
if (IS_ERR(vi->svq)) {
err = PTR_ERR(vi->svq);
goto free_recv;
@@ -389,10 +384,22 @@ static int virtnet_probe(struct virtio_device *vdev)
pr_debug("virtio_net: registering device failed\n");
goto free_send;
}
+
+ /* Last of all, set up some receive buffers. */
+ try_fill_recv(vi);
+
+ /* If we didn't even get one input buffer, we're useless. */
+ if (vi->num == 0) {
+ err = -ENOMEM;
+ goto unregister;
+ }
+
pr_debug("virtnet: registered device %s\n", dev->name);
vdev->priv = vi;
return 0;
+unregister:
+ unregister_netdev(dev);
free_send:
vdev->config->del_vq(vi->svq);
free_recv:
@@ -405,6 +412,20 @@ free:
static void virtnet_remove(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
+ struct sk_buff *skb;
+
+ /* Stop all the virtqueues. */
+ vdev->config->reset(vdev);
+
+ /* Free our skbs in send and recv queues, if any. */
+ while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
+ kfree_skb(skb);
+ vi->num--;
+ }
+ while ((skb = __skb_dequeue(&vi->send)) != NULL)
+ kfree_skb(skb);
+
+ BUG_ON(vi->num != 0);
vdev->config->del_vq(vi->svq);
vdev->config->del_vq(vi->rvq);
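
The virtio_net changes above move from the old boolean "restart" callback model to explicit disable_cb()/enable_cb() calls, and they have to close a race: a notification can arrive between "no more packets" and "re-enable callbacks". The shape of the receive path after the patch, condensed from the hunks above into one place (sketch; names and calls as in the driver):

    /* Virtqueue interrupt: only schedule NAPI if it is not already
     * scheduled; once it is, further interrupts are useless. */
    static void skb_recv_done(struct virtqueue *rvq)
    {
        struct virtnet_info *vi = rvq->vdev->priv;

        if (netif_rx_schedule_prep(vi->dev, &vi->napi)) {
            rvq->vq_ops->disable_cb(rvq);
            __netif_rx_schedule(vi->dev, &vi->napi);
        }
    }

    /* Tail of virtnet_poll(): finish NAPI, then re-enable callbacks.
     * enable_cb() returning false means buffers were used while
     * callbacks were off, so poll again instead of waiting for a
     * notification that will never come. */
    if (received < budget) {
        netif_rx_complete(vi->dev, napi);
        if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
            && netif_rx_reschedule(vi->dev, napi))
            goto again;
    }

The same enable_cb()-then-retry idea is used on the transmit side when add_buf() fails and the queue has been stopped.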
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
index d347d59db656..d14e6678deed 100644
--- a/drivers/net/wan/cycx_drv.c
+++ b/drivers/net/wan/cycx_drv.c
@@ -322,7 +322,7 @@ static int cycx_data_boot(void __iomem *addr, u8 *code, u32 len)
void __iomem *pt_boot_cmd = addr + CMD_OFFSET;
u32 i;
- /* boot buffer lenght */
+ /* boot buffer length */
writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16));
writew(GEN_DEFPAR, pt_boot_cmd);
@@ -353,7 +353,7 @@ static int cycx_code_boot(void __iomem *addr, u8 *code, u32 len)
void __iomem *pt_boot_cmd = addr + CMD_OFFSET;
u32 i;
- /* boot buffer lenght */
+ /* boot buffer length */
writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16));
writew(GEN_DEFPAR, pt_boot_cmd);
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index d553e6f32851..39951d0c34d6 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -1,7 +1,7 @@
/*
* Generic HDLC support routines for Linux
*
- * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
+ * Copyright (C) 1999 - 2008 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
@@ -39,7 +39,7 @@
#include <net/net_namespace.h>
-static const char* version = "HDLC support module revision 1.21";
+static const char* version = "HDLC support module revision 1.22";
#undef DEBUG_LINK
@@ -66,19 +66,15 @@ static struct net_device_stats *hdlc_get_stats(struct net_device *dev)
static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *p, struct net_device *orig_dev)
{
- struct hdlc_device_desc *desc = dev_to_desc(dev);
+ struct hdlc_device *hdlc = dev_to_hdlc(dev);
if (dev->nd_net != &init_net) {
kfree_skb(skb);
return 0;
}
- if (desc->netif_rx)
- return desc->netif_rx(skb);
-
- desc->stats.rx_dropped++; /* Shouldn't happen */
- dev_kfree_skb(skb);
- return NET_RX_DROP;
+ BUG_ON(!hdlc->proto->netif_rx);
+ return hdlc->proto->netif_rx(skb);
}
@@ -87,7 +83,7 @@ static inline void hdlc_proto_start(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
if (hdlc->proto->start)
- return hdlc->proto->start(dev);
+ hdlc->proto->start(dev);
}
@@ -96,7 +92,7 @@ static inline void hdlc_proto_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
if (hdlc->proto->stop)
- return hdlc->proto->stop(dev);
+ hdlc->proto->stop(dev);
}
@@ -263,8 +259,7 @@ static void hdlc_setup(struct net_device *dev)
struct net_device *alloc_hdlcdev(void *priv)
{
struct net_device *dev;
- dev = alloc_netdev(sizeof(struct hdlc_device_desc) +
- sizeof(hdlc_device), "hdlc%d", hdlc_setup);
+ dev = alloc_netdev(sizeof(struct hdlc_device), "hdlc%d", hdlc_setup);
if (dev)
dev_to_hdlc(dev)->priv = priv;
return dev;
@@ -281,7 +276,7 @@ void unregister_hdlc_device(struct net_device *dev)
int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
- int (*rx)(struct sk_buff *skb), size_t size)
+ size_t size)
{
detach_hdlc_protocol(dev);
@@ -297,7 +292,6 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
return -ENOBUFS;
}
dev_to_hdlc(dev)->proto = proto;
- dev_to_desc(dev)->netif_rx = rx;
return 0;
}
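
With the per-device netif_rx hook gone, a payload protocol now publishes its receive handler in struct hdlc_proto and attaches through the shortened attach_hdlc_protocol(dev, proto, size). Registration after this change looks like the following (sketch assembled from the hdlc_cisco hunks further down; only the fields visible there are shown):

    static struct hdlc_proto proto = {
        .stop       = cisco_stop,
        .type_trans = cisco_type_trans,
        .ioctl      = cisco_ioctl,
        .netif_rx   = cisco_rx,     /* used to be a separate argument */
        .module     = THIS_MODULE,
    };

    result = attach_hdlc_protocol(dev, &proto, sizeof(struct cisco_state));
    if (result)
        return result;

hdlc_rcv() then dispatches straight to hdlc->proto->netif_rx(skb); protocols without a handler of their own (ppp, raw, raw_eth) attach with the same call and never set .netif_rx, as the hunks below show.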
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 038a6e748bbf..7133c688cf20 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -250,7 +250,7 @@ static int cisco_rx(struct sk_buff *skb)
return NET_RX_DROP;
rx_error:
- dev_to_desc(dev)->stats.rx_errors++; /* Mark error */
+ dev_to_hdlc(dev)->stats.rx_errors++; /* Mark error */
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
@@ -314,6 +314,7 @@ static struct hdlc_proto proto = {
.stop = cisco_stop,
.type_trans = cisco_type_trans,
.ioctl = cisco_ioctl,
+ .netif_rx = cisco_rx,
.module = THIS_MODULE,
};
@@ -360,7 +361,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
if (result)
return result;
- result = attach_hdlc_protocol(dev, &proto, cisco_rx,
+ result = attach_hdlc_protocol(dev, &proto,
sizeof(struct cisco_state));
if (result)
return result;
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 071a64cacd5c..c4ab0326f911 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -42,7 +42,6 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/pkt_sched.h>
-#include <linux/random.h>
#include <linux/inetdevice.h>
#include <linux/lapb.h>
#include <linux/rtnetlink.h>
@@ -136,6 +135,10 @@ typedef struct pvc_device_struct {
}state;
}pvc_device;
+struct pvc_desc {
+ struct net_device_stats stats;
+ pvc_device *pvc;
+};
struct frad_state {
fr_proto settings;
@@ -171,17 +174,20 @@ static inline void dlci_to_q922(u8 *hdr, u16 dlci)
}
-static inline struct frad_state * state(hdlc_device *hdlc)
+static inline struct frad_state* state(hdlc_device *hdlc)
{
return(struct frad_state *)(hdlc->state);
}
-
-static __inline__ pvc_device* dev_to_pvc(struct net_device *dev)
+static inline struct pvc_desc* pvcdev_to_desc(struct net_device *dev)
{
return dev->priv;
}
+static inline struct net_device_stats* pvc_get_stats(struct net_device *dev)
+{
+ return &pvcdev_to_desc(dev)->stats;
+}
static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci)
{
@@ -351,7 +357,7 @@ static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
static int pvc_open(struct net_device *dev)
{
- pvc_device *pvc = dev_to_pvc(dev);
+ pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
if ((pvc->frad->flags & IFF_UP) == 0)
return -EIO; /* Frad must be UP in order to activate PVC */
@@ -371,7 +377,7 @@ static int pvc_open(struct net_device *dev)
static int pvc_close(struct net_device *dev)
{
- pvc_device *pvc = dev_to_pvc(dev);
+ pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
if (--pvc->open_count == 0) {
hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
@@ -390,7 +396,7 @@ static int pvc_close(struct net_device *dev)
static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- pvc_device *pvc = dev_to_pvc(dev);
+ pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
fr_proto_pvc_info info;
if (ifr->ifr_settings.type == IF_GET_PROTO) {
@@ -416,17 +422,9 @@ static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EINVAL;
}
-
-static inline struct net_device_stats *pvc_get_stats(struct net_device *dev)
-{
- return &dev_to_desc(dev)->stats;
-}
-
-
-
static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
- pvc_device *pvc = dev_to_pvc(dev);
+ pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
struct net_device_stats *stats = pvc_get_stats(dev);
if (pvc->state.active) {
@@ -957,7 +955,7 @@ static int fr_rx(struct sk_buff *skb)
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
- dev_to_desc(frad)->stats.rx_dropped++;
+ dev_to_hdlc(frad)->stats.rx_dropped++;
return NET_RX_DROP;
}
@@ -1018,7 +1016,7 @@ static int fr_rx(struct sk_buff *skb)
}
rx_error:
- dev_to_desc(frad)->stats.rx_errors++; /* Mark error */
+ dev_to_hdlc(frad)->stats.rx_errors++; /* Mark error */
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
@@ -1109,11 +1107,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
used = pvc_is_used(pvc);
if (type == ARPHRD_ETHER)
- dev = alloc_netdev(sizeof(struct net_device_stats),
- "pvceth%d", ether_setup);
+ dev = alloc_netdev(sizeof(struct pvc_desc), "pvceth%d",
+ ether_setup);
else
- dev = alloc_netdev(sizeof(struct net_device_stats),
- "pvc%d", pvc_setup);
+ dev = alloc_netdev(sizeof(struct pvc_desc), "pvc%d", pvc_setup);
if (!dev) {
printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n",
@@ -1122,10 +1119,9 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
return -ENOBUFS;
}
- if (type == ARPHRD_ETHER) {
- memcpy(dev->dev_addr, "\x00\x01", 2);
- get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
- } else {
+ if (type == ARPHRD_ETHER)
+ random_ether_addr(dev->dev_addr);
+ else {
*(__be16*)dev->dev_addr = htons(dlci);
dlci_to_q922(dev->broadcast, dlci);
}
@@ -1137,7 +1133,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
dev->change_mtu = pvc_change_mtu;
dev->mtu = HDLC_MAX_MTU;
dev->tx_queue_len = 0;
- dev->priv = pvc;
+ pvcdev_to_desc(dev)->pvc = pvc;
result = dev_alloc_name(dev, dev->name);
if (result < 0) {
@@ -1219,6 +1215,7 @@ static struct hdlc_proto proto = {
.stop = fr_stop,
.detach = fr_destroy,
.ioctl = fr_ioctl,
+ .netif_rx = fr_rx,
.module = THIS_MODULE,
};
@@ -1277,7 +1274,7 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
return result;
if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
- result = attach_hdlc_protocol(dev, &proto, fr_rx,
+ result = attach_hdlc_protocol(dev, &proto,
sizeof(struct frad_state));
if (result)
return result;
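
The frame-relay PVC netdevs above get a small private descriptor of their own instead of a bare struct net_device_stats plus a hand-stashed dev->priv pointer. Condensed from the hunks above (sketch; names as in the driver):

    struct pvc_desc {
        struct net_device_stats stats;
        pvc_device *pvc;
    };

    static inline struct pvc_desc *pvcdev_to_desc(struct net_device *dev)
    {
        return dev->priv;
    }

    dev = alloc_netdev(sizeof(struct pvc_desc), "pvc%d", pvc_setup);
    if (dev)
        pvcdev_to_desc(dev)->pvc = pvc;

so pvc_get_stats(), pvc_open(), pvc_xmit() and friends all reach the PVC through the typed accessor instead of casting dev->priv directly.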
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 519e1550e2e7..10396d9686f4 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -122,7 +122,7 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
if (result)
return result;
- result = attach_hdlc_protocol(dev, &proto, NULL,
+ result = attach_hdlc_protocol(dev, &proto,
sizeof(struct ppp_state));
if (result)
return result;
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index e23bc6656267..bbbb819d764c 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -82,7 +82,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
if (result)
return result;
- result = attach_hdlc_protocol(dev, &proto, NULL,
+ result = attach_hdlc_protocol(dev, &proto,
sizeof(raw_hdlc_proto));
if (result)
return result;
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index 8895394e6006..d20c685f6711 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -18,7 +18,6 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/pkt_sched.h>
-#include <linux/random.h>
#include <linux/inetdevice.h>
#include <linux/lapb.h>
#include <linux/rtnetlink.h>
@@ -96,7 +95,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
if (result)
return result;
- result = attach_hdlc_protocol(dev, &proto, NULL,
+ result = attach_hdlc_protocol(dev, &proto,
sizeof(raw_hdlc_proto));
if (result)
return result;
@@ -107,8 +106,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
ether_setup(dev);
dev->change_mtu = old_ch_mtu;
dev->tx_queue_len = old_qlen;
- memcpy(dev->dev_addr, "\x00\x01", 2);
- get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
+ random_ether_addr(dev->dev_addr);
netif_dormant_off(dev);
return 0;
}
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index cd7b22f50edc..c15cc11e399b 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -164,17 +164,17 @@ static void x25_close(struct net_device *dev)
static int x25_rx(struct sk_buff *skb)
{
- struct hdlc_device_desc *desc = dev_to_desc(skb->dev);
+ struct hdlc_device *hdlc = dev_to_hdlc(skb->dev);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
- desc->stats.rx_dropped++;
+ hdlc->stats.rx_dropped++;
return NET_RX_DROP;
}
if (lapb_data_received(skb->dev, skb) == LAPB_OK)
return NET_RX_SUCCESS;
- desc->stats.rx_errors++;
+ hdlc->stats.rx_errors++;
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
@@ -184,6 +184,7 @@ static struct hdlc_proto proto = {
.open = x25_open,
.close = x25_close,
.ioctl = x25_ioctl,
+ .netif_rx = x25_rx,
.module = THIS_MODULE,
};
@@ -211,8 +212,7 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
if (result)
return result;
- if ((result = attach_hdlc_protocol(dev, &proto,
- x25_rx, 0)) != 0)
+ if ((result = attach_hdlc_protocol(dev, &proto, 0)))
return result;
dev->hard_start_xmit = x25_xmit;
dev->type = ARPHRD_X25;
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index d6599d219193..ddc87149fe31 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -153,7 +153,7 @@ static int ath5k_pci_resume(struct pci_dev *pdev);
#define ath5k_pci_resume NULL
#endif /* CONFIG_PM */
-static struct pci_driver ath5k_pci_drv_id = {
+static struct pci_driver ath5k_pci_driver = {
.name = "ath5k_pci",
.id_table = ath5k_pci_id_table,
.probe = ath5k_pci_probe,
@@ -329,7 +329,7 @@ init_ath5k_pci(void)
ath5k_debug_init();
- ret = pci_register_driver(&ath5k_pci_drv_id);
+ ret = pci_register_driver(&ath5k_pci_driver);
if (ret) {
printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
return ret;
@@ -341,7 +341,7 @@ init_ath5k_pci(void)
static void __exit
exit_ath5k_pci(void)
{
- pci_unregister_driver(&ath5k_pci_drv_id);
+ pci_unregister_driver(&ath5k_pci_driver);
ath5k_debug_finish();
}
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 32a24f5c4fa6..08a011f0834a 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -724,6 +724,7 @@ struct b43_wldev {
bool short_preamble; /* TRUE, if short preamble is enabled. */
bool short_slot; /* TRUE, if short slot timing is enabled. */
bool radio_hw_enable; /* saved state of radio hardware enabled state */
+ bool suspend_in_progress; /* TRUE, if we are in a suspend/resume cycle */
/* PHY/Radio device. */
struct b43_phy phy;
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 8a708b77925d..3dfb28a34be9 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -337,7 +337,7 @@ static inline int txring_to_priority(struct b43_dmaring *ring)
return idx_to_prio[index];
}
-u16 b43_dmacontroller_base(int dma64bit, int controller_idx)
+static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
static const u16 map64[] = {
B43_MMIO_DMA64_BASE0,
@@ -356,7 +356,7 @@ u16 b43_dmacontroller_base(int dma64bit, int controller_idx)
B43_MMIO_DMA32_BASE5,
};
- if (dma64bit) {
+ if (type == B43_DMA_64BIT) {
B43_WARN_ON(!(controller_idx >= 0 &&
controller_idx < ARRAY_SIZE(map64)));
return map64[controller_idx];
@@ -437,7 +437,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
* 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
* which accounts for the GFP_DMA flag below.
*/
- if (ring->dma64)
+ if (ring->type == B43_DMA_64BIT)
flags |= GFP_DMA;
ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
&(ring->dmabase), flags);
@@ -459,7 +459,8 @@ static void free_ringmemory(struct b43_dmaring *ring)
}
/* Reset the RX DMA channel */
-int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
+static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
+ enum b43_dmatype type)
{
int i;
u32 value;
@@ -467,12 +468,13 @@ int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
might_sleep();
- offset = dma64 ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
+ offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
b43_write32(dev, mmio_base + offset, 0);
for (i = 0; i < 10; i++) {
- offset = dma64 ? B43_DMA64_RXSTATUS : B43_DMA32_RXSTATUS;
+ offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
+ B43_DMA32_RXSTATUS;
value = b43_read32(dev, mmio_base + offset);
- if (dma64) {
+ if (type == B43_DMA_64BIT) {
value &= B43_DMA64_RXSTAT;
if (value == B43_DMA64_RXSTAT_DISABLED) {
i = -1;
@@ -496,7 +498,8 @@ int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
}
/* Reset the TX DMA channel */
-int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
+static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
+ enum b43_dmatype type)
{
int i;
u32 value;
@@ -505,9 +508,10 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
might_sleep();
for (i = 0; i < 10; i++) {
- offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
+ offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
+ B43_DMA32_TXSTATUS;
value = b43_read32(dev, mmio_base + offset);
- if (dma64) {
+ if (type == B43_DMA_64BIT) {
value &= B43_DMA64_TXSTAT;
if (value == B43_DMA64_TXSTAT_DISABLED ||
value == B43_DMA64_TXSTAT_IDLEWAIT ||
@@ -522,12 +526,13 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
}
msleep(1);
}
- offset = dma64 ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
+ offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
b43_write32(dev, mmio_base + offset, 0);
for (i = 0; i < 10; i++) {
- offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
+ offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
+ B43_DMA32_TXSTATUS;
value = b43_read32(dev, mmio_base + offset);
- if (dma64) {
+ if (type == B43_DMA_64BIT) {
value &= B43_DMA64_TXSTAT;
if (value == B43_DMA64_TXSTAT_DISABLED) {
i = -1;
@@ -552,6 +557,33 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
return 0;
}
+/* Check if a DMA mapping address is invalid. */
+static bool b43_dma_mapping_error(struct b43_dmaring *ring,
+ dma_addr_t addr,
+ size_t buffersize)
+{
+ if (unlikely(dma_mapping_error(addr)))
+ return 1;
+
+ switch (ring->type) {
+ case B43_DMA_30BIT:
+ if ((u64)addr + buffersize > (1ULL << 30))
+ return 1;
+ break;
+ case B43_DMA_32BIT:
+ if ((u64)addr + buffersize > (1ULL << 32))
+ return 1;
+ break;
+ case B43_DMA_64BIT:
+ /* Currently we can't have addresses beyond
+ * 64bit in the kernel. */
+ break;
+ }
+
+ /* The address is OK. */
+ return 0;
+}
+
static int setup_rx_descbuffer(struct b43_dmaring *ring,
struct b43_dmadesc_generic *desc,
struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
@@ -567,7 +599,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
if (unlikely(!skb))
return -ENOMEM;
dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
- if (dma_mapping_error(dmaaddr)) {
+ if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
/* ugh. try to realloc in zone_dma */
gfp_flags |= GFP_DMA;
@@ -580,7 +612,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
ring->rx_buffersize, 0);
}
- if (dma_mapping_error(dmaaddr)) {
+ if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
dev_kfree_skb_any(skb);
return -EIO;
}
@@ -645,7 +677,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
u32 trans = ssb_dma_translation(ring->dev->dev);
if (ring->tx) {
- if (ring->dma64) {
+ if (ring->type == B43_DMA_64BIT) {
u64 ringbase = (u64) (ring->dmabase);
addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@@ -677,7 +709,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
err = alloc_initial_descbuffers(ring);
if (err)
goto out;
- if (ring->dma64) {
+ if (ring->type == B43_DMA_64BIT) {
u64 ringbase = (u64) (ring->dmabase);
addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@@ -722,16 +754,16 @@ static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
if (ring->tx) {
b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
- ring->dma64);
- if (ring->dma64) {
+ ring->type);
+ if (ring->type == B43_DMA_64BIT) {
b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
} else
b43_dma_write(ring, B43_DMA32_TXRING, 0);
} else {
b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
- ring->dma64);
- if (ring->dma64) {
+ ring->type);
+ if (ring->type == B43_DMA_64BIT) {
b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
} else
@@ -786,7 +818,8 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
int controller_index,
- int for_tx, int dma64)
+ int for_tx,
+ enum b43_dmatype type)
{
struct b43_dmaring *ring;
int err;
@@ -796,6 +829,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
goto out;
+ ring->type = type;
nr_slots = B43_RXRING_SLOTS;
if (for_tx)
@@ -818,7 +852,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
b43_txhdr_size(dev),
DMA_TO_DEVICE);
- if (dma_mapping_error(dma_test)) {
+ if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev))) {
/* ugh realloc */
kfree(ring->txhdr_cache);
ring->txhdr_cache = kcalloc(nr_slots,
@@ -832,7 +866,8 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
b43_txhdr_size(dev),
DMA_TO_DEVICE);
- if (dma_mapping_error(dma_test))
+ if (b43_dma_mapping_error(ring, dma_test,
+ b43_txhdr_size(dev)))
goto err_kfree_txhdr_cache;
}
@@ -843,10 +878,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
ring->dev = dev;
ring->nr_slots = nr_slots;
- ring->mmio_base = b43_dmacontroller_base(dma64, controller_index);
+ ring->mmio_base = b43_dmacontroller_base(type, controller_index);
ring->index = controller_index;
- ring->dma64 = !!dma64;
- if (dma64)
+ if (type == B43_DMA_64BIT)
ring->ops = &dma64_ops;
else
ring->ops = &dma32_ops;
@@ -896,8 +930,8 @@ static void b43_destroy_dmaring(struct b43_dmaring *ring)
if (!ring)
return;
- b43dbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
- (ring->dma64) ? "64" : "32",
+ b43dbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots: %d/%d\n",
+ (unsigned int)(ring->type),
ring->mmio_base,
(ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots);
/* Device IRQs are disabled prior entering this function,
@@ -941,12 +975,22 @@ int b43_dma_init(struct b43_wldev *dev)
struct b43_dmaring *ring;
int err;
u64 dmamask;
- int dma64 = 0;
+ enum b43_dmatype type;
dmamask = supported_dma_mask(dev);
- if (dmamask == DMA_64BIT_MASK)
- dma64 = 1;
-
+ switch (dmamask) {
+ default:
+ B43_WARN_ON(1);
+ case DMA_30BIT_MASK:
+ type = B43_DMA_30BIT;
+ break;
+ case DMA_32BIT_MASK:
+ type = B43_DMA_32BIT;
+ break;
+ case DMA_64BIT_MASK:
+ type = B43_DMA_64BIT;
+ break;
+ }
err = ssb_dma_set_mask(dev->dev, dmamask);
if (err) {
b43err(dev->wl, "The machine/kernel does not support "
@@ -958,52 +1002,51 @@ int b43_dma_init(struct b43_wldev *dev)
err = -ENOMEM;
/* setup TX DMA channels. */
- ring = b43_setup_dmaring(dev, 0, 1, dma64);
+ ring = b43_setup_dmaring(dev, 0, 1, type);
if (!ring)
goto out;
dma->tx_ring0 = ring;
- ring = b43_setup_dmaring(dev, 1, 1, dma64);
+ ring = b43_setup_dmaring(dev, 1, 1, type);
if (!ring)
goto err_destroy_tx0;
dma->tx_ring1 = ring;
- ring = b43_setup_dmaring(dev, 2, 1, dma64);
+ ring = b43_setup_dmaring(dev, 2, 1, type);
if (!ring)
goto err_destroy_tx1;
dma->tx_ring2 = ring;
- ring = b43_setup_dmaring(dev, 3, 1, dma64);
+ ring = b43_setup_dmaring(dev, 3, 1, type);
if (!ring)
goto err_destroy_tx2;
dma->tx_ring3 = ring;
- ring = b43_setup_dmaring(dev, 4, 1, dma64);
+ ring = b43_setup_dmaring(dev, 4, 1, type);
if (!ring)
goto err_destroy_tx3;
dma->tx_ring4 = ring;
- ring = b43_setup_dmaring(dev, 5, 1, dma64);
+ ring = b43_setup_dmaring(dev, 5, 1, type);
if (!ring)
goto err_destroy_tx4;
dma->tx_ring5 = ring;
/* setup RX DMA channels. */
- ring = b43_setup_dmaring(dev, 0, 0, dma64);
+ ring = b43_setup_dmaring(dev, 0, 0, type);
if (!ring)
goto err_destroy_tx5;
dma->rx_ring0 = ring;
if (dev->dev->id.revision < 5) {
- ring = b43_setup_dmaring(dev, 3, 0, dma64);
+ ring = b43_setup_dmaring(dev, 3, 0, type);
if (!ring)
goto err_destroy_rx0;
dma->rx_ring3 = ring;
}
- b43dbg(dev->wl, "%d-bit DMA initialized\n",
- (dmamask == DMA_64BIT_MASK) ? 64 :
- (dmamask == DMA_32BIT_MASK) ? 32 : 30);
+ b43dbg(dev->wl, "%u-bit DMA initialized\n",
+ (unsigned int)type);
err = 0;
out:
return err;
@@ -1146,7 +1189,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
hdrsize, 1);
- if (dma_mapping_error(meta_hdr->dmaaddr)) {
+ if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize)) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
return -EIO;
@@ -1165,7 +1208,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
- if (dma_mapping_error(meta->dmaaddr)) {
+ if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb) {
ring->current_slot = old_top_slot;
@@ -1179,7 +1222,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
skb = bounce_skb;
meta->skb = skb;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
- if (dma_mapping_error(meta->dmaaddr)) {
+ if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
err = -EIO;
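
b43_dma_mapping_error() above extends the plain dma_mapping_error() test with a range check: a 30-bit or 32-bit DMA engine silently truncates high address bits, so a mapping is only usable if the whole buffer, address plus length, sits below the engine's limit. A stand-alone illustration of that check (user-space C, not driver code; the bit widths mirror the enum b43_dmatype values added in dma.h below):

    #include <stdint.h>
    #include <stdio.h>

    /* Returns 1 if a buffer at [addr, addr + len) is unreachable for an
     * engine that can only drive addr_bits of address. */
    static int mapping_out_of_range(uint64_t addr, uint64_t len,
                                    unsigned int addr_bits)
    {
        if (addr_bits >= 64)        /* 64-bit engine: everything fits */
            return 0;
        return addr + len > (1ULL << addr_bits);
    }

    int main(void)
    {
        /* A buffer straddling the 1 GiB boundary: fine for a 32-bit
         * engine, unusable for a 30-bit one. */
        uint64_t addr = (1ULL << 30) - 512;
        uint64_t len = 2048;

        printf("30-bit engine rejects: %d\n", mapping_out_of_range(addr, len, 30));
        printf("32-bit engine rejects: %d\n", mapping_out_of_range(addr, len, 32));
        return 0;
    }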
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index 58db03ac536e..c0d6b69e6501 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -203,6 +203,12 @@ struct b43_dma_ops {
void (*set_current_rxslot) (struct b43_dmaring * ring, int slot);
};
+enum b43_dmatype {
+ B43_DMA_30BIT = 30,
+ B43_DMA_32BIT = 32,
+ B43_DMA_64BIT = 64,
+};
+
struct b43_dmaring {
/* Lowlevel DMA ops. */
const struct b43_dma_ops *ops;
@@ -235,8 +241,8 @@ struct b43_dmaring {
int index;
/* Boolean. Is this a TX ring? */
bool tx;
- /* Boolean. 64bit DMA if true, 32bit DMA otherwise. */
- bool dma64;
+ /* The type of DMA engine used. */
+ enum b43_dmatype type;
/* Boolean. Is this ring stopped at ieee80211 level? */
bool stopped;
/* Lock, only used for TX. */
@@ -255,8 +261,7 @@ static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset)
return b43_read32(ring->dev, ring->mmio_base + offset);
}
-static inline
- void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value)
+static inline void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value)
{
b43_write32(ring->dev, ring->mmio_base + offset, value);
}
@@ -264,13 +269,6 @@ static inline
int b43_dma_init(struct b43_wldev *dev);
void b43_dma_free(struct b43_wldev *dev);
-int b43_dmacontroller_rx_reset(struct b43_wldev *dev,
- u16 dmacontroller_mmio_base, int dma64);
-int b43_dmacontroller_tx_reset(struct b43_wldev *dev,
- u16 dmacontroller_mmio_base, int dma64);
-
-u16 b43_dmacontroller_base(int dma64bit, int dmacontroller_idx);
-
void b43_dma_tx_suspend(struct b43_wldev *dev);
void b43_dma_tx_resume(struct b43_wldev *dev);
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index 4b590d8c65ff..0aac1ff511df 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -116,7 +116,10 @@ static void b43_unregister_led(struct b43_led *led)
{
if (!led->dev)
return;
- led_classdev_unregister(&led->led_dev);
+ if (led->dev->suspend_in_progress)
+ led_classdev_unregister_suspended(&led->led_dev);
+ else
+ led_classdev_unregister(&led->led_dev);
b43_led_turn_off(led->dev, led->index, led->activelow);
led->dev = NULL;
}
@@ -144,12 +147,12 @@ static void b43_map_led(struct b43_wldev *dev,
case B43_LED_TRANSFER:
case B43_LED_APTRANSFER:
snprintf(name, sizeof(name),
- "b43-%s:tx", wiphy_name(hw->wiphy));
+ "b43-%s::tx", wiphy_name(hw->wiphy));
b43_register_led(dev, &dev->led_tx, name,
ieee80211_get_tx_led_name(hw),
led_index, activelow);
snprintf(name, sizeof(name),
- "b43-%s:rx", wiphy_name(hw->wiphy));
+ "b43-%s::rx", wiphy_name(hw->wiphy));
b43_register_led(dev, &dev->led_rx, name,
ieee80211_get_rx_led_name(hw),
led_index, activelow);
@@ -159,7 +162,7 @@ static void b43_map_led(struct b43_wldev *dev,
case B43_LED_RADIO_B:
case B43_LED_MODE_BG:
snprintf(name, sizeof(name),
- "b43-%s:radio", wiphy_name(hw->wiphy));
+ "b43-%s::radio", wiphy_name(hw->wiphy));
b43_register_led(dev, &dev->led_radio, name,
b43_rfkill_led_name(dev),
led_index, activelow);
@@ -170,7 +173,7 @@ static void b43_map_led(struct b43_wldev *dev,
case B43_LED_WEIRD:
case B43_LED_ASSOC:
snprintf(name, sizeof(name),
- "b43-%s:assoc", wiphy_name(hw->wiphy));
+ "b43-%s::assoc", wiphy_name(hw->wiphy));
b43_register_led(dev, &dev->led_assoc, name,
ieee80211_get_assoc_led_name(hw),
led_index, activelow);
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 64c154d080d8..ef65c41af00f 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -38,6 +38,7 @@
#include <linux/wireless.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
+#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <asm/unaligned.h>
@@ -2554,10 +2555,10 @@ static int b43_rng_read(struct hwrng *rng, u32 * data)
return (sizeof(u16));
}
-static void b43_rng_exit(struct b43_wl *wl)
+static void b43_rng_exit(struct b43_wl *wl, bool suspended)
{
if (wl->rng_initialized)
- hwrng_unregister(&wl->rng);
+ __hwrng_unregister(&wl->rng, suspended);
}
static int b43_rng_init(struct b43_wl *wl)
@@ -3417,8 +3418,10 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
macctl |= B43_MACCTL_PSM_JMP0;
b43_write32(dev, B43_MMIO_MACCTL, macctl);
- b43_leds_exit(dev);
- b43_rng_exit(dev->wl);
+ if (!dev->suspend_in_progress) {
+ b43_leds_exit(dev);
+ b43_rng_exit(dev->wl, false);
+ }
b43_dma_free(dev);
b43_chip_exit(dev);
b43_radio_turn_off(dev, 1);
@@ -3534,11 +3537,13 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */
b43_upload_card_macaddress(dev);
b43_security_init(dev);
- b43_rng_init(wl);
+ if (!dev->suspend_in_progress)
+ b43_rng_init(wl);
b43_set_status(dev, B43_STAT_INITIALIZED);
- b43_leds_init(dev);
+ if (!dev->suspend_in_progress)
+ b43_leds_init(dev);
out:
return err;
@@ -4135,6 +4140,7 @@ static int b43_suspend(struct ssb_device *dev, pm_message_t state)
b43dbg(wl, "Suspending...\n");
mutex_lock(&wl->mutex);
+ wldev->suspend_in_progress = true;
wldev->suspend_init_status = b43_status(wldev);
if (wldev->suspend_init_status >= B43_STAT_STARTED)
b43_wireless_core_stop(wldev);
@@ -4166,15 +4172,17 @@ static int b43_resume(struct ssb_device *dev)
if (wldev->suspend_init_status >= B43_STAT_STARTED) {
err = b43_wireless_core_start(wldev);
if (err) {
+ b43_leds_exit(wldev);
+ b43_rng_exit(wldev->wl, true);
b43_wireless_core_exit(wldev);
b43err(wl, "Resume failed at core start\n");
goto out;
}
}
- mutex_unlock(&wl->mutex);
-
b43dbg(wl, "Device resumed.\n");
- out:
+ out:
+ wldev->suspend_in_progress = false;
+ mutex_unlock(&wl->mutex);
return err;
}
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 83161d9af813..6e08405e8026 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1164,7 +1164,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
{
const struct b43legacy_dma_ops *ops = ring->ops;
u8 *header;
- int slot;
+ int slot, old_top_slot, old_used_slots;
int err;
struct b43legacy_dmadesc_generic *desc;
struct b43legacy_dmadesc_meta *meta;
@@ -1174,6 +1174,9 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
#define SLOTS_PER_PACKET 2
B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
+ old_top_slot = ring->current_slot;
+ old_used_slots = ring->used_slots;
+
/* Get a slot for the header. */
slot = request_slot(ring);
desc = ops->idx2desc(ring, slot, &meta_hdr);
@@ -1181,9 +1184,14 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
header = &(ring->txhdr_cache[slot * sizeof(
struct b43legacy_txhdr_fw3)]);
- b43legacy_generate_txhdr(ring->dev, header,
+ err = b43legacy_generate_txhdr(ring->dev, header,
skb->data, skb->len, ctl,
generate_cookie(ring, slot));
+ if (unlikely(err)) {
+ ring->current_slot = old_top_slot;
+ ring->used_slots = old_used_slots;
+ return err;
+ }
meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
sizeof(struct b43legacy_txhdr_fw3), 1);
@@ -1206,6 +1214,8 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
if (dma_mapping_error(meta->dmaaddr)) {
bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb) {
+ ring->current_slot = old_top_slot;
+ ring->used_slots = old_used_slots;
err = -ENOMEM;
goto out_unmap_hdr;
}
@@ -1216,6 +1226,8 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
meta->skb = skb;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
if (dma_mapping_error(meta->dmaaddr)) {
+ ring->current_slot = old_top_slot;
+ ring->used_slots = old_used_slots;
err = -EIO;
goto out_free_bounce;
}
@@ -1282,6 +1294,13 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
B43legacy_BUG_ON(ring->stopped);
err = dma_tx_fragment(ring, skb, ctl);
+ if (unlikely(err == -ENOKEY)) {
+ /* Drop this packet, as we don't have the encryption key
+ * anymore and must not transmit it unencrypted. */
+ dev_kfree_skb_any(skb);
+ err = 0;
+ goto out_unlock;
+ }
if (unlikely(err)) {
b43legacyerr(dev->wl, "DMA tx mapping failure\n");
goto out_unlock;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index aa20d5d56e2f..53f7f2e97615 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3160,8 +3160,6 @@ static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev)
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0414, 0x01F4);
ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */
- memset(wl->bssid, 0, ETH_ALEN);
- memset(wl->mac_addr, 0, ETH_ALEN);
b43legacy_upload_card_macaddress(dev);
b43legacy_security_init(dev);
b43legacy_rng_init(wl);
@@ -3263,6 +3261,13 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
* LEDs that are registered later depend on it. */
b43legacy_rfkill_init(dev);
+ /* Kill all old instance specific information to make sure
+ * the card won't use it in the short timeframe between start
+ * and mac80211 reconfiguring it. */
+ memset(wl->bssid, 0, ETH_ALEN);
+ memset(wl->mac_addr, 0, ETH_ALEN);
+ wl->filter_flags = 0;
+
mutex_lock(&wl->mutex);
if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) {
diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c
index e4f4c5c39e33..bcdd54eb2edb 100644
--- a/drivers/net/wireless/b43legacy/pio.c
+++ b/drivers/net/wireless/b43legacy/pio.c
@@ -181,7 +181,7 @@ union txhdr_union {
struct b43legacy_txhdr_fw3 txhdr_fw3;
};
-static void pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
+static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
struct sk_buff *skb,
struct b43legacy_pio_txpacket *packet,
size_t txhdr_size)
@@ -189,14 +189,17 @@ static void pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
union txhdr_union txhdr_data;
u8 *txhdr = NULL;
unsigned int octets;
+ int err;
txhdr = (u8 *)(&txhdr_data.txhdr_fw3);
B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
- b43legacy_generate_txhdr(queue->dev,
+ err = b43legacy_generate_txhdr(queue->dev,
txhdr, skb->data, skb->len,
&packet->txstat.control,
generate_cookie(queue, packet));
+ if (err)
+ return err;
tx_start(queue);
octets = skb->len + txhdr_size;
@@ -204,6 +207,8 @@ static void pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
octets--;
tx_data(queue, txhdr, (u8 *)skb->data, octets);
tx_complete(queue, skb);
+
+ return 0;
}
static void free_txpacket(struct b43legacy_pio_txpacket *packet,
@@ -226,6 +231,7 @@ static int pio_tx_packet(struct b43legacy_pio_txpacket *packet)
struct b43legacy_pioqueue *queue = packet->queue;
struct sk_buff *skb = packet->skb;
u16 octets;
+ int err;
octets = (u16)skb->len + sizeof(struct b43legacy_txhdr_fw3);
if (queue->tx_devq_size < octets) {
@@ -247,8 +253,14 @@ static int pio_tx_packet(struct b43legacy_pio_txpacket *packet)
if (queue->tx_devq_used + octets > queue->tx_devq_size)
return -EBUSY;
/* Now poke the device. */
- pio_tx_write_fragment(queue, skb, packet,
+ err = pio_tx_write_fragment(queue, skb, packet,
sizeof(struct b43legacy_txhdr_fw3));
+ if (unlikely(err == -ENOKEY)) {
+ /* Drop this packet, as we don't have the encryption key
+ * anymore and must not transmit it unencrypted. */
+ free_txpacket(packet, 1);
+ return 0;
+ }
/* Account for the packet size.
* (We must not overflow the device TX queue)
@@ -486,6 +498,9 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
queue = parse_cookie(dev, status->cookie, &packet);
B43legacy_WARN_ON(!queue);
+ if (!packet->skb)
+ return;
+
queue->tx_devq_packets--;
queue->tx_devq_used -= (packet->skb->len +
sizeof(struct b43legacy_txhdr_fw3));
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index e20c552442d5..d84408a82db9 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -181,7 +181,7 @@ static u8 b43legacy_calc_fallback_rate(u8 bitrate)
return 0;
}
-static void generate_txhdr_fw3(struct b43legacy_wldev *dev,
+static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
struct b43legacy_txhdr_fw3 *txhdr,
const unsigned char *fragment_data,
unsigned int fragment_len,
@@ -252,6 +252,13 @@ static void generate_txhdr_fw3(struct b43legacy_wldev *dev,
iv_len = min((size_t)txctl->iv_len,
ARRAY_SIZE(txhdr->iv));
memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len);
+ } else {
+ /* This key is invalid. This might only happen
+ * in a short timeframe after machine resume before
+ * we were able to reconfigure keys.
+ * Drop this packet completely. Do not transmit it
+ * unencrypted to avoid leaking information. */
+ return -ENOKEY;
}
}
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
@@ -345,16 +352,18 @@ static void generate_txhdr_fw3(struct b43legacy_wldev *dev,
/* Apply the bitfields */
txhdr->mac_ctl = cpu_to_le32(mac_ctl);
txhdr->phy_ctl = cpu_to_le16(phy_ctl);
+
+ return 0;
}
-void b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
+int b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
u8 *txhdr,
const unsigned char *fragment_data,
unsigned int fragment_len,
const struct ieee80211_tx_control *txctl,
u16 cookie)
{
- generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr,
+ return generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr,
fragment_data, fragment_len,
txctl, cookie);
}
diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h
index 8a155d0a5d1f..bab47928a0c9 100644
--- a/drivers/net/wireless/b43legacy/xmit.h
+++ b/drivers/net/wireless/b43legacy/xmit.h
@@ -76,7 +76,7 @@ struct b43legacy_txhdr_fw3 {
-void b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
+int b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
u8 *txhdr,
const unsigned char *fragment_data,
unsigned int fragment_len,
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 2ab107f45793..5bf9e00b070c 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -162,7 +162,7 @@ that only one external action is invoked at a time.
#include <linux/firmware.h>
#include <linux/acpi.h>
#include <linux/ctype.h>
-#include <linux/latency.h>
+#include <linux/pm_qos_params.h>
#include "ipw2100.h"
@@ -1701,7 +1701,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
/* the ipw2100 hardware really doesn't want power management delays
* longer than 175usec
*/
- modify_acceptable_latency("ipw2100", 175);
+ pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 175);
/* If the interrupt is enabled, turn it off... */
spin_lock_irqsave(&priv->low_lock, flags);
@@ -1856,7 +1856,8 @@ static void ipw2100_down(struct ipw2100_priv *priv)
ipw2100_disable_interrupts(priv);
spin_unlock_irqrestore(&priv->low_lock, flags);
- modify_acceptable_latency("ipw2100", INFINITE_LATENCY);
+ pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100",
+ PM_QOS_DEFAULT_VALUE);
/* We have to signal any supplicant if we are disassociating */
if (associated)
@@ -6554,7 +6555,8 @@ static int __init ipw2100_init(void)
if (ret)
goto out;
- set_acceptable_latency("ipw2100", INFINITE_LATENCY);
+ pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100",
+ PM_QOS_DEFAULT_VALUE);
#ifdef CONFIG_IPW2100_DEBUG
ipw2100_debug_level = debug;
ret = driver_create_file(&ipw2100_pci_driver.driver,
@@ -6576,7 +6578,7 @@ static void __exit ipw2100_exit(void)
&driver_attr_debug_level);
#endif
pci_unregister_driver(&ipw2100_pci_driver);
- remove_acceptable_latency("ipw2100");
+ pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100");
}
module_init(ipw2100_init);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 4fdeb5323248..8d4d91d35fd2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -238,9 +238,10 @@ void iwl3945_hw_rx_statistics(struct iwl3945_priv *priv, struct iwl3945_rx_mem_b
priv->last_statistics_time = jiffies;
}
-void iwl3945_add_radiotap(struct iwl3945_priv *priv, struct sk_buff *skb,
- struct iwl3945_rx_frame_hdr *rx_hdr,
- struct ieee80211_rx_status *stats)
+static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
+ struct sk_buff *skb,
+ struct iwl3945_rx_frame_hdr *rx_hdr,
+ struct ieee80211_rx_status *stats)
{
/* First cache any information we need before we overwrite
* the information provided in the skb from the hardware */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 569347ff377b..d727de8b96fe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -4658,17 +4658,30 @@ void iwl4965_set_ht_add_station(struct iwl4965_priv *priv, u8 index,
struct ieee80211_ht_info *sta_ht_inf)
{
__le32 sta_flags;
+ u8 mimo_ps_mode;
if (!sta_ht_inf || !sta_ht_inf->ht_supported)
goto done;
+ mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
+
sta_flags = priv->stations[index].sta.station_flags;
- if (((sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS >> 2))
- == IWL_MIMO_PS_DYNAMIC)
+ sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+ switch (mimo_ps_mode) {
+ case WLAN_HT_CAP_MIMO_PS_STATIC:
+ sta_flags |= STA_FLG_MIMO_DIS_MSK;
+ break;
+ case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
- else
- sta_flags &= ~STA_FLG_RTS_MIMO_PROT_MSK;
+ break;
+ case WLAN_HT_CAP_MIMO_PS_DISABLED:
+ break;
+ default:
+ IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
+ break;
+ }
sta_flags |= cpu_to_le32(
(u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
@@ -4679,7 +4692,7 @@ void iwl4965_set_ht_add_station(struct iwl4965_priv *priv, u8 index,
if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf))
sta_flags |= STA_FLG_FAT_EN_MSK;
else
- sta_flags &= (~STA_FLG_FAT_EN_MSK);
+ sta_flags &= ~STA_FLG_FAT_EN_MSK;
priv->stations[index].sta.station_flags = sta_flags;
done:
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index cb009f4c401f..8993cca81b40 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -147,9 +147,6 @@ static inline struct ieee80211_conf *ieee80211_get_hw_conf(
#define QOS_CONTROL_LEN 2
-#define IEEE80211_STYPE_BACK_REQ 0x0080
-#define IEEE80211_STYPE_BACK 0x0090
-
static inline int ieee80211_is_management(u16 fc)
{
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 33239f197984..5ee1ad69898b 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -4207,13 +4207,13 @@ static u8 ratio2dB[100] = {
* Conversion assumes that levels are voltages (20*log), not powers (10*log). */
int iwl3945_calc_db_from_ratio(int sig_ratio)
{
- /* Anything above 1000:1 just report as 60 dB */
- if (sig_ratio > 1000)
+ /* 1000:1 or higher just report as 60 dB */
+ if (sig_ratio >= 1000)
return 60;
- /* Above 100:1, divide by 10 and use table,
+ /* 100:1 or higher, divide by 10 and use table,
* add 20 dB to make up for divide by 10 */
- if (sig_ratio > 100)
+ if (sig_ratio >= 100)
return (20 + (int)ratio2dB[sig_ratio/10]);
/* We shouldn't see this */
@@ -6330,6 +6330,11 @@ static int __iwl3945_up(struct iwl3945_priv *priv)
return -ENODEV;
}
+ if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
+ IWL_ERROR("ucode not available for device bringup\n");
+ return -EIO;
+ }
+
/* If platform's RF_KILL switch is NOT set to KILL */
if (iwl3945_read32(priv, CSR_GP_CNTRL) &
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
@@ -6342,11 +6347,6 @@ static int __iwl3945_up(struct iwl3945_priv *priv)
}
}
- if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
- IWL_ERROR("ucode not available for device bringup\n");
- return -EIO;
- }
-
iwl3945_write32(priv, CSR_INT, 0xFFFFFFFF);
rc = iwl3945_hw_nic_init(priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c
index bf3a60c037aa..f423241b9567 100644
--- a/drivers/net/wireless/iwlwifi/iwl4965-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c
@@ -6755,6 +6755,11 @@ static int __iwl4965_up(struct iwl4965_priv *priv)
return -ENODEV;
}
+ if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
+ IWL_ERROR("ucode not available for device bringup\n");
+ return -EIO;
+ }
+
/* If platform's RF_KILL switch is NOT set to KILL */
if (iwl4965_read32(priv, CSR_GP_CNTRL) &
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
@@ -6767,11 +6772,6 @@ static int __iwl4965_up(struct iwl4965_priv *priv)
}
}
- if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
- IWL_ERROR("ucode not available for device bringup\n");
- return -EIO;
- }
-
iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
rc = iwl4965_hw_nic_init(priv);
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 9a61188b62e9..69f94c92b32d 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -1473,7 +1473,7 @@ int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
* Called via lbs_prepare_and_send_command(priv, CMD_802_11_SCAN, ...)
* from cmd.c
*
- * Sends a fixed lenght data part (specifying the BSS type and BSSID filters)
+ * Sends a fixed length data part (specifying the BSS type and BSSID filters)
* as well as a variable number/length of TLVs to the firmware.
*
* @param priv A pointer to struct lbs_private structure
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index d2fa079fbc4c..f479c1af6782 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -195,7 +195,7 @@ static int netwave_pcmcia_config(struct pcmcia_device *arg); /* Runs after card
static void netwave_detach(struct pcmcia_device *p_dev); /* Destroy instance */
/* Hardware configuration */
-static void netwave_doreset(kio_addr_t iobase, u_char __iomem *ramBase);
+static void netwave_doreset(unsigned int iobase, u_char __iomem *ramBase);
static void netwave_reset(struct net_device *dev);
/* Misc device stuff */
@@ -309,7 +309,7 @@ static inline void wait_WOC(unsigned int iobase)
}
static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase,
- kio_addr_t iobase) {
+ unsigned int iobase) {
u_short resultBuffer;
/* if time since last snapshot is > 1 sec. (100 jiffies?) then take
@@ -340,7 +340,7 @@ static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase,
static struct iw_statistics *netwave_get_wireless_stats(struct net_device *dev)
{
unsigned long flags;
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
netwave_private *priv = netdev_priv(dev);
u_char __iomem *ramBase = priv->ramBase;
struct iw_statistics* wstats;
@@ -471,7 +471,7 @@ static int netwave_set_nwid(struct net_device *dev,
char *extra)
{
unsigned long flags;
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
netwave_private *priv = netdev_priv(dev);
u_char __iomem *ramBase = priv->ramBase;
@@ -518,7 +518,7 @@ static int netwave_set_scramble(struct net_device *dev,
char *key)
{
unsigned long flags;
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
netwave_private *priv = netdev_priv(dev);
u_char __iomem *ramBase = priv->ramBase;
@@ -621,7 +621,7 @@ static int netwave_get_snap(struct net_device *dev,
char *extra)
{
unsigned long flags;
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
netwave_private *priv = netdev_priv(dev);
u_char __iomem *ramBase = priv->ramBase;
@@ -874,7 +874,7 @@ static int netwave_resume(struct pcmcia_device *link)
*
* Proper hardware reset of the card.
*/
-static void netwave_doreset(kio_addr_t ioBase, u_char __iomem *ramBase)
+static void netwave_doreset(unsigned int ioBase, u_char __iomem *ramBase)
{
/* Reset card */
wait_WOC(ioBase);
@@ -892,7 +892,7 @@ static void netwave_reset(struct net_device *dev) {
/* u_char state; */
netwave_private *priv = netdev_priv(dev);
u_char __iomem *ramBase = priv->ramBase;
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
DEBUG(0, "netwave_reset: Done with hardware reset\n");
@@ -973,7 +973,7 @@ static int netwave_hw_xmit(unsigned char* data, int len,
netwave_private *priv = netdev_priv(dev);
u_char __iomem * ramBase = priv->ramBase;
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
/* Disable interrupts & save flags */
spin_lock_irqsave(&priv->spinlock, flags);
@@ -1065,7 +1065,7 @@ static int netwave_start_xmit(struct sk_buff *skb, struct net_device *dev) {
*/
static irqreturn_t netwave_interrupt(int irq, void* dev_id)
{
- kio_addr_t iobase;
+ unsigned int iobase;
u_char __iomem *ramBase;
struct net_device *dev = (struct net_device *)dev_id;
struct netwave_private *priv = netdev_priv(dev);
@@ -1235,7 +1235,7 @@ static int netwave_rx(struct net_device *dev)
{
netwave_private *priv = netdev_priv(dev);
u_char __iomem *ramBase = priv->ramBase;
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
u_char rxStatus;
struct sk_buff *skb = NULL;
unsigned int curBuffer,
@@ -1388,7 +1388,7 @@ module_exit(exit_netwave_cs);
*/
static void set_multicast_list(struct net_device *dev)
{
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
netwave_private *priv = netdev_priv(dev);
u_char __iomem * ramBase = priv->ramBase;
u_char rcvMode = 0;
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index c2037b2a05bf..06eea6ab7bf0 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -149,7 +149,7 @@ psa_write(struct net_device * dev,
net_local *lp = netdev_priv(dev);
u_char __iomem *ptr = lp->mem + PSA_ADDR + (o << 1);
int count = 0;
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
/* As there seems to be no PSA_BUSY flag as in the ISA model, we are
* obliged to verify this address to know when the PSA is ready... */
volatile u_char __iomem *verify = lp->mem + PSA_ADDR +
@@ -708,7 +708,7 @@ static void wl_update_history(wavepoint_history *wavepoint, unsigned char sigqua
/* Perform a handover to a new WavePoint */
static void wv_roam_handover(wavepoint_history *wavepoint, net_local *lp)
{
- kio_addr_t base = lp->dev->base_addr;
+ unsigned int base = lp->dev->base_addr;
mm_t m;
unsigned long flags;
@@ -821,7 +821,7 @@ wv_82593_cmd(struct net_device * dev,
int cmd,
int result)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
int status;
int wait_completed;
long spin;
@@ -945,7 +945,7 @@ read_ringbuf(struct net_device * dev,
char * buf,
int len)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
int ring_ptr = addr;
int chunk_len;
char * buf_ptr = buf;
@@ -1096,7 +1096,7 @@ wv_psa_show(psa_t * p)
static void
wv_mmc_show(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local * lp = netdev_priv(dev);
mmr_t m;
@@ -1275,7 +1275,7 @@ wv_packet_info(u_char * p, /* Packet to dump */
static inline void
wv_init_info(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
psa_t psa;
DECLARE_MAC_BUF(mac);
@@ -1294,7 +1294,7 @@ wv_init_info(struct net_device * dev)
#ifdef DEBUG_BASIC_SHOW
/* Now, let's go for the basic stuff */
- printk(KERN_NOTICE "%s: WaveLAN: port %#lx, irq %d, "
+ printk(KERN_NOTICE "%s: WaveLAN: port %#x, irq %d, "
"hw_addr %s",
dev->name, base, dev->irq,
print_mac(mac, dev->dev_addr));
@@ -1828,7 +1828,7 @@ static int wavelan_set_nwid(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
psa_t psa;
mm_t m;
@@ -1918,7 +1918,7 @@ static int wavelan_set_freq(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
unsigned long flags;
int ret;
@@ -1948,7 +1948,7 @@ static int wavelan_get_freq(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
psa_t psa;
unsigned long flags;
@@ -1994,7 +1994,7 @@ static int wavelan_set_sens(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
psa_t psa;
unsigned long flags;
@@ -2060,7 +2060,7 @@ static int wavelan_set_encode(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
unsigned long flags;
psa_t psa;
@@ -2130,7 +2130,7 @@ static int wavelan_get_encode(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
psa_t psa;
unsigned long flags;
@@ -2349,7 +2349,7 @@ static int wavelan_get_range(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
struct iw_range *range = (struct iw_range *) extra;
unsigned long flags;
@@ -2425,7 +2425,7 @@ static int wavelan_set_qthr(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
psa_t psa;
unsigned long flags;
@@ -2701,7 +2701,7 @@ static const struct iw_handler_def wavelan_handler_def =
static iw_stats *
wavelan_get_wireless_stats(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local * lp = netdev_priv(dev);
mmr_t m;
iw_stats * wstats;
@@ -2764,7 +2764,7 @@ wv_start_of_frame(struct net_device * dev,
int rfp, /* end of frame */
int wrap) /* start of buffer */
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
int rp;
int len;
@@ -2925,7 +2925,7 @@ wv_packet_read(struct net_device * dev,
static inline void
wv_packet_rcv(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local * lp = netdev_priv(dev);
int newrfp;
int rp;
@@ -3062,7 +3062,7 @@ wv_packet_write(struct net_device * dev,
short length)
{
net_local * lp = netdev_priv(dev);
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
unsigned long flags;
int clen = length;
register u_short xmtdata_base = TX_BASE;
@@ -3183,7 +3183,7 @@ wavelan_packet_xmit(struct sk_buff * skb,
static inline int
wv_mmc_init(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
psa_t psa;
mmw_t m;
int configured;
@@ -3377,7 +3377,7 @@ wv_mmc_init(struct net_device * dev)
static int
wv_ru_stop(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local * lp = netdev_priv(dev);
unsigned long flags;
int status;
@@ -3440,7 +3440,7 @@ wv_ru_stop(struct net_device * dev)
static int
wv_ru_start(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local * lp = netdev_priv(dev);
unsigned long flags;
@@ -3528,7 +3528,7 @@ wv_ru_start(struct net_device * dev)
static int
wv_82593_config(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local * lp = netdev_priv(dev);
struct i82593_conf_block cfblk;
int ret = TRUE;
@@ -3765,7 +3765,7 @@ static int
wv_hw_config(struct net_device * dev)
{
net_local * lp = netdev_priv(dev);
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
unsigned long flags;
int ret = FALSE;
@@ -4047,7 +4047,7 @@ wavelan_interrupt(int irq,
{
struct net_device * dev = dev_id;
net_local * lp;
- kio_addr_t base;
+ unsigned int base;
int status0;
u_int tx_status;
@@ -4306,7 +4306,7 @@ static void
wavelan_watchdog(struct net_device * dev)
{
net_local * lp = netdev_priv(dev);
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
unsigned long flags;
int aborted = FALSE;
@@ -4382,7 +4382,7 @@ wavelan_open(struct net_device * dev)
{
net_local * lp = netdev_priv(dev);
struct pcmcia_device * link = lp->link;
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
#ifdef DEBUG_CALLBACK_TRACE
printk(KERN_DEBUG "%s: ->wavelan_open(dev=0x%x)\n", dev->name,
@@ -4436,7 +4436,7 @@ static int
wavelan_close(struct net_device * dev)
{
struct pcmcia_device * link = ((net_local *)netdev_priv(dev))->link;
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
#ifdef DEBUG_CALLBACK_TRACE
printk(KERN_DEBUG "%s: ->wavelan_close(dev=0x%x)\n", dev->name,
diff --git a/drivers/nubus/Makefile b/drivers/nubus/Makefile
index f5ef03cf9879..21bda2031e7e 100644
--- a/drivers/nubus/Makefile
+++ b/drivers/nubus/Makefile
@@ -4,5 +4,4 @@
obj-y := nubus.o
-obj-$(CONFIG_MODULES) += nubus_syms.o
obj-$(CONFIG_PROC_FS) += proc.o
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index e503c9c98032..2f047e573d86 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/page.h>
@@ -186,6 +187,7 @@ void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent* dirent,
len--;
}
}
+EXPORT_SYMBOL(nubus_get_rsrc_mem);
void nubus_get_rsrc_str(void *dest, const struct nubus_dirent* dirent,
int len)
@@ -200,6 +202,7 @@ void nubus_get_rsrc_str(void *dest, const struct nubus_dirent* dirent,
len--;
}
}
+EXPORT_SYMBOL(nubus_get_rsrc_str);
int nubus_get_root_dir(const struct nubus_board* board,
struct nubus_dir* dir)
@@ -209,6 +212,7 @@ int nubus_get_root_dir(const struct nubus_board* board,
dir->mask = board->lanes;
return 0;
}
+EXPORT_SYMBOL(nubus_get_root_dir);
/* This is a slyly renamed version of the above */
int nubus_get_func_dir(const struct nubus_dev* dev,
@@ -219,6 +223,7 @@ int nubus_get_func_dir(const struct nubus_dev* dev,
dir->mask = dev->board->lanes;
return 0;
}
+EXPORT_SYMBOL(nubus_get_func_dir);
int nubus_get_board_dir(const struct nubus_board* board,
struct nubus_dir* dir)
@@ -237,6 +242,7 @@ int nubus_get_board_dir(const struct nubus_board* board,
return -1;
return 0;
}
+EXPORT_SYMBOL(nubus_get_board_dir);
int nubus_get_subdir(const struct nubus_dirent *ent,
struct nubus_dir *dir)
@@ -246,6 +252,7 @@ int nubus_get_subdir(const struct nubus_dirent *ent,
dir->mask = ent->mask;
return 0;
}
+EXPORT_SYMBOL(nubus_get_subdir);
int nubus_readdir(struct nubus_dir *nd, struct nubus_dirent *ent)
{
@@ -274,12 +281,14 @@ int nubus_readdir(struct nubus_dir *nd, struct nubus_dirent *ent)
ent->mask = nd->mask;
return 0;
}
+EXPORT_SYMBOL(nubus_readdir);
int nubus_rewinddir(struct nubus_dir* dir)
{
dir->ptr = dir->base;
return 0;
}
+EXPORT_SYMBOL(nubus_rewinddir);
/* Driver interface functions, more or less like in pci.c */
@@ -303,6 +312,7 @@ nubus_find_device(unsigned short category,
}
return NULL;
}
+EXPORT_SYMBOL(nubus_find_device);
struct nubus_dev*
nubus_find_type(unsigned short category,
@@ -320,6 +330,7 @@ nubus_find_type(unsigned short category,
}
return NULL;
}
+EXPORT_SYMBOL(nubus_find_type);
struct nubus_dev*
nubus_find_slot(unsigned int slot,
@@ -335,6 +346,7 @@ nubus_find_slot(unsigned int slot,
}
return NULL;
}
+EXPORT_SYMBOL(nubus_find_slot);
int
nubus_find_rsrc(struct nubus_dir* dir, unsigned char rsrc_type,
@@ -346,13 +358,14 @@ nubus_find_rsrc(struct nubus_dir* dir, unsigned char rsrc_type,
}
return -1;
}
+EXPORT_SYMBOL(nubus_find_rsrc);
/* Initialization functions - decide which slots contain stuff worth
looking at, and print out lots and lots of information from the
resource blocks. */
/* FIXME: A lot of this stuff will eventually be useful after
- initializaton, for intelligently probing Ethernet and video chips,
+ initialization, for intelligently probing Ethernet and video chips,
among other things. The rest of it should go in the /proc code.
For now, we just use it to give verbose boot logs. */
diff --git a/drivers/nubus/nubus_syms.c b/drivers/nubus/nubus_syms.c
deleted file mode 100644
index 9204f04fbf0b..000000000000
--- a/drivers/nubus/nubus_syms.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Exported symbols for NuBus services
-
- (c) 1999 David Huggins-Daines <dhd@debian.org> */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/nubus.h>
-
-#ifdef CONFIG_PROC_FS
-EXPORT_SYMBOL(nubus_proc_attach_device);
-EXPORT_SYMBOL(nubus_proc_detach_device);
-#endif
-
-MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(nubus_find_device);
-EXPORT_SYMBOL(nubus_find_type);
-EXPORT_SYMBOL(nubus_find_slot);
-EXPORT_SYMBOL(nubus_get_root_dir);
-EXPORT_SYMBOL(nubus_get_board_dir);
-EXPORT_SYMBOL(nubus_get_func_dir);
-EXPORT_SYMBOL(nubus_readdir);
-EXPORT_SYMBOL(nubus_find_rsrc);
-EXPORT_SYMBOL(nubus_rewinddir);
-EXPORT_SYMBOL(nubus_get_subdir);
-EXPORT_SYMBOL(nubus_get_rsrc_mem);
-EXPORT_SYMBOL(nubus_get_rsrc_str);
-
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
index 5271a4a7af26..e07492be1f4a 100644
--- a/drivers/nubus/proc.c
+++ b/drivers/nubus/proc.c
@@ -22,6 +22,8 @@
#include <linux/nubus.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
+#include <linux/module.h>
+
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -140,6 +142,7 @@ int nubus_proc_attach_device(struct nubus_dev *dev)
return 0;
}
+EXPORT_SYMBOL(nubus_proc_attach_device);
/* FIXME: this is certainly broken! */
int nubus_proc_detach_device(struct nubus_dev *dev)
@@ -154,6 +157,7 @@ int nubus_proc_detach_device(struct nubus_dev *dev)
}
return 0;
}
+EXPORT_SYMBOL(nubus_proc_detach_device);
void __init proc_bus_nubus_add_devices(void)
{
diff --git a/drivers/of/base.c b/drivers/of/base.c
index b306fef1ac41..80c9deca5f35 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -138,6 +138,31 @@ struct device_node *of_get_parent(const struct device_node *node)
EXPORT_SYMBOL(of_get_parent);
/**
+ * of_get_next_parent - Iterate to a node's parent
+ * @node: Node to get parent of
+ *
+ * This is like of_get_parent() except that it drops the
+ * refcount on the passed node, making it suitable for iterating
+ * through a node's parents.
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_get_next_parent(struct device_node *node)
+{
+ struct device_node *parent;
+
+ if (!node)
+ return NULL;
+
+ read_lock(&devtree_lock);
+ parent = of_node_get(node->parent);
+ of_node_put(node);
+ read_unlock(&devtree_lock);
+ return parent;
+}
+
+/**
* of_get_next_child - Iterate over a node's children
* @node: parent node
* @prev: previous child of the parent node, or NULL to get first
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index b47bb2d7476a..ca09a63a64db 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -85,6 +85,15 @@ static int of_platform_device_resume(struct device * dev)
return error;
}
+static void of_platform_device_shutdown(struct device *dev)
+{
+ struct of_device *of_dev = to_of_device(dev);
+ struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
+
+ if (dev->driver && drv->shutdown)
+ drv->shutdown(of_dev);
+}
+
int of_bus_type_init(struct bus_type *bus, const char *name)
{
bus->name = name;
@@ -93,6 +102,7 @@ int of_bus_type_init(struct bus_type *bus, const char *name)
bus->remove = of_platform_device_remove;
bus->suspend = of_platform_device_suspend;
bus->resume = of_platform_device_resume;
+ bus->shutdown = of_platform_device_shutdown;
return bus_register(bus);
}
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 7c60cbd85dc8..d08b284de196 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -363,7 +363,7 @@ ccio_alloc_range(struct ioc *ioc, size_t size)
if (pages_needed <= 8) {
/*
* LAN traffic will not thrash the TLB IFF the same NIC
- * uses 8 adjacent pages to map seperate payload data.
+ * uses 8 adjacent pages to map separate payload data.
* ie the same byte in the resource bit map.
*/
#if 0
@@ -941,7 +941,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
** w/o this association, we wouldn't have coherent DMA!
** Access to the virtual address is what forces a two pass algorithm.
*/
- coalesced = iommu_coalesce_chunks(ioc, sglist, nents, ccio_alloc_range);
+ coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);
/*
** Program the I/O Pdir
@@ -1589,7 +1589,7 @@ static int __init ccio_probe(struct parisc_device *dev)
}
/**
- * ccio_init - ccio initalization procedure.
+ * ccio_init - ccio initialization procedure.
*
* Register this driver.
*/
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index a728a7cd2fc8..65eee67aa2ae 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -95,7 +95,7 @@ static struct parisc_driver hppb_driver = {
};
/**
- * hppb_init - HP-PB bus initalization procedure.
+ * hppb_init - HP-PB bus initialization procedure.
*
* Register this driver.
*/
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index 0a1f99a2e93e..97ba8286c596 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -95,12 +95,14 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
*/
static inline unsigned int
-iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
+iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
+ struct scatterlist *startsg, int nents,
int (*iommu_alloc_range)(struct ioc *, size_t))
{
struct scatterlist *contig_sg; /* contig chunk head */
unsigned long dma_offset, dma_len; /* start/len of DMA stream */
unsigned int n_mappings = 0;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
while (nents > 0) {
@@ -142,6 +144,9 @@ iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
IOVP_SIZE) > DMA_CHUNK_SIZE))
break;
+ if (startsg->length + dma_len > max_seg_size)
+ break;
+
/*
** Next see if we can append the next chunk (i.e.
** it must end on one page and begin on another
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index e527a0e1d6c0..d06627c3f353 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -946,7 +946,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
** w/o this association, we wouldn't have coherent DMA!
** Access to the virtual address is what forces a two pass algorithm.
*/
- coalesced = iommu_coalesce_chunks(ioc, sglist, nents, sba_alloc_range);
+ coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);
/*
** Program the I/O Pdir
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index e9743d3efaf6..238628d3a854 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1540,6 +1540,38 @@ static void __devinit detect_and_report_smsc (void)
smsc_check(0x3f0,0x44);
smsc_check(0x370,0x44);
}
+
+static void __devinit detect_and_report_it87(void)
+{
+ u16 dev;
+ u8 r;
+ if (verbose_probing)
+ printk(KERN_DEBUG "IT8705 Super-IO detection, now testing port 2E ...\n");
+ if (!request_region(0x2e, 1, __FUNCTION__))
+ return;
+ outb(0x87, 0x2e);
+ outb(0x01, 0x2e);
+ outb(0x55, 0x2e);
+ outb(0x55, 0x2e);
+ outb(0x20, 0x2e);
+ dev = inb(0x2f) << 8;
+ outb(0x21, 0x2e);
+ dev |= inb(0x2f);
+ if (dev == 0x8712 || dev == 0x8705 || dev == 0x8715 ||
+ dev == 0x8716 || dev == 0x8718 || dev == 0x8726) {
+ printk(KERN_INFO "IT%04X SuperIO detected.\n", dev);
+ outb(0x07, 0x2E); /* Parallel Port */
+ outb(0x03, 0x2F);
+ outb(0xF0, 0x2E); /* BOOT 0x80 off */
+ r = inb(0x2f);
+ outb(0xF0, 0x2E);
+ outb(r | 8, 0x2F);
+ outb(0x02, 0x2E); /* Lock */
+ outb(0x02, 0x2F);
+
+ release_region(0x2e, 1);
+ }
+}
#endif /* CONFIG_PARPORT_PC_SUPERIO */
static int get_superio_dma (struct parport *p)
@@ -2767,6 +2799,7 @@ enum parport_pc_pci_cards {
netmos_9755,
netmos_9805,
netmos_9815,
+ quatech_sppxp100,
};
@@ -2843,6 +2876,7 @@ static struct parport_pc_pci {
/* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} }, /* untested */
/* netmos_9805 */ { 1, { { 0, -1 }, } }, /* untested */
/* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, /* untested */
+ /* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
};
static const struct pci_device_id parport_pc_pci_tbl[] = {
@@ -2926,6 +2960,9 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
+ /* Quatech SPPXP-100 Parallel port PCI ExpressCard */
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
{ 0, } /* terminate list */
};
MODULE_DEVICE_TABLE(pci,parport_pc_pci_tbl);
@@ -3159,24 +3196,25 @@ static void __init parport_pc_find_ports (int autoirq, int autodma)
int count = 0, err;
#ifdef CONFIG_PARPORT_PC_SUPERIO
- detect_and_report_winbond ();
- detect_and_report_smsc ();
+ detect_and_report_it87();
+ detect_and_report_winbond();
+ detect_and_report_smsc();
#endif
/* Onboard SuperIO chipsets that show themselves on the PCI bus. */
- count += parport_pc_init_superio (autoirq, autodma);
+ count += parport_pc_init_superio(autoirq, autodma);
/* PnP ports, skip detection if SuperIO already found them */
if (!count) {
- err = pnp_register_driver (&parport_pc_pnp_driver);
+ err = pnp_register_driver(&parport_pc_pnp_driver);
if (!err)
pnp_registered_parport = 1;
}
/* ISA ports and whatever (see asm/parport.h). */
- parport_pc_find_nonpci_ports (autoirq, autodma);
+ parport_pc_find_nonpci_ports(autoirq, autodma);
- err = pci_register_driver (&parport_pc_pci_driver);
+ err = pci_register_driver(&parport_pc_pci_driver);
if (!err)
pci_registered_parport = 1;
}
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index bd6ad8b38168..e2e95b36a603 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -77,7 +77,7 @@ static struct parport_pc_pci cards[] __devinitdata = {
/* titan_110l */ { 1, { { 3, -1 }, } },
/* titan_210l */ { 1, { { 3, -1 }, } },
/* netmos_9xx5_combo */ { 1, { { 2, -1 }, }, netmos_parallel_init },
- /* netmos_9855 */ { 1, { { 0, -1 }, }, netmos_parallel_init },
+ /* netmos_9855 */ { 1, { { 2, -1 }, }, netmos_parallel_init },
/* avlab_1s1p */ { 1, { { 1, 2}, } },
/* avlab_1s2p */ { 2, { { 1, 2}, { 3, 4 },} },
/* avlab_2s1p */ { 1, { { 2, 3}, } },
@@ -185,7 +185,7 @@ static struct pciserial_board pci_parport_serial_boards[] __devinitdata = {
.uart_offset = 8,
},
[netmos_9855] = {
- .flags = FL_BASE2 | FL_BASE_BARS,
+ .flags = FL_BASE4 | FL_BASE_BARS,
.num_ports = 1,
.base_baud = 115200,
.uart_offset = 8,
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index 853a15f44f88..cd565bb4e1a9 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -163,7 +163,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
idlens[1] = idlens[0]+2;
if (belen != lelen) {
int off = 2;
- /* Don't try lenghts of 0x100 and 0x200 as 1 and 2 */
+ /* Don't try lengths of 0x100 and 0x200 as 1 and 2 */
if (idlens[0] <= 2)
off = 0;
idlens[off] = max(belen, lelen);
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index f697f3d728eb..9f04d17576d6 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -13,6 +13,9 @@ obj-$(CONFIG_HOTPLUG) += hotplug.o
# Build the PCI Hotplug drivers if we were asked to
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
+ifdef CONFIG_HOTPLUG_PCI
+obj-y += hotplug-pci.o
+endif
# Build the PCI MSI interrupt support
obj-$(CONFIG_PCI_MSI) += msi.o
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 91b2dc956be5..8ed26480371f 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include <linux/dmar.h>
#include "iova.h"
+#include "intel-iommu.h"
#undef PREFIX
#define PREFIX "DMAR:"
diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c
new file mode 100644
index 000000000000..a590ef682153
--- /dev/null
+++ b/drivers/pci/hotplug-pci.c
@@ -0,0 +1,20 @@
+/* Core PCI functionality used only by PCI hotplug */
+
+#include <linux/pci.h>
+#include "pci.h"
+
+
+unsigned int pci_do_scan_bus(struct pci_bus *bus)
+{
+ unsigned int max;
+
+ max = pci_scan_child_bus(bus);
+
+ /*
+ * Make the discovered devices available.
+ */
+ pci_bus_add_devices(bus);
+
+ return max;
+}
+EXPORT_SYMBOL(pci_do_scan_bus);
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 4e01df99681a..31fa6c92aa5e 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1088,7 +1088,7 @@ static void dmar_init_reserved_ranges(void)
int i;
u64 addr, size;
- init_iova_domain(&reserved_iova_list);
+ init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
/* IOAPIC ranges shouldn't be accessed by DMA */
iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
@@ -1142,7 +1142,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
int adjust_width, agaw;
unsigned long sagaw;
- init_iova_domain(&domain->iovad);
+ init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
spin_lock_init(&domain->mapping_lock);
domain_reserve_special_ranges(domain);
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h
index 459ad1f9dc54..0e4862675ad2 100644
--- a/drivers/pci/intel-iommu.h
+++ b/drivers/pci/intel-iommu.h
@@ -23,10 +23,24 @@
#include <linux/types.h>
#include <linux/msi.h>
+#include <linux/sysdev.h>
#include "iova.h"
#include <linux/io.h>
/*
+ * We need a fixed PAGE_SIZE of 4K irrespective of
+ * arch PAGE_SIZE for IOMMU page tables.
+ */
+#define PAGE_SHIFT_4K (12)
+#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
+#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
+#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
+
+#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K)
+#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
+#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
+
+/*
* Intel IOMMU register specification per version 1.0 public spec.
*/
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index a84571c29360..8de7ab6c6d0c 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -9,19 +9,19 @@
#include "iova.h"
void
-init_iova_domain(struct iova_domain *iovad)
+init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
spin_lock_init(&iovad->iova_alloc_lock);
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
iovad->cached32_node = NULL;
-
+ iovad->dma_32bit_pfn = pfn_32bit;
}
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
- if ((*limit_pfn != DMA_32BIT_PFN) ||
+ if ((*limit_pfn != iovad->dma_32bit_pfn) ||
(iovad->cached32_node == NULL))
return rb_last(&iovad->rbroot);
else {
@@ -37,7 +37,7 @@ static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
unsigned long limit_pfn, struct iova *new)
{
- if (limit_pfn != DMA_32BIT_PFN)
+ if (limit_pfn != iovad->dma_32bit_pfn)
return;
iovad->cached32_node = &new->node;
}
diff --git a/drivers/pci/iova.h b/drivers/pci/iova.h
index ae3028d5a941..d521b5b7319c 100644
--- a/drivers/pci/iova.h
+++ b/drivers/pci/iova.h
@@ -15,22 +15,9 @@
#include <linux/rbtree.h>
#include <linux/dma-mapping.h>
-/*
- * We need a fixed PAGE_SIZE of 4K irrespective of
- * arch PAGE_SIZE for IOMMU page tables.
- */
-#define PAGE_SHIFT_4K (12)
-#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
-#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
-#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
-
/* IO virtual address start page frame number */
#define IOVA_START_PFN (1)
-#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K)
-#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
-#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
-
/* iova structure */
struct iova {
struct rb_node node;
@@ -44,6 +31,7 @@ struct iova_domain {
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
struct rb_root rbroot; /* iova domain rbtree root */
struct rb_node *cached32_node; /* Save last alloced node */
+ unsigned long dma_32bit_pfn;
};
struct iova *alloc_iova_mem(void);
@@ -56,7 +44,7 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
-void init_iova_domain(struct iova_domain *iovad);
+void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index abf4203304e4..8dcf1458aa2f 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -21,7 +21,6 @@
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/capability.h>
-#include <linux/aspm.h>
#include "pci.h"
static int sysfs_initialized; /* = 0 */
@@ -651,8 +650,6 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
if (pcibios_add_platform_entries(pdev))
goto err_rom_file;
- pcie_aspm_create_sysfs_dev_files(pdev);
-
return 0;
err_rom_file:
@@ -682,8 +679,6 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
if (!sysfs_initialized)
return;
- pcie_aspm_remove_sysfs_dev_files(pdev);
-
if (pdev->cfg_size < 4096)
sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
else
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b3e9294e4a0e..ae3df46eaabf 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -18,7 +18,6 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
-#include <linux/aspm.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
@@ -520,9 +519,6 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
if (need_restore)
pci_restore_bars(dev);
- if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
-
return 0;
}
@@ -1455,6 +1451,22 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
}
#endif
+#ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
+int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
+{
+ return dma_set_max_seg_size(&dev->dev, size);
+}
+EXPORT_SYMBOL(pci_set_dma_max_seg_size);
+#endif
+
+#ifndef HAVE_ARCH_PCI_SET_DMA_SEGMENT_BOUNDARY
+int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
+{
+ return dma_set_seg_boundary(&dev->dev, mask);
+}
+EXPORT_SYMBOL(pci_set_dma_seg_boundary);
+#endif
+
/**
* pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
* @dev: PCI device to query
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 60104cf98796..287a9311716c 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -26,23 +26,3 @@ config HOTPLUG_PCI_PCIE
When in doubt, say N.
source "drivers/pci/pcie/aer/Kconfig"
-
-#
-# PCI Express ASPM
-#
-config PCIEASPM
- bool "PCI Express ASPM support(Experimental)"
- depends on PCI && EXPERIMENTAL
- default y
- help
- This enables PCI Express ASPM (Active State Power Management) and
- Clock Power Management. ASPM supports state L0/L0s/L1.
-
- When in doubt, say N.
-config PCIEASPM_DEBUG
- bool "Debug PCI Express ASPM"
- depends on PCIEASPM
- default n
- help
- This enables PCI Express ASPM debug support. It will add per-device
- interface to control ASPM.
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 11f6bb1eae24..e00fb99acf44 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -2,9 +2,6 @@
# Makefile for PCI-Express PORT Driver
#
-# Build PCI Express ASPM if needed
-obj-$(CONFIG_PCIEASPM) += aspm.o
-
pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
deleted file mode 100644
index 1a5adeb10c95..000000000000
--- a/drivers/pci/pcie/aspm.c
+++ /dev/null
@@ -1,802 +0,0 @@
-/*
- * File: drivers/pci/pcie/aspm.c
- * Enabling PCIE link L0s/L1 state and Clock Power Management
- *
- * Copyright (C) 2007 Intel
- * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
- * Copyright (C) Shaohua Li (shaohua.li@intel.com)
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/pci_regs.h>
-#include <linux/errno.h>
-#include <linux/pm.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/aspm.h>
-#include <acpi/acpi_bus.h>
-#include <linux/pci-acpi.h>
-#include "../pci.h"
-
-#ifdef MODULE_PARAM_PREFIX
-#undef MODULE_PARAM_PREFIX
-#endif
-#define MODULE_PARAM_PREFIX "pcie_aspm."
-
-struct endpoint_state {
- unsigned int l0s_acceptable_latency;
- unsigned int l1_acceptable_latency;
-};
-
-struct pcie_link_state {
- struct list_head sibiling;
- struct pci_dev *pdev;
-
- /* ASPM state */
- unsigned int support_state;
- unsigned int enabled_state;
- unsigned int bios_aspm_state;
- /* upstream component */
- unsigned int l0s_upper_latency;
- unsigned int l1_upper_latency;
- /* downstream component */
- unsigned int l0s_down_latency;
- unsigned int l1_down_latency;
- /* Clock PM state*/
- unsigned int clk_pm_capable;
- unsigned int clk_pm_enabled;
- unsigned int bios_clk_state;
-
- /*
- * A pcie downstream port only has one slot under it, so at most there
- * are 8 functions
- */
- struct endpoint_state endpoints[8];
-};
-
-static int aspm_disabled;
-static DEFINE_MUTEX(aspm_lock);
-static LIST_HEAD(link_list);
-
-#define POLICY_DEFAULT 0 /* BIOS default setting */
-#define POLICY_PERFORMANCE 1 /* high performance */
-#define POLICY_POWERSAVE 2 /* high power saving */
-static int aspm_policy;
-static const char *policy_str[] = {
- [POLICY_DEFAULT] = "default",
- [POLICY_PERFORMANCE] = "performance",
- [POLICY_POWERSAVE] = "powersave"
-};
-
-static int policy_to_aspm_state(struct pci_dev *pdev)
-{
- struct pcie_link_state *link_state = pdev->link_state;
-
- switch (aspm_policy) {
- case POLICY_PERFORMANCE:
- /* Disable ASPM and Clock PM */
- return 0;
- case POLICY_POWERSAVE:
- /* Enable ASPM L0s/L1 */
- return PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
- case POLICY_DEFAULT:
- return link_state->bios_aspm_state;
- }
- return 0;
-}
-
-static int policy_to_clkpm_state(struct pci_dev *pdev)
-{
- struct pcie_link_state *link_state = pdev->link_state;
-
- switch (aspm_policy) {
- case POLICY_PERFORMANCE:
- /* Disable ASPM and Clock PM */
- return 0;
- case POLICY_POWERSAVE:
- /* Disable Clock PM */
- return 1;
- case POLICY_DEFAULT:
- return link_state->bios_clk_state;
- }
- return 0;
-}
-
-static void pcie_set_clock_pm(struct pci_dev *pdev, int enable)
-{
- struct pci_dev *child_dev;
- int pos;
- u16 reg16;
- struct pcie_link_state *link_state = pdev->link_state;
-
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- if (!pos)
- return;
- pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16);
- if (enable)
- reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN;
- else
- reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
- pci_write_config_word(child_dev, pos + PCI_EXP_LNKCTL, reg16);
- }
- link_state->clk_pm_enabled = !!enable;
-}
-
-static void pcie_check_clock_pm(struct pci_dev *pdev)
-{
- int pos;
- u32 reg32;
- u16 reg16;
- int capable = 1, enabled = 1;
- struct pci_dev *child_dev;
- struct pcie_link_state *link_state = pdev->link_state;
-
- /* All functions should have the same cap and state, take the worst */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- if (!pos)
- return;
- pci_read_config_dword(child_dev, pos + PCI_EXP_LNKCAP, &reg32);
- if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
- capable = 0;
- enabled = 0;
- break;
- }
- pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16);
- if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
- enabled = 0;
- }
- link_state->clk_pm_capable = capable;
- link_state->clk_pm_enabled = enabled;
- link_state->bios_clk_state = enabled;
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
-}
-
-/*
- * pcie_aspm_configure_common_clock: check if the 2 ends of a link
- * could use common clock. If they are, configure them to use the
- * common clock. That will reduce the ASPM state exit latency.
- */
-static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
-{
- int pos, child_pos;
- u16 reg16 = 0;
- struct pci_dev *child_dev;
- int same_clock = 1;
-
- /*
- * all functions of a slot should have the same Slot Clock
- * Configuration, so just check one function
- * */
- child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev,
- bus_list);
- BUG_ON(!child_dev->is_pcie);
-
- /* Check downstream component if bit Slot Clock Configuration is 1 */
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKSTA, &reg16);
- if (!(reg16 & PCI_EXP_LNKSTA_SLC))
- same_clock = 0;
-
- /* Check upstream component if bit Slot Clock Configuration is 1 */
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
- if (!(reg16 & PCI_EXP_LNKSTA_SLC))
- same_clock = 0;
-
- /* Configure downstream component, all functions */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
- &reg16);
- if (same_clock)
- reg16 |= PCI_EXP_LNKCTL_CCC;
- else
- reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
- reg16);
- }
-
- /* Configure upstream component */
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- if (same_clock)
- reg16 |= PCI_EXP_LNKCTL_CCC;
- else
- reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
-
- /* retrain link */
- reg16 |= PCI_EXP_LNKCTL_RL;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
-
- /* Wait for link training end */
- while (1) {
- pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
- if (!(reg16 & PCI_EXP_LNKSTA_LT))
- break;
- cpu_relax();
- }
-}
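
The retrain step above spins on the Link Training bit with no upper bound, so a device that never clears PCI_EXP_LNKSTA_LT would stall here forever. A minimal sketch of a bounded wait, assuming a hypothetical retry budget that is not part of this code:

        int retries = 1000;             /* illustrative bound only */

        while (retries--) {
                pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
                if (!(reg16 & PCI_EXP_LNKSTA_LT))
                        break;          /* link training finished */
                cpu_relax();
        }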
-
-/*
- * calc_L0S_latency: Convert L0s latency encoding to ns
- */
-static unsigned int calc_L0S_latency(unsigned int latency_encoding, int ac)
-{
- unsigned int ns = 64;
-
- if (latency_encoding == 0x7) {
- if (ac)
- ns = -1U;
- else
- ns = 5*1000; /* > 4us */
- } else
- ns *= (1 << latency_encoding);
- return ns;
-}
-
-/*
- * calc_L1_latency: Convert L1 latency encoding to ns
- */
-static unsigned int calc_L1_latency(unsigned int latency_encoding, int ac)
-{
- unsigned int ns = 1000;
-
- if (latency_encoding == 0x7) {
- if (ac)
- ns = -1U;
- else
- ns = 65*1000; /* > 64us */
- } else
- ns *= (1 << latency_encoding);
- return ns;
-}
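
Both helpers expand a 3-bit PCIe latency encoding into nanoseconds: L0s grows from a 64 ns base and L1 from a 1000 ns base, doubling per step, with the 0x7 encoding meaning "no limit" for an acceptable latency (ac != 0) and "greater than the maximum" for an exit latency. A few worked values, for orientation:

        /* Exit latencies (ac == 0):                                  */
        /*   calc_L0S_latency(0x0, 0) ->    64 ns                     */
        /*   calc_L0S_latency(0x3, 0) ->   512 ns   (64 << 3)         */
        /*   calc_L1_latency(0x2, 0)  ->  4000 ns   (1000 << 2)       */
        /*   calc_L1_latency(0x7, 0)  -> 65000 ns   (i.e. "> 64 us")  */
        /* Acceptable latency (ac == 1):                              */
        /*   calc_L0S_latency(0x7, 1) -> -1U        (no limit)        */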
-
-static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state,
- unsigned int *l0s, unsigned int *l1, unsigned int *enabled)
-{
- int pos;
- u16 reg16;
- u32 reg32;
- unsigned int latency;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
- *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
- if (*state != PCIE_LINK_STATE_L0S &&
- *state != (PCIE_LINK_STATE_L1|PCIE_LINK_STATE_L0S))
- * state = 0;
- if (*state == 0)
- return;
-
- latency = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
- *l0s = calc_L0S_latency(latency, 0);
- if (*state & PCIE_LINK_STATE_L1) {
- latency = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
- *l1 = calc_L1_latency(latency, 0);
- }
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- *enabled = reg16 & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1);
-}
-
-static void pcie_aspm_cap_init(struct pci_dev *pdev)
-{
- struct pci_dev *child_dev;
- u32 state, tmp;
- struct pcie_link_state *link_state = pdev->link_state;
-
- /* upstream component states */
- pcie_aspm_get_cap_device(pdev, &link_state->support_state,
- &link_state->l0s_upper_latency,
- &link_state->l1_upper_latency,
- &link_state->enabled_state);
- /* downstream component states, all functions have the same setting */
- child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev,
- bus_list);
- pcie_aspm_get_cap_device(child_dev, &state,
- &link_state->l0s_down_latency,
- &link_state->l1_down_latency,
- &tmp);
- link_state->support_state &= state;
- if (!link_state->support_state)
- return;
- link_state->enabled_state &= link_state->support_state;
- link_state->bios_aspm_state = link_state->enabled_state;
-
- /* ENDPOINT states*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- int pos;
- u32 reg32;
- unsigned int latency;
- struct endpoint_state *ep_state =
- &link_state->endpoints[PCI_FUNC(child_dev->devfn)];
-
- if (child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child_dev->pcie_type != PCI_EXP_TYPE_LEG_END)
- continue;
-
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_dword(child_dev, pos + PCI_EXP_DEVCAP, &reg32);
- latency = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
- latency = calc_L0S_latency(latency, 1);
- ep_state->l0s_acceptable_latency = latency;
- if (link_state->support_state & PCIE_LINK_STATE_L1) {
- latency = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
- latency = calc_L1_latency(latency, 1);
- ep_state->l1_acceptable_latency = latency;
- }
- }
-}
-
-static unsigned int __pcie_aspm_check_state_one(struct pci_dev *pdev,
- unsigned int state)
-{
- struct pci_dev *parent_dev, *tmp_dev;
- unsigned int latency, l1_latency = 0;
- struct pcie_link_state *link_state;
- struct endpoint_state *ep_state;
-
- parent_dev = pdev->bus->self;
- link_state = parent_dev->link_state;
- state &= link_state->support_state;
- if (state == 0)
- return 0;
- ep_state = &link_state->endpoints[PCI_FUNC(pdev->devfn)];
-
- /*
- * Check latency for endpoint device.
- * TBD: the latency from the endpoint to the root complex varies with
- * the upstream link state of each switch above the device. Here we just
- * do a simple check which assumes all links above the device can be in
- * L1 state, i.e. we only consider the worst case. If a switch's upstream
- * link can't be put into L0S/L1, this check is stricter than necessary.
- */
- tmp_dev = pdev;
- while (state & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
- parent_dev = tmp_dev->bus->self;
- link_state = parent_dev->link_state;
- if (state & PCIE_LINK_STATE_L0S) {
- latency = max_t(unsigned int,
- link_state->l0s_upper_latency,
- link_state->l0s_down_latency);
- if (latency > ep_state->l0s_acceptable_latency)
- state &= ~PCIE_LINK_STATE_L0S;
- }
- if (state & PCIE_LINK_STATE_L1) {
- latency = max_t(unsigned int,
- link_state->l1_upper_latency,
- link_state->l1_down_latency);
- if (latency + l1_latency >
- ep_state->l1_acceptable_latency)
- state &= ~PCIE_LINK_STATE_L1;
- }
- if (!parent_dev->bus->self) /* parent_dev is a root port */
- break;
- else {
- /*
- * parent_dev is the downstream port of a switch, make
- * tmp_dev the upstream port of the switch
- */
- tmp_dev = parent_dev->bus->self;
- /*
- * every switch on the path to the root complex needs 1 more
- * microsecond for L1. The spec doesn't mention L0S.
- */
- if (state & PCIE_LINK_STATE_L1)
- l1_latency += 1000;
- }
- }
- return state;
-}
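
To make the walk above concrete, here is a worked example with made-up numbers: an endpoint one switch below the root port, advertising 32 us of acceptable L1 latency, where each link reports an 8 us worst-case L1 exit latency and every switch crossed on the way up adds 1 us.

        /* root port --- switch --- endpoint (hypothetical topology)    */
        /* endpoint acceptable L1 latency:            32000 ns          */
        /* switch<->endpoint link, worst L1 exit:      8000 ns -> OK    */
        /* root<->switch link, worst L1 exit:          8000 ns          */
        /*   + 1000 ns for the one switch crossed:     9000 ns -> OK    */
        /* Both hops pass, so PCIE_LINK_STATE_L1 survives in "state".   */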
-
-static unsigned int pcie_aspm_check_state(struct pci_dev *pdev,
- unsigned int state)
-{
- struct pci_dev *child_dev;
-
- /* If no child, disable the link */
- if (list_empty(&pdev->subordinate->devices))
- return 0;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
- /*
- * If downstream component of a link is pci bridge, we
- * disable ASPM for now for the link
- * */
- state = 0;
- break;
- }
- if ((child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child_dev->pcie_type != PCI_EXP_TYPE_LEG_END))
- continue;
- /* Devices not in D0 don't need the latency check */
- if (child_dev->current_state == PCI_D1 ||
- child_dev->current_state == PCI_D2 ||
- child_dev->current_state == PCI_D3hot ||
- child_dev->current_state == PCI_D3cold)
- continue;
- state = __pcie_aspm_check_state_one(child_dev, state);
- }
- return state;
-}
-
-static void __pcie_aspm_config_one_dev(struct pci_dev *pdev, unsigned int state)
-{
- u16 reg16;
- int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- reg16 &= ~0x3;
- reg16 |= state;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
-}
-
-static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state)
-{
- struct pci_dev *child_dev;
- int valid = 1;
- struct pcie_link_state *link_state = pdev->link_state;
-
- /*
- * if the downstream component has pci bridge function, don't do ASPM
- * now
- */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
- valid = 0;
- break;
- }
- }
- if (!valid)
- return;
-
- /*
- * spec 2.0 suggests all functions should be configured the same
- * setting for ASPM. Enabling ASPM L1 should be done in upstream
- * component first and then downstream, and vice versa for disabling
- * ASPM L1. Spec doesn't mention L0S.
- */
- if (state & PCIE_LINK_STATE_L1)
- __pcie_aspm_config_one_dev(pdev, state);
-
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list)
- __pcie_aspm_config_one_dev(child_dev, state);
-
- if (!(state & PCIE_LINK_STATE_L1))
- __pcie_aspm_config_one_dev(pdev, state);
-
- link_state->enabled_state = state;
-}
-
-static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
- unsigned int state)
-{
- struct pcie_link_state *link_state = pdev->link_state;
-
- if (link_state->support_state == 0)
- return;
- state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
-
- /* state 0 means disabling aspm */
- state = pcie_aspm_check_state(pdev, state);
- if (link_state->enabled_state == state)
- return;
- __pcie_aspm_config_link(pdev, state);
-}
-
-/*
- * pcie_aspm_configure_link_state: enable/disable PCI express link state
- * @pdev: the root port or switch downstream port
- */
-static void pcie_aspm_configure_link_state(struct pci_dev *pdev,
- unsigned int state)
-{
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
- __pcie_aspm_configure_link_state(pdev, state);
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
-}
-
-static void free_link_state(struct pci_dev *pdev)
-{
- kfree(pdev->link_state);
- pdev->link_state = NULL;
-}
-
-/*
- * pcie_aspm_init_link_state: Initialize PCI Express link state.
- * It is called after the PCIe device and its children have been scanned.
- * @pdev: the root port or switch downstream port
- */
-void pcie_aspm_init_link_state(struct pci_dev *pdev)
-{
- unsigned int state;
- struct pcie_link_state *link_state;
- int error = 0;
-
- if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
- return;
- if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
- return;
- down_read(&pci_bus_sem);
- if (list_empty(&pdev->subordinate->devices))
- goto out;
-
- mutex_lock(&aspm_lock);
-
- link_state = kzalloc(sizeof(*link_state), GFP_KERNEL);
- if (!link_state)
- goto unlock_out;
- pdev->link_state = link_state;
-
- pcie_aspm_configure_common_clock(pdev);
-
- pcie_aspm_cap_init(pdev);
-
- /* config link state to avoid BIOS error */
- state = pcie_aspm_check_state(pdev, policy_to_aspm_state(pdev));
- __pcie_aspm_config_link(pdev, state);
-
- pcie_check_clock_pm(pdev);
-
- link_state->pdev = pdev;
- list_add(&link_state->sibiling, &link_list);
-
-unlock_out:
- if (error)
- free_link_state(pdev);
- mutex_unlock(&aspm_lock);
-out:
- up_read(&pci_bus_sem);
-}
-
-/* @pdev: the endpoint device */
-void pcie_aspm_exit_link_state(struct pci_dev *pdev)
-{
- struct pci_dev *parent = pdev->bus->self;
- struct pcie_link_state *link_state = parent->link_state;
-
- if (aspm_disabled || !pdev->is_pcie || !parent || !link_state)
- return;
- if (parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
- return;
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
-
- /*
- * All PCIe functions are in one slot; removing one function will remove
- * the whole slot, so just wait until every function has been removed
- */
- if (!list_empty(&parent->subordinate->devices))
- goto out;
-
- /* All functions are removed, so just disable ASPM for the link */
- __pcie_aspm_config_one_dev(parent, 0);
- list_del(&link_state->sibiling);
- /* Clock PM is for endpoint device */
-
- free_link_state(parent);
-out:
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
-}
-
-/* @pdev: the root port or switch downstream port */
-void pcie_aspm_pm_state_change(struct pci_dev *pdev)
-{
- struct pcie_link_state *link_state = pdev->link_state;
-
- if (aspm_disabled || !pdev->is_pcie || !pdev->link_state)
- return;
- if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
- return;
- /*
- * devices changed PM state, we should recheck if latency meets all
- * functions' requirement
- */
- pcie_aspm_configure_link_state(pdev, link_state->enabled_state);
-}
-
-/*
- * pci_disable_link_state - disable pci device's link state, so the link will
- * never enter specific states
- */
-void pci_disable_link_state(struct pci_dev *pdev, int state)
-{
- struct pci_dev *parent = pdev->bus->self;
- struct pcie_link_state *link_state;
-
- if (aspm_disabled || !pdev->is_pcie)
- return;
- if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
- pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
- parent = pdev;
- if (!parent)
- return;
-
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
- link_state = parent->link_state;
- link_state->support_state &=
- ~(state & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1));
- if (state & PCIE_LINK_STATE_CLKPM)
- link_state->clk_pm_capable = 0;
-
- __pcie_aspm_configure_link_state(parent, link_state->enabled_state);
- if (!link_state->clk_pm_capable && link_state->clk_pm_enabled)
- pcie_set_clock_pm(parent, 0);
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
-}
-EXPORT_SYMBOL(pci_disable_link_state);
-
-static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
-{
- int i;
- struct pci_dev *pdev;
- struct pcie_link_state *link_state;
-
- for (i = 0; i < ARRAY_SIZE(policy_str); i++)
- if (!strncmp(val, policy_str[i], strlen(policy_str[i])))
- break;
- if (i >= ARRAY_SIZE(policy_str))
- return -EINVAL;
- if (i == aspm_policy)
- return 0;
-
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
- aspm_policy = i;
- list_for_each_entry(link_state, &link_list, sibiling) {
- pdev = link_state->pdev;
- __pcie_aspm_configure_link_state(pdev,
- policy_to_aspm_state(pdev));
- if (link_state->clk_pm_capable &&
- link_state->clk_pm_enabled != policy_to_clkpm_state(pdev))
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
-
- }
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
- return 0;
-}
-
-static int pcie_aspm_get_policy(char *buffer, struct kernel_param *kp)
-{
- int i, cnt = 0;
- for (i = 0; i < ARRAY_SIZE(policy_str); i++)
- if (i == aspm_policy)
- cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
- else
- cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
- return cnt;
-}
-
-module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
- NULL, 0644);
-
-#ifdef CONFIG_PCIEASPM_DEBUG
-static ssize_t link_state_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct pci_dev *pci_device = to_pci_dev(dev);
- struct pcie_link_state *link_state = pci_device->link_state;
-
- return sprintf(buf, "%d\n", link_state->enabled_state);
-}
-
-static ssize_t link_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t n)
-{
- struct pci_dev *pci_device = to_pci_dev(dev);
- int state;
-
- if (n < 1)
- return -EINVAL;
- state = buf[0]-'0';
- if (state >= 0 && state <= 3) {
- /* setup link aspm state */
- pcie_aspm_configure_link_state(pci_device, state);
- return n;
- }
-
- return -EINVAL;
-}
-
-static ssize_t clk_ctl_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct pci_dev *pci_device = to_pci_dev(dev);
- struct pcie_link_state *link_state = pci_device->link_state;
-
- return sprintf(buf, "%d\n", link_state->clk_pm_enabled);
-}
-
-static ssize_t clk_ctl_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t n)
-{
- struct pci_dev *pci_device = to_pci_dev(dev);
- int state;
-
- if (n < 1)
- return -EINVAL;
- state = buf[0]-'0';
-
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
- pcie_set_clock_pm(pci_device, !!state);
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
-
- return n;
-}
-
-static DEVICE_ATTR(link_state, 0644, link_state_show, link_state_store);
-static DEVICE_ATTR(clk_ctl, 0644, clk_ctl_show, clk_ctl_store);
-
-static char power_group[] = "power";
-void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
-{
- struct pcie_link_state *link_state = pdev->link_state;
-
- if (!pdev->is_pcie || (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
- return;
-
- if (link_state->support_state)
- sysfs_add_file_to_group(&pdev->dev.kobj,
- &dev_attr_link_state.attr, power_group);
- if (link_state->clk_pm_capable)
- sysfs_add_file_to_group(&pdev->dev.kobj,
- &dev_attr_clk_ctl.attr, power_group);
-}
-
-void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
-{
- struct pcie_link_state *link_state = pdev->link_state;
-
- if (!pdev->is_pcie || (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
- return;
-
- if (link_state->support_state)
- sysfs_remove_file_from_group(&pdev->dev.kobj,
- &dev_attr_link_state.attr, power_group);
- if (link_state->clk_pm_capable)
- sysfs_remove_file_from_group(&pdev->dev.kobj,
- &dev_attr_clk_ctl.attr, power_group);
-}
-#endif
-
-static int __init pcie_aspm_disable(char *str)
-{
- aspm_disabled = 1;
- return 1;
-}
-
-__setup("pcie_noaspm", pcie_aspm_disable);
-
-static int __init pcie_aspm_init(void)
-{
- if (aspm_disabled)
- return 0;
- pci_osc_support_set(OSC_ACTIVE_STATE_PWR_SUPPORT|
- OSC_CLOCK_PWR_CAPABILITY_SUPPORT);
- return 0;
-}
-
-fs_initcall(pcie_aspm_init);
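
For reference, pci_disable_link_state() above is the hook an endpoint driver uses when its hardware cannot tolerate a given link state. A hedged sketch of how a probe routine might call it; the driver, device and erratum are placeholders, not taken from this patch:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        /* Hypothetical erratum: the device drops interrupts while the
         * link sits in L0s or L1, so keep the link out of both. */
        pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
                                     PCIE_LINK_STATE_L1);
        return 0;
}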
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8b505bd925aa..4d23b9fb551b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -9,7 +9,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
-#include <linux/aspm.h>
#include "pci.h"
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
@@ -434,7 +433,7 @@ pci_alloc_child_bus(struct pci_bus *parent, struct pci_dev *bridge, int busnr)
return child;
}
-struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
+struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
struct pci_bus *child;
@@ -934,8 +933,12 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
set_dev_node(&dev->dev, pcibus_to_node(bus));
dev->dev.dma_mask = &dev->dma_mask;
+ dev->dev.dma_parms = &dev->dma_parms;
dev->dev.coherent_dma_mask = 0xffffffffull;
+ pci_set_dma_max_seg_size(dev, 65536);
+ pci_set_dma_seg_boundary(dev, 0xffffffff);
+
/* Fix up broken headers */
pci_fixup_device(pci_fixup_header, dev);
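
The added lines give every PCI device conservative scatter-gather defaults through dev->dma_parms: segments are merged to at most 64 KiB and never cross a 4 GiB boundary. A driver whose hardware can do better may raise the limit in its probe routine; a small sketch, with the 1 MiB figure purely illustrative:

        /* Assume hardware that accepts 1 MiB scatterlist segments. */
        pci_set_dma_max_seg_size(pdev, 1024 * 1024);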
@@ -949,7 +952,7 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
up_write(&pci_bus_sem);
}
-struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
+struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
{
struct pci_dev *dev;
@@ -1002,10 +1005,6 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
break;
}
}
-
- if (bus->self)
- pcie_aspm_init_link_state(bus->self);
-
return nr;
}
@@ -1045,20 +1044,6 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
return max;
}
-unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
-{
- unsigned int max;
-
- max = pci_scan_child_bus(bus);
-
- /*
- * Make the discovered devices available.
- */
- pci_bus_add_devices(bus);
-
- return max;
-}
-
struct pci_bus * pci_create_bus(struct device *parent,
int bus, struct pci_ops *ops, void *sysdata)
{
@@ -1145,7 +1130,6 @@ EXPORT_SYMBOL(pci_scan_bus_parented);
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pci_add_new_bus);
-EXPORT_SYMBOL(pci_do_scan_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index ec4a82ba29a8..9684e1bde277 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -1,6 +1,5 @@
#include <linux/pci.h>
#include <linux/module.h>
-#include <linux/aspm.h>
#include "pci.h"
static void pci_free_resources(struct pci_dev *dev)
@@ -31,9 +30,6 @@ static void pci_stop_dev(struct pci_dev *dev)
dev->global_list.next = dev->global_list.prev = NULL;
up_write(&pci_bus_sem);
}
-
- if (dev->bus->self)
- pcie_aspm_exit_link_state(dev);
}
static void pci_destroy_dev(struct pci_dev *dev)
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 8a7232feb553..262b0439abe9 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -456,7 +456,7 @@ pci_bus_size_cardbus(struct pci_bus *bus)
}
}
-void pci_bus_size_bridges(struct pci_bus *bus)
+void __ref pci_bus_size_bridges(struct pci_bus *bus)
{
struct pci_dev *dev;
unsigned long mask, prefmask;
@@ -511,7 +511,7 @@ void pci_bus_size_bridges(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_size_bridges);
-void pci_bus_assign_resources(struct pci_bus *bus)
+void __ref pci_bus_assign_resources(struct pci_bus *bus)
{
struct pci_bus *b;
struct pci_dev *dev;
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index eb6abd3f9221..385e145e1acc 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -21,9 +21,9 @@
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/sizes.h>
+#include <asm/gpio.h>
#include <asm/arch/board.h>
-#include <asm/arch/gpio.h>
#include <asm/arch/at91rm9200_mc.h>
@@ -56,7 +56,7 @@ struct at91_cf_socket {
static inline int at91_cf_present(struct at91_cf_socket *cf)
{
- return !at91_get_gpio_value(cf->board->det_pin);
+ return !gpio_get_value(cf->board->det_pin);
}
/*--------------------------------------------------------------------------*/
@@ -100,9 +100,9 @@ static int at91_cf_get_status(struct pcmcia_socket *s, u_int *sp)
int vcc = cf->board->vcc_pin;
*sp = SS_DETECT | SS_3VCARD;
- if (!rdy || at91_get_gpio_value(rdy))
+ if (!rdy || gpio_get_value(rdy))
*sp |= SS_READY;
- if (!vcc || at91_get_gpio_value(vcc))
+ if (!vcc || gpio_get_value(vcc))
*sp |= SS_POWERON;
} else
*sp = 0;
@@ -121,10 +121,10 @@ at91_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
if (cf->board->vcc_pin) {
switch (s->Vcc) {
case 0:
- at91_set_gpio_value(cf->board->vcc_pin, 0);
+ gpio_set_value(cf->board->vcc_pin, 0);
break;
case 33:
- at91_set_gpio_value(cf->board->vcc_pin, 1);
+ gpio_set_value(cf->board->vcc_pin, 1);
break;
default:
return -EINVAL;
@@ -132,7 +132,7 @@ at91_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
}
/* toggle reset if needed */
- at91_set_gpio_value(cf->board->rst_pin, s->flags & SS_RESET);
+ gpio_set_value(cf->board->rst_pin, s->flags & SS_RESET);
pr_debug("%s: Vcc %d, io_irq %d, flags %04x csc %04x\n",
driver_name, s->Vcc, s->io_irq, s->flags, s->csc_mask);
@@ -239,11 +239,24 @@ static int __init at91_cf_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, cf);
/* must be a GPIO; ergo must trigger on both edges */
- status = request_irq(board->det_pin, at91_cf_irq, 0, driver_name, cf);
+ status = gpio_request(board->det_pin, "cf_det");
if (status < 0)
goto fail0;
+ status = request_irq(board->det_pin, at91_cf_irq, 0, driver_name, cf);
+ if (status < 0)
+ goto fail00;
device_init_wakeup(&pdev->dev, 1);
+ status = gpio_request(board->rst_pin, "cf_rst");
+ if (status < 0)
+ goto fail0a;
+
+ if (board->vcc_pin) {
+ status = gpio_request(board->vcc_pin, "cf_vcc");
+ if (status < 0)
+ goto fail0b;
+ }
+
/*
* The card driver will request this irq later as needed.
* but it causes lots of "irqNN: nobody cared" messages
@@ -251,16 +264,20 @@ static int __init at91_cf_probe(struct platform_device *pdev)
* (Note: DK board doesn't wire the IRQ pin...)
*/
if (board->irq_pin) {
+ status = gpio_request(board->irq_pin, "cf_irq");
+ if (status < 0)
+ goto fail0c;
status = request_irq(board->irq_pin, at91_cf_irq,
IRQF_SHARED, driver_name, cf);
if (status < 0)
- goto fail0a;
+ goto fail0d;
cf->socket.pci_irq = board->irq_pin;
} else
cf->socket.pci_irq = NR_IRQS + 1;
/* pcmcia layer only remaps "real" memory not iospace */
- cf->socket.io_offset = (unsigned long) ioremap(cf->phys_baseaddr + CF_IO_PHYS, SZ_2K);
+ cf->socket.io_offset = (unsigned long)
+ ioremap(cf->phys_baseaddr + CF_IO_PHYS, SZ_2K);
if (!cf->socket.io_offset) {
status = -ENXIO;
goto fail1;
@@ -296,11 +313,21 @@ fail2:
fail1:
if (cf->socket.io_offset)
iounmap((void __iomem *) cf->socket.io_offset);
- if (board->irq_pin)
+ if (board->irq_pin) {
free_irq(board->irq_pin, cf);
+fail0d:
+ gpio_free(board->irq_pin);
+ }
+fail0c:
+ if (board->vcc_pin)
+ gpio_free(board->vcc_pin);
+fail0b:
+ gpio_free(board->rst_pin);
fail0a:
device_init_wakeup(&pdev->dev, 0);
free_irq(board->det_pin, cf);
+fail00:
+ gpio_free(board->det_pin);
fail0:
kfree(cf);
return status;
@@ -313,13 +340,18 @@ static int __exit at91_cf_remove(struct platform_device *pdev)
struct resource *io = cf->socket.io[0].res;
pcmcia_unregister_socket(&cf->socket);
- if (board->irq_pin)
+ release_mem_region(io->start, io->end + 1 - io->start);
+ iounmap((void __iomem *) cf->socket.io_offset);
+ if (board->irq_pin) {
free_irq(board->irq_pin, cf);
+ gpio_free(board->irq_pin);
+ }
+ if (board->vcc_pin)
+ gpio_free(board->vcc_pin);
+ gpio_free(board->rst_pin);
device_init_wakeup(&pdev->dev, 0);
free_irq(board->det_pin, cf);
- iounmap((void __iomem *) cf->socket.io_offset);
- release_mem_region(io->start, io->end + 1 - io->start);
-
+ gpio_free(board->det_pin);
kfree(cf);
return 0;
}
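
The at91_cf changes above follow the usual claim-then-unwind pattern: each pin is reserved with gpio_request() before use and released with gpio_free() on every exit path, the error labels unwinding in reverse order of acquisition. The same idiom, condensed into a hedged sketch with placeholder pin names:

        status = gpio_request(det_pin, "cf_det");
        if (status)
                goto err;
        status = gpio_request(rst_pin, "cf_rst");
        if (status)
                goto err_det;
        /* ... request_irq(), register the socket, etc. ... */
        return 0;

err_det:
        gpio_free(det_pin);
err:
        return status;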
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index a1bd763b4e33..714baaeb6da1 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -143,7 +143,7 @@ int read_cb_mem(struct pcmcia_socket * s, int space, u_int addr, u_int len, void
/* Config space? */
if (space == 0) {
if (addr + len > 0x100)
- goto fail;
+ goto failput;
for (; len; addr++, ptr++, len--)
pci_read_config_byte(dev, addr, ptr);
return 0;
@@ -171,6 +171,8 @@ int read_cb_mem(struct pcmcia_socket * s, int space, u_int addr, u_int len, void
memcpy_fromio(ptr, s->cb_cis_virt + addr, len);
return 0;
+failput:
+ pci_dev_put(dev);
fail:
memset(ptr, 0xff, len);
return -1;
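
The new failput label matters because the config-space early return used to skip the pci_dev_put() that balances the reference taken on the CardBus device earlier in read_cb_mem(). The general rule is that every successful pci_get_*() lookup pins the device and must be paired with pci_dev_put(); a hedged sketch, where the lookup shown is illustrative rather than the one used here:

        struct pci_dev *dev = pci_get_slot(bus, 0);     /* reference taken */

        if (!dev)
                return -ENODEV;
        /* ... read from the device ... */
        pci_dev_put(dev);                               /* reference dropped */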
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 15c18f5246d6..5a85871f5ee9 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -865,11 +865,12 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
ds_dbg(1, "trying to load CIS file %s\n", filename);
if (strlen(filename) > 14) {
- printk(KERN_WARNING "pcmcia: CIS filename is too long\n");
+ printk(KERN_WARNING "pcmcia: CIS filename is too long [%s]\n",
+ filename);
return -EINVAL;
}
- snprintf(path, 20, "%s", filename);
+ snprintf(path, sizeof(path), "%s", filename);
if (request_firmware(&fw, path, &dev->dev) == 0) {
if (fw->size >= CISTPL_MAX_CIS_SIZE) {
@@ -1130,8 +1131,6 @@ static int runtime_suspend(struct device *dev)
down(&dev->sem);
rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND);
up(&dev->sem);
- if (!rc)
- dev->power.power_state.event = PM_EVENT_SUSPEND;
return rc;
}
@@ -1142,8 +1141,6 @@ static void runtime_resume(struct device *dev)
down(&dev->sem);
rc = pcmcia_dev_resume(dev);
up(&dev->sem);
- if (!rc)
- dev->power.power_state.event = PM_EVENT_ON;
}
/************************ per-device sysfs output ***************************/
@@ -1265,6 +1262,9 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
struct pcmcia_driver *p_drv = NULL;
int ret = 0;
+ if (p_dev->suspended)
+ return 0;
+
ds_dbg(2, "suspending %s\n", dev->bus_id);
if (dev->driver)
@@ -1301,6 +1301,9 @@ static int pcmcia_dev_resume(struct device * dev)
struct pcmcia_driver *p_drv = NULL;
int ret = 0;
+ if (!p_dev->suspended)
+ return 0;
+
ds_dbg(2, "resuming %s\n", dev->bus_id);
if (dev->driver)
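
The added p_dev->suspended tests make the suspend and resume paths idempotent: a device already in the requested state is left alone instead of being suspended or resumed a second time when both the driver model and the socket layer trigger the callbacks. Reduced to a hedged sketch of the guard:

        if (p_dev->suspended)           /* already suspended, nothing to do */
                return 0;
        /* ... suspend the driver and the device ... */
        p_dev->suspended = 1;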
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index df21e2d16f87..749515534cc0 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -82,7 +82,7 @@ struct socket_info {
1 = empty socket,
2 = card but not initialized,
3 = operational card */
- kio_addr_t io_base; /* base io address of the socket */
+ unsigned int io_base; /* base io address of the socket */
struct pcmcia_socket socket;
struct pci_dev *dev; /* The PCI device for the socket */
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index 839bb1c0db58..32a2ab119798 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -164,7 +164,7 @@ struct i82365_socket {
u_short type, flags;
struct pcmcia_socket socket;
unsigned int number;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
u_short psock;
u_char cs_irq, intr;
union {
@@ -238,7 +238,7 @@ static u_char i365_get(u_short sock, u_short reg)
unsigned long flags;
spin_lock_irqsave(&bus_lock,flags);
{
- kio_addr_t port = socket[sock].ioaddr;
+ unsigned int port = socket[sock].ioaddr;
u_char val;
reg = I365_REG(socket[sock].psock, reg);
outb(reg, port); val = inb(port+1);
@@ -252,7 +252,7 @@ static void i365_set(u_short sock, u_short reg, u_char data)
unsigned long flags;
spin_lock_irqsave(&bus_lock,flags);
{
- kio_addr_t port = socket[sock].ioaddr;
+ unsigned int port = socket[sock].ioaddr;
u_char val = I365_REG(socket[sock].psock, reg);
outb(val, port); outb(data, port+1);
spin_unlock_irqrestore(&bus_lock,flags);
@@ -588,7 +588,7 @@ static int to_cycles(int ns)
/*====================================================================*/
-static int __init identify(kio_addr_t port, u_short sock)
+static int __init identify(unsigned int port, u_short sock)
{
u_char val;
int type = -1;
@@ -659,7 +659,7 @@ static int __init identify(kio_addr_t port, u_short sock)
static int __init is_alive(u_short sock)
{
u_char stat;
- kio_addr_t start, stop;
+ unsigned int start, stop;
stat = i365_get(sock, I365_STATUS);
start = i365_get_pair(sock, I365_IO(0)+I365_W_START);
@@ -678,7 +678,7 @@ static int __init is_alive(u_short sock)
/*====================================================================*/
-static void __init add_socket(kio_addr_t port, int psock, int type)
+static void __init add_socket(unsigned int port, int psock, int type)
{
socket[sockets].ioaddr = port;
socket[sockets].psock = psock;
@@ -698,7 +698,7 @@ static void __init add_pcic(int ns, int type)
base = sockets-ns;
if (base == 0) printk("\n");
printk(KERN_INFO " %s", pcic[type].name);
- printk(" ISA-to-PCMCIA at port %#lx ofs 0x%02x",
+ printk(" ISA-to-PCMCIA at port %#x ofs 0x%02x",
t->ioaddr, t->psock*0x40);
printk(", %d socket%s\n", ns, ((ns > 1) ? "s" : ""));
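
Dropping kio_addr_t is mechanical, but every printk that printed such a value has to move from %#lx to %#x at the same time: the old type was an unsigned long (hence the %#lx), the replacement is an unsigned int, and a mismatched specifier earns a compiler warning and can print garbage on 64-bit builds. A minimal sketch of the before and after:

        unsigned long old_port = 0x3e0;         /* kio_addr_t style */
        unsigned int  new_port = 0x3e0;

        printk(KERN_INFO "port %#lx\n", old_port);      /* %lx for long */
        printk(KERN_INFO "port %#x\n", new_port);       /* %x for int   */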
@@ -772,7 +772,7 @@ static struct pnp_dev *i82365_pnpdev;
static void __init isa_probe(void)
{
int i, j, sock, k, ns, id;
- kio_addr_t port;
+ unsigned int port;
#ifdef CONFIG_PNP
struct isapnp_device_id *devid;
struct pnp_dev *dev;
@@ -1053,7 +1053,7 @@ static int i365_set_io_map(u_short sock, struct pccard_io_map *io)
u_char map, ioctl;
debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, "
- "%#lx-%#lx)\n", sock, io->map, io->flags,
+ "%#x-%#x)\n", sock, io->map, io->flags,
io->speed, io->start, io->stop);
map = io->map;
if ((map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) ||
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 91da15b5a81e..3616da227152 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -58,7 +58,7 @@ typedef struct pcc_socket {
u_short type, flags;
struct pcmcia_socket socket;
unsigned int number;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
u_long mapaddr;
u_long base; /* PCC register base */
u_char cs_irq1, cs_irq2, intr;
@@ -298,7 +298,8 @@ static int __init is_alive(u_short sock)
return 0;
}
-static void add_pcc_socket(ulong base, int irq, ulong mapaddr, kio_addr_t ioaddr)
+static void add_pcc_socket(ulong base, int irq, ulong mapaddr,
+ unsigned int ioaddr)
{
pcc_socket_t *t = &socket[pcc_sockets];
@@ -738,7 +739,7 @@ static int __init init_m32r_pcc(void)
#else /* CONFIG_PLAT_USRV */
{
ulong base, mapaddr;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
for (i = 0 ; i < M32R_MAX_PCC ; i++) {
base = (ulong)PLD_CFRSTCR;
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index c5e0d89c3ece..2b42b7155e34 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -65,7 +65,7 @@ typedef struct pcc_socket {
u_short type, flags;
struct pcmcia_socket socket;
unsigned int number;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
u_long mapaddr;
u_long base; /* PCC register base */
u_char cs_irq, intr;
@@ -310,7 +310,8 @@ static int __init is_alive(u_short sock)
return 0;
}
-static void add_pcc_socket(ulong base, int irq, ulong mapaddr, kio_addr_t ioaddr)
+static void add_pcc_socket(ulong base, int irq, ulong mapaddr,
+ unsigned int ioaddr)
{
pcc_socket_t *t = &socket[pcc_sockets];
@@ -368,7 +369,7 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
handled = 1;
irc = pcc_get(i, PCIRC);
irc >>=16;
- debug(2, "m32r-pcc:interrput: socket %d pcirc 0x%02x ", i, irc);
+ debug(2, "m32r-pcc:interrupt: socket %d pcirc 0x%02x ", i, irc);
if (!irc)
continue;
@@ -491,7 +492,7 @@ static int _pcc_set_io_map(u_short sock, struct pccard_io_map *io)
u_char map;
debug(3, "m32r-pcc: SetIOMap(%d, %d, %#2.2x, %d ns, "
- "%#lx-%#lx)\n", sock, io->map, io->flags,
+ "%#x-%#x)\n", sock, io->map, io->flags,
io->speed, io->start, io->stop);
map = io->map;
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index d182760f035b..ac70d2cb7dd4 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -851,7 +851,7 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t * state)
I tried to control the CxOE signal with SS_OUTPUT_ENA,
but the reset signal seems connected via the 541.
If the CxOE is left high are some signals tristated and
- no pullups are present -> the cards act wierd.
+ no pullups are present -> the cards act weird.
So right now the buffers are enabled if the power is on. */
if (state->Vcc || state->Vpp)
@@ -1174,8 +1174,10 @@ static int __init m8xx_probe(struct of_device *ofdev,
pcmcia_schlvl = irq_of_parse_and_map(np, 0);
hwirq = irq_map[pcmcia_schlvl].hwirq;
- if (pcmcia_schlvl < 0)
+ if (pcmcia_schlvl < 0) {
+ iounmap(pcmcia);
return -EINVAL;
+ }
m8xx_pgcrx[0] = &pcmcia->pcmc_pgcra;
m8xx_pgcrx[1] = &pcmcia->pcmc_pgcrb;
@@ -1189,6 +1191,7 @@ static int __init m8xx_probe(struct of_device *ofdev,
driver_name, socket)) {
pcmcia_error("Cannot allocate IRQ %u for SCHLVL!\n",
pcmcia_schlvl);
+ iounmap(pcmcia);
return -1;
}
@@ -1284,6 +1287,7 @@ static int m8xx_remove(struct of_device *ofdev)
}
for (i = 0; i < PCMCIA_SOCKETS_NO; i++)
pcmcia_unregister_socket(&socket[i].socket);
+ iounmap(pcmcia);
free_irq(pcmcia_schlvl, NULL);
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 0ce39de834c4..1d128fbd1a92 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -65,23 +65,23 @@ extern int ds_pc_debug;
* Special stuff for managing IO windows, because they are scarce
*/
-static int alloc_io_space(struct pcmcia_socket *s, u_int attr, ioaddr_t *base,
- ioaddr_t num, u_int lines)
+static int alloc_io_space(struct pcmcia_socket *s, u_int attr,
+ unsigned int *base, unsigned int num, u_int lines)
{
int i;
- kio_addr_t try, align;
+ unsigned int try, align;
align = (*base) ? (lines ? 1<<lines : 0) : 1;
if (align && (align < num)) {
if (*base) {
- ds_dbg(s, 0, "odd IO request: num %#x align %#lx\n",
+ ds_dbg(s, 0, "odd IO request: num %#x align %#x\n",
num, align);
align = 0;
} else
while (align && (align < num)) align <<= 1;
}
if (*base & ~(align-1)) {
- ds_dbg(s, 0, "odd IO request: base %#x align %#lx\n",
+ ds_dbg(s, 0, "odd IO request: base %#x align %#x\n",
*base, align);
align = 0;
}
@@ -132,8 +132,8 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr, ioaddr_t *base,
} /* alloc_io_space */
-static void release_io_space(struct pcmcia_socket *s, ioaddr_t base,
- ioaddr_t num)
+static void release_io_space(struct pcmcia_socket *s, unsigned int base,
+ unsigned int num)
{
int i;
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index bfcaad6021cf..a8d100707721 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -186,15 +186,16 @@ static int sub_interval(struct resource_map *map, u_long base, u_long num)
======================================================================*/
#ifdef CONFIG_PCMCIA_PROBE
-static void do_io_probe(struct pcmcia_socket *s, kio_addr_t base, kio_addr_t num)
+static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
+ unsigned int num)
{
struct resource *res;
struct socket_data *s_data = s->resource_data;
- kio_addr_t i, j, bad;
+ unsigned int i, j, bad;
int any;
u_char *b, hole, most;
- printk(KERN_INFO "cs: IO port probe %#lx-%#lx:",
+ printk(KERN_INFO "cs: IO port probe %#x-%#x:",
base, base+num-1);
/* First, what does a floating port look like? */
@@ -233,7 +234,7 @@ static void do_io_probe(struct pcmcia_socket *s, kio_addr_t base, kio_addr_t num
} else {
if (bad) {
sub_interval(&s_data->io_db, bad, i-bad);
- printk(" %#lx-%#lx", bad, i-1);
+ printk(" %#x-%#x", bad, i-1);
bad = 0;
}
}
@@ -244,7 +245,7 @@ static void do_io_probe(struct pcmcia_socket *s, kio_addr_t base, kio_addr_t num
return;
} else {
sub_interval(&s_data->io_db, bad, i-bad);
- printk(" %#lx-%#lx", bad, i-1);
+ printk(" %#x-%#x", bad, i-1);
}
}
diff --git a/drivers/pcmcia/sa1100_jornada720.c b/drivers/pcmcia/sa1100_jornada720.c
index af485ae38602..6284c35dabc6 100644
--- a/drivers/pcmcia/sa1100_jornada720.c
+++ b/drivers/pcmcia/sa1100_jornada720.c
@@ -101,7 +101,7 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = {
.socket_suspend = sa1111_pcmcia_socket_suspend,
};
-int __init pcmcia_jornada720_init(struct device *dev)
+int __devinit pcmcia_jornada720_init(struct device *dev)
{
int ret = -ENODEV;
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index 749ac3710914..5792bd5c54f9 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -719,7 +719,7 @@ static int tcic_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
u_short base, len, ioctl;
debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, "
- "%#lx-%#lx)\n", psock, io->map, io->flags,
+ "%#x-%#x)\n", psock, io->map, io->flags,
io->speed, io->start, io->stop);
if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) ||
(io->stop < io->start)) return -EINVAL;
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index a262762c5b88..12a1645a2e43 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -161,8 +161,7 @@ static int pnp_bus_suspend(struct device *dev, pm_message_t state)
return error;
}
- if (!(pnp_drv->flags & PNP_DRIVER_RES_DO_NOT_CHANGE) &&
- pnp_can_disable(pnp_dev)) {
+ if (pnp_can_disable(pnp_dev)) {
error = pnp_stop_dev(pnp_dev);
if (error)
return error;
@@ -185,14 +184,17 @@ static int pnp_bus_resume(struct device *dev)
if (pnp_dev->protocol && pnp_dev->protocol->resume)
pnp_dev->protocol->resume(pnp_dev);
- if (!(pnp_drv->flags & PNP_DRIVER_RES_DO_NOT_CHANGE)) {
+ if (pnp_can_write(pnp_dev)) {
error = pnp_start_dev(pnp_dev);
if (error)
return error;
}
- if (pnp_drv->resume)
- return pnp_drv->resume(pnp_dev);
+ if (pnp_drv->resume) {
+ error = pnp_drv->resume(pnp_dev);
+ if (error)
+ return error;
+ }
return 0;
}
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 31548044fdde..982658477a58 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -10,9 +10,12 @@
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/types.h>
+#include <linux/pnp.h>
#include <linux/stat.h>
#include <linux/ctype.h>
#include <linux/slab.h>
+#include <linux/mutex.h>
+
#include <asm/uaccess.h>
#include "base.h"
@@ -315,8 +318,6 @@ static ssize_t pnp_show_current_resources(struct device *dmdev,
return ret;
}
-extern struct semaphore pnp_res_mutex;
-
static ssize_t
pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
const char *ubuf, size_t count)
@@ -361,10 +362,10 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
goto done;
}
if (!strnicmp(buf, "get", 3)) {
- down(&pnp_res_mutex);
+ mutex_lock(&pnp_res_mutex);
if (pnp_can_read(dev))
dev->protocol->get(dev, &dev->res);
- up(&pnp_res_mutex);
+ mutex_unlock(&pnp_res_mutex);
goto done;
}
if (!strnicmp(buf, "set", 3)) {
@@ -373,7 +374,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
goto done;
buf += 3;
pnp_init_resource_table(&dev->res);
- down(&pnp_res_mutex);
+ mutex_lock(&pnp_res_mutex);
while (1) {
while (isspace(*buf))
++buf;
@@ -455,7 +456,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
}
break;
}
- up(&pnp_res_mutex);
+ mutex_unlock(&pnp_res_mutex);
goto done;
}
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index c6b3d4e63ccc..c28caf272c11 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -12,9 +12,10 @@
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
+#include <linux/mutex.h>
#include "base.h"
-DECLARE_MUTEX(pnp_res_mutex);
+DEFINE_MUTEX(pnp_res_mutex);
static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
{
@@ -297,7 +298,7 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
if (!pnp_can_configure(dev))
return -ENODEV;
- down(&pnp_res_mutex);
+ mutex_lock(&pnp_res_mutex);
pnp_clean_resource_table(&dev->res); /* start with a fresh slate */
if (dev->independent) {
port = dev->independent->port;
@@ -366,12 +367,12 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
} else if (dev->dependent)
goto fail;
- up(&pnp_res_mutex);
+ mutex_unlock(&pnp_res_mutex);
return 1;
fail:
pnp_clean_resource_table(&dev->res);
- up(&pnp_res_mutex);
+ mutex_unlock(&pnp_res_mutex);
return 0;
}
@@ -396,7 +397,7 @@ int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table *res,
return -ENOMEM;
*bak = dev->res;
- down(&pnp_res_mutex);
+ mutex_lock(&pnp_res_mutex);
dev->res = *res;
if (!(mode & PNP_CONFIG_FORCE)) {
for (i = 0; i < PNP_MAX_PORT; i++) {
@@ -416,14 +417,14 @@ int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table *res,
goto fail;
}
}
- up(&pnp_res_mutex);
+ mutex_unlock(&pnp_res_mutex);
kfree(bak);
return 0;
fail:
dev->res = *bak;
- up(&pnp_res_mutex);
+ mutex_unlock(&pnp_res_mutex);
kfree(bak);
return -EINVAL;
}
@@ -513,7 +514,7 @@ int pnp_activate_dev(struct pnp_dev *dev)
int error;
if (dev->active)
- return 0; /* the device is already active */
+ return 0;
/* ensure resources are allocated */
if (pnp_auto_config_dev(dev))
@@ -524,7 +525,7 @@ int pnp_activate_dev(struct pnp_dev *dev)
return error;
dev->active = 1;
- return 1;
+ return 0;
}
/**
@@ -538,7 +539,7 @@ int pnp_disable_dev(struct pnp_dev *dev)
int error;
if (!dev->active)
- return 0; /* the device is already disabled */
+ return 0;
error = pnp_stop_dev(dev);
if (error)
@@ -547,11 +548,11 @@ int pnp_disable_dev(struct pnp_dev *dev)
dev->active = 0;
/* release the resources so that other devices can use them */
- down(&pnp_res_mutex);
+ mutex_lock(&pnp_res_mutex);
pnp_clean_resource_table(&dev->res);
- up(&pnp_res_mutex);
+ mutex_unlock(&pnp_res_mutex);
- return 1;
+ return 0;
}
/**
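
pnp_res_mutex used to be a semaphore created with DECLARE_MUTEX() and taken with down()/up(); the hunks above turn it into a real struct mutex, which documents the intent, is cheaper, and gets lockdep coverage. The conversion pattern, as a short sketch with a placeholder lock name:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);      /* was: DECLARE_MUTEX(example_lock) */

static void touch_resource_table(void)
{
        mutex_lock(&example_lock);      /* was: down(&example_lock) */
        /* ... modify the shared resource table ... */
        mutex_unlock(&example_lock);    /* was: up(&example_lock) */
}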
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index dada89906314..662b4c279cfc 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -183,7 +183,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
if (ACPI_SUCCESS(status))
dev->capabilities |= PNP_CONFIGURABLE;
dev->capabilities |= PNP_READ;
- if (device->flags.dynamic_status)
+ if (device->flags.dynamic_status && (dev->capabilities & PNP_CONFIGURABLE))
dev->capabilities |= PNP_WRITE;
if (device->flags.removable)
dev->capabilities |= PNP_REMOVABLE;
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 6b9840cce0f4..6aa231ef642d 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -391,8 +391,8 @@ acpi_status pnpacpi_parse_allocated_resource(acpi_handle handle,
pnpacpi_allocated_resource, res);
}
-static void pnpacpi_parse_dma_option(struct pnp_option *option,
- struct acpi_resource_dma *p)
+static __init void pnpacpi_parse_dma_option(struct pnp_option *option,
+ struct acpi_resource_dma *p)
{
int i;
struct pnp_dma *dma;
@@ -411,8 +411,8 @@ static void pnpacpi_parse_dma_option(struct pnp_option *option,
pnp_register_dma_resource(option, dma);
}
-static void pnpacpi_parse_irq_option(struct pnp_option *option,
- struct acpi_resource_irq *p)
+static __init void pnpacpi_parse_irq_option(struct pnp_option *option,
+ struct acpi_resource_irq *p)
{
int i;
struct pnp_irq *irq;
@@ -431,8 +431,8 @@ static void pnpacpi_parse_irq_option(struct pnp_option *option,
pnp_register_irq_resource(option, irq);
}
-static void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
- struct acpi_resource_extended_irq *p)
+static __init void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
+ struct acpi_resource_extended_irq *p)
{
int i;
struct pnp_irq *irq;
@@ -451,8 +451,8 @@ static void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
pnp_register_irq_resource(option, irq);
}
-static void pnpacpi_parse_port_option(struct pnp_option *option,
- struct acpi_resource_io *io)
+static __init void pnpacpi_parse_port_option(struct pnp_option *option,
+ struct acpi_resource_io *io)
{
struct pnp_port *port;
@@ -470,8 +470,8 @@ static void pnpacpi_parse_port_option(struct pnp_option *option,
pnp_register_port_resource(option, port);
}
-static void pnpacpi_parse_fixed_port_option(struct pnp_option *option,
- struct acpi_resource_fixed_io *io)
+static __init void pnpacpi_parse_fixed_port_option(struct pnp_option *option,
+ struct acpi_resource_fixed_io *io)
{
struct pnp_port *port;
@@ -487,8 +487,8 @@ static void pnpacpi_parse_fixed_port_option(struct pnp_option *option,
pnp_register_port_resource(option, port);
}
-static void pnpacpi_parse_mem24_option(struct pnp_option *option,
- struct acpi_resource_memory24 *p)
+static __init void pnpacpi_parse_mem24_option(struct pnp_option *option,
+ struct acpi_resource_memory24 *p)
{
struct pnp_mem *mem;
@@ -508,8 +508,8 @@ static void pnpacpi_parse_mem24_option(struct pnp_option *option,
pnp_register_mem_resource(option, mem);
}
-static void pnpacpi_parse_mem32_option(struct pnp_option *option,
- struct acpi_resource_memory32 *p)
+static __init void pnpacpi_parse_mem32_option(struct pnp_option *option,
+ struct acpi_resource_memory32 *p)
{
struct pnp_mem *mem;
@@ -529,8 +529,8 @@ static void pnpacpi_parse_mem32_option(struct pnp_option *option,
pnp_register_mem_resource(option, mem);
}
-static void pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
- struct acpi_resource_fixed_memory32 *p)
+static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
+ struct acpi_resource_fixed_memory32 *p)
{
struct pnp_mem *mem;
@@ -549,8 +549,8 @@ static void pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
pnp_register_mem_resource(option, mem);
}
-static void pnpacpi_parse_address_option(struct pnp_option *option,
- struct acpi_resource *r)
+static __init void pnpacpi_parse_address_option(struct pnp_option *option,
+ struct acpi_resource *r)
{
struct acpi_resource_address64 addr, *p = &addr;
acpi_status status;
@@ -596,8 +596,8 @@ struct acpipnp_parse_option_s {
struct pnp_dev *dev;
};
-static acpi_status pnpacpi_option_resource(struct acpi_resource *res,
- void *data)
+static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
+ void *data)
{
int priority = 0;
struct acpipnp_parse_option_s *parse_data = data;
@@ -696,8 +696,8 @@ static acpi_status pnpacpi_option_resource(struct acpi_resource *res,
return AE_OK;
}
-acpi_status pnpacpi_parse_resource_option_data(acpi_handle handle,
- struct pnp_dev * dev)
+acpi_status __init pnpacpi_parse_resource_option_data(acpi_handle handle,
+ struct pnp_dev *dev)
{
acpi_status status;
struct acpipnp_parse_option_s parse_data;
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index e33e03f71084..f7e67197a568 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -315,7 +315,7 @@ struct pnp_protocol pnpbios_protocol = {
.disable = pnpbios_disable_resources,
};
-static int insert_device(struct pnp_bios_node *node)
+static int __init insert_device(struct pnp_bios_node *node)
{
struct list_head *pos;
struct pnp_dev *dev;
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index 3fabf11b0027..caade3531416 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -262,8 +262,8 @@ len_err:
* Resource Configuration Options
*/
-static void pnpbios_parse_mem_option(unsigned char *p, int size,
- struct pnp_option *option)
+static __init void pnpbios_parse_mem_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
struct pnp_mem *mem;
@@ -278,8 +278,8 @@ static void pnpbios_parse_mem_option(unsigned char *p, int size,
pnp_register_mem_resource(option, mem);
}
-static void pnpbios_parse_mem32_option(unsigned char *p, int size,
- struct pnp_option *option)
+static __init void pnpbios_parse_mem32_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
struct pnp_mem *mem;
@@ -294,8 +294,8 @@ static void pnpbios_parse_mem32_option(unsigned char *p, int size,
pnp_register_mem_resource(option, mem);
}
-static void pnpbios_parse_fixed_mem32_option(unsigned char *p, int size,
- struct pnp_option *option)
+static __init void pnpbios_parse_fixed_mem32_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
struct pnp_mem *mem;
@@ -309,7 +309,7 @@ static void pnpbios_parse_fixed_mem32_option(unsigned char *p, int size,
pnp_register_mem_resource(option, mem);
}
-static void pnpbios_parse_irq_option(unsigned char *p, int size,
+static __init void pnpbios_parse_irq_option(unsigned char *p, int size,
struct pnp_option *option)
{
struct pnp_irq *irq;
@@ -327,7 +327,7 @@ static void pnpbios_parse_irq_option(unsigned char *p, int size,
pnp_register_irq_resource(option, irq);
}
-static void pnpbios_parse_dma_option(unsigned char *p, int size,
+static __init void pnpbios_parse_dma_option(unsigned char *p, int size,
struct pnp_option *option)
{
struct pnp_dma *dma;
@@ -340,8 +340,8 @@ static void pnpbios_parse_dma_option(unsigned char *p, int size,
pnp_register_dma_resource(option, dma);
}
-static void pnpbios_parse_port_option(unsigned char *p, int size,
- struct pnp_option *option)
+static __init void pnpbios_parse_port_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
struct pnp_port *port;
@@ -356,8 +356,8 @@ static void pnpbios_parse_port_option(unsigned char *p, int size,
pnp_register_port_resource(option, port);
}
-static void pnpbios_parse_fixed_port_option(unsigned char *p, int size,
- struct pnp_option *option)
+static __init void pnpbios_parse_fixed_port_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
struct pnp_port *port;
@@ -371,9 +371,9 @@ static void pnpbios_parse_fixed_port_option(unsigned char *p, int size,
pnp_register_port_resource(option, port);
}
-static unsigned char *pnpbios_parse_resource_option_data(unsigned char *p,
- unsigned char *end,
- struct pnp_dev *dev)
+static __init unsigned char *
+pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
+ struct pnp_dev *dev)
{
unsigned int len, tag;
int priority = 0;
@@ -781,7 +781,8 @@ len_err:
* Core Parsing Functions
*/
-int pnpbios_parse_data_stream(struct pnp_dev *dev, struct pnp_bios_node *node)
+int __init pnpbios_parse_data_stream(struct pnp_dev *dev,
+ struct pnp_bios_node *node)
{
unsigned char *p = (char *)node->data;
unsigned char *end = (char *)(node->data + node->size);
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index e903b8c2b1fa..4065139753b6 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/pnp.h>
#include <linux/io.h>
+#include <linux/dmi.h>
#include <linux/kallsyms.h>
#include "base.h"
@@ -108,6 +109,46 @@ static void quirk_sb16audio_resources(struct pnp_dev *dev)
"pnp: SB audio device quirk - increasing port range\n");
}
+static void quirk_supermicro_h8dce_system(struct pnp_dev *dev)
+{
+ int i;
+ static struct dmi_system_id supermicro_h8dce[] = {
+ {
+ .ident = "Supermicro H8DCE",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "H8DCE"),
+ },
+ },
+ { }
+ };
+
+ if (!dmi_check_system(supermicro_h8dce))
+ return;
+
+ /*
+ * On the Supermicro H8DCE, there's a system device with resources
+ * that overlap BAR 6 of the built-in SATA PCI adapter. If the PNP
+ * system device claims them, the sata_nv driver won't be able to.
+ * More details at:
+ * https://bugzilla.redhat.com/show_bug.cgi?id=280641
+ * https://bugzilla.redhat.com/show_bug.cgi?id=313491
+ * http://lkml.org/lkml/2008/1/9/449
+ * http://thread.gmane.org/gmane.linux.acpi.devel/27312
+ */
+ for (i = 0; i < PNP_MAX_MEM; i++) {
+ if (pnp_mem_valid(dev, i) && pnp_mem_len(dev, i) &&
+ (pnp_mem_start(dev, i) & 0xdfef0000) == 0xdfef0000) {
+ dev_warn(&dev->dev, "disabling 0x%llx-0x%llx to prevent"
+ " conflict with sata_nv PCI device\n",
+ (unsigned long long) pnp_mem_start(dev, i),
+ (unsigned long long) (pnp_mem_start(dev, i) +
+ pnp_mem_len(dev, i) - 1));
+ pnp_mem_flags(dev, i) = 0;
+ }
+ }
+}
+
/*
* PnP Quirks
* Cards or devices that need some tweaking due to incomplete resource info
@@ -128,6 +169,8 @@ static struct pnp_fixup pnp_fixups[] = {
{"CTL0043", quirk_sb16audio_resources},
{"CTL0044", quirk_sb16audio_resources},
{"CTL0045", quirk_sb16audio_resources},
+ {"PNP0c01", quirk_supermicro_h8dce_system},
+ {"PNP0c02", quirk_supermicro_h8dce_system},
{""}
};
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index d4824840c5bf..c444d6b10c58 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -106,7 +106,6 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(energy_now),
POWER_SUPPLY_ATTR(energy_avg),
POWER_SUPPLY_ATTR(capacity),
- POWER_SUPPLY_ATTR(capacity_level),
POWER_SUPPLY_ATTR(temp),
POWER_SUPPLY_ATTR(temp_ambient),
POWER_SUPPLY_ATTR(time_to_empty_now),
@@ -116,6 +115,7 @@ static struct device_attribute power_supply_attrs[] = {
/* Properties of type `const char *' */
POWER_SUPPLY_ATTR(model_name),
POWER_SUPPLY_ATTR(manufacturer),
+ POWER_SUPPLY_ATTR(serial_number),
};
static ssize_t power_supply_show_static_attrs(struct device *dev,
diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c
index 87b3493d88e5..6f2f90ebb020 100644
--- a/drivers/ps3/ps3av.c
+++ b/drivers/ps3/ps3av.c
@@ -78,23 +78,21 @@ static const struct avset_video_mode {
u32 aspect;
u32 x;
u32 y;
- u32 interlace;
- u32 freq;
} video_mode_table[] = {
{ 0, }, /* auto */
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480I, A_N, 720, 480, 1, 60},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480P, A_N, 720, 480, 0, 60},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_60HZ, A_N, 1280, 720, 0, 60},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_60HZ, A_W, 1920, 1080, 1, 60},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_60HZ, A_W, 1920, 1080, 0, 60},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576I, A_N, 720, 576, 1, 50},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576P, A_N, 720, 576, 0, 50},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_50HZ, A_N, 1280, 720, 0, 50},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_50HZ, A_W, 1920, 1080, 1, 50},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_50HZ, A_W, 1920, 1080, 0, 50},
- { RGB8, XRGB, PS3AV_CMD_VIDEO_VID_WXGA, A_W, 1280, 768, 0, 60},
- { RGB8, XRGB, PS3AV_CMD_VIDEO_VID_SXGA, A_N, 1280, 1024, 0, 60},
- { RGB8, XRGB, PS3AV_CMD_VIDEO_VID_WUXGA, A_W, 1920, 1200, 0, 60},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480I, A_N, 720, 480},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480P, A_N, 720, 480},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_60HZ, A_N, 1280, 720},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_60HZ, A_W, 1920, 1080},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_60HZ, A_W, 1920, 1080},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576I, A_N, 720, 576},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576P, A_N, 720, 576},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_50HZ, A_N, 1280, 720},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_50HZ, A_W, 1920, 1080},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_50HZ, A_W, 1920, 1080},
+ { RGB8, XRGB, PS3AV_CMD_VIDEO_VID_WXGA, A_W, 1280, 768},
+ { RGB8, XRGB, PS3AV_CMD_VIDEO_VID_SXGA, A_N, 1280, 1024},
+ { RGB8, XRGB, PS3AV_CMD_VIDEO_VID_WUXGA, A_W, 1920, 1200},
};
/* supported CIDs */
@@ -544,7 +542,7 @@ static void ps3av_set_videomode_packet(u32 id)
static void ps3av_set_videomode_cont(u32 id, u32 old_id)
{
- static int vesa = 0;
+ static int vesa;
int res;
/* video signal off */
@@ -554,9 +552,9 @@ static void ps3av_set_videomode_cont(u32 id, u32 old_id)
* AV backend needs non-VESA mode setting at least one time
* when VESA mode is used.
*/
- if (vesa == 0 && (id & PS3AV_MODE_MASK) >= 11) {
+ if (vesa == 0 && (id & PS3AV_MODE_MASK) >= PS3AV_MODE_WXGA) {
/* vesa mode */
- ps3av_set_videomode_packet(2); /* 480P */
+ ps3av_set_videomode_packet(PS3AV_MODE_480P);
}
vesa = 1;
@@ -596,20 +594,21 @@ static const struct {
unsigned mask : 19;
unsigned id : 4;
} ps3av_preferred_modes[] = {
- { .mask = PS3AV_RESBIT_WUXGA << SHIFT_VESA, .id = 13 },
- { .mask = PS3AV_RESBIT_1920x1080P << SHIFT_60, .id = 5 },
- { .mask = PS3AV_RESBIT_1920x1080P << SHIFT_50, .id = 10 },
- { .mask = PS3AV_RESBIT_1920x1080I << SHIFT_60, .id = 4 },
- { .mask = PS3AV_RESBIT_1920x1080I << SHIFT_50, .id = 9 },
- { .mask = PS3AV_RESBIT_SXGA << SHIFT_VESA, .id = 12 },
- { .mask = PS3AV_RESBIT_WXGA << SHIFT_VESA, .id = 11 },
- { .mask = PS3AV_RESBIT_1280x720P << SHIFT_60, .id = 3 },
- { .mask = PS3AV_RESBIT_1280x720P << SHIFT_50, .id = 8 },
- { .mask = PS3AV_RESBIT_720x480P << SHIFT_60, .id = 2 },
- { .mask = PS3AV_RESBIT_720x576P << SHIFT_50, .id = 7 },
+ { PS3AV_RESBIT_WUXGA << SHIFT_VESA, PS3AV_MODE_WUXGA },
+ { PS3AV_RESBIT_1920x1080P << SHIFT_60, PS3AV_MODE_1080P60 },
+ { PS3AV_RESBIT_1920x1080P << SHIFT_50, PS3AV_MODE_1080P50 },
+ { PS3AV_RESBIT_1920x1080I << SHIFT_60, PS3AV_MODE_1080I60 },
+ { PS3AV_RESBIT_1920x1080I << SHIFT_50, PS3AV_MODE_1080I50 },
+ { PS3AV_RESBIT_SXGA << SHIFT_VESA, PS3AV_MODE_SXGA },
+ { PS3AV_RESBIT_WXGA << SHIFT_VESA, PS3AV_MODE_WXGA },
+ { PS3AV_RESBIT_1280x720P << SHIFT_60, PS3AV_MODE_720P60 },
+ { PS3AV_RESBIT_1280x720P << SHIFT_50, PS3AV_MODE_720P50 },
+ { PS3AV_RESBIT_720x480P << SHIFT_60, PS3AV_MODE_480P },
+ { PS3AV_RESBIT_720x576P << SHIFT_50, PS3AV_MODE_576P },
};
-static int ps3av_resbit2id(u32 res_50, u32 res_60, u32 res_vesa)
+static enum ps3av_mode_num ps3av_resbit2id(u32 res_50, u32 res_60,
+ u32 res_vesa)
{
unsigned int i;
u32 res_all;
@@ -638,9 +637,9 @@ static int ps3av_resbit2id(u32 res_50, u32 res_60, u32 res_vesa)
return 0;
}
-static int ps3av_hdmi_get_id(struct ps3av_info_monitor *info)
+static enum ps3av_mode_num ps3av_hdmi_get_id(struct ps3av_info_monitor *info)
{
- int id;
+ enum ps3av_mode_num id;
if (safe_mode)
return PS3AV_DEFAULT_HDMI_MODE_ID_REG_60;
@@ -854,7 +853,7 @@ int ps3av_set_video_mode(u32 id)
/* auto mode */
option = id & ~PS3AV_MODE_MASK;
- if ((id & PS3AV_MODE_MASK) == 0) {
+ if ((id & PS3AV_MODE_MASK) == PS3AV_MODE_AUTO) {
id = ps3av_auto_videomode(&ps3av->av_hw_conf);
if (id < 1) {
printk(KERN_ERR "%s: invalid id :%d\n", __func__, id);
@@ -889,36 +888,6 @@ int ps3av_get_mode(void)
EXPORT_SYMBOL_GPL(ps3av_get_mode);
-int ps3av_get_scanmode(int id)
-{
- int size;
-
- id = id & PS3AV_MODE_MASK;
- size = ARRAY_SIZE(video_mode_table);
- if (id > size - 1 || id < 0) {
- printk(KERN_ERR "%s: invalid mode %d\n", __func__, id);
- return -EINVAL;
- }
- return video_mode_table[id].interlace;
-}
-
-EXPORT_SYMBOL_GPL(ps3av_get_scanmode);
-
-int ps3av_get_refresh_rate(int id)
-{
- int size;
-
- id = id & PS3AV_MODE_MASK;
- size = ARRAY_SIZE(video_mode_table);
- if (id > size - 1 || id < 0) {
- printk(KERN_ERR "%s: invalid mode %d\n", __func__, id);
- return -EINVAL;
- }
- return video_mode_table[id].freq;
-}
-
-EXPORT_SYMBOL_GPL(ps3av_get_refresh_rate);
-
/* get resolution by video_mode */
int ps3av_video_mode2res(u32 id, u32 *xres, u32 *yres)
{
@@ -990,7 +959,7 @@ static int ps3av_probe(struct ps3_system_bus_device *dev)
return -ENOMEM;
mutex_init(&ps3av->mutex);
- ps3av->ps3av_mode = 0;
+ ps3av->ps3av_mode = PS3AV_MODE_AUTO;
ps3av->dev = dev;
INIT_WORK(&ps3av->work, ps3avd);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 45e4b9648176..6402d699072b 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -20,6 +20,10 @@ menuconfig RTC_CLASS
if RTC_CLASS
+if GEN_RTC || RTC
+comment "Conflicting RTC option has been selected, check GEN_RTC and RTC"
+endif
+
config RTC_HCTOSYS
bool "Set system time from RTC on startup and resume"
depends on RTC_CLASS = y
@@ -49,7 +53,7 @@ config RTC_HCTOSYS_DEVICE
If the clock you specify here is not battery backed, it may still
be useful to reinitialize system time when resuming from system
- sleep states. Do not specify an RTC here unless it stays powered
+ sleep states. Do not specify an RTC here unless it stays powered
during all this system's supported sleep states.
config RTC_DEBUG
@@ -142,7 +146,7 @@ config RTC_DRV_DS1307
will be called rtc-ds1307.
config RTC_DRV_DS1374
- tristate "Maxim/Dallas Semiconductor DS1374 Real Time Clock"
+ tristate "Dallas/Maxim DS1374"
depends on RTC_CLASS && I2C
help
If you say yes here you get support for Dallas Semiconductor
@@ -162,7 +166,7 @@ config RTC_DRV_DS1672
will be called rtc-ds1672.
config RTC_DRV_MAX6900
- tristate "Maxim 6900"
+ tristate "Maxim MAX6900"
help
If you say yes here you will get support for the
Maxim MAX6900 I2C RTC chip.
@@ -180,10 +184,10 @@ config RTC_DRV_RS5C372
will be called rtc-rs5c372.
config RTC_DRV_ISL1208
- tristate "Intersil 1208"
+ tristate "Intersil ISL1208"
help
If you say yes here you get support for the
- Intersil 1208 RTC chip.
+ Intersil ISL1208 RTC chip.
This driver can also be built as a module. If so, the module
will be called rtc-isl1208.
@@ -220,7 +224,7 @@ config RTC_DRV_PCF8583
will be called rtc-pcf8583.
config RTC_DRV_M41T80
- tristate "ST M41T80 series RTC"
+ tristate "ST M41T80/81/82/83/84/85/87"
help
If you say Y here you will get support for the
ST M41T80 RTC chips series. Currently following chips are
@@ -252,23 +256,32 @@ comment "SPI RTC drivers"
if SPI_MASTER
-config RTC_DRV_RS5C348
- tristate "Ricoh RS5C348A/B"
+config RTC_DRV_MAX6902
+ tristate "Maxim MAX6902"
help
- If you say yes here you get support for the
- Ricoh RS5C348A and RS5C348B RTC chips.
+ If you say yes here you will get support for the
+ Maxim MAX6902 SPI RTC chip.
This driver can also be built as a module. If so, the module
- will be called rtc-rs5c348.
+ will be called rtc-max6902.
-config RTC_DRV_MAX6902
- tristate "Maxim 6902"
+config RTC_DRV_R9701
+ tristate "Epson RTC-9701JE"
help
If you say yes here you will get support for the
- Maxim MAX6902 SPI RTC chip.
+ Epson RTC-9701JE SPI RTC chip.
This driver can also be built as a module. If so, the module
- will be called rtc-max6902.
+ will be called rtc-r9701.
+
+config RTC_DRV_RS5C348
+ tristate "Ricoh RS5C348A/B"
+ help
+ If you say yes here you get support for the
+ Ricoh RS5C348A and RS5C348B RTC chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-rs5c348.
endif # SPI_MASTER
@@ -302,34 +315,50 @@ config RTC_DRV_DS1216
help
If you say yes here you get support for the Dallas DS1216 RTC chips.
-config RTC_DRV_DS1553
- tristate "Dallas DS1553"
+config RTC_DRV_DS1302
+ tristate "Dallas DS1302"
+ depends on SH_SECUREEDGE5410
+ help
+ If you say yes here you get support for the Dallas DS1302 RTC chips.
+
+config RTC_DRV_DS1511
+ tristate "Dallas DS1511"
+ depends on RTC_CLASS
help
If you say yes here you get support for the
- Dallas DS1553 timekeeping chip.
+ Dallas DS1511 timekeeping/watchdog chip.
This driver can also be built as a module. If so, the module
- will be called rtc-ds1553.
+ will be called rtc-ds1511.
-config RTC_DRV_STK17TA8
- tristate "Simtek STK17TA8"
- depends on RTC_CLASS
+config RTC_DRV_DS1553
+ tristate "Maxim/Dallas DS1553"
help
If you say yes here you get support for the
- Simtek STK17TA8 timekeeping chip.
+ Maxim/Dallas DS1553 timekeeping chip.
This driver can also be built as a module. If so, the module
- will be called rtc-stk17ta8.
+ will be called rtc-ds1553.
config RTC_DRV_DS1742
- tristate "Dallas DS1742/1743"
+ tristate "Maxim/Dallas DS1742/1743"
help
If you say yes here you get support for the
- Dallas DS1742/1743 timekeeping chip.
+ Maxim/Dallas DS1742/1743 timekeeping chip.
This driver can also be built as a module. If so, the module
will be called rtc-ds1742.
+config RTC_DRV_STK17TA8
+ tristate "Simtek STK17TA8"
+ depends on RTC_CLASS
+ help
+ If you say yes here you get support for the
+ Simtek STK17TA8 timekeeping chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-stk17ta8.
+
config RTC_DRV_M48T86
tristate "ST M48T86/Dallas DS12887"
help
@@ -440,10 +469,47 @@ config RTC_DRV_AT32AP700X
AT32AP700x family processors.
config RTC_DRV_AT91RM9200
- tristate "AT91RM9200"
- depends on ARCH_AT91RM9200
- help
- Driver for the Atmel AT91RM9200's internal RTC (Realtime Clock).
+ tristate "AT91RM9200 or AT91SAM9RL"
+ depends on ARCH_AT91RM9200 || ARCH_AT91SAM9RL
+ help
+ Driver for the internal RTC (Realtime Clock) module found on
+ Atmel AT91RM9200 and AT91SAM9RL chips. On SAM9RL chips
+ this is powered by the backup power supply.
+
+config RTC_DRV_AT91SAM9
+ tristate "AT91SAM9x"
+ depends on ARCH_AT91 && !(ARCH_AT91RM9200 || ARCH_AT91X40)
+ help
+ RTC driver for the Atmel AT91SAM9x internal RTT (Real Time Timer).
+ These timers are powered by the backup power supply (such as a
+ small coin cell battery), but do not need to be used as RTCs.
+
+ (On AT91SAM9RL chips you probably want to use the dedicated RTC
+ module and leave the RTT available for other uses.)
+
+config RTC_DRV_AT91SAM9_RTT
+ int
+ range 0 1
+ default 0
+ prompt "RTT module Number" if ARCH_AT91SAM9263
+ depends on RTC_DRV_AT91SAM9
+ help
+ More than one RTT module is available. You can choose which
+ one will be used as an RTC. The default of zero is normally
+ OK to use, though some systems use that for non-RTC purposes.
+
+config RTC_DRV_AT91SAM9_GPBR
+ int
+ range 0 3 if !ARCH_AT91SAM9263
+ range 0 15 if ARCH_AT91SAM9263
+ default 0
+ prompt "Backup Register Number"
+ depends on RTC_DRV_AT91SAM9
+ help
+ The RTC driver needs to use one of the General Purpose Backup
+ Registers (GPBRs) as well as the RTT. You can choose which one
+ will be used. The default of zero is normally OK to use, but
+ on some systems other software needs to use that register.
config RTC_DRV_BFIN
tristate "Blackfin On-Chip RTC"
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 465db4dd50b2..ec703f34ab86 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -19,11 +19,14 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
+obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
+obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o
obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o
obj-$(CONFIG_RTC_DRV_DS1374) += rtc-ds1374.o
+obj-$(CONFIG_RTC_DRV_DS1511) += rtc-ds1511.o
obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o
obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o
obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o
@@ -38,6 +41,7 @@ obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
+obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
new file mode 100644
index 000000000000..bbf10ecf416c
--- /dev/null
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -0,0 +1,520 @@
+/*
+ * "RTT as Real Time Clock" driver for AT91SAM9 SoC family
+ *
+ * (C) 2007 Michel Benoit
+ *
+ * Based on rtc-at91rm9200.c by Rick Bronson
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+#include <linux/rtc.h>
+#include <linux/interrupt.h>
+#include <linux/ioctl.h>
+
+#include <asm/mach/time.h>
+#include <asm/arch/board.h>
+#include <asm/arch/at91_rtt.h>
+
+
+/*
+ * This driver uses two configurable hardware resources that live in the
+ * AT91SAM9 backup power domain (intended to be powered at all times)
+ * to implement the Real Time Clock interfaces
+ *
+ * - A "Real-time Timer" (RTT) counts up in seconds from a base time.
+ * We can't assign the counter value (CRTV) ... but we can reset it.
+ *
+ * - One of the "General Purpose Backup Registers" (GPBRs) holds the
+ * base time, normally an offset from the beginning of the POSIX
+ * epoch (1970-Jan-1 00:00:00 UTC). Some systems also include the
+ * local timezone's offset.
+ *
+ * The RTC's value is the RTT counter plus that offset. The RTC's alarm
+ * is likewise a base (ALMV) plus that offset.
+ *
+ * Not all RTTs will be used as RTCs; some systems have multiple RTTs to
+ * choose from, or a "real" RTC module. All systems have multiple GPBR
+ * registers available, likewise usable for more than "RTC" support.
+ */
+
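+/*
+ * In other words (illustrative):
+ *
+ *   current RTC time = GPBR base offset + RTT counter (VR)
+ *   alarm time       = GPBR base offset + RTT alarm   (ALMV)
+ */
+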
+/*
+ * We store ALARM_DISABLED in ALMV to record that no alarm is set.
+ * It's also the reset value for that field.
+ */
+#define ALARM_DISABLED ((u32)~0)
+
+
+struct sam9_rtc {
+ void __iomem *rtt;
+ struct rtc_device *rtcdev;
+ u32 imr;
+};
+
+#define rtt_readl(rtc, field) \
+ __raw_readl((rtc)->rtt + AT91_RTT_ ## field)
+#define rtt_writel(rtc, field, val) \
+ __raw_writel((val), (rtc)->rtt + AT91_RTT_ ## field)
+
+#define gpbr_readl(rtc) \
+ at91_sys_read(AT91_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR)
+#define gpbr_writel(rtc, val) \
+ at91_sys_write(AT91_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR, (val))
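+/* e.g. rtt_readl(rtc, VR) expands to __raw_readl((rtc)->rtt + AT91_RTT_VR) */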
+
+/*
+ * Read current time and date in RTC
+ */
+static int at91_rtc_readtime(struct device *dev, struct rtc_time *tm)
+{
+ struct sam9_rtc *rtc = dev_get_drvdata(dev);
+ u32 secs, secs2;
+ u32 offset;
+
+ /* read current time offset */
+ offset = gpbr_readl(rtc);
+ if (offset == 0)
+ return -EILSEQ;
+
+ /* reread the counter to help sync the two clock domains */
+ secs = rtt_readl(rtc, VR);
+ secs2 = rtt_readl(rtc, VR);
+ if (secs != secs2)
+ secs = rtt_readl(rtc, VR);
+
+ rtc_time_to_tm(offset + secs, tm);
+
+ dev_dbg(dev, "%s: %4d-%02d-%02d %02d:%02d:%02d\n", "readtime",
+ 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ return 0;
+}
+
+/*
+ * Set current time and date in RTC
+ */
+static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ struct sam9_rtc *rtc = dev_get_drvdata(dev);
+ int err;
+ u32 offset, alarm, mr;
+ unsigned long secs;
+
+ dev_dbg(dev, "%s: %4d-%02d-%02d %02d:%02d:%02d\n", "settime",
+ 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ err = rtc_tm_to_time(tm, &secs);
+ if (err != 0)
+ return err;
+
+ mr = rtt_readl(rtc, MR);
+
+ /* disable interrupts */
+ rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
+
+ /* read current time offset */
+ offset = gpbr_readl(rtc);
+
+ /* store the new base time in a battery backup register */
+ secs += 1;
+ gpbr_writel(rtc, secs);
+
+ /* adjust the alarm time for the new base */
+ alarm = rtt_readl(rtc, AR);
+ if (alarm != ALARM_DISABLED) {
+ if (offset > secs) {
+ /* time jumped backwards, increase time until alarm */
+ alarm += (offset - secs);
+ } else if ((alarm + offset) > secs) {
+ /* time jumped forwards, decrease time until alarm */
+ alarm -= (secs - offset);
+ } else {
+ /* time jumped past the alarm, disable alarm */
+ alarm = ALARM_DISABLED;
+ mr &= ~AT91_RTT_ALMIEN;
+ }
+ rtt_writel(rtc, AR, alarm);
+ }
+
+ /* reset the timer, and re-enable interrupts */
+ rtt_writel(rtc, MR, mr | AT91_RTT_RTTRST);
+
+ return 0;
+}
+
+static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sam9_rtc *rtc = dev_get_drvdata(dev);
+ struct rtc_time *tm = &alrm->time;
+ u32 alarm = rtt_readl(rtc, AR);
+ u32 offset;
+
+ offset = gpbr_readl(rtc);
+ if (offset == 0)
+ return -EILSEQ;
+
+ memset(alrm, 0, sizeof(*alrm));
+ if (alarm != ALARM_DISABLED && offset != 0) {
+ rtc_time_to_tm(offset + alarm, tm);
+
+ dev_dbg(dev, "%s: %4d-%02d-%02d %02d:%02d:%02d\n", "readalarm",
+ 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ if (rtt_readl(rtc, MR) & AT91_RTT_ALMIEN)
+ alrm->enabled = 1;
+ }
+
+ return 0;
+}
+
+static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sam9_rtc *rtc = dev_get_drvdata(dev);
+ struct rtc_time *tm = &alrm->time;
+ unsigned long secs;
+ u32 offset;
+ u32 mr;
+ int err;
+
+ err = rtc_tm_to_time(tm, &secs);
+ if (err != 0)
+ return err;
+
+ offset = gpbr_readl(rtc);
+ if (offset == 0) {
+ /* time is not set */
+ return -EILSEQ;
+ }
+ mr = rtt_readl(rtc, MR);
+ rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
+
+ /* alarm in the past? finish and leave disabled */
+ if (secs <= offset) {
+ rtt_writel(rtc, AR, ALARM_DISABLED);
+ return 0;
+ }
+
+ /* else set alarm and maybe enable it */
+ rtt_writel(rtc, AR, secs - offset);
+ if (alrm->enabled)
+ rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
+
+ dev_dbg(dev, "%s: %4d-%02d-%02d %02d:%02d:%02d\n", "setalarm",
+ 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour,
+ tm->tm_min, tm->tm_sec);
+
+ return 0;
+}
+
+/*
+ * Handle commands from user-space
+ */
+static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sam9_rtc *rtc = dev_get_drvdata(dev);
+ int ret = 0;
+ u32 mr = rtt_readl(rtc, MR);
+
+ dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr);
+
+ switch (cmd) {
+ case RTC_AIE_OFF: /* alarm off */
+ rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
+ break;
+ case RTC_AIE_ON: /* alarm on */
+ rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
+ break;
+ case RTC_UIE_OFF: /* update off */
+ rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
+ break;
+ case RTC_UIE_ON: /* update on */
+ rtt_writel(rtc, MR, mr | AT91_RTT_RTTINCIEN);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Provide additional RTC information in /proc/driver/rtc
+ */
+static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+ struct sam9_rtc *rtc = dev_get_drvdata(dev);
+ u32 mr = rtt_readl(rtc, MR);
+
+ seq_printf(seq, "update_IRQ\t: %s\n",
+ (mr & AT91_RTT_RTTINCIEN) ? "yes" : "no");
+ return 0;
+}
+
+/*
+ * IRQ handler for the RTC
+ */
+static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
+{
+ struct sam9_rtc *rtc = _rtc;
+ u32 sr, mr;
+ unsigned long events = 0;
+
+ /* Shared interrupt may be for another device. Note: reading
+ * SR clears it, so we must only read it in this irq handler!
+ */
+ mr = rtt_readl(rtc, MR) & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
+ sr = rtt_readl(rtc, SR) & mr;
+ if (!sr)
+ return IRQ_NONE;
+
+ /* alarm status */
+ if (sr & AT91_RTT_ALMS)
+ events |= (RTC_AF | RTC_IRQF);
+
+ /* timer update/increment */
+ if (sr & AT91_RTT_RTTINC)
+ events |= (RTC_UF | RTC_IRQF);
+
+ rtc_update_irq(rtc->rtcdev, 1, events);
+
+ pr_debug("%s: num=%ld, events=0x%02lx\n", __FUNCTION__,
+ events >> 8, events & 0x000000FF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops at91_rtc_ops = {
+ .ioctl = at91_rtc_ioctl,
+ .read_time = at91_rtc_readtime,
+ .set_time = at91_rtc_settime,
+ .read_alarm = at91_rtc_readalarm,
+ .set_alarm = at91_rtc_setalarm,
+ .proc = at91_rtc_proc,
+};
+
+/*
+ * Initialize and install RTC driver
+ */
+static int __init at91_rtc_probe(struct platform_device *pdev)
+{
+ struct resource *r;
+ struct sam9_rtc *rtc;
+ int ret;
+ u32 mr;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENODEV;
+
+ rtc = kzalloc(sizeof *rtc, GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rtc);
+ rtc->rtt = (void __force __iomem *) (AT91_VA_BASE_SYS - AT91_BASE_SYS);
+ rtc->rtt += r->start;
+
+ mr = rtt_readl(rtc, MR);
+
+ /* unless RTT is counting at 1 Hz, re-initialize it */
+ if ((mr & AT91_RTT_RTPRES) != AT91_SLOW_CLOCK) {
+ mr = AT91_RTT_RTTRST | (AT91_SLOW_CLOCK & AT91_RTT_RTPRES);
+ gpbr_writel(rtc, 0);
+ }
+
+ /* disable all interrupts (same as on shutdown path) */
+ mr &= ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
+ rtt_writel(rtc, MR, mr);
+
+ rtc->rtcdev = rtc_device_register(pdev->name, &pdev->dev,
+ &at91_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtcdev)) {
+ ret = PTR_ERR(rtc->rtcdev);
+ goto fail;
+ }
+
+ /* register irq handler after we know what name we'll use */
+ ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt,
+ IRQF_DISABLED | IRQF_SHARED,
+ rtc->rtcdev->dev.bus_id, rtc);
+ if (ret) {
+ dev_dbg(&pdev->dev, "can't share IRQ %d?\n", AT91_ID_SYS);
+ rtc_device_unregister(rtc->rtcdev);
+ goto fail;
+ }
+
+ /* NOTE: sam9260 rev A silicon has a ROM bug which resets the
+ * RTT on at least some reboots. If you have that chip, you must
+ * initialize the time from some external source like a GPS, wall
+ * clock, discrete RTC, etc.
+ */
+
+ if (gpbr_readl(rtc) == 0)
+ dev_warn(&pdev->dev, "%s: SET TIME!\n",
+ rtc->rtcdev->dev.bus_id);
+
+ return 0;
+
+fail:
+ platform_set_drvdata(pdev, NULL);
+ kfree(rtc);
+ return ret;
+}
+
+/*
+ * Disable and remove the RTC driver
+ */
+static int __exit at91_rtc_remove(struct platform_device *pdev)
+{
+ struct sam9_rtc *rtc = platform_get_drvdata(pdev);
+ u32 mr = rtt_readl(rtc, MR);
+
+ /* disable all interrupts */
+ rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
+ free_irq(AT91_ID_SYS, rtc);
+
+ rtc_device_unregister(rtc->rtcdev);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(rtc);
+ return 0;
+}
+
+static void at91_rtc_shutdown(struct platform_device *pdev)
+{
+ struct sam9_rtc *rtc = platform_get_drvdata(pdev);
+ u32 mr = rtt_readl(rtc, MR);
+
+ rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
+ rtt_writel(rtc, MR, mr & ~rtc->imr);
+}
+
+#ifdef CONFIG_PM
+
+/* AT91SAM9 RTC Power management control */
+
+static int at91_rtc_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct sam9_rtc *rtc = platform_get_drvdata(pdev);
+ u32 mr = rtt_readl(rtc, MR);
+
+ /*
+ * This IRQ is shared with DBGU and other hardware which isn't
+ * necessarily a wakeup event source.
+ */
+ rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
+ if (rtc->imr) {
+ if (device_may_wakeup(&pdev->dev) && (mr & AT91_RTT_ALMIEN)) {
+ enable_irq_wake(AT91_ID_SYS);
+ /* don't let RTTINC cause wakeups */
+ if (mr & AT91_RTT_RTTINCIEN)
+ rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
+ } else
+ rtt_writel(rtc, MR, mr & ~rtc->imr);
+ }
+
+ return 0;
+}
+
+static int at91_rtc_resume(struct platform_device *pdev)
+{
+ struct sam9_rtc *rtc = platform_get_drvdata(pdev);
+ u32 mr;
+
+ if (rtc->imr) {
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(AT91_ID_SYS);
+ mr = rtt_readl(rtc, MR);
+ rtt_writel(rtc, MR, mr | rtc->imr);
+ }
+
+ return 0;
+}
+#else
+#define at91_rtc_suspend NULL
+#define at91_rtc_resume NULL
+#endif
+
+static struct platform_driver at91_rtc_driver = {
+ .driver.name = "rtc-at91sam9",
+ .driver.owner = THIS_MODULE,
+ .remove = __exit_p(at91_rtc_remove),
+ .shutdown = at91_rtc_shutdown,
+ .suspend = at91_rtc_suspend,
+ .resume = at91_rtc_resume,
+};
+
+/* Chips can have more than one RTT module, and they can be used for more
+ * than just RTCs. So we can't just register as "the" RTT driver.
+ *
+ * A normal approach in such cases is to create a library to allocate and
+ * free the modules. Here we just use bus_find_device() as like such a
+ * library, binding directly ... no runtime "library" footprint is needed.
+ */
+static int __init at91_rtc_match(struct device *dev, void *v)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ /* continue searching if this isn't the RTT we need */
+ if (strcmp("at91_rtt", pdev->name) != 0
+ || pdev->id != CONFIG_RTC_DRV_AT91SAM9_RTT)
+ goto fail;
+
+ /* else we found it ... but fail unless we can bind to the RTC driver */
+ if (dev->driver) {
+ dev_dbg(dev, "busy, can't use as RTC!\n");
+ goto fail;
+ }
+ dev->driver = &at91_rtc_driver.driver;
+ if (device_attach(dev) == 0) {
+ dev_dbg(dev, "can't attach RTC!\n");
+ goto fail;
+ }
+ ret = at91_rtc_probe(pdev);
+ if (ret == 0)
+ return true;
+
+ dev_dbg(dev, "RTC probe err %d!\n", ret);
+fail:
+ return false;
+}
+
+static int __init at91_rtc_init(void)
+{
+ int status;
+ struct device *rtc;
+
+ status = platform_driver_register(&at91_rtc_driver);
+ if (status)
+ return status;
+ rtc = bus_find_device(&platform_bus_type, NULL,
+ NULL, at91_rtc_match);
+ if (!rtc)
+ platform_driver_unregister(&at91_rtc_driver);
+ return rtc ? 0 : -ENODEV;
+}
+module_init(at91_rtc_init);
+
+static void __exit at91_rtc_exit(void)
+{
+ platform_driver_unregister(&at91_rtc_driver);
+}
+module_exit(at91_rtc_exit);
+
+
+MODULE_AUTHOR("Michel Benoit");
+MODULE_DESCRIPTION("RTC driver for Atmel AT91SAM9x");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 1aa709dda0d6..d90ba860d216 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -1,6 +1,6 @@
/*
* Blackfin On-Chip Real Time Clock Driver
- * Supports BF53[123]/BF53[467]/BF54[2489]
+ * Supports BF52[257]/BF53[123]/BF53[467]/BF54[24789]
*
* Copyright 2004-2007 Analog Devices Inc.
*
@@ -32,26 +32,25 @@
* writes to clear status registers complete immediately.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/bcd.h>
-#include <linux/rtc.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/rtc.h>
#include <linux/seq_file.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
#include <asm/blackfin.h>
-#define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __FUNCTION__, __LINE__, ## args)
-#define stampit() stamp("here i am")
+#define dev_dbg_stamp(dev) dev_dbg(dev, "%s:%i: here i am\n", __func__, __LINE__)
struct bfin_rtc {
struct rtc_device *rtc_dev;
struct rtc_time rtc_alarm;
- spinlock_t lock;
+ u16 rtc_wrote_regs;
};
/* Bit values for the ISTAT / ICTL registers */
@@ -72,7 +71,7 @@ struct bfin_rtc {
#define SEC_BITS_OFF 0
/* Some helper functions to convert between the common RTC notion of time
- * and the internal Blackfin notion that is stored in 32bits.
+ * and the internal Blackfin notion that is encoded in 32bits.
*/
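+/*
+ * The packed 32-bit value is assembled from the bit-field offsets defined
+ * above: a day count, hours, minutes and seconds each occupy their own
+ * field of the word.
+ */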
static inline u32 rtc_time_to_bfin(unsigned long now)
{
@@ -97,7 +96,10 @@ static inline void rtc_bfin_to_tm(u32 rtc_bfin, struct rtc_time *tm)
rtc_time_to_tm(rtc_bfin_to_time(rtc_bfin), tm);
}
-/* Wait for the previous write to a RTC register to complete.
+/**
+ * bfin_rtc_sync_pending - make sure pending writes have completed
+ *
+ * Wait for the previous write to a RTC register to complete.
* Unfortunately, we can't sleep here as that introduces a race condition when
* turning on interrupt events. Consider this:
* - process sets alarm
@@ -112,188 +114,202 @@ static inline void rtc_bfin_to_tm(u32 rtc_bfin, struct rtc_time *tm)
* If anyone can point out the obvious solution here, i'm listening :). This
* shouldn't be an issue on an SMP or preempt system as this function should
* only be called with the rtc lock held.
+ *
+ * Other options:
+ * - disable PREN so the sync happens at 32.768 kHz ... but this changes the
+ * inc rate for all RTC registers from 1 Hz to 32.768 kHz ...
+ * - use the write complete IRQ
*/
-static void rtc_bfin_sync_pending(void)
+/*
+static void bfin_rtc_sync_pending_polled(void)
{
- stampit();
- while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_COMPLETE)) {
+ while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_COMPLETE))
if (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING))
break;
- }
bfin_write_RTC_ISTAT(RTC_ISTAT_WRITE_COMPLETE);
}
+*/
+static DECLARE_COMPLETION(bfin_write_complete);
+static void bfin_rtc_sync_pending(struct device *dev)
+{
+ dev_dbg_stamp(dev);
+ while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING)
+ wait_for_completion_timeout(&bfin_write_complete, HZ * 5);
+ dev_dbg_stamp(dev);
+}
-static void rtc_bfin_reset(struct bfin_rtc *rtc)
+/**
+ * bfin_rtc_reset - set RTC to sane/known state
+ *
+ * Initialize the RTC. Enable pre-scaler to scale RTC clock
+ * to 1Hz and clear interrupt/status registers.
+ */
+static void bfin_rtc_reset(struct device *dev)
{
- /* Initialize the RTC. Enable pre-scaler to scale RTC clock
- * to 1Hz and clear interrupt/status registers. */
- spin_lock_irq(&rtc->lock);
- rtc_bfin_sync_pending();
+ struct bfin_rtc *rtc = dev_get_drvdata(dev);
+ dev_dbg_stamp(dev);
+ bfin_rtc_sync_pending(dev);
bfin_write_RTC_PREN(0x1);
- bfin_write_RTC_ICTL(0);
+ bfin_write_RTC_ICTL(RTC_ISTAT_WRITE_COMPLETE);
bfin_write_RTC_SWCNT(0);
bfin_write_RTC_ALARM(0);
bfin_write_RTC_ISTAT(0xFFFF);
- spin_unlock_irq(&rtc->lock);
+ rtc->rtc_wrote_regs = 0;
}
+/**
+ * bfin_rtc_interrupt - handle interrupt from RTC
+ *
+ * Since we handle all RTC events here, we have to make sure the requested
+ * interrupt is enabled (in RTC_ICTL) as the event status register (RTC_ISTAT)
+ * always gets updated regardless of the interrupt being enabled. So when one
+ * even we care about (e.g. stopwatch) goes off, we don't want to turn around
+ * and say that other events have happened as well (e.g. second). We do not
+ * have to worry about pending writes to the RTC_ICTL register as interrupts
+ * only fire if they are enabled in the RTC_ICTL register.
+ */
static irqreturn_t bfin_rtc_interrupt(int irq, void *dev_id)
{
- struct platform_device *pdev = to_platform_device(dev_id);
- struct bfin_rtc *rtc = platform_get_drvdata(pdev);
+ struct device *dev = dev_id;
+ struct bfin_rtc *rtc = dev_get_drvdata(dev);
unsigned long events = 0;
- u16 rtc_istat;
-
- stampit();
+ bool write_complete = false;
+ u16 rtc_istat, rtc_ictl;
- spin_lock_irq(&rtc->lock);
+ dev_dbg_stamp(dev);
rtc_istat = bfin_read_RTC_ISTAT();
+ rtc_ictl = bfin_read_RTC_ICTL();
- if (rtc_istat & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)) {
- bfin_write_RTC_ISTAT(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY);
- events |= RTC_AF | RTC_IRQF;
+ if (rtc_istat & RTC_ISTAT_WRITE_COMPLETE) {
+ bfin_write_RTC_ISTAT(RTC_ISTAT_WRITE_COMPLETE);
+ write_complete = true;
+ complete(&bfin_write_complete);
}
- if (rtc_istat & RTC_ISTAT_STOPWATCH) {
- bfin_write_RTC_ISTAT(RTC_ISTAT_STOPWATCH);
- events |= RTC_PF | RTC_IRQF;
- bfin_write_RTC_SWCNT(rtc->rtc_dev->irq_freq);
+ if (rtc_ictl & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)) {
+ if (rtc_istat & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)) {
+ bfin_write_RTC_ISTAT(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY);
+ events |= RTC_AF | RTC_IRQF;
+ }
}
- if (rtc_istat & RTC_ISTAT_SEC) {
- bfin_write_RTC_ISTAT(RTC_ISTAT_SEC);
- events |= RTC_UF | RTC_IRQF;
+ if (rtc_ictl & RTC_ISTAT_STOPWATCH) {
+ if (rtc_istat & RTC_ISTAT_STOPWATCH) {
+ bfin_write_RTC_ISTAT(RTC_ISTAT_STOPWATCH);
+ events |= RTC_PF | RTC_IRQF;
+ bfin_write_RTC_SWCNT(rtc->rtc_dev->irq_freq);
+ }
}
- rtc_update_irq(rtc->rtc_dev, 1, events);
+ if (rtc_ictl & RTC_ISTAT_SEC) {
+ if (rtc_istat & RTC_ISTAT_SEC) {
+ bfin_write_RTC_ISTAT(RTC_ISTAT_SEC);
+ events |= RTC_UF | RTC_IRQF;
+ }
+ }
- spin_unlock_irq(&rtc->lock);
+ if (events)
+ rtc_update_irq(rtc->rtc_dev, 1, events);
- return IRQ_HANDLED;
+ if (write_complete || events)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
}
static int bfin_rtc_open(struct device *dev)
{
- struct bfin_rtc *rtc = dev_get_drvdata(dev);
int ret;
- stampit();
+ dev_dbg_stamp(dev);
- ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_DISABLED, "rtc-bfin", dev);
- if (unlikely(ret)) {
- dev_err(dev, "request RTC IRQ failed with %d\n", ret);
- return ret;
- }
-
- rtc_bfin_reset(rtc);
+ ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, to_platform_device(dev)->name, dev);
+ if (!ret)
+ bfin_rtc_reset(dev);
return ret;
}
static void bfin_rtc_release(struct device *dev)
{
- struct bfin_rtc *rtc = dev_get_drvdata(dev);
- stampit();
- rtc_bfin_reset(rtc);
+ dev_dbg_stamp(dev);
+ bfin_rtc_reset(dev);
free_irq(IRQ_RTC, dev);
}
+static void bfin_rtc_int_set(struct bfin_rtc *rtc, u16 rtc_int)
+{
+ bfin_write_RTC_ISTAT(rtc_int);
+ bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() | rtc_int);
+}
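+/* note: callers pass the complement of the bit(s) to clear, e.g. ~RTC_ISTAT_SEC */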
+static void bfin_rtc_int_clear(struct bfin_rtc *rtc, u16 rtc_int)
+{
+ bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() & rtc_int);
+}
+static void bfin_rtc_int_set_alarm(struct bfin_rtc *rtc)
+{
+ /* Blackfin has different bits for whether the alarm is
+ * more than 24 hours away.
+ */
+ bfin_rtc_int_set(rtc, (rtc->rtc_alarm.tm_yday == -1 ? RTC_ISTAT_ALARM : RTC_ISTAT_ALARM_DAY));
+}
static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
struct bfin_rtc *rtc = dev_get_drvdata(dev);
+ int ret = 0;
+
+ dev_dbg_stamp(dev);
- stampit();
+ bfin_rtc_sync_pending(dev);
switch (cmd) {
case RTC_PIE_ON:
- stampit();
- spin_lock_irq(&rtc->lock);
- rtc_bfin_sync_pending();
- bfin_write_RTC_ISTAT(RTC_ISTAT_STOPWATCH);
+ dev_dbg_stamp(dev);
+ bfin_rtc_int_set(rtc, RTC_ISTAT_STOPWATCH);
bfin_write_RTC_SWCNT(rtc->rtc_dev->irq_freq);
- bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() | RTC_ISTAT_STOPWATCH);
- spin_unlock_irq(&rtc->lock);
- return 0;
+ break;
case RTC_PIE_OFF:
- stampit();
- spin_lock_irq(&rtc->lock);
- rtc_bfin_sync_pending();
- bfin_write_RTC_SWCNT(0);
- bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() & ~RTC_ISTAT_STOPWATCH);
- spin_unlock_irq(&rtc->lock);
- return 0;
+ dev_dbg_stamp(dev);
+ bfin_rtc_int_clear(rtc, ~RTC_ISTAT_STOPWATCH);
+ break;
case RTC_UIE_ON:
- stampit();
- spin_lock_irq(&rtc->lock);
- rtc_bfin_sync_pending();
- bfin_write_RTC_ISTAT(RTC_ISTAT_SEC);
- bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() | RTC_ISTAT_SEC);
- spin_unlock_irq(&rtc->lock);
- return 0;
+ dev_dbg_stamp(dev);
+ bfin_rtc_int_set(rtc, RTC_ISTAT_SEC);
+ break;
case RTC_UIE_OFF:
- stampit();
- spin_lock_irq(&rtc->lock);
- rtc_bfin_sync_pending();
- bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() & ~RTC_ISTAT_SEC);
- spin_unlock_irq(&rtc->lock);
- return 0;
-
- case RTC_AIE_ON: {
- unsigned long rtc_alarm;
- u16 which_alarm;
- int ret = 0;
-
- stampit();
-
- spin_lock_irq(&rtc->lock);
-
- rtc_bfin_sync_pending();
- if (rtc->rtc_alarm.tm_yday == -1) {
- struct rtc_time now;
- rtc_bfin_to_tm(bfin_read_RTC_STAT(), &now);
- now.tm_sec = rtc->rtc_alarm.tm_sec;
- now.tm_min = rtc->rtc_alarm.tm_min;
- now.tm_hour = rtc->rtc_alarm.tm_hour;
- ret = rtc_tm_to_time(&now, &rtc_alarm);
- which_alarm = RTC_ISTAT_ALARM;
- } else {
- ret = rtc_tm_to_time(&rtc->rtc_alarm, &rtc_alarm);
- which_alarm = RTC_ISTAT_ALARM_DAY;
- }
- if (ret == 0) {
- bfin_write_RTC_ISTAT(which_alarm);
- bfin_write_RTC_ALARM(rtc_time_to_bfin(rtc_alarm));
- bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() | which_alarm);
- }
-
- spin_unlock_irq(&rtc->lock);
-
- return ret;
- }
+ dev_dbg_stamp(dev);
+ bfin_rtc_int_clear(rtc, ~RTC_ISTAT_SEC);
+ break;
+
+ case RTC_AIE_ON:
+ dev_dbg_stamp(dev);
+ bfin_rtc_int_set_alarm(rtc);
+ break;
case RTC_AIE_OFF:
- stampit();
- spin_lock_irq(&rtc->lock);
- rtc_bfin_sync_pending();
- bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() & ~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
- spin_unlock_irq(&rtc->lock);
- return 0;
+ dev_dbg_stamp(dev);
+ bfin_rtc_int_clear(rtc, ~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
+ break;
+
+ default:
+ dev_dbg_stamp(dev);
+ ret = -ENOIOCTLCMD;
}
- return -ENOIOCTLCMD;
+ return ret;
}
static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct bfin_rtc *rtc = dev_get_drvdata(dev);
- stampit();
+ dev_dbg_stamp(dev);
+
+ if (rtc->rtc_wrote_regs & 0x1)
+ bfin_rtc_sync_pending(dev);
- spin_lock_irq(&rtc->lock);
- rtc_bfin_sync_pending();
rtc_bfin_to_tm(bfin_read_RTC_STAT(), tm);
- spin_unlock_irq(&rtc->lock);
return 0;
}
@@ -304,64 +320,79 @@ static int bfin_rtc_set_time(struct device *dev, struct rtc_time *tm)
int ret;
unsigned long now;
- stampit();
-
- spin_lock_irq(&rtc->lock);
+ dev_dbg_stamp(dev);
ret = rtc_tm_to_time(tm, &now);
if (ret == 0) {
- rtc_bfin_sync_pending();
+ if (rtc->rtc_wrote_regs & 0x1)
+ bfin_rtc_sync_pending(dev);
bfin_write_RTC_STAT(rtc_time_to_bfin(now));
+ rtc->rtc_wrote_regs = 0x1;
}
- spin_unlock_irq(&rtc->lock);
-
return ret;
}
static int bfin_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct bfin_rtc *rtc = dev_get_drvdata(dev);
- stampit();
- memcpy(&alrm->time, &rtc->rtc_alarm, sizeof(struct rtc_time));
- alrm->pending = !!(bfin_read_RTC_ICTL() & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
+ dev_dbg_stamp(dev);
+ alrm->time = rtc->rtc_alarm;
+ bfin_rtc_sync_pending(dev);
+ alrm->enabled = !!(bfin_read_RTC_ICTL() & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
return 0;
}
static int bfin_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct bfin_rtc *rtc = dev_get_drvdata(dev);
- stampit();
- memcpy(&rtc->rtc_alarm, &alrm->time, sizeof(struct rtc_time));
+ unsigned long rtc_alarm;
+
+ dev_dbg_stamp(dev);
+
+ if (rtc_tm_to_time(&alrm->time, &rtc_alarm))
+ return -EINVAL;
+
+ rtc->rtc_alarm = alrm->time;
+
+ bfin_rtc_sync_pending(dev);
+ bfin_write_RTC_ALARM(rtc_time_to_bfin(rtc_alarm));
+ if (alrm->enabled)
+ bfin_rtc_int_set_alarm(rtc);
+
return 0;
}
static int bfin_rtc_proc(struct device *dev, struct seq_file *seq)
{
-#define yesno(x) (x ? "yes" : "no")
+#define yesno(x) ((x) ? "yes" : "no")
u16 ictl = bfin_read_RTC_ICTL();
- stampit();
- seq_printf(seq, "alarm_IRQ\t: %s\n", yesno(ictl & RTC_ISTAT_ALARM));
- seq_printf(seq, "wkalarm_IRQ\t: %s\n", yesno(ictl & RTC_ISTAT_ALARM_DAY));
- seq_printf(seq, "seconds_IRQ\t: %s\n", yesno(ictl & RTC_ISTAT_SEC));
- seq_printf(seq, "periodic_IRQ\t: %s\n", yesno(ictl & RTC_ISTAT_STOPWATCH));
-#ifdef DEBUG
- seq_printf(seq, "RTC_STAT\t: 0x%08X\n", bfin_read_RTC_STAT());
- seq_printf(seq, "RTC_ICTL\t: 0x%04X\n", bfin_read_RTC_ICTL());
- seq_printf(seq, "RTC_ISTAT\t: 0x%04X\n", bfin_read_RTC_ISTAT());
- seq_printf(seq, "RTC_SWCNT\t: 0x%04X\n", bfin_read_RTC_SWCNT());
- seq_printf(seq, "RTC_ALARM\t: 0x%08X\n", bfin_read_RTC_ALARM());
- seq_printf(seq, "RTC_PREN\t: 0x%04X\n", bfin_read_RTC_PREN());
-#endif
+ dev_dbg_stamp(dev);
+ seq_printf(seq,
+ "alarm_IRQ\t: %s\n"
+ "wkalarm_IRQ\t: %s\n"
+ "seconds_IRQ\t: %s\n"
+ "periodic_IRQ\t: %s\n",
+ yesno(ictl & RTC_ISTAT_ALARM),
+ yesno(ictl & RTC_ISTAT_ALARM_DAY),
+ yesno(ictl & RTC_ISTAT_SEC),
+ yesno(ictl & RTC_ISTAT_STOPWATCH));
return 0;
+#undef yesno
}
+/**
+ * bfin_irq_set_freq - make sure hardware supports requested freq
+ * @dev: pointer to RTC device structure
+ * @freq: requested frequency rate
+ *
+ * The Blackfin RTC can only generate periodic events at 1 per
+ * second (1 Hz), so reject any attempt at changing it.
+ */
static int bfin_irq_set_freq(struct device *dev, int freq)
{
- struct bfin_rtc *rtc = dev_get_drvdata(dev);
- stampit();
- rtc->rtc_dev->irq_freq = freq;
- return 0;
+ dev_dbg_stamp(dev);
+ return -ENOTTY;
}
static struct rtc_class_ops bfin_rtc_ops = {
@@ -381,27 +412,24 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
struct bfin_rtc *rtc;
int ret = 0;
- stampit();
+ dev_dbg_stamp(&pdev->dev);
rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
if (unlikely(!rtc))
return -ENOMEM;
- spin_lock_init(&rtc->lock);
-
rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &bfin_rtc_ops, THIS_MODULE);
 	if (unlikely(IS_ERR(rtc->rtc_dev))) {
ret = PTR_ERR(rtc->rtc_dev);
goto err;
}
- rtc->rtc_dev->irq_freq = 0;
- rtc->rtc_dev->max_user_freq = (2 << 16); /* stopwatch is an unsigned 16 bit reg */
+ rtc->rtc_dev->irq_freq = 1;
platform_set_drvdata(pdev, rtc);
return 0;
-err:
+ err:
kfree(rtc);
return ret;
}
@@ -428,7 +456,6 @@ static struct platform_driver bfin_rtc_driver = {
static int __init bfin_rtc_init(void)
{
- stampit();
return platform_driver_register(&bfin_rtc_driver);
}
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 29cf1457ca10..e059f94c79eb 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -36,9 +36,24 @@
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
+#ifdef CONFIG_HPET_EMULATE_RTC
+#include <asm/hpet.h>
+#endif
+
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <asm-generic/rtc.h>
+#ifndef CONFIG_HPET_EMULATE_RTC
+#define is_hpet_enabled() 0
+#define hpet_set_alarm_time(hrs, min, sec) do { } while (0)
+#define hpet_set_periodic_freq(arg) 0
+#define hpet_mask_rtc_irq_bit(arg) do { } while (0)
+#define hpet_set_rtc_irq_bit(arg) do { } while (0)
+#define hpet_rtc_timer_init() do { } while (0)
+#define hpet_register_irq_handler(h) 0
+#define hpet_unregister_irq_handler(h) do { } while (0)
+extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
+#endif
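+
+/* with the stubs above, the rest of this file calls hpet_*() unconditionally */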
struct cmos_rtc {
struct rtc_device *rtc;
@@ -199,6 +214,7 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
sec = t->time.tm_sec;
sec = (sec < 60) ? BIN2BCD(sec) : 0xff;
+ hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min, t->time.tm_sec);
spin_lock_irq(&rtc_lock);
/* next rtc irq must not be from previous alarm setting */
@@ -252,7 +268,8 @@ static int cmos_irq_set_freq(struct device *dev, int freq)
f = 16 - f;
spin_lock_irqsave(&rtc_lock, flags);
- CMOS_WRITE(RTC_REF_CLCK_32KHZ | f, RTC_FREQ_SELECT);
+ if (!hpet_set_periodic_freq(freq))
+ CMOS_WRITE(RTC_REF_CLCK_32KHZ | f, RTC_FREQ_SELECT);
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
@@ -314,28 +331,37 @@ cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RTC_AIE_OFF: /* alarm off */
rtc_control &= ~RTC_AIE;
+ hpet_mask_rtc_irq_bit(RTC_AIE);
break;
case RTC_AIE_ON: /* alarm on */
rtc_control |= RTC_AIE;
+ hpet_set_rtc_irq_bit(RTC_AIE);
break;
case RTC_UIE_OFF: /* update off */
rtc_control &= ~RTC_UIE;
+ hpet_mask_rtc_irq_bit(RTC_UIE);
break;
case RTC_UIE_ON: /* update on */
rtc_control |= RTC_UIE;
+ hpet_set_rtc_irq_bit(RTC_UIE);
break;
case RTC_PIE_OFF: /* periodic off */
rtc_control &= ~RTC_PIE;
+ hpet_mask_rtc_irq_bit(RTC_PIE);
break;
case RTC_PIE_ON: /* periodic on */
rtc_control |= RTC_PIE;
+ hpet_set_rtc_irq_bit(RTC_PIE);
break;
}
- CMOS_WRITE(rtc_control, RTC_CONTROL);
+ if (!is_hpet_enabled())
+ CMOS_WRITE(rtc_control, RTC_CONTROL);
+
rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
if (is_intr(rtc_intr))
rtc_update_irq(cmos->rtc, 1, rtc_intr);
+
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
@@ -393,15 +419,111 @@ static const struct rtc_class_ops cmos_rtc_ops = {
/*----------------------------------------------------------------*/
+/*
+ * All these chips have at least 64 bytes of address space, shared by
+ * RTC registers and NVRAM. Most of those bytes of NVRAM are used
+ * by boot firmware. Modern chips have 128 or 256 bytes.
+ */
+
+#define NVRAM_OFFSET (RTC_REG_D + 1)
+
+static ssize_t
+cmos_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ int retval;
+
+ if (unlikely(off >= attr->size))
+ return 0;
+ if ((off + count) > attr->size)
+ count = attr->size - off;
+
+ spin_lock_irq(&rtc_lock);
+ for (retval = 0, off += NVRAM_OFFSET; count--; retval++, off++)
+ *buf++ = CMOS_READ(off);
+ spin_unlock_irq(&rtc_lock);
+
+ return retval;
+}
+
+static ssize_t
+cmos_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct cmos_rtc *cmos;
+ int retval;
+
+ cmos = dev_get_drvdata(container_of(kobj, struct device, kobj));
+ if (unlikely(off >= attr->size))
+ return -EFBIG;
+ if ((off + count) > attr->size)
+ count = attr->size - off;
+
+ /* NOTE: on at least PCs and Ataris, the boot firmware uses a
+ * checksum on part of the NVRAM data. That's currently ignored
+ * here. If userspace is smart enough to know what fields of
+ * NVRAM to update, updating checksums is also part of its job.
+ */
+ spin_lock_irq(&rtc_lock);
+ for (retval = 0, off += NVRAM_OFFSET; count--; retval++, off++) {
+ /* don't trash RTC registers */
+ if (off == cmos->day_alrm
+ || off == cmos->mon_alrm
+ || off == cmos->century)
+ buf++;
+ else
+ CMOS_WRITE(*buf++, off);
+ }
+ spin_unlock_irq(&rtc_lock);
+
+ return retval;
+}
+
+static struct bin_attribute nvram = {
+ .attr = {
+ .name = "nvram",
+ .mode = S_IRUGO | S_IWUSR,
+ .owner = THIS_MODULE,
+ },
+
+ .read = cmos_nvram_read,
+ .write = cmos_nvram_write,
+ /* size gets set up later */
+};
+
+/*----------------------------------------------------------------*/
+
static struct cmos_rtc cmos_rtc;
static irqreturn_t cmos_interrupt(int irq, void *p)
{
u8 irqstat;
+ u8 rtc_control;
spin_lock(&rtc_lock);
- irqstat = CMOS_READ(RTC_INTR_FLAGS);
- irqstat &= (CMOS_READ(RTC_CONTROL) & RTC_IRQMASK) | RTC_IRQF;
+ /*
+ * In this case it is HPET RTC interrupt handler
+ * calling us, with the interrupt information
+ * passed as arg1, instead of irq.
+ */
+ if (is_hpet_enabled())
+ irqstat = (unsigned long)irq & 0xF0;
+ else {
+ irqstat = CMOS_READ(RTC_INTR_FLAGS);
+ rtc_control = CMOS_READ(RTC_CONTROL);
+ irqstat &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
+ }
+
+ /* All Linux RTC alarms should be treated as if they were oneshot.
+ * Similar code may be needed in system wakeup paths, in case the
+ * alarm woke the system.
+ */
+ if (irqstat & RTC_AIE) {
+ rtc_control = CMOS_READ(RTC_CONTROL);
+ rtc_control &= ~RTC_AIE;
+ CMOS_WRITE(rtc_control, RTC_CONTROL);
+ CMOS_READ(RTC_INTR_FLAGS);
+ }
spin_unlock(&rtc_lock);
if (is_intr(irqstat)) {
@@ -412,11 +534,9 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
}
#ifdef CONFIG_PNP
-#define is_pnp() 1
#define INITSECTION
#else
-#define is_pnp() 0
#define INITSECTION __init
#endif
@@ -426,6 +546,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
struct cmos_rtc_board_info *info = dev->platform_data;
int retval = 0;
unsigned char rtc_control;
+ unsigned address_space;
/* there can be only one ... */
if (cmos_rtc.dev)
@@ -450,15 +571,36 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
cmos_rtc.irq = rtc_irq;
cmos_rtc.iomem = ports;
+ /* Heuristic to deduce NVRAM size ... do what the legacy NVRAM
+ * driver did, but don't reject unknown configs. Old hardware
+ * won't address 128 bytes, and for now we ignore the way newer
+ * chips can address 256 bytes (using two more i/o ports).
+ */
+#if defined(CONFIG_ATARI)
+ address_space = 64;
+#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+ address_space = 128;
+#else
+#warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
+ address_space = 128;
+#endif
+
/* For ACPI systems extension info comes from the FADT. On others,
* board specific setup provides it as appropriate. Systems where
* the alarm IRQ isn't automatically a wakeup IRQ (like ACPI, and
* some almost-clones) can provide hooks to make that behave.
+ *
+ * Note that ACPI doesn't preclude putting these registers into
+ * "extended" areas of the chip, including some that we won't yet
+ * expect CMOS_READ and friends to handle.
*/
if (info) {
- cmos_rtc.day_alrm = info->rtc_day_alarm;
- cmos_rtc.mon_alrm = info->rtc_mon_alarm;
- cmos_rtc.century = info->rtc_century;
+ if (info->rtc_day_alarm && info->rtc_day_alarm < 128)
+ cmos_rtc.day_alrm = info->rtc_day_alarm;
+ if (info->rtc_mon_alarm && info->rtc_mon_alarm < 128)
+ cmos_rtc.mon_alrm = info->rtc_mon_alarm;
+ if (info->rtc_century && info->rtc_century < 128)
+ cmos_rtc.century = info->rtc_century;
if (info->wake_on && info->wake_off) {
cmos_rtc.wake_on = info->wake_on;
@@ -485,8 +627,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
* doesn't use 32KHz here ... for portability we might need to
* do something about other clock frequencies.
*/
- CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT);
cmos_rtc.rtc->irq_freq = 1024;
+ if (!hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq))
+ CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT);
/* disable irqs.
*
@@ -509,19 +652,39 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup1;
}
- if (is_valid_irq(rtc_irq))
- retval = request_irq(rtc_irq, cmos_interrupt, IRQF_DISABLED,
- cmos_rtc.rtc->dev.bus_id,
+ if (is_valid_irq(rtc_irq)) {
+ irq_handler_t rtc_cmos_int_handler;
+
+ if (is_hpet_enabled()) {
+ int err;
+
+ rtc_cmos_int_handler = hpet_rtc_interrupt;
+ err = hpet_register_irq_handler(cmos_interrupt);
+ if (err != 0) {
+ printk(KERN_WARNING "hpet_register_irq_handler "
+ "failed in rtc_init().\n");
+ goto cleanup1;
+ }
+ } else
+ rtc_cmos_int_handler = cmos_interrupt;
+
+ retval = request_irq(rtc_irq, rtc_cmos_int_handler,
+ IRQF_DISABLED, cmos_rtc.rtc->dev.bus_id,
cmos_rtc.rtc);
- if (retval < 0) {
- dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
- goto cleanup1;
+ if (retval < 0) {
+ dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
+ goto cleanup1;
+ }
}
+ hpet_rtc_timer_init();
- /* REVISIT optionally make 50 or 114 bytes NVRAM available,
- * like rtc-ds1553, rtc-ds1742 ... this will often include
- * registers for century, and day/month alarm.
- */
+ /* export at least the first block of NVRAM */
+ nvram.size = address_space - NVRAM_OFFSET;
+ retval = sysfs_create_bin_file(&dev->kobj, &nvram);
+ if (retval < 0) {
+ dev_dbg(dev, "can't create nvram file? %d\n", retval);
+ goto cleanup2;
+ }
pr_info("%s: alarms up to one %s%s\n",
cmos_rtc.rtc->dev.bus_id,
@@ -536,6 +699,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
return 0;
+cleanup2:
+ if (is_valid_irq(rtc_irq))
+ free_irq(rtc_irq, cmos_rtc.rtc);
cleanup1:
cmos_rtc.dev = NULL;
rtc_device_unregister(cmos_rtc.rtc);
@@ -563,8 +729,12 @@ static void __exit cmos_do_remove(struct device *dev)
cmos_do_shutdown();
- if (is_valid_irq(cmos->irq))
+ sysfs_remove_bin_file(&dev->kobj, &nvram);
+
+ if (is_valid_irq(cmos->irq)) {
free_irq(cmos->irq, cmos->rtc);
+ hpet_unregister_irq_handler(cmos_interrupt);
+ }
rtc_device_unregister(cmos->rtc);
cmos->rtc = NULL;
@@ -659,9 +829,12 @@ static int cmos_resume(struct device *dev)
/*----------------------------------------------------------------*/
-/* The "CMOS" RTC normally lives on the platform_bus. On ACPI systems,
- * the device node will always be created as a PNPACPI device. Plus
- * pre-ACPI PCs probably list it in the PNPBIOS tables.
+/* On non-x86 systems, a "CMOS" RTC lives most naturally on platform_bus.
+ * ACPI systems always list these as PNPACPI devices, and pre-ACPI PCs
+ * probably list them in similar PNPBIOS tables; so PNP is more common.
+ *
+ * We don't use legacy "poke at the hardware" probing. Ancient PCs that
+ * predate even PNPBIOS should set up platform_bus devices.
*/
#ifdef CONFIG_PNP
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 025c60a17a4a..90dfa0df747a 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -246,6 +246,15 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
/* if the driver does not provide the ioctl interface
* or if that particular ioctl was not implemented
* (-ENOIOCTLCMD), we will try to emulate here.
+ *
+ * Drivers *SHOULD NOT* provide ioctl implementations
+ * for these requests. Instead, provide methods to
+ * support the following code, so that the RTC's main
+ * features are accessible without using ioctls.
+ *
+ * RTC and alarm times will be in UTC, by preference,
+ * but dual-booting with MS-Windows implies RTCs must
+ * use the local wall clock time.
*/
switch (cmd) {
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
new file mode 100644
index 000000000000..7b002ceeaa7d
--- /dev/null
+++ b/drivers/rtc/rtc-ds1302.c
@@ -0,0 +1,262 @@
+/*
+ * Dallas DS1302 RTC Support
+ *
+ * Copyright (C) 2002 David McCullough
+ * Copyright (C) 2003 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License version 2. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+#include <linux/rtc.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/bcd.h>
+#include <asm/rtc.h>
+
+#define DRV_NAME "rtc-ds1302"
+#define DRV_VERSION "0.1.0"
+
+#define RTC_CMD_READ 0x81 /* Read command */
+#define RTC_CMD_WRITE 0x80 /* Write command */
+
+#define RTC_ADDR_RAM0 0x20 /* Address of RAM0 */
+#define RTC_ADDR_TCR 0x08 /* Address of trickle charge register */
+#define RTC_ADDR_YEAR 0x06 /* Address of year register */
+#define RTC_ADDR_DAY 0x05 /* Address of day of week register */
+#define RTC_ADDR_MON 0x04 /* Address of month register */
+#define RTC_ADDR_DATE 0x03 /* Address of day of month register */
+#define RTC_ADDR_HOUR 0x02 /* Address of hour register */
+#define RTC_ADDR_MIN 0x01 /* Address of minute register */
+#define RTC_ADDR_SEC 0x00 /* Address of second register */
+
+#define RTC_RESET 0x1000
+#define RTC_IODATA 0x0800
+#define RTC_SCLK 0x0400
+
+#ifdef CONFIG_SH_SECUREEDGE5410
+#include <asm/snapgear.h>
+#define set_dp(x) SECUREEDGE_WRITE_IOPORT(x, 0x1c00)
+#define get_dp() SECUREEDGE_READ_IOPORT()
+#else
+#error "Add support for your platform"
+#endif
+
+struct ds1302_rtc {
+ struct rtc_device *rtc_dev;
+ spinlock_t lock;
+};
+
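+/*
+ * The DS1302 sits on a bit-banged 3-wire interface (CE/"RESET", I/O, SCLK)
+ * reached through a platform I/O port: bytes are shifted LSB first, one bit
+ * per SCLK pulse, with RTC_RESET held high for the whole transfer.
+ */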
+static void ds1302_sendbits(unsigned int val)
+{
+ int i;
+
+ for (i = 8; (i); i--, val >>= 1) {
+ set_dp((get_dp() & ~RTC_IODATA) | ((val & 0x1) ?
+ RTC_IODATA : 0));
+ set_dp(get_dp() | RTC_SCLK); /* clock high */
+ set_dp(get_dp() & ~RTC_SCLK); /* clock low */
+ }
+}
+
+static unsigned int ds1302_recvbits(void)
+{
+ unsigned int val;
+ int i;
+
+ for (i = 0, val = 0; (i < 8); i++) {
+ val |= (((get_dp() & RTC_IODATA) ? 1 : 0) << i);
+ set_dp(get_dp() | RTC_SCLK); /* clock high */
+ set_dp(get_dp() & ~RTC_SCLK); /* clock low */
+ }
+
+ return val;
+}
+
+static unsigned int ds1302_readbyte(unsigned int addr)
+{
+ unsigned int val;
+
+ set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
+
+ set_dp(get_dp() | RTC_RESET);
+ ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_READ);
+ val = ds1302_recvbits();
+ set_dp(get_dp() & ~RTC_RESET);
+
+ return val;
+}
+
+static void ds1302_writebyte(unsigned int addr, unsigned int val)
+{
+ set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
+ set_dp(get_dp() | RTC_RESET);
+ ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_WRITE);
+ ds1302_sendbits(val);
+ set_dp(get_dp() & ~RTC_RESET);
+}
+
+static int ds1302_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ds1302_rtc *rtc = dev_get_drvdata(dev);
+
+ spin_lock_irq(&rtc->lock);
+
+ tm->tm_sec = BCD2BIN(ds1302_readbyte(RTC_ADDR_SEC));
+ tm->tm_min = BCD2BIN(ds1302_readbyte(RTC_ADDR_MIN));
+ tm->tm_hour = BCD2BIN(ds1302_readbyte(RTC_ADDR_HOUR));
+ tm->tm_wday = BCD2BIN(ds1302_readbyte(RTC_ADDR_DAY));
+ tm->tm_mday = BCD2BIN(ds1302_readbyte(RTC_ADDR_DATE));
+ tm->tm_mon = BCD2BIN(ds1302_readbyte(RTC_ADDR_MON)) - 1;
+ tm->tm_year = BCD2BIN(ds1302_readbyte(RTC_ADDR_YEAR));
+
+ if (tm->tm_year < 70)
+ tm->tm_year += 100;
+
+ spin_unlock_irq(&rtc->lock);
+
+ dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
+ "mday=%d, mon=%d, year=%d, wday=%d\n",
+ __FUNCTION__,
+ tm->tm_sec, tm->tm_min, tm->tm_hour,
+ tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday);
+
+ if (rtc_valid_tm(tm) < 0)
+ dev_err(dev, "invalid date\n");
+
+ return 0;
+}
+
+static int ds1302_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ds1302_rtc *rtc = dev_get_drvdata(dev);
+
+ spin_lock_irq(&rtc->lock);
+
+ /* Stop RTC */
+ ds1302_writebyte(RTC_ADDR_SEC, ds1302_readbyte(RTC_ADDR_SEC) | 0x80);
+
+ ds1302_writebyte(RTC_ADDR_SEC, BIN2BCD(tm->tm_sec));
+ ds1302_writebyte(RTC_ADDR_MIN, BIN2BCD(tm->tm_min));
+ ds1302_writebyte(RTC_ADDR_HOUR, BIN2BCD(tm->tm_hour));
+ ds1302_writebyte(RTC_ADDR_DAY, BIN2BCD(tm->tm_wday));
+ ds1302_writebyte(RTC_ADDR_DATE, BIN2BCD(tm->tm_mday));
+ ds1302_writebyte(RTC_ADDR_MON, BIN2BCD(tm->tm_mon + 1));
+ ds1302_writebyte(RTC_ADDR_YEAR, BIN2BCD(tm->tm_year % 100));
+
+ /* Start RTC */
+ ds1302_writebyte(RTC_ADDR_SEC, ds1302_readbyte(RTC_ADDR_SEC) & ~0x80);
+
+ spin_unlock_irq(&rtc->lock);
+
+ return 0;
+}
+
+static int ds1302_rtc_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+#ifdef RTC_SET_CHARGE
+ case RTC_SET_CHARGE:
+ {
+ struct ds1302_rtc *rtc = dev_get_drvdata(dev);
+ int tcs_val;
+
+ if (copy_from_user(&tcs_val, (int __user *)arg, sizeof(int)))
+ return -EFAULT;
+
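+ /* TCR layout: the upper nibble must be 1010b (0xa0) to enable the
+ * trickle charger; the lower nibble selects the diode/resistor
+ * combination (see the DS1302 datasheet). */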
+ spin_lock_irq(&rtc->lock);
+ ds1302_writebyte(RTC_ADDR_TCR, (0xa0 | (tcs_val & 0xf)));
+ spin_unlock_irq(&rtc->lock);
+ return 0;
+ }
+#endif
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+static struct rtc_class_ops ds1302_rtc_ops = {
+ .read_time = ds1302_rtc_read_time,
+ .set_time = ds1302_rtc_set_time,
+ .ioctl = ds1302_rtc_ioctl,
+};
+
+static int __devinit ds1302_rtc_probe(struct platform_device *pdev)
+{
+ struct ds1302_rtc *rtc;
+ int ret;
+
+ /* Reset */
+ set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
+
+ /* Write a magic value to the DS1302 RAM, and see if it sticks. */
+ ds1302_writebyte(RTC_ADDR_RAM0, 0x42);
+ if (ds1302_readbyte(RTC_ADDR_RAM0) != 0x42)
+ return -ENODEV;
+
+ rtc = kzalloc(sizeof(struct ds1302_rtc), GFP_KERNEL);
+ if (unlikely(!rtc))
+ return -ENOMEM;
+
+ spin_lock_init(&rtc->lock);
+ rtc->rtc_dev = rtc_device_register("ds1302", &pdev->dev,
+ &ds1302_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc_dev)) {
+ ret = PTR_ERR(rtc->rtc_dev);
+ goto out;
+ }
+
+ platform_set_drvdata(pdev, rtc);
+
+ return 0;
+out:
+ kfree(rtc);
+ return ret;
+}
+
+static int __devexit ds1302_rtc_remove(struct platform_device *pdev)
+{
+ struct ds1302_rtc *rtc = platform_get_drvdata(pdev);
+
+ if (likely(rtc->rtc_dev))
+ rtc_device_unregister(rtc->rtc_dev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(rtc);
+
+ return 0;
+}
+
+static struct platform_driver ds1302_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ds1302_rtc_probe,
+ .remove = __devexit_p(ds1302_rtc_remove),
+};
+
+static int __init ds1302_rtc_init(void)
+{
+ return platform_driver_register(&ds1302_platform_driver);
+}
+
+static void __exit ds1302_rtc_exit(void)
+{
+ platform_driver_unregister(&ds1302_platform_driver);
+}
+
+module_init(ds1302_rtc_init);
+module_exit(ds1302_rtc_exit);
+
+MODULE_DESCRIPTION("Dallas DS1302 RTC driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Paul Mundt, David McCullough");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index bc1c7fe94ad3..f389a28720d2 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -256,7 +256,7 @@ ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
struct i2c_msg msg[2];
int result;
- client = to_i2c_client(container_of(kobj, struct device, kobj));
+ client = kobj_to_i2c_client(kobj);
ds1307 = i2c_get_clientdata(client);
if (unlikely(off >= NVRAM_SIZE))
@@ -294,7 +294,7 @@ ds1307_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
u8 buffer[NVRAM_SIZE + 1];
int ret;
- client = to_i2c_client(container_of(kobj, struct device, kobj));
+ client = kobj_to_i2c_client(kobj);
if (unlikely(off >= NVRAM_SIZE))
return -EFBIG;
@@ -412,11 +412,6 @@ read_rtc:
*/
tmp = ds1307->regs[DS1307_REG_SECS];
switch (ds1307->type) {
- case ds_1340:
- /* FIXME read register with DS1340_BIT_OSF, use that to
- * trigger the "set time" warning (*after* restarting the
- * oscillator!) instead of this weaker ds1307/m41t00 test.
- */
case ds_1307:
case m41t00:
/* clock halted? turn it on, so clock can tick. */
@@ -440,6 +435,24 @@ read_rtc:
goto read_rtc;
}
break;
+ case ds_1340:
+ /* clock halted? turn it on, so clock can tick. */
+ if (tmp & DS1340_BIT_nEOSC)
+ i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
+
+ tmp = i2c_smbus_read_byte_data(client, DS1340_REG_FLAG);
+ if (tmp < 0) {
+ pr_debug("read error %d\n", tmp);
+ err = -EIO;
+ goto exit_free;
+ }
+
+ /* oscillator fault? clear flag, and warn */
+ if (tmp & DS1340_BIT_OSF) {
+ i2c_smbus_write_byte_data(client, DS1340_REG_FLAG, 0);
+ dev_warn(&client->dev, "SET TIME!\n");
+ }
+ break;
case ds_1337:
case ds_1339:
break;
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
new file mode 100644
index 000000000000..d74b8086fa31
--- /dev/null
+++ b/drivers/rtc/rtc-ds1511.c
@@ -0,0 +1,656 @@
+/*
+ * An rtc driver for the Dallas DS1511
+ *
+ * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ * Copyright (C) 2007 Andrew Sharp <andy.sharp@onstor.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Real time clock driver for the Dallas 1511 chip, which also
+ * contains a watchdog timer. There is a tiny amount of code that
+ * platform code could use to mess with the watchdog device a little
+ * bit, but not a full watchdog driver.
+ */
+
+#include <linux/bcd.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#define DRV_VERSION "0.6"
+
+enum ds1511reg {
+ DS1511_SEC = 0x0,
+ DS1511_MIN = 0x1,
+ DS1511_HOUR = 0x2,
+ DS1511_DOW = 0x3,
+ DS1511_DOM = 0x4,
+ DS1511_MONTH = 0x5,
+ DS1511_YEAR = 0x6,
+ DS1511_CENTURY = 0x7,
+ DS1511_AM1_SEC = 0x8,
+ DS1511_AM2_MIN = 0x9,
+ DS1511_AM3_HOUR = 0xa,
+ DS1511_AM4_DATE = 0xb,
+ DS1511_WD_MSEC = 0xc,
+ DS1511_WD_SEC = 0xd,
+ DS1511_CONTROL_A = 0xe,
+ DS1511_CONTROL_B = 0xf,
+ DS1511_RAMADDR_LSB = 0x10,
+ DS1511_RAMDATA = 0x13
+};
+
+#define DS1511_BLF1 0x80
+#define DS1511_BLF2 0x40
+#define DS1511_PRS 0x20
+#define DS1511_PAB 0x10
+#define DS1511_TDF 0x08
+#define DS1511_KSF 0x04
+#define DS1511_WDF 0x02
+#define DS1511_IRQF 0x01
+#define DS1511_TE 0x80
+#define DS1511_CS 0x40
+#define DS1511_BME 0x20
+#define DS1511_TPE 0x10
+#define DS1511_TIE 0x08
+#define DS1511_KIE 0x04
+#define DS1511_WDE 0x02
+#define DS1511_WDS 0x01
+#define DS1511_RAM_MAX 0xff
+
+#define RTC_CMD DS1511_CONTROL_B
+#define RTC_CMD1 DS1511_CONTROL_A
+
+#define RTC_ALARM_SEC DS1511_AM1_SEC
+#define RTC_ALARM_MIN DS1511_AM2_MIN
+#define RTC_ALARM_HOUR DS1511_AM3_HOUR
+#define RTC_ALARM_DATE DS1511_AM4_DATE
+
+#define RTC_SEC DS1511_SEC
+#define RTC_MIN DS1511_MIN
+#define RTC_HOUR DS1511_HOUR
+#define RTC_DOW DS1511_DOW
+#define RTC_DOM DS1511_DOM
+#define RTC_MON DS1511_MONTH
+#define RTC_YEAR DS1511_YEAR
+#define RTC_CENTURY DS1511_CENTURY
+
+#define RTC_TIE DS1511_TIE
+#define RTC_TE DS1511_TE
+
+struct rtc_plat_data {
+ struct rtc_device *rtc;
+ void __iomem *ioaddr; /* virtual base address */
+ unsigned long baseaddr; /* physical base address */
+ int size; /* amount of memory mapped */
+ int irq;
+ unsigned int irqen;
+ int alrm_sec;
+ int alrm_min;
+ int alrm_hour;
+ int alrm_mday;
+};
+
+static DEFINE_SPINLOCK(ds1511_lock);
+
+static __iomem char *ds1511_base;
+static u32 reg_spacing = 1;
+
+ static noinline void
+rtc_write(uint8_t val, uint32_t reg)
+{
+ writeb(val, ds1511_base + (reg * reg_spacing));
+}
+
+ static inline void
+rtc_write_alarm(uint8_t val, enum ds1511reg reg)
+{
+ rtc_write((val | 0x80), reg);
+}
+
+ static noinline uint8_t
+rtc_read(enum ds1511reg reg)
+{
+ return readb(ds1511_base + (reg * reg_spacing));
+}
+
+ static inline void
+rtc_disable_update(void)
+{
+ rtc_write((rtc_read(RTC_CMD) & ~RTC_TE), RTC_CMD);
+}
+
+ static void
+rtc_enable_update(void)
+{
+ rtc_write((rtc_read(RTC_CMD) | RTC_TE), RTC_CMD);
+}
+
+/*
+ * #define DS1511_WDOG_RESET_SUPPORT
+ *
+ * Uncomment this if you want to use these routines in
+ * some platform code.
+ */
+#ifdef DS1511_WDOG_RESET_SUPPORT
+/*
+ * just enough code to set the watchdog timer so that it
+ * will reboot the system
+ */
+ void
+ds1511_wdog_set(unsigned long deciseconds)
+{
+ /*
+ * the wdog timer can take 99.99 seconds
+ */
+ deciseconds %= 10000;
+ /*
+ * set the wdog values in the wdog registers
+ */
+ rtc_write(BIN2BCD(deciseconds % 100), DS1511_WD_MSEC);
+ rtc_write(BIN2BCD(deciseconds / 100), DS1511_WD_SEC);
+ /*
+ * set wdog enable and wdog 'steering' bit to issue a reset
+ */
+ rtc_write(DS1511_WDE | DS1511_WDS, RTC_CMD);
+}
+
+ void
+ds1511_wdog_disable(void)
+{
+ /*
+ * clear wdog enable and wdog 'steering' bits
+ */
+ rtc_write(rtc_read(RTC_CMD) & ~(DS1511_WDE | DS1511_WDS), RTC_CMD);
+ /*
+ * clear the wdog counter
+ */
+ rtc_write(0, DS1511_WD_MSEC);
+ rtc_write(0, DS1511_WD_SEC);
+}
+#endif
+
+/*
+ * set the rtc chip's idea of the time.
+ * stupidly, some callers call with year unmolested;
+ * and some call with year = year - 1900. thanks.
+ */
+ int
+ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
+{
+ u8 mon, day, dow, hrs, min, sec, yrs, cen;
+	unsigned long flags;
+
+ /*
+ * won't have to change this for a while
+ */
+ if (rtc_tm->tm_year < 1900) {
+ rtc_tm->tm_year += 1900;
+ }
+
+ if (rtc_tm->tm_year < 1970) {
+ return -EINVAL;
+ }
+ yrs = rtc_tm->tm_year % 100;
+ cen = rtc_tm->tm_year / 100;
+ mon = rtc_tm->tm_mon + 1; /* tm_mon starts at zero */
+ day = rtc_tm->tm_mday;
+ dow = rtc_tm->tm_wday & 0x7; /* automatic BCD */
+ hrs = rtc_tm->tm_hour;
+ min = rtc_tm->tm_min;
+ sec = rtc_tm->tm_sec;
+
+ if ((mon > 12) || (day == 0)) {
+ return -EINVAL;
+ }
+
+ if (day > rtc_month_days(rtc_tm->tm_mon, rtc_tm->tm_year)) {
+ return -EINVAL;
+ }
+
+ if ((hrs >= 24) || (min >= 60) || (sec >= 60)) {
+ return -EINVAL;
+ }
+
+ /*
+	 * each register holds a different number of valid bits
+ */
+ sec = BIN2BCD(sec) & 0x7f;
+ min = BIN2BCD(min) & 0x7f;
+ hrs = BIN2BCD(hrs) & 0x3f;
+ day = BIN2BCD(day) & 0x3f;
+ mon = BIN2BCD(mon) & 0x1f;
+ yrs = BIN2BCD(yrs) & 0xff;
+ cen = BIN2BCD(cen) & 0xff;
+
+ spin_lock_irqsave(&ds1511_lock, flags);
+ rtc_disable_update();
+ rtc_write(cen, RTC_CENTURY);
+ rtc_write(yrs, RTC_YEAR);
+ rtc_write((rtc_read(RTC_MON) & 0xe0) | mon, RTC_MON);
+ rtc_write(day, RTC_DOM);
+ rtc_write(hrs, RTC_HOUR);
+ rtc_write(min, RTC_MIN);
+ rtc_write(sec, RTC_SEC);
+ rtc_write(dow, RTC_DOW);
+ rtc_enable_update();
+ spin_unlock_irqrestore(&ds1511_lock, flags);
+
+ return 0;
+}
+
+ int
+ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
+{
+ unsigned int century;
+	unsigned long flags;
+
+ spin_lock_irqsave(&ds1511_lock, flags);
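+	/* stop register updates so we read a consistent snapshot of the time */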
+ rtc_disable_update();
+
+ rtc_tm->tm_sec = rtc_read(RTC_SEC) & 0x7f;
+ rtc_tm->tm_min = rtc_read(RTC_MIN) & 0x7f;
+ rtc_tm->tm_hour = rtc_read(RTC_HOUR) & 0x3f;
+ rtc_tm->tm_mday = rtc_read(RTC_DOM) & 0x3f;
+ rtc_tm->tm_wday = rtc_read(RTC_DOW) & 0x7;
+ rtc_tm->tm_mon = rtc_read(RTC_MON) & 0x1f;
+ rtc_tm->tm_year = rtc_read(RTC_YEAR) & 0x7f;
+ century = rtc_read(RTC_CENTURY);
+
+ rtc_enable_update();
+ spin_unlock_irqrestore(&ds1511_lock, flags);
+
+ rtc_tm->tm_sec = BCD2BIN(rtc_tm->tm_sec);
+ rtc_tm->tm_min = BCD2BIN(rtc_tm->tm_min);
+ rtc_tm->tm_hour = BCD2BIN(rtc_tm->tm_hour);
+ rtc_tm->tm_mday = BCD2BIN(rtc_tm->tm_mday);
+ rtc_tm->tm_wday = BCD2BIN(rtc_tm->tm_wday);
+ rtc_tm->tm_mon = BCD2BIN(rtc_tm->tm_mon);
+ rtc_tm->tm_year = BCD2BIN(rtc_tm->tm_year);
+ century = BCD2BIN(century) * 100;
+
+ /*
+ * Account for differences between how the RTC uses the values
+ * and how they are defined in a struct rtc_time;
+ */
+ century += rtc_tm->tm_year;
+ rtc_tm->tm_year = century - 1900;
+
+ rtc_tm->tm_mon--;
+
+ if (rtc_valid_tm(rtc_tm) < 0) {
+ dev_err(dev, "retrieved date/time is not valid.\n");
+ rtc_time_to_tm(0, rtc_tm);
+ }
+ return 0;
+}
+
+/*
+ * write the alarm register settings
+ *
+ * we only make use of two modes: the once-per-second interrupt,
+ * otherwise known as the update interrupt, and the interrupt that
+ * fires when the whole date/hours/mins/secs matches.  the ds1511
+ * has many more permutations, but the kernel doesn't.
+ */
+ static void
+ds1511_rtc_update_alarm(struct rtc_plat_data *pdata)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
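+	/* writing 0x80 marks an alarm field as "don't care"; masking every field yields a once-per-second (update) interrupt */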
+ rtc_write(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
+ 0x80 : BIN2BCD(pdata->alrm_mday) & 0x3f,
+ RTC_ALARM_DATE);
+ rtc_write(pdata->alrm_hour < 0 || (pdata->irqen & RTC_UF) ?
+ 0x80 : BIN2BCD(pdata->alrm_hour) & 0x3f,
+ RTC_ALARM_HOUR);
+ rtc_write(pdata->alrm_min < 0 || (pdata->irqen & RTC_UF) ?
+ 0x80 : BIN2BCD(pdata->alrm_min) & 0x7f,
+ RTC_ALARM_MIN);
+ rtc_write(pdata->alrm_sec < 0 || (pdata->irqen & RTC_UF) ?
+ 0x80 : BIN2BCD(pdata->alrm_sec) & 0x7f,
+ RTC_ALARM_SEC);
+ rtc_write(rtc_read(RTC_CMD) | (pdata->irqen ? RTC_TIE : 0), RTC_CMD);
+ rtc_read(RTC_CMD1); /* clear interrupts */
+ spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
+}
+
+ static int
+ds1511_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq < 0) {
+ return -EINVAL;
+ }
+ pdata->alrm_mday = alrm->time.tm_mday;
+ pdata->alrm_hour = alrm->time.tm_hour;
+ pdata->alrm_min = alrm->time.tm_min;
+ pdata->alrm_sec = alrm->time.tm_sec;
+ if (alrm->enabled) {
+ pdata->irqen |= RTC_AF;
+ }
+ ds1511_rtc_update_alarm(pdata);
+ return 0;
+}
+
+ static int
+ds1511_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq < 0) {
+ return -EINVAL;
+ }
+ alrm->time.tm_mday = pdata->alrm_mday < 0 ? 0 : pdata->alrm_mday;
+ alrm->time.tm_hour = pdata->alrm_hour < 0 ? 0 : pdata->alrm_hour;
+ alrm->time.tm_min = pdata->alrm_min < 0 ? 0 : pdata->alrm_min;
+ alrm->time.tm_sec = pdata->alrm_sec < 0 ? 0 : pdata->alrm_sec;
+ alrm->enabled = (pdata->irqen & RTC_AF) ? 1 : 0;
+ return 0;
+}
+
+ static irqreturn_t
+ds1511_interrupt(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ unsigned long events = RTC_IRQF;
+
+ /*
+ * read and clear interrupt
+ */
+ if (!(rtc_read(RTC_CMD1) & DS1511_IRQF)) {
+ return IRQ_NONE;
+ }
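+	/* a masked (0x80) seconds alarm field means we are in update-interrupt mode */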
+ if (rtc_read(RTC_ALARM_SEC) & 0x80) {
+ events |= RTC_UF;
+ } else {
+ events |= RTC_AF;
+ }
+ rtc_update_irq(pdata->rtc, 1, events);
+ return IRQ_HANDLED;
+}
+
+ static void
+ds1511_rtc_release(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq >= 0) {
+ pdata->irqen = 0;
+ ds1511_rtc_update_alarm(pdata);
+ }
+}
+
+ static int
+ds1511_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq < 0) {
+ return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
+ }
+ switch (cmd) {
+ case RTC_AIE_OFF:
+ pdata->irqen &= ~RTC_AF;
+ ds1511_rtc_update_alarm(pdata);
+ break;
+ case RTC_AIE_ON:
+ pdata->irqen |= RTC_AF;
+ ds1511_rtc_update_alarm(pdata);
+ break;
+ case RTC_UIE_OFF:
+ pdata->irqen &= ~RTC_UF;
+ ds1511_rtc_update_alarm(pdata);
+ break;
+ case RTC_UIE_ON:
+ pdata->irqen |= RTC_UF;
+ ds1511_rtc_update_alarm(pdata);
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static const struct rtc_class_ops ds1511_rtc_ops = {
+ .read_time = ds1511_rtc_read_time,
+ .set_time = ds1511_rtc_set_time,
+ .read_alarm = ds1511_rtc_read_alarm,
+ .set_alarm = ds1511_rtc_set_alarm,
+ .release = ds1511_rtc_release,
+ .ioctl = ds1511_rtc_ioctl,
+};
+
+ static ssize_t
+ds1511_nvram_read(struct kobject *kobj, struct bin_attribute *ba,
+ char *buf, loff_t pos, size_t size)
+{
+ ssize_t count;
+
+ /*
+ * if count is more than one, turn on "burst" mode
+ * turn it off when you're done
+ */
+ if (size > 1) {
+ rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD);
+ }
+ if (pos > DS1511_RAM_MAX) {
+ pos = DS1511_RAM_MAX;
+ }
+ if (size + pos > DS1511_RAM_MAX + 1) {
+ size = DS1511_RAM_MAX - pos + 1;
+ }
+ rtc_write(pos, DS1511_RAMADDR_LSB);
+ for (count = 0; size > 0; count++, size--) {
+ *buf++ = rtc_read(DS1511_RAMDATA);
+ }
+ if (count > 1) {
+ rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD);
+ }
+ return count;
+}
+
+ static ssize_t
+ds1511_nvram_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
+{
+ ssize_t count;
+
+ /*
+ * if count is more than one, turn on "burst" mode
+ * turn it off when you're done
+ */
+ if (size > 1) {
+ rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD);
+ }
+ if (pos > DS1511_RAM_MAX) {
+ pos = DS1511_RAM_MAX;
+ }
+ if (size + pos > DS1511_RAM_MAX + 1) {
+ size = DS1511_RAM_MAX - pos + 1;
+ }
+ rtc_write(pos, DS1511_RAMADDR_LSB);
+ for (count = 0; size > 0; count++, size--) {
+ rtc_write(*buf++, DS1511_RAMDATA);
+ }
+ if (count > 1) {
+ rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD);
+ }
+ return count;
+}
+
+static struct bin_attribute ds1511_nvram_attr = {
+ .attr = {
+ .name = "nvram",
+ .mode = S_IRUGO | S_IWUGO,
+ .owner = THIS_MODULE,
+ },
+ .size = DS1511_RAM_MAX,
+ .read = ds1511_nvram_read,
+ .write = ds1511_nvram_write,
+};
+
+ static int __devinit
+ds1511_rtc_probe(struct platform_device *pdev)
+{
+ struct rtc_device *rtc;
+ struct resource *res;
+ struct rtc_plat_data *pdata = NULL;
+ int ret = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ return -ENODEV;
+ }
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ return -ENOMEM;
+ }
+ pdata->irq = -1;
+ pdata->size = res->end - res->start + 1;
+ if (!request_mem_region(res->start, pdata->size, pdev->name)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ pdata->baseaddr = res->start;
+ ds1511_base = ioremap(pdata->baseaddr, pdata->size);
+ if (!ds1511_base) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pdata->ioaddr = ds1511_base;
+ pdata->irq = platform_get_irq(pdev, 0);
+
+ /*
+ * turn on the clock and the crystal, etc.
+ */
+ rtc_write(0, RTC_CMD);
+ rtc_write(0, RTC_CMD1);
+ /*
+ * clear the wdog counter
+ */
+ rtc_write(0, DS1511_WD_MSEC);
+ rtc_write(0, DS1511_WD_SEC);
+ /*
+ * start the clock
+ */
+ rtc_enable_update();
+
+ /*
+	 * check for a dying battery
+ */
+ if (rtc_read(RTC_CMD1) & DS1511_BLF1) {
+ dev_warn(&pdev->dev, "voltage-low detected.\n");
+ }
+
+ /*
+ * if the platform has an interrupt in mind for this device,
+ * then by all means, set it
+ */
+ if (pdata->irq >= 0) {
+ rtc_read(RTC_CMD1);
+ if (request_irq(pdata->irq, ds1511_interrupt,
+ IRQF_DISABLED | IRQF_SHARED, pdev->name, pdev) < 0) {
+
+ dev_warn(&pdev->dev, "interrupt not available.\n");
+ pdata->irq = -1;
+ }
+ }
+
+ rtc = rtc_device_register(pdev->name, &pdev->dev, &ds1511_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ ret = PTR_ERR(rtc);
+ goto out;
+ }
+ pdata->rtc = rtc;
+ platform_set_drvdata(pdev, pdata);
+ ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
+ if (ret) {
+ goto out;
+ }
+ return 0;
+ out:
+ if (pdata->rtc) {
+ rtc_device_unregister(pdata->rtc);
+ }
+ if (pdata->irq >= 0) {
+ free_irq(pdata->irq, pdev);
+ }
+ if (ds1511_base) {
+ iounmap(ds1511_base);
+ ds1511_base = NULL;
+ }
+ if (pdata->baseaddr) {
+ release_mem_region(pdata->baseaddr, pdata->size);
+ }
+
+ kfree(pdata);
+ return ret;
+}
+
+ static int __devexit
+ds1511_rtc_remove(struct platform_device *pdev)
+{
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
+ rtc_device_unregister(pdata->rtc);
+ pdata->rtc = NULL;
+ if (pdata->irq >= 0) {
+ /*
+ * disable the alarm interrupt
+ */
+ rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD);
+ rtc_read(RTC_CMD1);
+ free_irq(pdata->irq, pdev);
+ }
+ iounmap(pdata->ioaddr);
+ ds1511_base = NULL;
+ release_mem_region(pdata->baseaddr, pdata->size);
+ kfree(pdata);
+ return 0;
+}
+
+static struct platform_driver ds1511_rtc_driver = {
+ .probe = ds1511_rtc_probe,
+ .remove = __devexit_p(ds1511_rtc_remove),
+ .driver = {
+ .name = "ds1511",
+ .owner = THIS_MODULE,
+ },
+};
+
+ static int __init
+ds1511_rtc_init(void)
+{
+ return platform_driver_register(&ds1511_rtc_driver);
+}
+
+ static void __exit
+ds1511_rtc_exit(void)
+{
+	platform_driver_unregister(&ds1511_rtc_driver);
+}
+
+module_init(ds1511_rtc_init);
+module_exit(ds1511_rtc_exit);
+
+MODULE_AUTHOR("Andrew Sharp <andy.sharp@onstor.com>");
+MODULE_DESCRIPTION("Dallas DS1511 RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index c973ba94c422..8b3997007506 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -163,27 +163,17 @@ static int pcf8583_read_mem(struct i2c_client *client, struct rtc_mem *mem)
static int pcf8583_write_mem(struct i2c_client *client, struct rtc_mem *mem)
{
- unsigned char addr[1];
- struct i2c_msg msgs[2] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = 1,
- .buf = addr,
- }, {
- .addr = client->addr,
- .flags = I2C_M_NOSTART,
- .len = mem->nr,
- .buf = mem->data,
- }
- };
+ unsigned char buf[9];
+ int ret;
- if (mem->loc < 8)
+ if (mem->loc < 8 || mem->nr > 8)
return -EINVAL;
- addr[0] = mem->loc;
+ buf[0] = mem->loc;
+ memcpy(buf + 1, mem->data, mem->nr);
- return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
+ ret = i2c_master_send(client, buf, mem->nr + 1);
+ return ret == mem->nr + 1 ? 0 : -EIO;
}
static int pcf8583_rtc_read_time(struct device *dev, struct rtc_time *tm)
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
new file mode 100644
index 000000000000..a64626a82d0b
--- /dev/null
+++ b/drivers/rtc/rtc-r9701.c
@@ -0,0 +1,178 @@
+/*
+ * Driver for Epson RTC-9701JE
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on rtc-max6902.c
+ *
+ * Copyright (C) 2006 8D Technologies inc.
+ * Copyright (C) 2004 Compulab Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/spi/spi.h>
+#include <linux/bcd.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#define RSECCNT 0x00 /* Second Counter */
+#define RMINCNT 0x01 /* Minute Counter */
+#define RHRCNT 0x02 /* Hour Counter */
+#define RWKCNT 0x03 /* Week Counter */
+#define RDAYCNT 0x04 /* Day Counter */
+#define RMONCNT 0x05 /* Month Counter */
+#define RYRCNT 0x06 /* Year Counter */
+#define R100CNT 0x07 /* Y100 Counter */
+#define RMINAR 0x08 /* Minute Alarm */
+#define RHRAR 0x09 /* Hour Alarm */
+#define RWKAR 0x0a /* Week/Day Alarm */
+#define RTIMCNT 0x0c /* Interval Timer */
+#define REXT 0x0d /* Extension Register */
+#define RFLAG 0x0e /* RTC Flag Register */
+#define RCR 0x0f /* RTC Control Register */
+
+static int write_reg(struct device *dev, int address, unsigned char data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned char buf[2];
+
+ buf[0] = address & 0x7f;
+ buf[1] = data;
+
+ return spi_write(spi, buf, ARRAY_SIZE(buf));
+}
+
+static int read_regs(struct device *dev, unsigned char *regs, int no_regs)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ u8 txbuf[1], rxbuf[1];
+ int k, ret;
+
+ ret = 0;
+
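+	/* read one register per transfer; bit 7 of the address byte selects a read */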
+ for (k = 0; ret == 0 && k < no_regs; k++) {
+ txbuf[0] = 0x80 | regs[k];
+ ret = spi_write_then_read(spi, txbuf, 1, rxbuf, 1);
+ regs[k] = rxbuf[0];
+ }
+
+ return ret;
+}
+
+static int r9701_get_datetime(struct device *dev, struct rtc_time *dt)
+{
+ unsigned long time;
+ int ret;
+ unsigned char buf[] = { RSECCNT, RMINCNT, RHRCNT,
+ RDAYCNT, RMONCNT, RYRCNT };
+
+ ret = read_regs(dev, buf, ARRAY_SIZE(buf));
+ if (ret)
+ return ret;
+
+ memset(dt, 0, sizeof(*dt));
+
+ dt->tm_sec = BCD2BIN(buf[0]); /* RSECCNT */
+ dt->tm_min = BCD2BIN(buf[1]); /* RMINCNT */
+ dt->tm_hour = BCD2BIN(buf[2]); /* RHRCNT */
+
+ dt->tm_mday = BCD2BIN(buf[3]); /* RDAYCNT */
+ dt->tm_mon = BCD2BIN(buf[4]) - 1; /* RMONCNT */
+ dt->tm_year = BCD2BIN(buf[5]) + 100; /* RYRCNT */
+
+ /* the rtc device may contain illegal values on power up
+ * according to the data sheet. make sure they are valid.
+ */
+
+ return rtc_valid_tm(dt);
+}
+
+static int r9701_set_datetime(struct device *dev, struct rtc_time *dt)
+{
+ int ret, year;
+
+ year = dt->tm_year + 1900;
+ if (year >= 2100 || year < 2000)
+ return -EINVAL;
+
+ ret = write_reg(dev, RHRCNT, BIN2BCD(dt->tm_hour));
+ ret = ret ? ret : write_reg(dev, RMINCNT, BIN2BCD(dt->tm_min));
+ ret = ret ? ret : write_reg(dev, RSECCNT, BIN2BCD(dt->tm_sec));
+ ret = ret ? ret : write_reg(dev, RDAYCNT, BIN2BCD(dt->tm_mday));
+ ret = ret ? ret : write_reg(dev, RMONCNT, BIN2BCD(dt->tm_mon + 1));
+ ret = ret ? ret : write_reg(dev, RYRCNT, BIN2BCD(dt->tm_year - 100));
+ ret = ret ? ret : write_reg(dev, RWKCNT, 1 << dt->tm_wday);
+
+ return ret;
+}
+
+static const struct rtc_class_ops r9701_rtc_ops = {
+ .read_time = r9701_get_datetime,
+ .set_time = r9701_set_datetime,
+};
+
+static int __devinit r9701_probe(struct spi_device *spi)
+{
+ struct rtc_device *rtc;
+ unsigned char tmp;
+ int res;
+
+ rtc = rtc_device_register("r9701",
+ &spi->dev, &r9701_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ dev_set_drvdata(&spi->dev, rtc);
+
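+	/* sanity check: a working chip reports 0x20 in the century counter */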
+ tmp = R100CNT;
+ res = read_regs(&spi->dev, &tmp, 1);
+ if (res || tmp != 0x20) {
+ rtc_device_unregister(rtc);
+		return res ? res : -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __devexit r9701_remove(struct spi_device *spi)
+{
+ struct rtc_device *rtc = dev_get_drvdata(&spi->dev);
+
+ rtc_device_unregister(rtc);
+ return 0;
+}
+
+static struct spi_driver r9701_driver = {
+ .driver = {
+ .name = "rtc-r9701",
+ .owner = THIS_MODULE,
+ },
+ .probe = r9701_probe,
+ .remove = __devexit_p(r9701_remove),
+};
+
+static __init int r9701_init(void)
+{
+ return spi_register_driver(&r9701_driver);
+}
+module_init(r9701_init);
+
+static __exit void r9701_exit(void)
+{
+ spi_unregister_driver(&r9701_driver);
+}
+module_exit(r9701_exit);
+
+MODULE_DESCRIPTION("r9701 spi RTC driver");
+MODULE_AUTHOR("Magnus Damm <damm@opensource.se>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index e2041b4d0c85..86766f1f2496 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -20,6 +20,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/clk.h>
+#include <linux/log2.h>
#include <asm/hardware.h>
#include <asm/uaccess.h>
@@ -309,9 +310,7 @@ static int s3c_rtc_ioctl(struct device *dev,
break;
case RTC_IRQP_SET:
- /* check for power of 2 */
-
- if ((arg & (arg-1)) != 0 || arg < 1) {
+ if (!is_power_of_2(arg)) {
ret = -EINVAL;
goto exit;
}
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 2eb38520f0c8..ee253cc45de1 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -357,23 +357,15 @@ static int sa1100_rtc_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int sa1100_rtc_suspend(struct platform_device *pdev, pm_message_t state)
{
- if (pdev->dev.power.power_state.event != state.event) {
- if (state.event == PM_EVENT_SUSPEND &&
- device_may_wakeup(&pdev->dev))
- enable_irq_wake(IRQ_RTCAlrm);
-
- pdev->dev.power.power_state = state;
- }
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(IRQ_RTCAlrm);
return 0;
}
static int sa1100_rtc_resume(struct platform_device *pdev)
{
- if (pdev->dev.power.power_state.event != PM_EVENT_ON) {
- if (device_may_wakeup(&pdev->dev))
- disable_irq_wake(IRQ_RTCAlrm);
- pdev->dev.power.power_state = PMSG_ON;
- }
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(IRQ_RTCAlrm);
return 0;
}
#else
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index 2ae0e8304d3a..4d27ccc4fc06 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -17,6 +17,13 @@
/* device attributes */
+/*
+ * NOTE: RTC times displayed in sysfs use the RTC's timezone. That's
+ * ideally UTC. However, PCs that also boot to MS-Windows normally use
+ * the local time and change to match daylight savings time. That affects
+ * attributes including date, time, since_epoch, and wakealarm.
+ */
+
static ssize_t
rtc_sysfs_show_name(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -113,13 +120,13 @@ rtc_sysfs_show_wakealarm(struct device *dev, struct device_attribute *attr,
unsigned long alarm;
struct rtc_wkalrm alm;
- /* Don't show disabled alarms; but the RTC could leave the
- * alarm enabled after it's already triggered. Alarms are
- * conceptually one-shot, even though some common hardware
- * (PCs) doesn't actually work that way.
+ /* Don't show disabled alarms. For uniformity, RTC alarms are
+ * conceptually one-shot, even though some common RTCs (on PCs)
+ * don't actually work that way.
*
- * REVISIT maybe we should require RTC implementations to
- * disable the RTC alarm after it triggers, for uniformity.
+ * NOTE: RTC implementations where the alarm doesn't match an
+ * exact YYYY-MM-DD HH:MM[:SS] date *must* disable their RTC
+ * alarms after they trigger, to ensure one-shot semantics.
*/
retval = rtc_read_alarm(to_rtc_device(dev), &alm);
if (retval == 0 && alm.enabled) {
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d640427c74c8..d984e0fae630 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1057,12 +1057,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (device->features & DASD_FEATURE_ERPLOG) {
dasd_log_sense(cqr, irb);
}
- /* If we have no sense data, or we just don't want complex ERP
- * for this request, but if we have retries left, then just
- * reset this request and retry it in the fastpath
+ /*
+ * If we don't want complex ERP for this request, then just
+ * reset this and retry it in the fastpath
*/
- if (!(cqr->irb.esw.esw0.erw.cons &&
- test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) &&
+ if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
cqr->retries > 0) {
DEV_MESSAGE(KERN_DEBUG, device,
"default ERP in fastpath (%i retries left)",
@@ -1707,7 +1706,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
req = (struct request *) cqr->callback_data;
dasd_profile_end(cqr->block, cqr, req);
- status = cqr->memdev->discipline->free_cp(cqr, req);
+ status = cqr->block->base->discipline->free_cp(cqr, req);
if (status <= 0)
error = status ? status : -EIO;
dasd_end_request(req, error);
@@ -1742,12 +1741,8 @@ restart:
/* Process requests that may be recovered */
if (cqr->status == DASD_CQR_NEED_ERP) {
- if (cqr->irb.esw.esw0.erw.cons &&
- test_bit(DASD_CQR_FLAGS_USE_ERP,
- &cqr->flags)) {
- erp_fn = base->discipline->erp_action(cqr);
- erp_fn(cqr);
- }
+ erp_fn = base->discipline->erp_action(cqr);
+ erp_fn(cqr);
goto restart;
}
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index c361ab69ec00..f69714a0e9e7 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -164,7 +164,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
/* reset status to submit the request again... */
erp->status = DASD_CQR_FILLED;
- erp->retries = 1;
+ erp->retries = 10;
} else {
DEV_MESSAGE(KERN_ERR, device,
"No alternate channel path left (lpum=%x / "
@@ -301,8 +301,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
erp->function = dasd_3990_erp_action_4;
} else {
-
- if (sense[25] == 0x1D) { /* state change pending */
+ if (sense && (sense[25] == 0x1D)) { /* state change pending */
DEV_MESSAGE(KERN_INFO, device,
"waiting for state change pending "
@@ -311,7 +310,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
dasd_3990_erp_block_queue(erp, 30*HZ);
- } else if (sense[25] == 0x1E) { /* busy */
+ } else if (sense && (sense[25] == 0x1E)) { /* busy */
DEV_MESSAGE(KERN_INFO, device,
"busy - redriving request later, "
"%d retries left",
@@ -2120,6 +2119,34 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
*/
/*
+ * DASD_3990_ERP_CONTROL_CHECK
+ *
+ * DESCRIPTION
+ *   Does a generic inspection to determine whether a control check occurred and sets up
+ * the related error recovery procedure
+ *
+ * PARAMETER
+ * erp pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ * erp_filled pointer to the erp
+ */
+
+static struct dasd_ccw_req *
+dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
+{
+ struct dasd_device *device = erp->startdev;
+
+ if (erp->refers->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK
+ | SCHN_STAT_CHN_CTRL_CHK)) {
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "channel or interface control check");
+ erp = dasd_3990_erp_action_4(erp, NULL);
+ }
+ return erp;
+}
+
+/*
* DASD_3990_ERP_INSPECT
*
* DESCRIPTION
@@ -2145,8 +2172,11 @@ dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
if (erp_new)
return erp_new;
+	/* check if no concurrent sense data is available */
+ if (!erp->refers->irb.esw.esw0.erw.cons)
+ erp_new = dasd_3990_erp_control_check(erp);
/* distinguish between 24 and 32 byte sense data */
- if (sense[27] & DASD_SENSE_BIT_0) {
+ else if (sense[27] & DASD_SENSE_BIT_0) {
/* inspect the 24 byte sense data */
erp_new = dasd_3990_erp_inspect_24(erp, sense);
@@ -2285,6 +2315,17 @@ dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)
// return 0; /* CCW doesn't match */
}
+ if (cqr1->irb.esw.esw0.erw.cons != cqr2->irb.esw.esw0.erw.cons)
+ return 0;
+
+ if ((cqr1->irb.esw.esw0.erw.cons == 0) &&
+ (cqr2->irb.esw.esw0.erw.cons == 0)) {
+ if ((cqr1->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK |
+ SCHN_STAT_CHN_CTRL_CHK)) ==
+ (cqr2->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK |
+ SCHN_STAT_CHN_CTRL_CHK)))
+ return 1; /* match with ifcc*/
+ }
/* check sense data; byte 0-2,25,27 */
if (!((memcmp (cqr1->irb.ecw, cqr2->irb.ecw, 3) == 0) &&
(cqr1->irb.ecw[27] == cqr2->irb.ecw[27]) &&
@@ -2560,17 +2601,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
return cqr;
}
- /* check if sense data are available */
- if (!cqr->irb.ecw) {
- DEV_MESSAGE(KERN_DEBUG, device,
- "ERP called witout sense data avail ..."
- "request %p - NO ERP possible", cqr);
-
- cqr->status = DASD_CQR_FAILED;
-
- return cqr;
-
- }
/* check if error happened before */
erp = dasd_3990_erp_in_erp(cqr);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 7779bfce1c31..3faf0538b328 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -415,6 +415,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->gd->queue = dev_info->dcssblk_queue;
dev_info->gd->private_data = dev_info;
dev_info->gd->driverfs_dev = &dev_info->dev;
+ blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
+ blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
/*
* load the segment
*/
@@ -472,9 +474,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
if (rc)
goto unregister_dev;
- blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
- blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
-
add_disk(dev_info->gd);
switch (dev_info->segment_type) {
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index e3b3d390b4a3..2e616e33891d 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -332,7 +332,7 @@ sclp_tty_write_string(const unsigned char *str, int count)
if (sclp_ttybuf == NULL) {
while (list_empty(&sclp_tty_pages)) {
spin_unlock_irqrestore(&sclp_tty_lock, flags);
- if (in_interrupt())
+ if (in_atomic())
sclp_sync_wait();
else
wait_event(sclp_tty_waitq,
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 40cd21bc5cc4..68071622d4bb 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -400,7 +400,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
while (list_empty(&sclp_vt220_empty)) {
spin_unlock_irqrestore(&sclp_vt220_lock,
flags);
- if (in_interrupt())
+ if (in_atomic())
sclp_sync_wait();
else
wait_event(sclp_vt220_waitq,
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 3964056a9a47..03914fa81174 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -391,12 +391,24 @@ ccwgroup_remove (struct device *dev)
return 0;
}
+static void ccwgroup_shutdown(struct device *dev)
+{
+ struct ccwgroup_device *gdev;
+ struct ccwgroup_driver *gdrv;
+
+ gdev = to_ccwgroupdev(dev);
+ gdrv = to_ccwgroupdrv(dev->driver);
+ if (gdrv && gdrv->shutdown)
+ gdrv->shutdown(gdev);
+}
+
static struct bus_type ccwgroup_bus_type = {
.name = "ccwgroup",
.match = ccwgroup_bus_match,
.uevent = ccwgroup_uevent,
.probe = ccwgroup_probe,
.remove = ccwgroup_remove,
+ .shutdown = ccwgroup_shutdown,
};
/**
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index e7ba16a74ef7..007aaeb4f532 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -26,6 +26,25 @@
static void *sei_page;
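+/* Translate a CHSC response code into an errno value */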
+static int chsc_error_from_response(int response)
+{
+ switch (response) {
+ case 0x0001:
+ return 0;
+ case 0x0002:
+ case 0x0003:
+ case 0x0006:
+ case 0x0007:
+ case 0x0008:
+ case 0x000a:
+ return -EINVAL;
+ case 0x0004:
+ return -EOPNOTSUPP;
+ default:
+ return -EIO;
+ }
+}
+
struct chsc_ssd_area {
struct chsc_header request;
u16 :10;
@@ -75,11 +94,11 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
ret = (ccode == 3) ? -ENODEV : -EBUSY;
goto out_free;
}
- if (ssd_area->response.code != 0x0001) {
+ ret = chsc_error_from_response(ssd_area->response.code);
+ if (ret != 0) {
CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
schid.ssid, schid.sch_no,
ssd_area->response.code);
- ret = -EIO;
goto out_free;
}
if (!ssd_area->sch_valid) {
@@ -717,36 +736,15 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
return (ccode == 3) ? -ENODEV : -EBUSY;
switch (secm_area->response.code) {
- case 0x0001: /* Success. */
- ret = 0;
- break;
- case 0x0003: /* Invalid block. */
- case 0x0007: /* Invalid format. */
- case 0x0008: /* Other invalid block. */
- CIO_CRW_EVENT(2, "Error in chsc request block!\n");
- ret = -EINVAL;
- break;
- case 0x0004: /* Command not provided in model. */
- CIO_CRW_EVENT(2, "Model does not provide secm\n");
- ret = -EOPNOTSUPP;
- break;
- case 0x0102: /* cub adresses incorrect */
- CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
- ret = -EINVAL;
- break;
- case 0x0103: /* key error */
- CIO_CRW_EVENT(2, "Access key error in secm\n");
+ case 0x0102:
+ case 0x0103:
ret = -EINVAL;
- break;
- case 0x0105: /* error while starting */
- CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
- ret = -EIO;
- break;
default:
- CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
- secm_area->response.code);
- ret = -EIO;
+ ret = chsc_error_from_response(secm_area->response.code);
}
+ if (ret != 0)
+ CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
+ secm_area->response.code);
return ret;
}
@@ -827,27 +825,14 @@ int chsc_determine_channel_path_description(struct chp_id chpid,
goto out;
}
- switch (scpd_area->response.code) {
- case 0x0001: /* Success. */
+ ret = chsc_error_from_response(scpd_area->response.code);
+ if (ret == 0)
+ /* Success. */
memcpy(desc, &scpd_area->desc,
sizeof(struct channel_path_desc));
- ret = 0;
- break;
- case 0x0003: /* Invalid block. */
- case 0x0007: /* Invalid format. */
- case 0x0008: /* Other invalid block. */
- CIO_CRW_EVENT(2, "Error in chsc request block!\n");
- ret = -EINVAL;
- break;
- case 0x0004: /* Command not provided in model. */
- CIO_CRW_EVENT(2, "Model does not provide scpd\n");
- ret = -EOPNOTSUPP;
- break;
- default:
- CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
+ else
+ CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
scpd_area->response.code);
- ret = -EIO;
- }
out:
free_page((unsigned long)scpd_area);
return ret;
@@ -923,8 +908,9 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
goto out;
}
- switch (scmc_area->response.code) {
- case 0x0001: /* Success. */
+ ret = chsc_error_from_response(scmc_area->response.code);
+ if (ret == 0) {
+ /* Success. */
if (!scmc_area->not_valid) {
chp->cmg = scmc_area->cmg;
chp->shared = scmc_area->shared;
@@ -935,22 +921,9 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
chp->cmg = -1;
chp->shared = -1;
}
- ret = 0;
- break;
- case 0x0003: /* Invalid block. */
- case 0x0007: /* Invalid format. */
- case 0x0008: /* Invalid bit combination. */
- CIO_CRW_EVENT(2, "Error in chsc request block!\n");
- ret = -EINVAL;
- break;
- case 0x0004: /* Command not provided. */
- CIO_CRW_EVENT(2, "Model does not provide scmc\n");
- ret = -EOPNOTSUPP;
- break;
- default:
- CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
+ } else {
+ CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
scmc_area->response.code);
- ret = -EIO;
}
out:
free_page((unsigned long)scmc_area);
@@ -1002,21 +975,17 @@ chsc_enable_facility(int operation_code)
ret = (ret == 3) ? -ENODEV : -EBUSY;
goto out;
}
+
switch (sda_area->response.code) {
- case 0x0001: /* everything ok */
- ret = 0;
- break;
- case 0x0003: /* invalid request block */
- case 0x0007:
- ret = -EINVAL;
- break;
- case 0x0004: /* command not provided */
- case 0x0101: /* facility not provided */
+ case 0x0101:
ret = -EOPNOTSUPP;
break;
- default: /* something went wrong */
- ret = -EIO;
+ default:
+ ret = chsc_error_from_response(sda_area->response.code);
}
+ if (ret != 0)
+ CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
+ operation_code, sda_area->response.code);
out:
free_page((unsigned long)sda_area);
return ret;
@@ -1041,33 +1010,27 @@ chsc_determine_css_characteristics(void)
} __attribute__ ((packed)) *scsc_area;
scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!scsc_area) {
- CIO_MSG_EVENT(0, "Was not able to determine available "
- "CHSCs due to no memory.\n");
+ if (!scsc_area)
return -ENOMEM;
- }
scsc_area->request.length = 0x0010;
scsc_area->request.code = 0x0010;
result = chsc(scsc_area);
if (result) {
- CIO_MSG_EVENT(0, "Was not able to determine available CHSCs, "
- "cc=%i.\n", result);
- result = -EIO;
+ result = (result == 3) ? -ENODEV : -EBUSY;
goto exit;
}
- if (scsc_area->response.code != 1) {
- CIO_MSG_EVENT(0, "Was not able to determine "
- "available CHSCs.\n");
- result = -EIO;
- goto exit;
- }
- memcpy(&css_general_characteristics, scsc_area->general_char,
- sizeof(css_general_characteristics));
- memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
- sizeof(css_chsc_characteristics));
+ result = chsc_error_from_response(scsc_area->response.code);
+ if (result == 0) {
+ memcpy(&css_general_characteristics, scsc_area->general_char,
+ sizeof(css_general_characteristics));
+ memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
+ sizeof(css_chsc_characteristics));
+ } else
+ CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
+ scsc_area->response.code);
exit:
free_page ((unsigned long) scsc_area);
return result;
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 918b8b89cf9a..dc4d87f77f6c 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -26,17 +26,18 @@
#include "ioasm.h"
#include "io_sch.h"
-/*
- * Input :
- * devno - device number
- * ps - pointer to sense ID data area
- * Output : none
+/**
+ * vm_vdev_to_cu_type - Convert vm virtual device into control unit type
+ * for certain devices.
+ * @class: virtual device class
+ * @type: virtual device type
+ *
+ * Returns control unit type if a match was made or %0xffff otherwise.
*/
-static void
-VM_virtual_device_info (__u16 devno, struct senseid *ps)
+static int vm_vdev_to_cu_type(int class, int type)
{
static struct {
- int vrdcvcla, vrdcvtyp, cu_type;
+ int class, type, cu_type;
} vm_devices[] = {
{ 0x08, 0x01, 0x3480 },
{ 0x08, 0x02, 0x3430 },
@@ -68,8 +69,26 @@ VM_virtual_device_info (__u16 devno, struct senseid *ps)
{ 0x40, 0xc0, 0x5080 },
{ 0x80, 0x00, 0x3215 },
};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vm_devices); i++)
+ if (class == vm_devices[i].class && type == vm_devices[i].type)
+ return vm_devices[i].cu_type;
+
+ return 0xffff;
+}
+
+/**
+ * diag_get_dev_info - retrieve device information via DIAG X'210'
+ * @devno: device number
+ * @ps: pointer to sense ID data area
+ *
+ * Returns zero on success, non-zero otherwise.
+ */
+static int diag_get_dev_info(u16 devno, struct senseid *ps)
+{
struct diag210 diag_data;
- int ccode, i;
+ int ccode;
CIO_TRACE_EVENT (4, "VMvdinf");
@@ -79,21 +98,21 @@ VM_virtual_device_info (__u16 devno, struct senseid *ps)
};
ccode = diag210 (&diag_data);
- ps->reserved = 0xff;
+ if ((ccode == 0) || (ccode == 2)) {
+ ps->reserved = 0xff;
- /* Special case for bloody osa devices. */
- if (diag_data.vrdcvcla == 0x02 &&
- diag_data.vrdcvtyp == 0x20) {
- ps->cu_type = 0x3088;
- ps->cu_model = 0x60;
- return;
- }
- for (i = 0; i < ARRAY_SIZE(vm_devices); i++)
- if (diag_data.vrdcvcla == vm_devices[i].vrdcvcla &&
- diag_data.vrdcvtyp == vm_devices[i].vrdcvtyp) {
- ps->cu_type = vm_devices[i].cu_type;
- return;
+ /* Special case for osa devices. */
+ if (diag_data.vrdcvcla == 0x02 && diag_data.vrdcvtyp == 0x20) {
+ ps->cu_type = 0x3088;
+ ps->cu_model = 0x60;
+ return 0;
}
+ ps->cu_type = vm_vdev_to_cu_type(diag_data.vrdcvcla,
+ diag_data.vrdcvtyp);
+ if (ps->cu_type != 0xffff)
+ return 0;
+ }
+
CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):"
"vdev class : %02X, vdev type : %04X \n ... "
"rdev class : %02X, rdev type : %04X, "
@@ -102,6 +121,8 @@ VM_virtual_device_info (__u16 devno, struct senseid *ps)
diag_data.vrdcvcla, diag_data.vrdcvtyp,
diag_data.vrdcrccl, diag_data.vrdccrty,
diag_data.vrdccrmd);
+
+ return -ENODEV;
}
/*
@@ -130,6 +151,7 @@ __ccw_device_sense_id_start(struct ccw_device *cdev)
/* Try on every path. */
ret = -ENODEV;
while (cdev->private->imask != 0) {
+ cdev->private->senseid.cu_type = 0xFFFF;
if ((sch->opm & cdev->private->imask) != 0 &&
cdev->private->iretry > 0) {
cdev->private->iretry--;
@@ -153,7 +175,6 @@ ccw_device_sense_id_start(struct ccw_device *cdev)
int ret;
memset (&cdev->private->senseid, 0, sizeof (struct senseid));
- cdev->private->senseid.cu_type = 0xFFFF;
cdev->private->imask = 0x80;
cdev->private->iretry = 5;
ret = __ccw_device_sense_id_start(cdev);
@@ -173,13 +194,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
- /* Did we get a proper answer ? */
- if (cdev->private->senseid.cu_type != 0xFFFF &&
- cdev->private->senseid.reserved == 0xFF) {
- if (irb->scsw.count < sizeof (struct senseid) - 8)
- cdev->private->flags.esid = 1;
- return 0; /* Success */
- }
+
/* Check the error cases. */
if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
/* Retry Sense ID if requested. */
@@ -231,6 +246,15 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
sch->schid.ssid, sch->schid.sch_no);
return -EACCES;
}
+
+ /* Did we get a proper answer ? */
+ if (irb->scsw.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
+ cdev->private->senseid.reserved == 0xFF) {
+ if (irb->scsw.count < sizeof(struct senseid) - 8)
+ cdev->private->flags.esid = 1;
+ return 0; /* Success */
+ }
+
/* Hmm, whatever happened, try again. */
CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
"subchannel 0.%x.%04x returns status %02X%02X\n",
@@ -283,20 +307,17 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
break;
/* fall through. */
default: /* Sense ID failed. Try asking VM. */
- if (MACHINE_IS_VM) {
- VM_virtual_device_info (cdev->private->dev_id.devno,
+ if (MACHINE_IS_VM)
+ ret = diag_get_dev_info(cdev->private->dev_id.devno,
&cdev->private->senseid);
- if (cdev->private->senseid.cu_type != 0xFFFF) {
- /* Got the device information from VM. */
- ccw_device_sense_id_done(cdev, 0);
- return;
- }
- }
- /*
- * If we can't couldn't identify the device type we
- * consider the device "not operational".
- */
- ccw_device_sense_id_done(cdev, -ENODEV);
+ else
+ /*
+		 * If we couldn't identify the device type we
+ * consider the device "not operational".
+ */
+ ret = -ENODEV;
+
+ ccw_device_sense_id_done(cdev, ret);
break;
}
}
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 19343f9675c3..291ff6235fe2 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -422,7 +422,7 @@ void s390_adjust_jiffies(void)
/*
* calibrate the delay loop
*/
-void __init calibrate_delay(void)
+void __cpuinit calibrate_delay(void)
{
s390_adjust_jiffies();
/* Print the good old Bogomips line .. */
diff --git a/drivers/scsi/NCR53C9x.h b/drivers/scsi/NCR53C9x.h
index d85cb73a9f69..00a0ba040dba 100644
--- a/drivers/scsi/NCR53C9x.h
+++ b/drivers/scsi/NCR53C9x.h
@@ -1,6 +1,6 @@
/* NCR53C9x.c: Defines and structures for the NCR53C9x generic driver.
*
- * Originaly esp.h: Defines and structures for the Sparc ESP
+ * Originally esp.h: Defines and structures for the Sparc ESP
* (Enhanced SCSI Processor) driver under Linux.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 23f27c9c9895..5ac3a3e8dfaf 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -46,8 +46,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
struct Scsi_Host *instance = cmd->device->host;
/* don't allow DMA if the physical address is bad */
- if (addr & A2091_XFER_MASK ||
- (!dir_in && mm_end_of_chunk (addr, cmd->SCp.this_residual)))
+ if (addr & A2091_XFER_MASK)
{
HDATA(instance)->dma_bounce_len = (cmd->SCp.this_residual + 511)
& ~0x1ff;
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index d7255c8bf281..3aeec963940b 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -54,8 +54,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
* end of a physical memory chunk, then allocate a bounce
* buffer
*/
- if (addr & A3000_XFER_MASK ||
- (!dir_in && mm_end_of_chunk (addr, cmd->SCp.this_residual)))
+ if (addr & A3000_XFER_MASK)
{
HDATA(a3000_host)->dma_bounce_len = (cmd->SCp.this_residual + 511)
& ~0x1ff;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 0e8267c1e915..fb0886140dd7 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -449,9 +449,6 @@ static int aac_slave_configure(struct scsi_device *sdev)
else if (depth < 2)
depth = 2;
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
- if (!(((struct aac_dev *)host->hostdata)->adapter_info.options &
- AAC_OPT_NEW_COMM))
- blk_queue_max_segment_size(sdev->request_queue, 65536);
} else
scsi_adjust_queue_depth(sdev, 0, 1);
@@ -1133,6 +1130,12 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
if (error < 0)
goto out_deinit;
+ if (!(aac->adapter_info.options & AAC_OPT_NEW_COMM)) {
+ error = pci_set_dma_max_seg_size(pdev, 65536);
+ if (error)
+ goto out_deinit;
+ }
+
/*
* Lets override negotiations and drop the maximum SG limit to 34
*/
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 190568ebea3c..5a1471c370fa 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -21,7 +21,7 @@
* Modified by Chris Faulhaber <jedgar@fxp.org>
* Added module command-line options
* 19-Jul-99
- * Modified by Adam Fritzler <mid@auk.cx>
+ * Modified by Adam Fritzler
* Added proper detection of the AHA-1640 (MCA version of AHA-1540)
*/
diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile
index e4f70c563bc2..4c549540a35d 100644
--- a/drivers/scsi/aic7xxx/Makefile
+++ b/drivers/scsi/aic7xxx/Makefile
@@ -44,13 +44,8 @@ clean-files += aic79xx_seq.h aic79xx_reg.h aic79xx_reg_print.c
# Dependencies for generated files need to be listed explicitly
-$(obj)/aic7xxx_core.o: $(obj)/aic7xxx_seq.h
-$(obj)/aic7xxx_core.o: $(obj)/aic7xxx_reg.h
-$(obj)/aic79xx_core.o: $(obj)/aic79xx_seq.h
-$(obj)/aic79xx_core.o: $(obj)/aic79xx_reg.h
-
-$(addprefix $(obj)/,$(aic7xxx-y)): $(obj)/aic7xxx_seq.h
-$(addprefix $(obj)/,$(aic79xx-y)): $(obj)/aic79xx_seq.h
+$(addprefix $(src)/,$(aic7xxx-y:.o=.c)): $(obj)/aic7xxx_seq.h $(obj)/aic7xxx_reg.h
+$(addprefix $(src)/,$(aic79xx-y:.o=.c)): $(obj)/aic79xx_seq.h $(obj)/aic79xx_reg.h
aic7xxx-gen-$(CONFIG_AIC7XXX_BUILD_FIRMWARE) := $(obj)/aic7xxx_reg.h
aic7xxx-gen-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) += $(obj)/aic7xxx_reg_print.c
diff --git a/drivers/scsi/aic7xxx/aic79xx_inline.h b/drivers/scsi/aic7xxx/aic79xx_inline.h
index 2ceb67f4af2a..45e55575a0fa 100644
--- a/drivers/scsi/aic7xxx/aic79xx_inline.h
+++ b/drivers/scsi/aic7xxx/aic79xx_inline.h
@@ -417,7 +417,7 @@ ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
- (uint8_t *)ahd->qoutfifo);
}
-/*********************** Miscelaneous Support Functions ***********************/
+/*********************** Miscellaneous Support Functions ***********************/
static __inline struct ahd_initiator_tinfo *
ahd_fetch_transinfo(struct ahd_softc *ahd,
char channel, u_int our_id,
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 014654792901..72fccd9f40df 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -325,7 +325,7 @@ MODULE_PARM_DESC(aic79xx,
" verbose Enable verbose/diagnostic logging\n"
" allow_memio Allow device registers to be memory mapped\n"
" debug Bitmask of debug values to enable\n"
-" no_reset Supress initial bus resets\n"
+" no_reset Suppress initial bus resets\n"
" extended Enable extended geometry on all controllers\n"
" periodic_otag Send an ordered tagged transaction\n"
" periodically to prevent tag starvation.\n"
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index df853676e66a..c9f79fdf9131 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -979,7 +979,7 @@ ahd_aic790X_setup(struct ahd_softc *ahd)
| AHD_FAINT_LED_BUG;
/*
- * IO Cell paramter setup.
+ * IO Cell parameter setup.
*/
AHD_SET_PRECOMP(ahd, AHD_PRECOMP_CUTBACK_29);
@@ -1006,7 +1006,7 @@ ahd_aic790X_setup(struct ahd_softc *ahd)
ahd->bugs |= AHD_INTCOLLISION_BUG|AHD_ABORT_LQI_BUG;
/*
- * IO Cell paramter setup.
+ * IO Cell parameter setup.
*/
AHD_SET_PRECOMP(ahd, AHD_PRECOMP_CUTBACK_29);
AHD_SET_SLEWRATE(ahd, AHD_SLEWRATE_DEF_REVB);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_inline.h b/drivers/scsi/aic7xxx/aic7xxx_inline.h
index 8e1954cdd84f..cba2f23bbe79 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_inline.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_inline.h
@@ -229,7 +229,7 @@ ahc_name(struct ahc_softc *ahc)
return (ahc->name);
}
-/*********************** Miscelaneous Support Functions ***********************/
+/*********************** Miscellaneous Support Functions ***********************/
static __inline void ahc_update_residual(struct ahc_softc *ahc,
struct scb *scb);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 99a3b33a3233..282aff6f852e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -347,7 +347,7 @@ MODULE_PARM_DESC(aic7xxx,
" debug Bitmask of debug values to enable\n"
" no_probe Toggle EISA/VLB controller probing\n"
" probe_eisa_vl Toggle EISA/VLB controller probing\n"
-" no_reset Supress initial bus resets\n"
+" no_reset Suppress initial bus resets\n"
" extended Enable extended geometry on all controllers\n"
" periodic_otag Send an ordered tagged transaction\n"
" periodically to prevent tag starvation.\n"
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index 3bfd9296bbfa..93984c9dfe14 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -6472,7 +6472,7 @@ do_aic7xxx_isr(int irq, void *dev_id)
unsigned long cpu_flags;
struct aic7xxx_host *p;
- p = (struct aic7xxx_host *)dev_id;
+ p = dev_id;
if(!p)
return IRQ_NONE;
spin_lock_irqsave(p->host->host_lock, cpu_flags);
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 37741e9b5c3b..91f85226d08f 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -54,8 +54,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
static int scsi_alloc_out_of_range = 0;
/* use bounce buffer if the physical address is bad */
- if (addr & HDATA(cmd->device->host)->dma_xfer_mask ||
- (!dir_in && mm_end_of_chunk (addr, cmd->SCp.this_residual)))
+ if (addr & HDATA(cmd->device->host)->dma_xfer_mask)
{
HDATA(cmd->device->host)->dma_bounce_len = (cmd->SCp.this_residual + 511)
& ~0x1ff;
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index d63f11e95abf..bd62131b97a1 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -539,9 +539,9 @@ out:
srp_iu_put(iue);
}
-static irqreturn_t ibmvstgt_interrupt(int irq, void *data)
+static irqreturn_t ibmvstgt_interrupt(int dummy, void *data)
{
- struct srp_target *target = (struct srp_target *) data;
+ struct srp_target *target = data;
struct vio_port *vport = target_to_port(target);
vio_disable_interrupts(vport->dma_dev);
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 6c4f0f081785..68e5c632c5d5 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -287,7 +287,7 @@ static int idescsi_end_request(ide_drive_t *, int, int);
static ide_startstop_t
idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
- if (HWIF(drive)->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
+ if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
/* force an abort */
HWIF(drive)->OUTB(WIN_IDLEIMMEDIATE,IDE_COMMAND_REG);
@@ -423,7 +423,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
}
/* Clear the interrupt */
- stat = drive->hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if ((stat & DRQ_STAT) == 0) {
/* No more interrupts */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 73270ff892d9..2074701f7e76 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -7053,7 +7053,7 @@ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
* where it can accept new commands.
* Return value:
- * 0 on sucess / -EIO on failure
+ * 0 on success / -EIO on failure
**/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 7505cca8e68e..bb152fb9fec7 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1309,7 +1309,7 @@ ips_intr_copperhead(ips_ha_t * ha)
cstatus.value = (*ha->func.statupd) (ha);
if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) {
- /* Spurious Interupt ? */
+ /* Spurious Interrupt ? */
continue;
}
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index f26b9538affe..83567b9755b4 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -325,7 +325,7 @@ struct lpfc_vport {
#define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */
#define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */
-#define WORKER_FABRIC_BLOCK_TMO 0x400 /* hba: fabric block timout */
+#define WORKER_FABRIC_BLOCK_TMO 0x400 /* hba: fabric block timeout */
#define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */
#define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 29b4cf9e059b..6cfeba7454d4 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1894,7 +1894,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
uint16_t iotag;
int bars = pci_select_bars(pdev, IORESOURCE_MEM);
- if (pci_enable_device_bars(pdev, bars))
+ if (pci_enable_device_mem(pdev))
goto out;
if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
goto out_disable_device;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index dfc63f6ccd7b..7a9be4c5b7cb 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -880,7 +880,7 @@ lpfc_mbox_get(struct lpfc_hba * phba)
void
lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
{
- /* This function expects to be called from interupt context */
+ /* This function expects to be called from interrupt context */
spin_lock(&phba->hbalock);
list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
spin_unlock(&phba->hbalock);
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 6db77c00e3ee..9f041929aca5 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -3464,12 +3464,12 @@ megaraid_mbox_setup_device_map(adapter_t *adapter)
/*
* START: Interface for the common management module
*
- * This is the module, which interfaces with the common mangement module to
+ * This is the module, which interfaces with the common management module to
* provide support for ioctl and sysfs
*/
/**
- * megaraid_cmm_register - register with the mangement module
+ * megaraid_cmm_register - register with the management module
* @adapter : HBA soft state
*
* Register with the management module, which allows applications to issue
@@ -3557,7 +3557,7 @@ megaraid_cmm_register(adapter_t *adapter)
/**
- * megaraid_cmm_unregister - un-register with the mangement module
+ * megaraid_cmm_unregister - un-register with the management module
* @adapter : HBA soft state
*
* Un-register with the management module.
@@ -3579,7 +3579,7 @@ megaraid_cmm_unregister(adapter_t *adapter)
* @kioc : CMM interface packet
* @action : command action
*
- * This routine is invoked whenever the Common Mangement Module (CMM) has a
+ * This routine is invoked whenever the Common Management Module (CMM) has a
* command for us. The 'action' parameter specifies if this is a new command
* or otherwise.
*/
@@ -3944,7 +3944,7 @@ megaraid_sysfs_get_ldmap_timeout(unsigned long data)
*
* This routine will be called whenever user reads the logical drive
* attributes, go get the current logical drive mapping table from the
- * firmware. We use the managment API's to issue commands to the controller.
+ * firmware. We use the management API's to issue commands to the controller.
*
* NOTE: The commands issuance functionality is not generalized and
* implemented in context of "get ld map" command only. If required, the
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 672c759ac24d..77a62a1b12c3 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -31,7 +31,6 @@
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 4b82b2021981..d8b99351b053 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -130,7 +130,7 @@ static int fdomain_config(struct pcmcia_device *link)
cisparse_t parse;
int i, last_ret, last_fn;
u_char tuple_data[64];
- char str[16];
+ char str[22];
struct Scsi_Host *host;
DEBUG(0, "fdomain_config(0x%p)\n", link);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index eb0784c9ff83..6226d88479f5 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1094,7 +1094,7 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *ha)
}
/**
- * qla2x00_mgmt_svr_login() - Login to fabric Managment Service.
+ * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
* @ha: HA context
*
* Returns 0 on success.
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index accaf690eaf0..d6be0762eb91 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -121,7 +121,7 @@
#define MAX_REQS_SERVICED_PER_INTR 16
#define ISCSI_IPADDR_SIZE 4 /* IP address size */
-#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alais name size */
+#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */
#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */
#define LSDW(x) ((u32)((u64)(x)))
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index cbe0a17ced5f..49925f92555e 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1098,7 +1098,7 @@ static int qla4xxx_start_firmware(struct scsi_qla_host *ha)
}
config_chip = 1;
- /* Reset clears the semaphore, so aquire again */
+ /* Reset clears the semaphore, so acquire again */
if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
return QLA_ERROR;
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b12fb310e399..f243fc30c908 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1569,6 +1569,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
request_fn_proc *request_fn)
{
struct request_queue *q;
+ struct device *dev = shost->shost_gendev.parent;
q = blk_init_queue(request_fn, NULL);
if (!q)
@@ -1583,6 +1584,9 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
blk_queue_max_sectors(q, shost->max_sectors);
blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
blk_queue_segment_boundary(q, shost->dma_boundary);
+ dma_set_seg_boundary(dev, shost->dma_boundary);
+
+ blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
if (!shost->use_clustering)
clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
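The __scsi_alloc_queue() change above propagates the host's dma_boundary into the DMA layer and caps the queue's segment size by what the host's parent device can handle. A minimal sketch of the same three calls in isolation, assuming the 2.6.24-era block/DMA API used in this patch; the helper name is invented for illustration:

	#include <linux/blkdev.h>
	#include <linux/dma-mapping.h>

	/* Apply a host's DMA constraints both to the block queue and to
	 * the DMA layer, as the hunk above does for SCSI hosts. */
	static void apply_dma_limits(struct request_queue *q,
				     struct device *dev,
				     unsigned long dma_boundary)
	{
		blk_queue_segment_boundary(q, dma_boundary);
		dma_set_seg_boundary(dev, dma_boundary);
		blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
	}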
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 91630baea532..3677fbb30b72 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -320,7 +320,7 @@ int scsi_tgt_queue_command(struct scsi_cmnd *cmd, u64 itn_id,
EXPORT_SYMBOL_GPL(scsi_tgt_queue_command);
/*
- * This is run from a interrpt handler normally and the unmap
+ * This is run from a interrupt handler normally and the unmap
* needs process context so we must queue
*/
static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index f2149d0bb999..43a964d635b4 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -6,7 +6,7 @@
*
* The SAS transport class contains common code to deal with SAS HBAs,
* an aproximated representation of SAS topologies in the driver model,
- * and various sysfs attributes to expose these topologies and managment
+ * and various sysfs attributes to expose these topologies and management
* interfaces to userspace.
*
* In addition to the basic SCSI core objects this transport class
diff --git a/drivers/serial/21285.c b/drivers/serial/21285.c
index 6a48dfa1efe8..0276471cb25e 100644
--- a/drivers/serial/21285.c
+++ b/drivers/serial/21285.c
@@ -237,6 +237,12 @@ serial21285_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
quot = uart_get_divisor(port, baud);
+ if (port->info && port->info->tty) {
+ struct tty_struct *tty = port->info->tty;
+ unsigned int b = port->uartclk / (16 * quot);
+ tty_encode_baud_rate(tty, b, b);
+ }
+
switch (termios->c_cflag & CSIZE) {
case CS5:
h_lcr = 0x00;
@@ -277,8 +283,6 @@ serial21285_set_termios(struct uart_port *port, struct ktermios *termios,
if (termios->c_iflag & INPCK)
port->read_status_mask |= RXSTAT_FRAME | RXSTAT_PARITY;
- tty_encode_baud_rate(tty, baud, baud);
-
/*
* Which character status flags should we ignore?
*/
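In the set_termios change above, the rate reported back to the tty layer is recomputed from the divisor that was actually programmed, so any rounding done by uart_get_divisor() is reflected to userspace. The arithmetic in isolation, as a sketch with an invented helper name:

	/* Baud rate a 16x-oversampling UART actually runs at for a given
	 * clock and divisor; this is the value handed to
	 * tty_encode_baud_rate() in the hunk above. */
	static unsigned int uart_actual_baud(unsigned int uartclk,
					     unsigned int quot)
	{
		return uartclk / (16 * quot);
	}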
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 0d99120ab5a2..2b8a410e0959 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -84,9 +84,6 @@ extern wait_queue_head_t keypress_wait;
struct tty_driver *serial_driver;
-/* serial subtype definitions */
-#define SERIAL_TYPE_NORMAL 1
-
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index f94109cbb46e..b8a4bd94f51d 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2047,7 +2047,7 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
* Oxford Semi 952 rev B workaround
*/
if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0)
- quot ++;
+ quot++;
if (up->capabilities & UART_CAP_FIFO && up->port.fifosize > 1) {
if (baud < 2400)
@@ -2662,16 +2662,17 @@ static int __devinit serial8250_probe(struct platform_device *dev)
memset(&port, 0, sizeof(struct uart_port));
for (i = 0; p && p->flags != 0; p++, i++) {
- port.iobase = p->iobase;
- port.membase = p->membase;
- port.irq = p->irq;
- port.uartclk = p->uartclk;
- port.regshift = p->regshift;
- port.iotype = p->iotype;
- port.flags = p->flags;
- port.mapbase = p->mapbase;
- port.hub6 = p->hub6;
- port.dev = &dev->dev;
+ port.iobase = p->iobase;
+ port.membase = p->membase;
+ port.irq = p->irq;
+ port.uartclk = p->uartclk;
+ port.regshift = p->regshift;
+ port.iotype = p->iotype;
+ port.flags = p->flags;
+ port.mapbase = p->mapbase;
+ port.hub6 = p->hub6;
+ port.private_data = p->private_data;
+ port.dev = &dev->dev;
if (share_irqs)
port.flags |= UPF_SHARE_IRQ;
ret = serial8250_register_port(&port);
@@ -2812,15 +2813,16 @@ int serial8250_register_port(struct uart_port *port)
if (uart) {
uart_remove_one_port(&serial8250_reg, &uart->port);
- uart->port.iobase = port->iobase;
- uart->port.membase = port->membase;
- uart->port.irq = port->irq;
- uart->port.uartclk = port->uartclk;
- uart->port.fifosize = port->fifosize;
- uart->port.regshift = port->regshift;
- uart->port.iotype = port->iotype;
- uart->port.flags = port->flags | UPF_BOOT_AUTOCONF;
- uart->port.mapbase = port->mapbase;
+ uart->port.iobase = port->iobase;
+ uart->port.membase = port->membase;
+ uart->port.irq = port->irq;
+ uart->port.uartclk = port->uartclk;
+ uart->port.fifosize = port->fifosize;
+ uart->port.regshift = port->regshift;
+ uart->port.iotype = port->iotype;
+ uart->port.flags = port->flags | UPF_BOOT_AUTOCONF;
+ uart->port.mapbase = port->mapbase;
+ uart->port.private_data = port->private_data;
if (port->dev)
uart->port.dev = port->dev;
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index ceb03c9e749f..0a4ac2b6eb5a 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -106,6 +106,32 @@ setup_port(struct serial_private *priv, struct uart_port *port,
}
/*
+ * ADDI-DATA GmbH communication cards <info@addi-data.com>
+ */
+static int addidata_apci7800_setup(struct serial_private *priv,
+ struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ unsigned int bar = 0, offset = board->first_offset;
+ bar = FL_GET_BASE(board->flags);
+
+ if (idx < 2) {
+ offset += idx * board->uart_offset;
+ } else if ((idx >= 2) && (idx < 4)) {
+ bar += 1;
+ offset += ((idx - 2) * board->uart_offset);
+ } else if ((idx >= 4) && (idx < 6)) {
+ bar += 2;
+ offset += ((idx - 4) * board->uart_offset);
+ } else if (idx >= 6) {
+ bar += 3;
+ offset += ((idx - 6) * board->uart_offset);
+ }
+
+ return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
+/*
* AFAVLAB uses a different mixture of BARs and offsets
* Not that ugly ;) -- HW
*/
@@ -752,6 +778,16 @@ pci_default_setup(struct serial_private *priv, struct pciserial_board *board,
*/
static struct pci_serial_quirk pci_serial_quirks[] = {
/*
+ * ADDI-DATA GmbH communication cards <info@addi-data.com>
+ */
+ {
+ .vendor = PCI_VENDOR_ID_ADDIDATA_OLD,
+ .device = PCI_DEVICE_ID_ADDIDATA_APCI7800,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = addidata_apci7800_setup,
+ },
+ /*
* AFAVLAB cards - these may be called via parport_serial
* It is not clear whether this applies to all products.
*/
@@ -1179,6 +1215,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
.base_baud = 115200,
.uart_offset = 8,
},
+ [pbn_b0_8_115200] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
[pbn_b0_1_921600] = {
.flags = FL_BASE0,
@@ -2697,6 +2739,97 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_pasemi_1682M },
/*
+ * ADDI-DATA GmbH communication cards <info@addi-data.com>
+ */
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7500,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_4_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7420,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_2_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7300,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA_OLD,
+ PCI_DEVICE_ID_ADDIDATA_APCI7800,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b1_8_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7500_2,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_4_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7420_2,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_2_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7300_2,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7500_3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_4_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7420_3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_2_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7300_3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7800_3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_8_115200 },
+
+ /*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
*/
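The addidata_apci7800_setup() quirk added above walks an if/else chain, but for the board's eight ports (idx 0..7) it reduces to two UARTs per BAR, each at a fixed offset within its BAR. A compact equivalent of that mapping, written as a sketch with an invented helper name and only meaningful for idx 0..7:

	/* APCI-7800 port layout: two UARTs per BAR, at
	 * first_offset + (idx % 2) * uart_offset within the BAR. */
	static void apci7800_port_location(unsigned int idx,
					   unsigned int base_bar,
					   unsigned int first_offset,
					   unsigned int uart_offset,
					   unsigned int *bar,
					   unsigned int *offset)
	{
		*bar = base_bar + idx / 2;
		*offset = first_offset + (idx % 2) * uart_offset;
	}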
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index 1de098e75497..6f09cbd7fc48 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -414,8 +414,9 @@ static int __devinit check_resources(struct pnp_option *option)
*/
static int __devinit serial_pnp_guess_board(struct pnp_dev *dev, int *flags)
{
- if (!(check_name(pnp_dev_name(dev)) || (dev->card && check_name(dev->card->name))))
- return -ENODEV;
+ if (!(check_name(pnp_dev_name(dev)) ||
+ (dev->card && check_name(dev->card->name))))
+ return -ENODEV;
if (check_resources(dev->independent))
return 0;
@@ -452,8 +453,9 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
return -ENODEV;
#ifdef SERIAL_DEBUG_PNP
- printk("Setup PNP port: port %x, mem 0x%lx, irq %d, type %d\n",
- port.iobase, port.mapbase, port.irq, port.iotype);
+ printk(KERN_DEBUG
+ "Setup PNP port: port %x, mem 0x%lx, irq %d, type %d\n",
+ port.iobase, port.mapbase, port.irq, port.iotype);
#endif
port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 8a053ea21e1d..84a054d7e986 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -465,20 +465,24 @@ config SERIAL_DZ
bool "DECstation DZ serial driver"
depends on MACH_DECSTATION && 32BIT
select SERIAL_CORE
- help
- DZ11-family serial controllers for VAXstations, including the
- DC7085, M7814, and M7819.
+ default y
+ ---help---
+ DZ11-family serial controllers for DECstations and VAXstations,
+ including the DC7085, M7814, and M7819.
config SERIAL_DZ_CONSOLE
bool "Support console on DECstation DZ serial driver"
depends on SERIAL_DZ=y
select SERIAL_CORE_CONSOLE
- help
+ default y
+ ---help---
If you say Y here, it will be possible to use a serial port as the
system console (the system console is the device which receives all
kernel messages and warnings and which allows logins in single user
- mode). Note that the firmware uses ttyS0 as the serial console on
- the Maxine and ttyS2 on the others.
+ mode).
+
+ Note that the firmware uses ttyS3 as the serial console on
+ DECstations that use this driver.
If unsure, say Y.
@@ -877,15 +881,15 @@ config SERIAL_SUNHV
systems. Say Y if you want to be able to use this device.
config SERIAL_IP22_ZILOG
- tristate "IP22 Zilog8530 serial support"
- depends on SGI_IP22
+ tristate "SGI Zilog8530 serial support"
+ depends on SGI_HAS_ZILOG
select SERIAL_CORE
help
- This driver supports the Zilog8530 serial ports found on SGI IP22
+ This driver supports the Zilog8530 serial ports found on SGI
systems. Say Y or M if you want to be able to these serial ports.
config SERIAL_IP22_ZILOG_CONSOLE
- bool "Console on IP22 Zilog8530 serial port"
+ bool "Console on SGI Zilog8530 serial port"
depends on SERIAL_IP22_ZILOG=y
select SERIAL_CORE_CONSOLE
@@ -1138,17 +1142,17 @@ config SERIAL_SGI_L1_CONSOLE
say Y. Otherwise, say N.
config SERIAL_MPC52xx
- tristate "Freescale MPC52xx family PSC serial support"
- depends on PPC_MPC52xx
+ tristate "Freescale MPC52xx/MPC512x family PSC serial support"
+ depends on PPC_MPC52xx || PPC_MPC512x
select SERIAL_CORE
help
- This drivers support the MPC52xx PSC serial ports. If you would
- like to use them, you must answer Y or M to this option. Not that
+ This driver supports MPC52xx and MPC512x PSC serial ports. If you would
+ like to use them, you must answer Y or M to this option. Note that
for use as console, it must be included in kernel and not as a
module.
config SERIAL_MPC52xx_CONSOLE
- bool "Console on a Freescale MPC52xx family PSC serial port"
+ bool "Console on a Freescale MPC52xx/MPC512x family PSC serial port"
depends on SERIAL_MPC52xx=y
select SERIAL_CORE_CONSOLE
help
@@ -1156,7 +1160,7 @@ config SERIAL_MPC52xx_CONSOLE
of the Freescale MPC52xx family as a console.
config SERIAL_MPC52xx_CONSOLE_BAUD
- int "Freescale MPC52xx family PSC serial port baud"
+ int "Freescale MPC52xx/MPC512x family PSC serial port baud"
depends on SERIAL_MPC52xx_CONSOLE=y
default "9600"
help
@@ -1295,8 +1299,8 @@ config SERIAL_NETX_CONSOLE
depends on SERIAL_NETX
select SERIAL_CORE_CONSOLE
help
- If you have enabled the serial port on the Motorola IMX
- CPU you can make it the console by answering Y to this option.
+ If you have enabled the serial port on the Hilscher NetX SoC
+ you can make it the console by answering Y to this option.
config SERIAL_OF_PLATFORM
tristate "Serial port on Open Firmware platform bus"
@@ -1318,4 +1322,19 @@ config SERIAL_QE
This driver supports the QE serial ports on Freescale embedded
PowerPC that contain a QUICC Engine.
+config SERIAL_SC26XX
+ tristate "SC2681/SC2692 serial port support"
+ depends on SNI_RM
+ select SERIAL_CORE
+ help
+ This is a driver for the onboard serial ports of
+ older RM400 machines.
+
+config SERIAL_SC26XX_CONSOLE
+ bool "Console on SC2681/SC2692 serial port"
+ depends on SERIAL_SC26XX
+ select SERIAL_CORE_CONSOLE
+ help
+ Support for Console on SC2681/SC2692 serial ports.
+
endmenu
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 2dd41b4cc8db..640cfe44a56d 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_SERIAL_M32R_SIO) += m32r_sio.o
obj-$(CONFIG_SERIAL_MPSC) += mpsc.o
obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
obj-$(CONFIG_ETRAX_SERIAL) += crisv10.o
+obj-$(CONFIG_SERIAL_SC26XX) += sc26xx.o
obj-$(CONFIG_SERIAL_JSM) += jsm/
obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
obj-$(CONFIG_SERIAL_VR41XX) += vr41xx_siu.o
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 111da57f5334..60f52904aad0 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -34,6 +34,7 @@
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
#include <linux/atmel_pdc.h>
+#include <linux/atmel_serial.h>
#include <asm/io.h>
@@ -45,8 +46,6 @@
#include <asm/arch/gpio.h>
#endif
-#include "atmel_serial.h"
-
#if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
diff --git a/drivers/serial/atmel_serial.h b/drivers/serial/atmel_serial.h
deleted file mode 100644
index e0141776517c..000000000000
--- a/drivers/serial/atmel_serial.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * drivers/serial/atmel_serial.h
- *
- * Copyright (C) 2005 Ivan Kokshaysky
- * Copyright (C) SAN People
- *
- * USART registers.
- * Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef ATMEL_SERIAL_H
-#define ATMEL_SERIAL_H
-
-#define ATMEL_US_CR 0x00 /* Control Register */
-#define ATMEL_US_RSTRX (1 << 2) /* Reset Receiver */
-#define ATMEL_US_RSTTX (1 << 3) /* Reset Transmitter */
-#define ATMEL_US_RXEN (1 << 4) /* Receiver Enable */
-#define ATMEL_US_RXDIS (1 << 5) /* Receiver Disable */
-#define ATMEL_US_TXEN (1 << 6) /* Transmitter Enable */
-#define ATMEL_US_TXDIS (1 << 7) /* Transmitter Disable */
-#define ATMEL_US_RSTSTA (1 << 8) /* Reset Status Bits */
-#define ATMEL_US_STTBRK (1 << 9) /* Start Break */
-#define ATMEL_US_STPBRK (1 << 10) /* Stop Break */
-#define ATMEL_US_STTTO (1 << 11) /* Start Time-out */
-#define ATMEL_US_SENDA (1 << 12) /* Send Address */
-#define ATMEL_US_RSTIT (1 << 13) /* Reset Iterations */
-#define ATMEL_US_RSTNACK (1 << 14) /* Reset Non Acknowledge */
-#define ATMEL_US_RETTO (1 << 15) /* Rearm Time-out */
-#define ATMEL_US_DTREN (1 << 16) /* Data Terminal Ready Enable [AT91RM9200 only] */
-#define ATMEL_US_DTRDIS (1 << 17) /* Data Terminal Ready Disable [AT91RM9200 only] */
-#define ATMEL_US_RTSEN (1 << 18) /* Request To Send Enable */
-#define ATMEL_US_RTSDIS (1 << 19) /* Request To Send Disable */
-
-#define ATMEL_US_MR 0x04 /* Mode Register */
-#define ATMEL_US_USMODE (0xf << 0) /* Mode of the USART */
-#define ATMEL_US_USMODE_NORMAL 0
-#define ATMEL_US_USMODE_RS485 1
-#define ATMEL_US_USMODE_HWHS 2
-#define ATMEL_US_USMODE_MODEM 3
-#define ATMEL_US_USMODE_ISO7816_T0 4
-#define ATMEL_US_USMODE_ISO7816_T1 6
-#define ATMEL_US_USMODE_IRDA 8
-#define ATMEL_US_USCLKS (3 << 4) /* Clock Selection */
-#define ATMEL_US_USCLKS_MCK (0 << 4)
-#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
-#define ATMEL_US_USCLKS_SCK (3 << 4)
-#define ATMEL_US_CHRL (3 << 6) /* Character Length */
-#define ATMEL_US_CHRL_5 (0 << 6)
-#define ATMEL_US_CHRL_6 (1 << 6)
-#define ATMEL_US_CHRL_7 (2 << 6)
-#define ATMEL_US_CHRL_8 (3 << 6)
-#define ATMEL_US_SYNC (1 << 8) /* Synchronous Mode Select */
-#define ATMEL_US_PAR (7 << 9) /* Parity Type */
-#define ATMEL_US_PAR_EVEN (0 << 9)
-#define ATMEL_US_PAR_ODD (1 << 9)
-#define ATMEL_US_PAR_SPACE (2 << 9)
-#define ATMEL_US_PAR_MARK (3 << 9)
-#define ATMEL_US_PAR_NONE (4 << 9)
-#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
-#define ATMEL_US_NBSTOP (3 << 12) /* Number of Stop Bits */
-#define ATMEL_US_NBSTOP_1 (0 << 12)
-#define ATMEL_US_NBSTOP_1_5 (1 << 12)
-#define ATMEL_US_NBSTOP_2 (2 << 12)
-#define ATMEL_US_CHMODE (3 << 14) /* Channel Mode */
-#define ATMEL_US_CHMODE_NORMAL (0 << 14)
-#define ATMEL_US_CHMODE_ECHO (1 << 14)
-#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
-#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
-#define ATMEL_US_MSBF (1 << 16) /* Bit Order */
-#define ATMEL_US_MODE9 (1 << 17) /* 9-bit Character Length */
-#define ATMEL_US_CLKO (1 << 18) /* Clock Output Select */
-#define ATMEL_US_OVER (1 << 19) /* Oversampling Mode */
-#define ATMEL_US_INACK (1 << 20) /* Inhibit Non Acknowledge */
-#define ATMEL_US_DSNACK (1 << 21) /* Disable Successive NACK */
-#define ATMEL_US_MAX_ITER (7 << 24) /* Max Iterations */
-#define ATMEL_US_FILTER (1 << 28) /* Infrared Receive Line Filter */
-
-#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
-#define ATMEL_US_RXRDY (1 << 0) /* Receiver Ready */
-#define ATMEL_US_TXRDY (1 << 1) /* Transmitter Ready */
-#define ATMEL_US_RXBRK (1 << 2) /* Break Received / End of Break */
-#define ATMEL_US_ENDRX (1 << 3) /* End of Receiver Transfer */
-#define ATMEL_US_ENDTX (1 << 4) /* End of Transmitter Transfer */
-#define ATMEL_US_OVRE (1 << 5) /* Overrun Error */
-#define ATMEL_US_FRAME (1 << 6) /* Framing Error */
-#define ATMEL_US_PARE (1 << 7) /* Parity Error */
-#define ATMEL_US_TIMEOUT (1 << 8) /* Receiver Time-out */
-#define ATMEL_US_TXEMPTY (1 << 9) /* Transmitter Empty */
-#define ATMEL_US_ITERATION (1 << 10) /* Max number of Repetitions Reached */
-#define ATMEL_US_TXBUFE (1 << 11) /* Transmission Buffer Empty */
-#define ATMEL_US_RXBUFF (1 << 12) /* Reception Buffer Full */
-#define ATMEL_US_NACK (1 << 13) /* Non Acknowledge */
-#define ATMEL_US_RIIC (1 << 16) /* Ring Indicator Input Change [AT91RM9200 only] */
-#define ATMEL_US_DSRIC (1 << 17) /* Data Set Ready Input Change [AT91RM9200 only] */
-#define ATMEL_US_DCDIC (1 << 18) /* Data Carrier Detect Input Change [AT91RM9200 only] */
-#define ATMEL_US_CTSIC (1 << 19) /* Clear to Send Input Change */
-#define ATMEL_US_RI (1 << 20) /* RI */
-#define ATMEL_US_DSR (1 << 21) /* DSR */
-#define ATMEL_US_DCD (1 << 22) /* DCD */
-#define ATMEL_US_CTS (1 << 23) /* CTS */
-
-#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */
-#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */
-#define ATMEL_US_CSR 0x14 /* Channel Status Register */
-#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */
-#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */
-#define ATMEL_US_SYNH (1 << 15) /* Transmit/Receive Sync [AT91SAM9261 only] */
-
-#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
-#define ATMEL_US_CD (0xffff << 0) /* Clock Divider */
-
-#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */
-#define ATMEL_US_TO (0xffff << 0) /* Time-out Value */
-
-#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
-#define ATMEL_US_TG (0xff << 0) /* Timeguard Value */
-
-#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */
-#define ATMEL_US_NER 0x44 /* Number of Errors Register */
-#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
-
-#endif
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index b5e4478de0e3..236af9d33851 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -380,7 +380,7 @@ static void cpm_uart_int_rx(struct uart_port *port)
static irqreturn_t cpm_uart_int(int irq, void *data)
{
u8 events;
- struct uart_port *port = (struct uart_port *)data;
+ struct uart_port *port = data;
struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
smc_t __iomem *smcp = pinfo->smcp;
scc_t __iomem *sccp = pinfo->sccp;
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index a4e23cf47906..383c4e660cd5 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -68,11 +68,6 @@ static char *serial_version = "$Revision: 1.25 $";
struct tty_driver *serial_driver;
-/* serial subtype definitions */
-#ifndef SERIAL_TYPE_NORMAL
-#define SERIAL_TYPE_NORMAL 1
-#endif
-
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
diff --git a/drivers/serial/dz.c b/drivers/serial/dz.c
index d31721f2744d..116211fcd36f 100644
--- a/drivers/serial/dz.c
+++ b/drivers/serial/dz.c
@@ -6,7 +6,7 @@
*
* Email: olivier.lebaillif@ifrsys.com
*
- * Copyright (C) 2004, 2006 Maciej W. Rozycki
+ * Copyright (C) 2004, 2006, 2007 Maciej W. Rozycki
*
* [31-AUG-98] triemer
* Changed IRQ to use Harald's dec internals interrupts.h
@@ -32,38 +32,63 @@
#define SUPPORT_SYSRQ
#endif
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/console.h>
#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
+#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
+#include <asm/atomic.h>
#include <asm/bootinfo.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
#include <asm/dec/interrupts.h>
#include <asm/dec/kn01.h>
#include <asm/dec/kn02.h>
#include <asm/dec/machtype.h>
#include <asm/dec/prom.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
+#include <asm/dec/system.h>
#include "dz.h"
-static char *dz_name = "DECstation DZ serial driver version ";
-static char *dz_version = "1.03";
+
+MODULE_DESCRIPTION("DECstation DZ serial driver");
+MODULE_LICENSE("GPL");
+
+
+static char dz_name[] __initdata = "DECstation DZ serial driver version ";
+static char dz_version[] __initdata = "1.04";
struct dz_port {
+ struct dz_mux *mux;
struct uart_port port;
unsigned int cflag;
};
-static struct dz_port dz_ports[DZ_NB_PORT];
+struct dz_mux {
+ struct dz_port dport[DZ_NB_PORT];
+ atomic_t map_guard;
+ atomic_t irq_guard;
+ int initialised;
+};
+
+static struct dz_mux dz_mux;
+
+static inline struct dz_port *to_dport(struct uart_port *uport)
+{
+ return container_of(uport, struct dz_port, port);
+}
/*
* ------------------------------------------------------------
@@ -74,21 +99,18 @@ static struct dz_port dz_ports[DZ_NB_PORT];
* ------------------------------------------------------------
*/
-static inline unsigned short dz_in(struct dz_port *dport, unsigned offset)
+static u16 dz_in(struct dz_port *dport, unsigned offset)
{
- volatile unsigned short *addr =
- (volatile unsigned short *) (dport->port.membase + offset);
+ void __iomem *addr = dport->port.membase + offset;
- return *addr;
+ return readw(addr);
}
-static inline void dz_out(struct dz_port *dport, unsigned offset,
- unsigned short value)
+static void dz_out(struct dz_port *dport, unsigned offset, u16 value)
{
- volatile unsigned short *addr =
- (volatile unsigned short *) (dport->port.membase + offset);
+ void __iomem *addr = dport->port.membase + offset;
- *addr = value;
+ writew(value, addr);
}
/*
@@ -103,42 +125,33 @@ static inline void dz_out(struct dz_port *dport, unsigned offset,
static void dz_stop_tx(struct uart_port *uport)
{
- struct dz_port *dport = (struct dz_port *)uport;
- unsigned short tmp, mask = 1 << dport->port.line;
- unsigned long flags;
+ struct dz_port *dport = to_dport(uport);
+ u16 tmp, mask = 1 << dport->port.line;
- spin_lock_irqsave(&dport->port.lock, flags);
tmp = dz_in(dport, DZ_TCR); /* read the TX flag */
tmp &= ~mask; /* clear the TX flag */
dz_out(dport, DZ_TCR, tmp);
- spin_unlock_irqrestore(&dport->port.lock, flags);
}
static void dz_start_tx(struct uart_port *uport)
{
- struct dz_port *dport = (struct dz_port *)uport;
- unsigned short tmp, mask = 1 << dport->port.line;
- unsigned long flags;
+ struct dz_port *dport = to_dport(uport);
+ u16 tmp, mask = 1 << dport->port.line;
- spin_lock_irqsave(&dport->port.lock, flags);
tmp = dz_in(dport, DZ_TCR); /* read the TX flag */
tmp |= mask; /* set the TX flag */
dz_out(dport, DZ_TCR, tmp);
- spin_unlock_irqrestore(&dport->port.lock, flags);
}
static void dz_stop_rx(struct uart_port *uport)
{
- struct dz_port *dport = (struct dz_port *)uport;
- unsigned long flags;
+ struct dz_port *dport = to_dport(uport);
- spin_lock_irqsave(&dport->port.lock, flags);
- dport->cflag &= ~DZ_CREAD;
- dz_out(dport, DZ_LPR, dport->cflag | dport->port.line);
- spin_unlock_irqrestore(&dport->port.lock, flags);
+ dport->cflag &= ~DZ_RXENAB;
+ dz_out(dport, DZ_LPR, dport->cflag);
}
-static void dz_enable_ms(struct uart_port *port)
+static void dz_enable_ms(struct uart_port *uport)
{
/* nothing to do */
}
@@ -170,73 +183,73 @@ static void dz_enable_ms(struct uart_port *port)
* This routine deals with inputs from any lines.
* ------------------------------------------------------------
*/
-static inline void dz_receive_chars(struct dz_port *dport_in)
+static inline void dz_receive_chars(struct dz_mux *mux)
{
- struct dz_port *dport;
+ struct uart_port *uport;
+ struct dz_port *dport = &mux->dport[0];
struct tty_struct *tty = NULL;
struct uart_icount *icount;
int lines_rx[DZ_NB_PORT] = { [0 ... DZ_NB_PORT - 1] = 0 };
- unsigned short status;
unsigned char ch, flag;
+ u16 status;
int i;
- while ((status = dz_in(dport_in, DZ_RBUF)) & DZ_DVAL) {
- dport = &dz_ports[LINE(status)];
- tty = dport->port.info->tty; /* point to the proper dev */
+ while ((status = dz_in(dport, DZ_RBUF)) & DZ_DVAL) {
+ dport = &mux->dport[LINE(status)];
+ uport = &dport->port;
+ tty = uport->info->tty; /* point to the proper dev */
ch = UCHAR(status); /* grab the char */
+ flag = TTY_NORMAL;
- icount = &dport->port.icount;
+ icount = &uport->icount;
icount->rx++;
- flag = TTY_NORMAL;
- if (status & DZ_FERR) { /* frame error */
+ if (unlikely(status & (DZ_OERR | DZ_FERR | DZ_PERR))) {
+
/*
- * There is no separate BREAK status bit, so
- * treat framing errors as BREAKs for Magic SysRq
- * and SAK; normally, otherwise.
+ * There is no separate BREAK status bit, so treat
+ * null characters with framing errors as BREAKs;
+ * normally, otherwise. For this move the Framing
+ * Error bit to a simulated BREAK bit.
*/
- if (uart_handle_break(&dport->port))
- continue;
- if (dport->port.flags & UPF_SAK)
+ if (!ch) {
+ status |= (status & DZ_FERR) >>
+ (ffs(DZ_FERR) - ffs(DZ_BREAK));
+ status &= ~DZ_FERR;
+ }
+
+ /* Handle SysRq/SAK & keep track of the statistics. */
+ if (status & DZ_BREAK) {
+ icount->brk++;
+ if (uart_handle_break(uport))
+ continue;
+ } else if (status & DZ_FERR)
+ icount->frame++;
+ else if (status & DZ_PERR)
+ icount->parity++;
+ if (status & DZ_OERR)
+ icount->overrun++;
+
+ status &= uport->read_status_mask;
+ if (status & DZ_BREAK)
flag = TTY_BREAK;
- else
+ else if (status & DZ_FERR)
flag = TTY_FRAME;
- } else if (status & DZ_OERR) /* overrun error */
- flag = TTY_OVERRUN;
- else if (status & DZ_PERR) /* parity error */
- flag = TTY_PARITY;
-
- /* keep track of the statistics */
- switch (flag) {
- case TTY_FRAME:
- icount->frame++;
- break;
- case TTY_PARITY:
- icount->parity++;
- break;
- case TTY_OVERRUN:
- icount->overrun++;
- break;
- case TTY_BREAK:
- icount->brk++;
- break;
- default:
- break;
+ else if (status & DZ_PERR)
+ flag = TTY_PARITY;
+
}
- if (uart_handle_sysrq_char(&dport->port, ch))
+ if (uart_handle_sysrq_char(uport, ch))
continue;
- if ((status & dport->port.ignore_status_mask) == 0) {
- uart_insert_char(&dport->port,
- status, DZ_OERR, ch, flag);
- lines_rx[LINE(status)] = 1;
- }
+ uart_insert_char(uport, status, DZ_OERR, ch, flag);
+ lines_rx[LINE(status)] = 1;
}
for (i = 0; i < DZ_NB_PORT; i++)
if (lines_rx[i])
- tty_flip_buffer_push(dz_ports[i].port.info->tty);
+ tty_flip_buffer_push(mux->dport[i].port.info->tty);
}
/*
@@ -246,15 +259,15 @@ static inline void dz_receive_chars(struct dz_port *dport_in)
* This routine deals with outputs to any lines.
* ------------------------------------------------------------
*/
-static inline void dz_transmit_chars(struct dz_port *dport_in)
+static inline void dz_transmit_chars(struct dz_mux *mux)
{
- struct dz_port *dport;
+ struct dz_port *dport = &mux->dport[0];
struct circ_buf *xmit;
- unsigned short status;
unsigned char tmp;
+ u16 status;
- status = dz_in(dport_in, DZ_CSR);
- dport = &dz_ports[LINE(status)];
+ status = dz_in(dport, DZ_CSR);
+ dport = &mux->dport[LINE(status)];
xmit = &dport->port.info->xmit;
if (dport->port.x_char) { /* XON/XOFF chars */
@@ -265,7 +278,9 @@ static inline void dz_transmit_chars(struct dz_port *dport_in)
}
/* If nothing to do or stopped or hardware stopped. */
if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
+ spin_lock(&dport->port.lock);
dz_stop_tx(&dport->port);
+ spin_unlock(&dport->port.lock);
return;
}
@@ -282,8 +297,11 @@ static inline void dz_transmit_chars(struct dz_port *dport_in)
uart_write_wakeup(&dport->port);
/* Are we are done. */
- if (uart_circ_empty(xmit))
+ if (uart_circ_empty(xmit)) {
+ spin_lock(&dport->port.lock);
dz_stop_tx(&dport->port);
+ spin_unlock(&dport->port.lock);
+ }
}
/*
@@ -301,7 +319,7 @@ static inline void check_modem_status(struct dz_port *dport)
* 1. No status change interrupt; use a timer.
* 2. Handle the 3100/5000 as appropriate. --macro
*/
- unsigned short status;
+ u16 status;
/* If not the modem line just return. */
if (dport->port.line != DZ_MODEM)
@@ -322,19 +340,20 @@ static inline void check_modem_status(struct dz_port *dport)
* It deals with the multiple ports.
* ------------------------------------------------------------
*/
-static irqreturn_t dz_interrupt(int irq, void *dev)
+static irqreturn_t dz_interrupt(int irq, void *dev_id)
{
- struct dz_port *dport = (struct dz_port *)dev;
- unsigned short status;
+ struct dz_mux *mux = dev_id;
+ struct dz_port *dport = &mux->dport[0];
+ u16 status;
/* get the reason why we just got an irq */
status = dz_in(dport, DZ_CSR);
if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE))
- dz_receive_chars(dport);
+ dz_receive_chars(mux);
if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE))
- dz_transmit_chars(dport);
+ dz_transmit_chars(mux);
return IRQ_HANDLED;
}
@@ -350,7 +369,7 @@ static unsigned int dz_get_mctrl(struct uart_port *uport)
/*
* FIXME: Handle the 3100/5000 as appropriate. --macro
*/
- struct dz_port *dport = (struct dz_port *)uport;
+ struct dz_port *dport = to_dport(uport);
unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
if (dport->port.line == DZ_MODEM) {
@@ -366,8 +385,8 @@ static void dz_set_mctrl(struct uart_port *uport, unsigned int mctrl)
/*
* FIXME: Handle the 3100/5000 as appropriate. --macro
*/
- struct dz_port *dport = (struct dz_port *)uport;
- unsigned short tmp;
+ struct dz_port *dport = to_dport(uport);
+ u16 tmp;
if (dport->port.line == DZ_MODEM) {
tmp = dz_in(dport, DZ_TCR);
@@ -388,15 +407,30 @@ static void dz_set_mctrl(struct uart_port *uport, unsigned int mctrl)
*/
static int dz_startup(struct uart_port *uport)
{
- struct dz_port *dport = (struct dz_port *)uport;
+ struct dz_port *dport = to_dport(uport);
+ struct dz_mux *mux = dport->mux;
unsigned long flags;
- unsigned short tmp;
+ int irq_guard;
+ int ret;
+ u16 tmp;
+
+ irq_guard = atomic_add_return(1, &mux->irq_guard);
+ if (irq_guard != 1)
+ return 0;
+
+ ret = request_irq(dport->port.irq, dz_interrupt,
+ IRQF_SHARED, "dz", mux);
+ if (ret) {
+ atomic_add(-1, &mux->irq_guard);
+ printk(KERN_ERR "dz: Cannot get IRQ %d!\n", dport->port.irq);
+ return ret;
+ }
spin_lock_irqsave(&dport->port.lock, flags);
- /* enable the interrupt and the scanning */
+ /* Enable interrupts. */
tmp = dz_in(dport, DZ_CSR);
- tmp |= DZ_RIE | DZ_TIE | DZ_MSE;
+ tmp |= DZ_RIE | DZ_TIE;
dz_out(dport, DZ_CSR, tmp);
spin_unlock_irqrestore(&dport->port.lock, flags);
@@ -414,7 +448,25 @@ static int dz_startup(struct uart_port *uport)
*/
static void dz_shutdown(struct uart_port *uport)
{
- dz_stop_tx(uport);
+ struct dz_port *dport = to_dport(uport);
+ struct dz_mux *mux = dport->mux;
+ unsigned long flags;
+ int irq_guard;
+ u16 tmp;
+
+ spin_lock_irqsave(&dport->port.lock, flags);
+ dz_stop_tx(&dport->port);
+ spin_unlock_irqrestore(&dport->port.lock, flags);
+
+ irq_guard = atomic_add_return(-1, &mux->irq_guard);
+ if (!irq_guard) {
+ /* Disable interrupts. */
+ tmp = dz_in(dport, DZ_CSR);
+ tmp &= ~(DZ_RIE | DZ_TIE);
+ dz_out(dport, DZ_CSR, tmp);
+
+ free_irq(dport->port.irq, mux);
+ }
}
/*
@@ -431,7 +483,7 @@ static void dz_shutdown(struct uart_port *uport)
*/
static unsigned int dz_tx_empty(struct uart_port *uport)
{
- struct dz_port *dport = (struct dz_port *)uport;
+ struct dz_port *dport = to_dport(uport);
unsigned short tmp, mask = 1 << dport->port.line;
tmp = dz_in(dport, DZ_TCR);
@@ -446,7 +498,7 @@ static void dz_break_ctl(struct uart_port *uport, int break_state)
* FIXME: Can't access BREAK bits in TDR easily;
* reuse the code for polled TX. --macro
*/
- struct dz_port *dport = (struct dz_port *)uport;
+ struct dz_port *dport = to_dport(uport);
unsigned long flags;
unsigned short tmp, mask = 1 << dport->port.line;
@@ -460,12 +512,69 @@ static void dz_break_ctl(struct uart_port *uport, int break_state)
spin_unlock_irqrestore(&uport->lock, flags);
}
+static int dz_encode_baud_rate(unsigned int baud)
+{
+ switch (baud) {
+ case 50:
+ return DZ_B50;
+ case 75:
+ return DZ_B75;
+ case 110:
+ return DZ_B110;
+ case 134:
+ return DZ_B134;
+ case 150:
+ return DZ_B150;
+ case 300:
+ return DZ_B300;
+ case 600:
+ return DZ_B600;
+ case 1200:
+ return DZ_B1200;
+ case 1800:
+ return DZ_B1800;
+ case 2000:
+ return DZ_B2000;
+ case 2400:
+ return DZ_B2400;
+ case 3600:
+ return DZ_B3600;
+ case 4800:
+ return DZ_B4800;
+ case 7200:
+ return DZ_B7200;
+ case 9600:
+ return DZ_B9600;
+ default:
+ return -1;
+ }
+}
+
+
+static void dz_reset(struct dz_port *dport)
+{
+ struct dz_mux *mux = dport->mux;
+
+ if (mux->initialised)
+ return;
+
+ dz_out(dport, DZ_CSR, DZ_CLR);
+ while (dz_in(dport, DZ_CSR) & DZ_CLR);
+ iob();
+
+ /* Enable scanning. */
+ dz_out(dport, DZ_CSR, DZ_MSE);
+
+ mux->initialised = 1;
+}
+
static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
struct ktermios *old_termios)
{
- struct dz_port *dport = (struct dz_port *)uport;
+ struct dz_port *dport = to_dport(uport);
unsigned long flags;
unsigned int cflag, baud;
+ int bflag;
cflag = dport->port.line;
@@ -492,105 +601,127 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
cflag |= DZ_PARODD;
baud = uart_get_baud_rate(uport, termios, old_termios, 50, 9600);
- switch (baud) {
- case 50:
- cflag |= DZ_B50;
- break;
- case 75:
- cflag |= DZ_B75;
- break;
- case 110:
- cflag |= DZ_B110;
- break;
- case 134:
- cflag |= DZ_B134;
- break;
- case 150:
- cflag |= DZ_B150;
- break;
- case 300:
- cflag |= DZ_B300;
- break;
- case 600:
- cflag |= DZ_B600;
- break;
- case 1200:
- cflag |= DZ_B1200;
- break;
- case 1800:
- cflag |= DZ_B1800;
- break;
- case 2000:
- cflag |= DZ_B2000;
- break;
- case 2400:
- cflag |= DZ_B2400;
- break;
- case 3600:
- cflag |= DZ_B3600;
- break;
- case 4800:
- cflag |= DZ_B4800;
- break;
- case 7200:
- cflag |= DZ_B7200;
- break;
- case 9600:
- default:
- cflag |= DZ_B9600;
+ bflag = dz_encode_baud_rate(baud);
+ if (bflag < 0) { /* Try to keep unchanged. */
+ baud = uart_get_baud_rate(uport, old_termios, NULL, 50, 9600);
+ bflag = dz_encode_baud_rate(baud);
+ if (bflag < 0) { /* Resort to 9600. */
+ baud = 9600;
+ bflag = DZ_B9600;
+ }
+ tty_termios_encode_baud_rate(termios, baud, baud);
}
+ cflag |= bflag;
if (termios->c_cflag & CREAD)
cflag |= DZ_RXENAB;
spin_lock_irqsave(&dport->port.lock, flags);
- dz_out(dport, DZ_LPR, cflag | dport->port.line);
+ uart_update_timeout(uport, termios->c_cflag, baud);
+
+ dz_out(dport, DZ_LPR, cflag);
dport->cflag = cflag;
/* setup accept flag */
dport->port.read_status_mask = DZ_OERR;
if (termios->c_iflag & INPCK)
dport->port.read_status_mask |= DZ_FERR | DZ_PERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ dport->port.read_status_mask |= DZ_BREAK;
/* characters to ignore */
uport->ignore_status_mask = 0;
+ if ((termios->c_iflag & (IGNPAR | IGNBRK)) == (IGNPAR | IGNBRK))
+ dport->port.ignore_status_mask |= DZ_OERR;
if (termios->c_iflag & IGNPAR)
dport->port.ignore_status_mask |= DZ_FERR | DZ_PERR;
+ if (termios->c_iflag & IGNBRK)
+ dport->port.ignore_status_mask |= DZ_BREAK;
spin_unlock_irqrestore(&dport->port.lock, flags);
}
-static const char *dz_type(struct uart_port *port)
+static const char *dz_type(struct uart_port *uport)
{
return "DZ";
}
-static void dz_release_port(struct uart_port *port)
+static void dz_release_port(struct uart_port *uport)
{
- /* nothing to do */
+ struct dz_mux *mux = to_dport(uport)->mux;
+ int map_guard;
+
+ iounmap(uport->membase);
+ uport->membase = NULL;
+
+ map_guard = atomic_add_return(-1, &mux->map_guard);
+ if (!map_guard)
+ release_mem_region(uport->mapbase, dec_kn_slot_size);
}
-static int dz_request_port(struct uart_port *port)
+static int dz_map_port(struct uart_port *uport)
{
+ if (!uport->membase)
+ uport->membase = ioremap_nocache(uport->mapbase,
+ dec_kn_slot_size);
+ if (!uport->membase) {
+ printk(KERN_ERR "dz: Cannot map MMIO\n");
+ return -ENOMEM;
+ }
return 0;
}
-static void dz_config_port(struct uart_port *port, int flags)
+static int dz_request_port(struct uart_port *uport)
{
- if (flags & UART_CONFIG_TYPE)
- port->type = PORT_DZ;
+ struct dz_mux *mux = to_dport(uport)->mux;
+ int map_guard;
+ int ret;
+
+ map_guard = atomic_add_return(1, &mux->map_guard);
+ if (map_guard == 1) {
+ if (!request_mem_region(uport->mapbase, dec_kn_slot_size,
+ "dz")) {
+ atomic_add(-1, &mux->map_guard);
+ printk(KERN_ERR
+ "dz: Unable to reserve MMIO resource\n");
+ return -EBUSY;
+ }
+ }
+ ret = dz_map_port(uport);
+ if (ret) {
+ map_guard = atomic_add_return(-1, &mux->map_guard);
+ if (!map_guard)
+ release_mem_region(uport->mapbase, dec_kn_slot_size);
+ return ret;
+ }
+ return 0;
+}
+
+static void dz_config_port(struct uart_port *uport, int flags)
+{
+ struct dz_port *dport = to_dport(uport);
+
+ if (flags & UART_CONFIG_TYPE) {
+ if (dz_request_port(uport))
+ return;
+
+ uport->type = PORT_DZ;
+
+ dz_reset(dport);
+ }
}
/*
- * verify the new serial_struct (for TIOCSSERIAL).
+ * Verify the new serial_struct (for TIOCSSERIAL).
*/
-static int dz_verify_port(struct uart_port *port, struct serial_struct *ser)
+static int dz_verify_port(struct uart_port *uport, struct serial_struct *ser)
{
int ret = 0;
+
if (ser->type != PORT_UNKNOWN && ser->type != PORT_DZ)
ret = -EINVAL;
- if (ser->irq != port->irq)
+ if (ser->irq != uport->irq)
ret = -EINVAL;
return ret;
}
@@ -617,40 +748,32 @@ static struct uart_ops dz_ops = {
static void __init dz_init_ports(void)
{
static int first = 1;
- struct dz_port *dport;
unsigned long base;
- int i;
+ int line;
if (!first)
return;
first = 0;
- if (mips_machtype == MACH_DS23100 ||
- mips_machtype == MACH_DS5100)
- base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_DZ11);
+ if (mips_machtype == MACH_DS23100 || mips_machtype == MACH_DS5100)
+ base = dec_kn_slot_base + KN01_DZ11;
else
- base = CKSEG1ADDR(KN02_SLOT_BASE + KN02_DZ11);
-
- for (i = 0, dport = dz_ports; i < DZ_NB_PORT; i++, dport++) {
- spin_lock_init(&dport->port.lock);
- dport->port.membase = (char *) base;
- dport->port.iotype = UPIO_MEM;
- dport->port.irq = dec_interrupt[DEC_IRQ_DZ11];
- dport->port.line = i;
- dport->port.fifosize = 1;
- dport->port.ops = &dz_ops;
- dport->port.flags = UPF_BOOT_AUTOCONF;
- }
-}
+ base = dec_kn_slot_base + KN02_DZ11;
-static void dz_reset(struct dz_port *dport)
-{
- dz_out(dport, DZ_CSR, DZ_CLR);
- while (dz_in(dport, DZ_CSR) & DZ_CLR);
- iob();
+ for (line = 0; line < DZ_NB_PORT; line++) {
+ struct dz_port *dport = &dz_mux.dport[line];
+ struct uart_port *uport = &dport->port;
- /* enable scanning */
- dz_out(dport, DZ_CSR, DZ_MSE);
+ dport->mux = &dz_mux;
+
+ uport->irq = dec_interrupt[DEC_IRQ_DZ11];
+ uport->fifosize = 1;
+ uport->iotype = UPIO_MEM;
+ uport->flags = UPF_BOOT_AUTOCONF;
+ uport->ops = &dz_ops;
+ uport->line = line;
+ uport->mapbase = base;
+ }
}
#ifdef CONFIG_SERIAL_DZ_CONSOLE
@@ -670,7 +793,7 @@ static void dz_reset(struct dz_port *dport)
*/
static void dz_console_putchar(struct uart_port *uport, int ch)
{
- struct dz_port *dport = (struct dz_port *)uport;
+ struct dz_port *dport = to_dport(uport);
unsigned long flags;
unsigned short csr, tcr, trdy, mask;
int loops = 10000;
@@ -685,7 +808,7 @@ static void dz_console_putchar(struct uart_port *uport, int ch)
iob();
spin_unlock_irqrestore(&dport->port.lock, flags);
- while (loops--) {
+ do {
trdy = dz_in(dport, DZ_CSR);
if (!(trdy & DZ_TRDY))
continue;
@@ -696,7 +819,7 @@ static void dz_console_putchar(struct uart_port *uport, int ch)
dz_out(dport, DZ_TCR, mask);
iob();
udelay(2);
- }
+ } while (loops--);
if (loops) /* Cannot send otherwise. */
dz_out(dport, DZ_TDR, ch);
@@ -717,7 +840,7 @@ static void dz_console_print(struct console *co,
const char *str,
unsigned int count)
{
- struct dz_port *dport = &dz_ports[co->index];
+ struct dz_port *dport = &dz_mux.dport[co->index];
#ifdef DEBUG_DZ
prom_printf((char *) str);
#endif
@@ -726,22 +849,28 @@ static void dz_console_print(struct console *co,
static int __init dz_console_setup(struct console *co, char *options)
{
- struct dz_port *dport = &dz_ports[co->index];
+ struct dz_port *dport = &dz_mux.dport[co->index];
+ struct uart_port *uport = &dport->port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
+ int ret;
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
+ ret = dz_map_port(uport);
+ if (ret)
+ return ret;
dz_reset(dport);
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
return uart_set_options(&dport->port, co, baud, parity, bits, flow);
}
static struct uart_driver dz_reg;
-static struct console dz_sercons = {
+static struct console dz_console = {
.name = "ttyS",
.write = dz_console_print,
.device = uart_console_device,
@@ -755,7 +884,7 @@ static int __init dz_serial_console_init(void)
{
if (!IOASIC) {
dz_init_ports();
- register_console(&dz_sercons);
+ register_console(&dz_console);
return 0;
} else
return -ENXIO;
@@ -763,7 +892,7 @@ static int __init dz_serial_console_init(void)
console_initcall(dz_serial_console_init);
-#define SERIAL_DZ_CONSOLE &dz_sercons
+#define SERIAL_DZ_CONSOLE &dz_console
#else
#define SERIAL_DZ_CONSOLE NULL
#endif /* CONFIG_SERIAL_DZ_CONSOLE */
@@ -789,26 +918,14 @@ static int __init dz_init(void)
dz_init_ports();
-#ifndef CONFIG_SERIAL_DZ_CONSOLE
- /* reset the chip */
- dz_reset(&dz_ports[0]);
-#endif
-
- if (request_irq(dz_ports[0].port.irq, dz_interrupt,
- IRQF_DISABLED, "DZ", &dz_ports[0]))
- panic("Unable to register DZ interrupt");
-
ret = uart_register_driver(&dz_reg);
- if (ret != 0)
+ if (ret)
return ret;
for (i = 0; i < DZ_NB_PORT; i++)
- uart_add_one_port(&dz_reg, &dz_ports[i].port);
+ uart_add_one_port(&dz_reg, &dz_mux.dport[i].port);
- return ret;
+ return 0;
}
module_init(dz_init);
-
-MODULE_DESCRIPTION("DECstation DZ serial driver");
-MODULE_LICENSE("GPL");
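dz_encode_baud_rate() introduced above is a plain switch over the line rates the DZ11 can generate. The same mapping can also be kept as data; a table-driven sketch under the DZ_B* definitions from dz.h, with an invented function name and the same contract of returning -1 for unsupported rates:

	static const struct {
		unsigned int baud;
		int code;
	} dz_baud_table[] = {
		{   50, DZ_B50   }, {   75, DZ_B75   }, {  110, DZ_B110  },
		{  134, DZ_B134  }, {  150, DZ_B150  }, {  300, DZ_B300  },
		{  600, DZ_B600  }, { 1200, DZ_B1200 }, { 1800, DZ_B1800 },
		{ 2000, DZ_B2000 }, { 2400, DZ_B2400 }, { 3600, DZ_B3600 },
		{ 4800, DZ_B4800 }, { 7200, DZ_B7200 }, { 9600, DZ_B9600 },
	};

	static int dz_encode_baud_rate_tab(unsigned int baud)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(dz_baud_table); i++)
			if (dz_baud_table[i].baud == baud)
				return dz_baud_table[i].code;
		return -1;
	}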
diff --git a/drivers/serial/dz.h b/drivers/serial/dz.h
index 9674d4e49872..faf169ed27b3 100644
--- a/drivers/serial/dz.h
+++ b/drivers/serial/dz.h
@@ -33,6 +33,8 @@
#define DZ_FERR 0x2000 /* Frame error indicator */
#define DZ_PERR 0x1000 /* Parity error indicator */
+#define DZ_BREAK 0x0800 /* BREAK event software flag */
+
#define LINE(x) ((x & DZ_LINE_MASK) >> 8) /* Get the line number
from the input buffer */
#define UCHAR(x) ((unsigned char)(x & DZ_RBUF_MASK))
@@ -107,8 +109,8 @@
#define DZ_B7200 0x0D00
#define DZ_B9600 0x0E00
-#define DZ_CREAD 0x1000 /* Enable receiver */
-#define DZ_RXENAB 0x1000 /* enable receive char */
+#define DZ_RXENAB 0x1000 /* Receiver Enable */
+
/*
* Addresses for the DZ registers
*/
@@ -124,9 +126,4 @@
#define DZ_XMIT_SIZE 4096 /* buffer size */
#define DZ_WAKEUP_CHARS DZ_XMIT_SIZE/4
-#ifdef MODULE
-int init_module (void)
-void cleanup_module (void)
-#endif
-
#endif /* DZ_SERIAL_H */
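The new DZ_BREAK definition above is a software-only flag: dz_receive_chars() turns a framing error on a NUL character into it with (status & DZ_FERR) >> (ffs(DZ_FERR) - ffs(DZ_BREAK)). With these values the shift is by two bits, moving 0x2000 onto 0x0800. A small self-contained user-space check of that identity, with names duplicated here purely for illustration:

	#include <assert.h>
	#include <strings.h>	/* ffs() */

	#define DZ_FERR  0x2000	/* Frame error indicator */
	#define DZ_BREAK 0x0800	/* BREAK event software flag */

	int main(void)
	{
		unsigned int status = DZ_FERR;	/* framing error from hw */

		/* Move the framing-error bit onto the simulated BREAK bit,
		 * exactly as the dz.c receive path does for NUL chars. */
		status |= (status & DZ_FERR) >> (ffs(DZ_FERR) - ffs(DZ_BREAK));
		assert(status & DZ_BREAK);
		return 0;
	}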
diff --git a/drivers/serial/icom.h b/drivers/serial/icom.h
index 027455496745..c8029e0025c9 100644
--- a/drivers/serial/icom.h
+++ b/drivers/serial/icom.h
@@ -20,7 +20,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include<linux/serial_core.h>
+#include <linux/serial_core.h>
#define BAUD_TABLE_LIMIT ((sizeof(icom_acfg_baud)/sizeof(int)) - 1)
static int icom_acfg_baud[] = {
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index dc1967176fe2..56af1f566a4c 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -308,7 +308,7 @@ static void imx_start_tx(struct uart_port *port)
static irqreturn_t imx_rtsint(int irq, void *dev_id)
{
- struct imx_port *sport = (struct imx_port *)dev_id;
+ struct imx_port *sport = dev_id;
unsigned int val = readl(sport->port.membase + USR1) & USR1_RTSS;
unsigned long flags;
@@ -324,7 +324,7 @@ static irqreturn_t imx_rtsint(int irq, void *dev_id)
static irqreturn_t imx_txint(int irq, void *dev_id)
{
- struct imx_port *sport = (struct imx_port *)dev_id;
+ struct imx_port *sport = dev_id;
struct circ_buf *xmit = &sport->port.info->xmit;
unsigned long flags;
diff --git a/drivers/serial/mcf.c b/drivers/serial/mcf.c
index 051fcc2f5ba8..e76fc72c9b36 100644
--- a/drivers/serial/mcf.c
+++ b/drivers/serial/mcf.c
@@ -434,7 +434,7 @@ static struct uart_ops mcf_uart_ops = {
static struct mcf_uart mcf_ports[3];
-#define MCF_MAXPORTS (sizeof(mcf_ports) / sizeof(struct mcf_uart))
+#define MCF_MAXPORTS ARRAY_SIZE(mcf_ports)
/****************************************************************************/
#if defined(CONFIG_SERIAL_MCF_CONSOLE)
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 3c4d29e59b2c..a638f23c6c61 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -16,6 +16,9 @@
* Some of the code has been inspired/copied from the 2.4 code written
* by Dale Farnsworth <dfarnsworth@mvista.com>.
*
+ * Copyright (C) 2008 Freescale Semiconductor Inc.
+ * John Rigby <jrigby@gmail.com>
+ * Added support for MPC5121
* Copyright (C) 2006 Secret Lab Technologies Ltd.
* Grant Likely <grant.likely@secretlab.ca>
* Copyright (C) 2004-2006 Sylvain Munaut <tnt@246tNt.com>
@@ -67,7 +70,6 @@
#include <linux/serial.h>
#include <linux/sysrq.h>
#include <linux/console.h>
-
#include <linux/delay.h>
#include <linux/io.h>
@@ -79,6 +81,7 @@
#endif
#include <asm/mpc52xx.h>
+#include <asm/mpc512x.h>
#include <asm/mpc52xx_psc.h>
#if defined(CONFIG_SERIAL_MPC52xx_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -111,8 +114,8 @@ static struct device_node *mpc52xx_uart_nodes[MPC52xx_PSC_MAXNUM];
static void mpc52xx_uart_of_enumerate(void);
#endif
+
#define PSC(port) ((struct mpc52xx_psc __iomem *)((port)->membase))
-#define FIFO(port) ((struct mpc52xx_psc_fifo __iomem *)(PSC(port)+1))
/* Forward declaration of the interruption handling routine */
@@ -128,15 +131,301 @@ static irqreturn_t mpc52xx_uart_int(int irq, void *dev_id);
#define uart_console(port) (0)
#endif
+/* ======================================================================== */
+/* PSC fifo operations for isolating differences between 52xx and 512x */
+/* ======================================================================== */
+
+struct psc_ops {
+ void (*fifo_init)(struct uart_port *port);
+ int (*raw_rx_rdy)(struct uart_port *port);
+ int (*raw_tx_rdy)(struct uart_port *port);
+ int (*rx_rdy)(struct uart_port *port);
+ int (*tx_rdy)(struct uart_port *port);
+ int (*tx_empty)(struct uart_port *port);
+ void (*stop_rx)(struct uart_port *port);
+ void (*start_tx)(struct uart_port *port);
+ void (*stop_tx)(struct uart_port *port);
+ void (*rx_clr_irq)(struct uart_port *port);
+ void (*tx_clr_irq)(struct uart_port *port);
+ void (*write_char)(struct uart_port *port, unsigned char c);
+ unsigned char (*read_char)(struct uart_port *port);
+ void (*cw_disable_ints)(struct uart_port *port);
+ void (*cw_restore_ints)(struct uart_port *port);
+ unsigned long (*getuartclk)(void *p);
+};
+
+#ifdef CONFIG_PPC_MPC52xx
+#define FIFO_52xx(port) ((struct mpc52xx_psc_fifo __iomem *)(PSC(port)+1))
+static void mpc52xx_psc_fifo_init(struct uart_port *port)
+{
+ struct mpc52xx_psc __iomem *psc = PSC(port);
+ struct mpc52xx_psc_fifo __iomem *fifo = FIFO_52xx(port);
+
+ /* /32 prescaler */
+ out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00);
+
+ out_8(&fifo->rfcntl, 0x00);
+ out_be16(&fifo->rfalarm, 0x1ff);
+ out_8(&fifo->tfcntl, 0x07);
+ out_be16(&fifo->tfalarm, 0x80);
+
+ port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY;
+ out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static int mpc52xx_psc_raw_rx_rdy(struct uart_port *port)
+{
+ return in_be16(&PSC(port)->mpc52xx_psc_status)
+ & MPC52xx_PSC_SR_RXRDY;
+}
+
+static int mpc52xx_psc_raw_tx_rdy(struct uart_port *port)
+{
+ return in_be16(&PSC(port)->mpc52xx_psc_status)
+ & MPC52xx_PSC_SR_TXRDY;
+}
+
+
+static int mpc52xx_psc_rx_rdy(struct uart_port *port)
+{
+ return in_be16(&PSC(port)->mpc52xx_psc_isr)
+ & port->read_status_mask
+ & MPC52xx_PSC_IMR_RXRDY;
+}
+
+static int mpc52xx_psc_tx_rdy(struct uart_port *port)
+{
+ return in_be16(&PSC(port)->mpc52xx_psc_isr)
+ & port->read_status_mask
+ & MPC52xx_PSC_IMR_TXRDY;
+}
+
+static int mpc52xx_psc_tx_empty(struct uart_port *port)
+{
+ return in_be16(&PSC(port)->mpc52xx_psc_status)
+ & MPC52xx_PSC_SR_TXEMP;
+}
+
+static void mpc52xx_psc_start_tx(struct uart_port *port)
+{
+ port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
+ out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static void mpc52xx_psc_stop_tx(struct uart_port *port)
+{
+ port->read_status_mask &= ~MPC52xx_PSC_IMR_TXRDY;
+ out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static void mpc52xx_psc_stop_rx(struct uart_port *port)
+{
+ port->read_status_mask &= ~MPC52xx_PSC_IMR_RXRDY;
+ out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static void mpc52xx_psc_rx_clr_irq(struct uart_port *port)
+{
+}
+
+static void mpc52xx_psc_tx_clr_irq(struct uart_port *port)
+{
+}
+
+static void mpc52xx_psc_write_char(struct uart_port *port, unsigned char c)
+{
+ out_8(&PSC(port)->mpc52xx_psc_buffer_8, c);
+}
+
+static unsigned char mpc52xx_psc_read_char(struct uart_port *port)
+{
+ return in_8(&PSC(port)->mpc52xx_psc_buffer_8);
+}
+
+static void mpc52xx_psc_cw_disable_ints(struct uart_port *port)
+{
+ out_be16(&PSC(port)->mpc52xx_psc_imr, 0);
+}
+
+static void mpc52xx_psc_cw_restore_ints(struct uart_port *port)
+{
+ out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+/* Search for bus-frequency property in this node or a parent */
+static unsigned long mpc52xx_getuartclk(void *p)
+{
#if defined(CONFIG_PPC_MERGE)
-static struct of_device_id mpc52xx_uart_of_match[] = {
- { .type = "serial", .compatible = "fsl,mpc5200-psc-uart", },
- { .type = "serial", .compatible = "mpc5200-psc-uart", }, /* lite5200 */
- { .type = "serial", .compatible = "mpc5200-serial", }, /* efika */
- {},
+ /*
+ * The 5200 PSC uses a /32 prescaler, but the generic serial
+ * code assumes /16, so report half of the IPB bus frequency.
+ */
+ return mpc52xx_find_ipb_freq(p) / 2;
+#else
+ pr_debug("unexpected call to mpc52xx_getuartclk with arch/ppc\n");
+ return 0;
+#endif
+}
+
+static struct psc_ops mpc52xx_psc_ops = {
+ .fifo_init = mpc52xx_psc_fifo_init,
+ .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy,
+ .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy,
+ .rx_rdy = mpc52xx_psc_rx_rdy,
+ .tx_rdy = mpc52xx_psc_tx_rdy,
+ .tx_empty = mpc52xx_psc_tx_empty,
+ .stop_rx = mpc52xx_psc_stop_rx,
+ .start_tx = mpc52xx_psc_start_tx,
+ .stop_tx = mpc52xx_psc_stop_tx,
+ .rx_clr_irq = mpc52xx_psc_rx_clr_irq,
+ .tx_clr_irq = mpc52xx_psc_tx_clr_irq,
+ .write_char = mpc52xx_psc_write_char,
+ .read_char = mpc52xx_psc_read_char,
+ .cw_disable_ints = mpc52xx_psc_cw_disable_ints,
+ .cw_restore_ints = mpc52xx_psc_cw_restore_ints,
+ .getuartclk = mpc52xx_getuartclk,
+};
+
+#endif /* CONFIG_PPC_MPC52xx */
+
+#ifdef CONFIG_PPC_MPC512x
+#define FIFO_512x(port) ((struct mpc512x_psc_fifo __iomem *)(PSC(port)+1))
+static void mpc512x_psc_fifo_init(struct uart_port *port)
+{
+ out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_RESET_SLICE);
+ out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_ENABLE_SLICE);
+ out_be32(&FIFO_512x(port)->txalarm, 1);
+ out_be32(&FIFO_512x(port)->tximr, 0);
+
+ out_be32(&FIFO_512x(port)->rxcmd, MPC512x_PSC_FIFO_RESET_SLICE);
+ out_be32(&FIFO_512x(port)->rxcmd, MPC512x_PSC_FIFO_ENABLE_SLICE);
+ out_be32(&FIFO_512x(port)->rxalarm, 1);
+ out_be32(&FIFO_512x(port)->rximr, 0);
+
+ out_be32(&FIFO_512x(port)->tximr, MPC512x_PSC_FIFO_ALARM);
+ out_be32(&FIFO_512x(port)->rximr, MPC512x_PSC_FIFO_ALARM);
+}
+
+static int mpc512x_psc_raw_rx_rdy(struct uart_port *port)
+{
+ return !(in_be32(&FIFO_512x(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY);
+}
+
+static int mpc512x_psc_raw_tx_rdy(struct uart_port *port)
+{
+ return !(in_be32(&FIFO_512x(port)->txsr) & MPC512x_PSC_FIFO_FULL);
+}
+
+static int mpc512x_psc_rx_rdy(struct uart_port *port)
+{
+ return in_be32(&FIFO_512x(port)->rxsr)
+ & in_be32(&FIFO_512x(port)->rximr)
+ & MPC512x_PSC_FIFO_ALARM;
+}
+
+static int mpc512x_psc_tx_rdy(struct uart_port *port)
+{
+ return in_be32(&FIFO_512x(port)->txsr)
+ & in_be32(&FIFO_512x(port)->tximr)
+ & MPC512x_PSC_FIFO_ALARM;
+}
+
+static int mpc512x_psc_tx_empty(struct uart_port *port)
+{
+ return in_be32(&FIFO_512x(port)->txsr)
+ & MPC512x_PSC_FIFO_EMPTY;
+}
+
+static void mpc512x_psc_stop_rx(struct uart_port *port)
+{
+ unsigned long rx_fifo_imr;
+
+ rx_fifo_imr = in_be32(&FIFO_512x(port)->rximr);
+ rx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM;
+ out_be32(&FIFO_512x(port)->rximr, rx_fifo_imr);
+}
+
+static void mpc512x_psc_start_tx(struct uart_port *port)
+{
+ unsigned long tx_fifo_imr;
+
+ tx_fifo_imr = in_be32(&FIFO_512x(port)->tximr);
+ tx_fifo_imr |= MPC512x_PSC_FIFO_ALARM;
+ out_be32(&FIFO_512x(port)->tximr, tx_fifo_imr);
+}
+
+static void mpc512x_psc_stop_tx(struct uart_port *port)
+{
+ unsigned long tx_fifo_imr;
+
+ tx_fifo_imr = in_be32(&FIFO_512x(port)->tximr);
+ tx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM;
+ out_be32(&FIFO_512x(port)->tximr, tx_fifo_imr);
+}
+
+static void mpc512x_psc_rx_clr_irq(struct uart_port *port)
+{
+ out_be32(&FIFO_512x(port)->rxisr, in_be32(&FIFO_512x(port)->rxisr));
+}
+
+static void mpc512x_psc_tx_clr_irq(struct uart_port *port)
+{
+ out_be32(&FIFO_512x(port)->txisr, in_be32(&FIFO_512x(port)->txisr));
+}
+
+static void mpc512x_psc_write_char(struct uart_port *port, unsigned char c)
+{
+ out_8(&FIFO_512x(port)->txdata_8, c);
+}
+
+static unsigned char mpc512x_psc_read_char(struct uart_port *port)
+{
+ return in_8(&FIFO_512x(port)->rxdata_8);
+}
+
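+/*
+ * The 512x console-write hooks mask the FIFO interrupts rather than the PSC
+ * IMR: the current tximr/rximr values are stashed in read_status_mask (tx in
+ * the upper 16 bits, rx in the lower 16) and written back on restore.
+ */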
+static void mpc512x_psc_cw_disable_ints(struct uart_port *port)
+{
+ port->read_status_mask =
+ in_be32(&FIFO_512x(port)->tximr) << 16 |
+ in_be32(&FIFO_512x(port)->rximr);
+ out_be32(&FIFO_512x(port)->tximr, 0);
+ out_be32(&FIFO_512x(port)->rximr, 0);
+}
+
+static void mpc512x_psc_cw_restore_ints(struct uart_port *port)
+{
+ out_be32(&FIFO_512x(port)->tximr,
+ (port->read_status_mask >> 16) & 0x7f);
+ out_be32(&FIFO_512x(port)->rximr, port->read_status_mask & 0x7f);
+}
+
+static unsigned long mpc512x_getuartclk(void *p)
+{
+ return mpc512x_find_ips_freq(p);
+}
+
+static struct psc_ops mpc512x_psc_ops = {
+ .fifo_init = mpc512x_psc_fifo_init,
+ .raw_rx_rdy = mpc512x_psc_raw_rx_rdy,
+ .raw_tx_rdy = mpc512x_psc_raw_tx_rdy,
+ .rx_rdy = mpc512x_psc_rx_rdy,
+ .tx_rdy = mpc512x_psc_tx_rdy,
+ .tx_empty = mpc512x_psc_tx_empty,
+ .stop_rx = mpc512x_psc_stop_rx,
+ .start_tx = mpc512x_psc_start_tx,
+ .stop_tx = mpc512x_psc_stop_tx,
+ .rx_clr_irq = mpc512x_psc_rx_clr_irq,
+ .tx_clr_irq = mpc512x_psc_tx_clr_irq,
+ .write_char = mpc512x_psc_write_char,
+ .read_char = mpc512x_psc_read_char,
+ .cw_disable_ints = mpc512x_psc_cw_disable_ints,
+ .cw_restore_ints = mpc512x_psc_cw_restore_ints,
+ .getuartclk = mpc512x_getuartclk,
};
#endif
+static struct psc_ops *psc_ops;
/* ======================================================================== */
/* UART operations */
@@ -145,8 +434,7 @@ static struct of_device_id mpc52xx_uart_of_match[] = {
static unsigned int
mpc52xx_uart_tx_empty(struct uart_port *port)
{
- int status = in_be16(&PSC(port)->mpc52xx_psc_status);
- return (status & MPC52xx_PSC_SR_TXEMP) ? TIOCSER_TEMT : 0;
+ return psc_ops->tx_empty(port) ? TIOCSER_TEMT : 0;
}
static void
@@ -166,16 +454,14 @@ static void
mpc52xx_uart_stop_tx(struct uart_port *port)
{
/* port->lock taken by caller */
- port->read_status_mask &= ~MPC52xx_PSC_IMR_TXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+ psc_ops->stop_tx(port);
}
static void
mpc52xx_uart_start_tx(struct uart_port *port)
{
/* port->lock taken by caller */
- port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+ psc_ops->start_tx(port);
}
static void
@@ -188,8 +474,7 @@ mpc52xx_uart_send_xchar(struct uart_port *port, char ch)
if (ch) {
/* Make sure tx interrupts are on */
/* Truly necessary ??? They should be anyway */
- port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+ psc_ops->start_tx(port);
}
spin_unlock_irqrestore(&port->lock, flags);
@@ -199,8 +484,7 @@ static void
mpc52xx_uart_stop_rx(struct uart_port *port)
{
/* port->lock taken by caller */
- port->read_status_mask &= ~MPC52xx_PSC_IMR_RXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+ psc_ops->stop_rx(port);
}
static void
@@ -227,12 +511,12 @@ static int
mpc52xx_uart_startup(struct uart_port *port)
{
struct mpc52xx_psc __iomem *psc = PSC(port);
- struct mpc52xx_psc_fifo __iomem *fifo = FIFO(port);
int ret;
/* Request IRQ */
ret = request_irq(port->irq, mpc52xx_uart_int,
- IRQF_DISABLED | IRQF_SAMPLE_RANDOM, "mpc52xx_psc_uart", port);
+ IRQF_DISABLED | IRQF_SAMPLE_RANDOM | IRQF_SHARED,
+ "mpc52xx_psc_uart", port);
if (ret)
return ret;
@@ -242,15 +526,7 @@ mpc52xx_uart_startup(struct uart_port *port)
out_be32(&psc->sicr, 0); /* UART mode DCD ignored */
- out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00); /* /16 prescaler on */
-
- out_8(&fifo->rfcntl, 0x00);
- out_be16(&fifo->rfalarm, 0x1ff);
- out_8(&fifo->tfcntl, 0x07);
- out_be16(&fifo->tfalarm, 0x80);
-
- port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY;
- out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
+ psc_ops->fifo_init(port);
out_8(&psc->command, MPC52xx_PSC_TX_ENABLE);
out_8(&psc->command, MPC52xx_PSC_RX_ENABLE);
@@ -333,8 +609,7 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
* boot for the console, all stuff is not yet ready to receive at that
* time and that just makes the kernel oops */
/* while (j-- && mpc52xx_uart_int_rx_chars(port)); */
- while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) &&
- --j)
+ while (!mpc52xx_uart_tx_empty(port) && --j)
udelay(1);
if (!j)
@@ -462,11 +737,9 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
unsigned short status;
/* While we can read, do so ! */
- while ((status = in_be16(&PSC(port)->mpc52xx_psc_status)) &
- MPC52xx_PSC_SR_RXRDY) {
-
+ while (psc_ops->raw_rx_rdy(port)) {
/* Get the char */
- ch = in_8(&PSC(port)->mpc52xx_psc_buffer_8);
+ ch = psc_ops->read_char(port);
/* Handle sysreq char */
#ifdef SUPPORT_SYSRQ
@@ -481,6 +754,8 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
flag = TTY_NORMAL;
port->icount.rx++;
+ status = in_be16(&PSC(port)->mpc52xx_psc_status);
+
if (status & (MPC52xx_PSC_SR_PE |
MPC52xx_PSC_SR_FE |
MPC52xx_PSC_SR_RB)) {
@@ -510,7 +785,7 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
tty_flip_buffer_push(tty);
- return in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY;
+ return psc_ops->raw_rx_rdy(port);
}
static inline int
@@ -520,7 +795,7 @@ mpc52xx_uart_int_tx_chars(struct uart_port *port)
/* Process out of band chars */
if (port->x_char) {
- out_8(&PSC(port)->mpc52xx_psc_buffer_8, port->x_char);
+ psc_ops->write_char(port, port->x_char);
port->icount.tx++;
port->x_char = 0;
return 1;
@@ -533,8 +808,8 @@ mpc52xx_uart_int_tx_chars(struct uart_port *port)
}
/* Send chars */
- while (in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXRDY) {
- out_8(&PSC(port)->mpc52xx_psc_buffer_8, xmit->buf[xmit->tail]);
+ while (psc_ops->raw_tx_rdy(port)) {
+ psc_ops->write_char(port, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
if (uart_circ_empty(xmit))
@@ -560,7 +835,6 @@ mpc52xx_uart_int(int irq, void *dev_id)
struct uart_port *port = dev_id;
unsigned long pass = ISR_PASS_LIMIT;
unsigned int keepgoing;
- unsigned short status;
spin_lock(&port->lock);
@@ -569,18 +843,12 @@ mpc52xx_uart_int(int irq, void *dev_id)
/* If we don't find anything to do, we stop */
keepgoing = 0;
- /* Read status */
- status = in_be16(&PSC(port)->mpc52xx_psc_isr);
- status &= port->read_status_mask;
-
- /* Do we need to receive chars ? */
- /* For this RX interrupts must be on and some chars waiting */
- if (status & MPC52xx_PSC_IMR_RXRDY)
+ psc_ops->rx_clr_irq(port);
+ if (psc_ops->rx_rdy(port))
keepgoing |= mpc52xx_uart_int_rx_chars(port);
- /* Do we need to send chars ? */
- /* For this, TX must be ready and TX interrupt enabled */
- if (status & MPC52xx_PSC_IMR_TXRDY)
+ psc_ops->tx_clr_irq(port);
+ if (psc_ops->tx_rdy(port))
keepgoing |= mpc52xx_uart_int_tx_chars(port);
/* Limit number of iteration */
@@ -647,36 +915,33 @@ static void
mpc52xx_console_write(struct console *co, const char *s, unsigned int count)
{
struct uart_port *port = &mpc52xx_uart_ports[co->index];
- struct mpc52xx_psc __iomem *psc = PSC(port);
unsigned int i, j;
/* Disable interrupts */
- out_be16(&psc->mpc52xx_psc_imr, 0);
+ psc_ops->cw_disable_ints(port);
/* Wait the TX buffer to be empty */
j = 5000000; /* Maximum wait */
- while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) &&
- --j)
+ while (!mpc52xx_uart_tx_empty(port) && --j)
udelay(1);
/* Write all the chars */
for (i = 0; i < count; i++, s++) {
/* Line return handling */
if (*s == '\n')
- out_8(&psc->mpc52xx_psc_buffer_8, '\r');
+ psc_ops->write_char(port, '\r');
/* Send the char */
- out_8(&psc->mpc52xx_psc_buffer_8, *s);
+ psc_ops->write_char(port, *s);
/* Wait the TX buffer to be empty */
j = 20000; /* Maximum wait */
- while (!(in_be16(&psc->mpc52xx_psc_status) &
- MPC52xx_PSC_SR_TXEMP) && --j)
+ while (!mpc52xx_uart_tx_empty(port) && --j)
udelay(1);
}
/* Restore interrupt state */
- out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
+ psc_ops->cw_restore_ints(port);
}
#if !defined(CONFIG_PPC_MERGE)
@@ -721,7 +986,7 @@ mpc52xx_console_setup(struct console *co, char *options)
{
struct uart_port *port = &mpc52xx_uart_ports[co->index];
struct device_node *np = mpc52xx_uart_nodes[co->index];
- unsigned int ipb_freq;
+ unsigned int uartclk;
struct resource res;
int ret;
@@ -753,17 +1018,16 @@ mpc52xx_console_setup(struct console *co, char *options)
return ret;
}
- /* Search for bus-frequency property in this node or a parent */
- ipb_freq = mpc52xx_find_ipb_freq(np);
- if (ipb_freq == 0) {
- pr_debug("Could not find IPB bus frequency!\n");
+ uartclk = psc_ops->getuartclk(np);
+ if (uartclk == 0) {
+ pr_debug("Could not find uart clock frequency!\n");
return -EINVAL;
}
/* Basic port init. Needed since we use some uart_??? func before
* real init for early access */
spin_lock_init(&port->lock);
- port->uartclk = ipb_freq / 2;
+ port->uartclk = uartclk;
port->ops = &mpc52xx_uart_ops;
port->mapbase = res.start;
port->membase = ioremap(res.start, sizeof(struct mpc52xx_psc));
@@ -945,11 +1209,25 @@ static struct platform_driver mpc52xx_uart_platform_driver = {
/* OF Platform Driver */
/* ======================================================================== */
+static struct of_device_id mpc52xx_uart_of_match[] = {
+#ifdef CONFIG_PPC_MPC52xx
+ { .compatible = "fsl,mpc5200-psc-uart", .data = &mpc52xx_psc_ops, },
+ /* binding used by old lite5200 device trees: */
+ { .compatible = "mpc5200-psc-uart", .data = &mpc52xx_psc_ops, },
+ /* binding used by efika: */
+ { .compatible = "mpc5200-serial", .data = &mpc52xx_psc_ops, },
+#endif
+#ifdef CONFIG_PPC_MPC512x
+ { .compatible = "fsl,mpc5121-psc-uart", .data = &mpc512x_psc_ops, },
+#endif
+ {},
+};
+
static int __devinit
mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
{
int idx = -1;
- unsigned int ipb_freq;
+ unsigned int uartclk;
struct uart_port *port = NULL;
struct resource res;
int ret;
@@ -965,10 +1243,9 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
pr_debug("Found %s assigned to ttyPSC%x\n",
mpc52xx_uart_nodes[idx]->full_name, idx);
- /* Search for bus-frequency property in this node or a parent */
- ipb_freq = mpc52xx_find_ipb_freq(op->node);
- if (ipb_freq == 0) {
- dev_dbg(&op->dev, "Could not find IPB bus frequency!\n");
+ uartclk = psc_ops->getuartclk(op->node);
+ if (uartclk == 0) {
+ dev_dbg(&op->dev, "Could not find uart clock frequency!\n");
return -EINVAL;
}
@@ -976,7 +1253,7 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
port = &mpc52xx_uart_ports[idx];
spin_lock_init(&port->lock);
- port->uartclk = ipb_freq / 2;
+ port->uartclk = uartclk;
port->fifosize = 512;
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF |
@@ -1080,15 +1357,19 @@ mpc52xx_uart_of_enumerate(void)
static int enum_done;
struct device_node *np;
const unsigned int *devno;
+ const struct of_device_id *match;
int i;
if (enum_done)
return;
for_each_node_by_type(np, "serial") {
- if (!of_match_node(mpc52xx_uart_of_match, np))
+ match = of_match_node(mpc52xx_uart_of_match, np);
+ if (!match)
continue;
+ psc_ops = match->data;
+
/* Is a particular device number requested? */
devno = of_get_property(np, "port-number", NULL);
mpc52xx_uart_of_assign(np, devno ? *devno : -1);
@@ -1149,6 +1430,7 @@ mpc52xx_uart_init(void)
return ret;
}
#else
+ psc_ops = &mpc52xx_psc_ops;
ret = platform_driver_register(&mpc52xx_uart_platform_driver);
if (ret) {
printk(KERN_ERR "%s: platform_driver_register failed (%i)\n",
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index 4d643c926657..cb3a91967742 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -612,6 +612,7 @@ static void mpsc_hw_init(struct mpsc_port_info *pi)
/* No preamble, 16x divider, low-latency, */
writel(0x04400400, pi->mpsc_base + MPSC_MMCRH);
+ mpsc_set_baudrate(pi, pi->default_baud);
if (pi->mirror_regs) {
pi->MPSC_CHR_1_m = 0;
diff --git a/drivers/serial/mux.c b/drivers/serial/mux.c
index 83211013deb8..e94031731a47 100644
--- a/drivers/serial/mux.c
+++ b/drivers/serial/mux.c
@@ -582,7 +582,7 @@ static struct parisc_driver serial_mux_driver = {
};
/**
- * mux_init - Serial MUX initalization procedure.
+ * mux_init - Serial MUX initialization procedure.
*
* Register the Serial MUX driver.
*/
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index e773c8e14962..45de19366030 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -1527,7 +1527,7 @@ static inline void s3c2440_serial_exit(void)
#define s3c2440_uart_inf_at NULL
#endif /* CONFIG_CPU_S3C2440 */
-#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413)
+#if defined(CONFIG_CPU_S3C2412)
static int s3c2412_serial_setsource(struct uart_port *port,
struct s3c24xx_uart_clksrc *clk)
diff --git a/drivers/serial/sc26xx.c b/drivers/serial/sc26xx.c
new file mode 100644
index 000000000000..a350b6d2a181
--- /dev/null
+++ b/drivers/serial/sc26xx.c
@@ -0,0 +1,755 @@
+/*
+ * sc26xx.c: Serial driver for Philips SC2681/SC2692 devices.
+ *
+ * Copyright (C) 2006,2007 Thomas Bogendörfer (tsbogend@alpha.franken.de)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/major.h>
+#include <linux/circ_buf.h>
+#include <linux/serial.h>
+#include <linux/sysrq.h>
+#include <linux/console.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+
+#if defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+
+#define SC26XX_MAJOR 204
+#define SC26XX_MINOR_START 205
+#define SC26XX_NR 2
+
+struct uart_sc26xx_port {
+ struct uart_port port[2];
+ u8 dsr_mask[2];
+ u8 cts_mask[2];
+ u8 dcd_mask[2];
+ u8 ri_mask[2];
+ u8 dtr_mask[2];
+ u8 rts_mask[2];
+ u8 imr;
+};
+
+/* register common to both ports */
+#define RD_ISR 0x14
+#define RD_IPR 0x34
+
+#define WR_ACR 0x10
+#define WR_IMR 0x14
+#define WR_OPCR 0x34
+#define WR_OPR_SET 0x38
+#define WR_OPR_CLR 0x3C
+
+/* access common register */
+#define READ_SC(p, r) readb((p)->membase + RD_##r)
+#define WRITE_SC(p, r, v) writeb((v), (p)->membase + WR_##r)
+
+/* register per port */
+#define RD_PORT_MRx 0x00
+#define RD_PORT_SR 0x04
+#define RD_PORT_RHR 0x0c
+
+#define WR_PORT_MRx 0x00
+#define WR_PORT_CSR 0x04
+#define WR_PORT_CR 0x08
+#define WR_PORT_THR 0x0c
+
+/* SR bits */
+#define SR_BREAK (1 << 7)
+#define SR_FRAME (1 << 6)
+#define SR_PARITY (1 << 5)
+#define SR_OVERRUN (1 << 4)
+#define SR_TXRDY (1 << 2)
+#define SR_RXRDY (1 << 0)
+
+#define CR_RES_MR (1 << 4)
+#define CR_RES_RX (2 << 4)
+#define CR_RES_TX (3 << 4)
+#define CR_STRT_BRK (6 << 4)
+#define CR_STOP_BRK (7 << 4)
+#define CR_DIS_TX (1 << 3)
+#define CR_ENA_TX (1 << 2)
+#define CR_DIS_RX (1 << 1)
+#define CR_ENA_RX (1 << 0)
+
+/* ISR bits */
+#define ISR_RXRDYB (1 << 5)
+#define ISR_TXRDYB (1 << 4)
+#define ISR_RXRDYA (1 << 1)
+#define ISR_TXRDYA (1 << 0)
+
+/* IMR bits */
+#define IMR_RXRDY (1 << 1)
+#define IMR_TXRDY (1 << 0)
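+
+/*
+ * Channel B interrupt bits are the channel A bits shifted left by four;
+ * sc26xx_enable_irq()/sc26xx_disable_irq() rely on this when building the
+ * shared IMR value. Likewise, channel B registers sit 0x20 above their
+ * channel A counterparts (see read_sc_port()/write_sc_port() below).
+ */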
+
+/* access port register */
+static inline u8 read_sc_port(struct uart_port *p, u8 reg)
+{
+ return readb(p->membase + p->line * 0x20 + reg);
+}
+
+static inline void write_sc_port(struct uart_port *p, u8 reg, u8 val)
+{
+ writeb(val, p->membase + p->line * 0x20 + reg);
+}
+
+#define READ_SC_PORT(p, r) read_sc_port(p, RD_PORT_##r)
+#define WRITE_SC_PORT(p, r, v) write_sc_port(p, WR_PORT_##r, v)
+
+static void sc26xx_enable_irq(struct uart_port *port, int mask)
+{
+ struct uart_sc26xx_port *up;
+ int line = port->line;
+
+ port -= line;
+ up = container_of(port, struct uart_sc26xx_port, port[0]);
+
+ up->imr |= mask << (line * 4);
+ WRITE_SC(port, IMR, up->imr);
+}
+
+static void sc26xx_disable_irq(struct uart_port *port, int mask)
+{
+ struct uart_sc26xx_port *up;
+ int line = port->line;
+
+ port -= line;
+ up = container_of(port, struct uart_sc26xx_port, port[0]);
+
+ up->imr &= ~(mask << (line * 4));
+ WRITE_SC(port, IMR, up->imr);
+}
+
+static struct tty_struct *receive_chars(struct uart_port *port)
+{
+ struct tty_struct *tty = NULL;
+ int limit = 10000;
+ unsigned char ch;
+ char flag;
+ u8 status;
+
+ if (port->info != NULL) /* Unopened serial console */
+ tty = port->info->tty;
+
+ while (limit-- > 0) {
+ status = READ_SC_PORT(port, SR);
+ if (!(status & SR_RXRDY))
+ break;
+ ch = READ_SC_PORT(port, RHR);
+
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if (unlikely(status & (SR_BREAK | SR_FRAME |
+ SR_PARITY | SR_OVERRUN))) {
+ if (status & SR_BREAK) {
+ status &= ~(SR_PARITY | SR_FRAME);
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (status & SR_PARITY)
+ port->icount.parity++;
+ else if (status & SR_FRAME)
+ port->icount.frame++;
+ if (status & SR_OVERRUN)
+ port->icount.overrun++;
+
+ status &= port->read_status_mask;
+ if (status & SR_BREAK)
+ flag = TTY_BREAK;
+ else if (status & SR_PARITY)
+ flag = TTY_PARITY;
+ else if (status & SR_FRAME)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+
+ if (status & port->ignore_status_mask)
+ continue;
+
+ tty_insert_flip_char(tty, ch, flag);
+ }
+ return tty;
+}
+
+static void transmit_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit;
+
+ if (!port->info)
+ return;
+
+ xmit = &port->info->xmit;
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ sc26xx_disable_irq(port, IMR_TXRDY);
+ return;
+ }
+ while (!uart_circ_empty(xmit)) {
+ if (!(READ_SC_PORT(port, SR) & SR_TXRDY))
+ break;
+
+ WRITE_SC_PORT(port, THR, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
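+/*
+ * Both channels share one interrupt line. The ISR is read once under the
+ * channel A lock; each channel is then serviced under its own lock and its
+ * tty flip buffer is pushed after that lock is dropped.
+ */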
+static irqreturn_t sc26xx_interrupt(int irq, void *dev_id)
+{
+ struct uart_sc26xx_port *up = dev_id;
+ struct tty_struct *tty;
+ unsigned long flags;
+ u8 isr;
+
+ spin_lock_irqsave(&up->port[0].lock, flags);
+
+ tty = NULL;
+ isr = READ_SC(&up->port[0], ISR);
+ if (isr & ISR_TXRDYA)
+ transmit_chars(&up->port[0]);
+ if (isr & ISR_RXRDYA)
+ tty = receive_chars(&up->port[0]);
+
+ spin_unlock(&up->port[0].lock);
+
+ if (tty)
+ tty_flip_buffer_push(tty);
+
+ spin_lock(&up->port[1].lock);
+
+ tty = NULL;
+ if (isr & ISR_TXRDYB)
+ transmit_chars(&up->port[1]);
+ if (isr & ISR_RXRDYB)
+ tty = receive_chars(&up->port[1]);
+
+ spin_unlock_irqrestore(&up->port[1].lock, flags);
+
+ if (tty)
+ tty_flip_buffer_push(tty);
+
+ return IRQ_HANDLED;
+}
+
+/* port->lock is not held. */
+static unsigned int sc26xx_tx_empty(struct uart_port *port)
+{
+ return (READ_SC_PORT(port, SR) & SR_TXRDY) ? TIOCSER_TEMT : 0;
+}
+
+/* port->lock held by caller. */
+static void sc26xx_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_sc26xx_port *up;
+ int line = port->line;
+
+ port -= line;
+ up = container_of(port, struct uart_sc26xx_port, port[0]);
+
+ if (up->dtr_mask[line]) {
+ if (mctrl & TIOCM_DTR)
+ WRITE_SC(port, OPR_SET, up->dtr_mask[line]);
+ else
+ WRITE_SC(port, OPR_CLR, up->dtr_mask[line]);
+ }
+ if (up->rts_mask[line]) {
+ if (mctrl & TIOCM_RTS)
+ WRITE_SC(port, OPR_SET, up->rts_mask[line]);
+ else
+ WRITE_SC(port, OPR_CLR, up->rts_mask[line]);
+ }
+}
+
+/* port->lock is held by caller and interrupts are disabled. */
+static unsigned int sc26xx_get_mctrl(struct uart_port *port)
+{
+ struct uart_sc26xx_port *up;
+ int line = port->line;
+ unsigned int mctrl = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR;
+ u8 ipr;
+
+ port -= line;
+ up = container_of(port, struct uart_sc26xx_port, port[0]);
+ ipr = READ_SC(port, IPR) ^ 0xff;
+
+ if (up->dsr_mask[line]) {
+ mctrl &= ~TIOCM_DSR;
+ mctrl |= ipr & up->dsr_mask[line] ? TIOCM_DSR : 0;
+ }
+ if (up->cts_mask[line]) {
+ mctrl &= ~TIOCM_CTS;
+ mctrl |= ipr & up->cts_mask[line] ? TIOCM_CTS : 0;
+ }
+ if (up->dcd_mask[line]) {
+ mctrl &= ~TIOCM_CAR;
+ mctrl |= ipr & up->dcd_mask[line] ? TIOCM_CAR : 0;
+ }
+ if (up->ri_mask[line]) {
+ mctrl &= ~TIOCM_RNG;
+ mctrl |= ipr & up->ri_mask[line] ? TIOCM_RNG : 0;
+ }
+ return mctrl;
+}
+
+/* port->lock held by caller. */
+static void sc26xx_stop_tx(struct uart_port *port)
+{
+ return;
+}
+
+/* port->lock held by caller. */
+static void sc26xx_start_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->info->xmit;
+
+ while (!uart_circ_empty(xmit)) {
+ if (!(READ_SC_PORT(port, SR) & SR_TXRDY)) {
+ sc26xx_enable_irq(port, IMR_TXRDY);
+ break;
+ }
+ WRITE_SC_PORT(port, THR, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+}
+
+/* port->lock held by caller. */
+static void sc26xx_stop_rx(struct uart_port *port)
+{
+}
+
+/* port->lock held by caller. */
+static void sc26xx_enable_ms(struct uart_port *port)
+{
+}
+
+/* port->lock is not held. */
+static void sc26xx_break_ctl(struct uart_port *port, int break_state)
+{
+ if (break_state == -1)
+ WRITE_SC_PORT(port, CR, CR_STRT_BRK);
+ else
+ WRITE_SC_PORT(port, CR, CR_STOP_BRK);
+}
+
+/* port->lock is not held. */
+static int sc26xx_startup(struct uart_port *port)
+{
+ sc26xx_disable_irq(port, IMR_TXRDY | IMR_RXRDY);
+ WRITE_SC(port, OPCR, 0);
+
+ /* reset tx and rx */
+ WRITE_SC_PORT(port, CR, CR_RES_RX);
+ WRITE_SC_PORT(port, CR, CR_RES_TX);
+
+ /* start rx/tx */
+ WRITE_SC_PORT(port, CR, CR_ENA_TX | CR_ENA_RX);
+
+ /* enable irqs */
+ sc26xx_enable_irq(port, IMR_RXRDY);
+ return 0;
+}
+
+/* port->lock is not held. */
+static void sc26xx_shutdown(struct uart_port *port)
+{
+ /* disable interrupts */
+ sc26xx_disable_irq(port, IMR_TXRDY | IMR_RXRDY);
+
+ /* stop tx/rx */
+ WRITE_SC_PORT(port, CR, CR_DIS_TX | CR_DIS_RX);
+}
+
+/* port->lock is not held. */
+static void sc26xx_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
+ unsigned int quot = uart_get_divisor(port, baud);
+ unsigned int iflag, cflag;
+ unsigned long flags;
+ u8 mr1, mr2, csr;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ while ((READ_SC_PORT(port, SR) & ((1 << 3) | (1 << 2))) != 0xc)
+ udelay(2);
+
+ WRITE_SC_PORT(port, CR, CR_DIS_TX | CR_DIS_RX);
+
+ iflag = termios->c_iflag;
+ cflag = termios->c_cflag;
+
+ port->read_status_mask = SR_OVERRUN;
+ if (iflag & INPCK)
+ port->read_status_mask |= SR_PARITY | SR_FRAME;
+ if (iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= SR_BREAK;
+
+ port->ignore_status_mask = 0;
+ if (iflag & IGNBRK)
+ port->ignore_status_mask |= SR_BREAK;
+ if ((cflag & CREAD) == 0)
+ port->ignore_status_mask |= SR_BREAK | SR_FRAME |
+ SR_PARITY | SR_OVERRUN;
+
+ switch (cflag & CSIZE) {
+ case CS5:
+ mr1 = 0x00;
+ break;
+ case CS6:
+ mr1 = 0x01;
+ break;
+ case CS7:
+ mr1 = 0x02;
+ break;
+ default:
+ case CS8:
+ mr1 = 0x03;
+ break;
+ }
+ mr2 = 0x07;
+ if (cflag & CSTOPB)
+ mr2 = 0x0f;
+ if (cflag & PARENB) {
+ if (cflag & PARODD)
+ mr1 |= (1 << 2);
+ } else
+ mr1 |= (2 << 3);
+
+ switch (baud) {
+ case 50:
+ csr = 0x00;
+ break;
+ case 110:
+ csr = 0x11;
+ break;
+ case 134:
+ csr = 0x22;
+ break;
+ case 200:
+ csr = 0x33;
+ break;
+ case 300:
+ csr = 0x44;
+ break;
+ case 600:
+ csr = 0x55;
+ break;
+ case 1200:
+ csr = 0x66;
+ break;
+ case 2400:
+ csr = 0x88;
+ break;
+ case 4800:
+ csr = 0x99;
+ break;
+ default:
+ case 9600:
+ csr = 0xbb;
+ break;
+ case 19200:
+ csr = 0xcc;
+ break;
+ }
+
+ WRITE_SC_PORT(port, CR, CR_RES_MR);
+ WRITE_SC_PORT(port, MRx, mr1);
+ WRITE_SC_PORT(port, MRx, mr2);
+
+ WRITE_SC(port, ACR, 0x80);
+ WRITE_SC_PORT(port, CSR, csr);
+
+ /* reset tx and rx */
+ WRITE_SC_PORT(port, CR, CR_RES_RX);
+ WRITE_SC_PORT(port, CR, CR_RES_TX);
+
+ WRITE_SC_PORT(port, CR, CR_ENA_TX | CR_ENA_RX);
+ while ((READ_SC_PORT(port, SR) & ((1 << 3) | (1 << 2))) != 0xc)
+ udelay(2);
+
+ /* XXX */
+ uart_update_timeout(port, cflag,
+ (port->uartclk / (16 * quot)));
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *sc26xx_type(struct uart_port *port)
+{
+ return "SC26XX";
+}
+
+static void sc26xx_release_port(struct uart_port *port)
+{
+}
+
+static int sc26xx_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void sc26xx_config_port(struct uart_port *port, int flags)
+{
+}
+
+static int sc26xx_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ return -EINVAL;
+}
+
+static struct uart_ops sc26xx_ops = {
+ .tx_empty = sc26xx_tx_empty,
+ .set_mctrl = sc26xx_set_mctrl,
+ .get_mctrl = sc26xx_get_mctrl,
+ .stop_tx = sc26xx_stop_tx,
+ .start_tx = sc26xx_start_tx,
+ .stop_rx = sc26xx_stop_rx,
+ .enable_ms = sc26xx_enable_ms,
+ .break_ctl = sc26xx_break_ctl,
+ .startup = sc26xx_startup,
+ .shutdown = sc26xx_shutdown,
+ .set_termios = sc26xx_set_termios,
+ .type = sc26xx_type,
+ .release_port = sc26xx_release_port,
+ .request_port = sc26xx_request_port,
+ .config_port = sc26xx_config_port,
+ .verify_port = sc26xx_verify_port,
+};
+
+static struct uart_port *sc26xx_port;
+
+#ifdef CONFIG_SERIAL_SC26XX_CONSOLE
+static void sc26xx_console_putchar(struct uart_port *port, char c)
+{
+ unsigned long flags;
+ int limit = 1000000;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ while (limit-- > 0) {
+ if (READ_SC_PORT(port, SR) & SR_TXRDY) {
+ WRITE_SC_PORT(port, THR, c);
+ break;
+ }
+ udelay(2);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void sc26xx_console_write(struct console *con, const char *s, unsigned n)
+{
+ struct uart_port *port = sc26xx_port;
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (*s == '\n')
+ sc26xx_console_putchar(port, '\r');
+ sc26xx_console_putchar(port, *s++);
+ }
+}
+
+static int __init sc26xx_console_setup(struct console *con, char *options)
+{
+ struct uart_port *port = sc26xx_port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (port->type != PORT_SC26XX)
+ return -1;
+
+ printk(KERN_INFO "Console: ttySC%d (SC26XX)\n", con->index);
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, con, baud, parity, bits, flow);
+}
+
+static struct uart_driver sc26xx_reg;
+static struct console sc26xx_console = {
+ .name = "ttySC",
+ .write = sc26xx_console_write,
+ .device = uart_console_device,
+ .setup = sc26xx_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sc26xx_reg,
+};
+#define SC26XX_CONSOLE &sc26xx_console
+#else
+#define SC26XX_CONSOLE NULL
+#endif
+
+static struct uart_driver sc26xx_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "SC26xx",
+ .dev_name = "ttySC",
+ .major = SC26XX_MAJOR,
+ .minor = SC26XX_MINOR_START,
+ .nr = SC26XX_NR,
+ .cons = SC26XX_CONSOLE,
+};
+
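+/*
+ * The platform data describes how the modem-control signals are wired to the
+ * chip's input/output port pins: each signal gets a 4-bit field holding the
+ * pin number plus one (0 means not connected), packed as DTR at bit 0, RTS at
+ * bit 4, DSR at bit 8, CTS at bit 12, DCD at bit 16 and RI at bit 20. For
+ * example, a channel value of 0x00000021 describes DTR on OP0 and RTS on OP1.
+ */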
+static u8 sc26xx_flags2mask(unsigned int flags, unsigned int bitpos)
+{
+ unsigned int bit = (flags >> bitpos) & 15;
+
+ return bit ? (1 << (bit - 1)) : 0;
+}
+
+static void __devinit sc26xx_init_masks(struct uart_sc26xx_port *up,
+ int line, unsigned int data)
+{
+ up->dtr_mask[line] = sc26xx_flags2mask(data, 0);
+ up->rts_mask[line] = sc26xx_flags2mask(data, 4);
+ up->dsr_mask[line] = sc26xx_flags2mask(data, 8);
+ up->cts_mask[line] = sc26xx_flags2mask(data, 12);
+ up->dcd_mask[line] = sc26xx_flags2mask(data, 16);
+ up->ri_mask[line] = sc26xx_flags2mask(data, 20);
+}
+
+static int __devinit sc26xx_probe(struct platform_device *dev)
+{
+ struct resource *res;
+ struct uart_sc26xx_port *up;
+ unsigned int *sc26xx_data = dev->dev.platform_data;
+ int err;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ up = kzalloc(sizeof *up, GFP_KERNEL);
+ if (unlikely(!up))
+ return -ENOMEM;
+
+ up->port[0].line = 0;
+ up->port[0].ops = &sc26xx_ops;
+ up->port[0].type = PORT_SC26XX;
+ up->port[0].uartclk = (29491200 / 16); /* arbitrary */
+
+ up->port[0].mapbase = res->start;
+ up->port[0].membase = ioremap_nocache(up->port[0].mapbase, 0x40);
+ up->port[0].iotype = UPIO_MEM;
+ up->port[0].irq = platform_get_irq(dev, 0);
+
+ up->port[0].dev = &dev->dev;
+
+ sc26xx_init_masks(up, 0, sc26xx_data[0]);
+
+ sc26xx_port = &up->port[0];
+
+ up->port[1].line = 1;
+ up->port[1].ops = &sc26xx_ops;
+ up->port[1].type = PORT_SC26XX;
+ up->port[1].uartclk = (29491200 / 16); /* arbitrary */
+
+ up->port[1].mapbase = up->port[0].mapbase;
+ up->port[1].membase = up->port[0].membase;
+ up->port[1].iotype = UPIO_MEM;
+ up->port[1].irq = up->port[0].irq;
+
+ up->port[1].dev = &dev->dev;
+
+ sc26xx_init_masks(up, 1, sc26xx_data[1]);
+
+ err = uart_register_driver(&sc26xx_reg);
+ if (err)
+ goto out_free_port;
+
+ sc26xx_reg.tty_driver->name_base = sc26xx_reg.minor;
+
+ err = uart_add_one_port(&sc26xx_reg, &up->port[0]);
+ if (err)
+ goto out_unregister_driver;
+
+ err = uart_add_one_port(&sc26xx_reg, &up->port[1]);
+ if (err)
+ goto out_remove_port0;
+
+ err = request_irq(up->port[0].irq, sc26xx_interrupt, 0, "sc26xx", up);
+ if (err)
+ goto out_remove_ports;
+
+ dev_set_drvdata(&dev->dev, up);
+ return 0;
+
+out_remove_ports:
+ uart_remove_one_port(&sc26xx_reg, &up->port[1]);
+out_remove_port0:
+ uart_remove_one_port(&sc26xx_reg, &up->port[0]);
+
+out_unregister_driver:
+ uart_unregister_driver(&sc26xx_reg);
+
+out_free_port:
+ kfree(up);
+ sc26xx_port = NULL;
+ return err;
+}
+
+
+static int __devexit sc26xx_driver_remove(struct platform_device *dev)
+{
+ struct uart_sc26xx_port *up = dev_get_drvdata(&dev->dev);
+
+ free_irq(up->port[0].irq, up);
+
+ uart_remove_one_port(&sc26xx_reg, &up->port[0]);
+ uart_remove_one_port(&sc26xx_reg, &up->port[1]);
+
+ uart_unregister_driver(&sc26xx_reg);
+
+ kfree(up);
+ sc26xx_port = NULL;
+
+ dev_set_drvdata(&dev->dev, NULL);
+ return 0;
+}
+
+static struct platform_driver sc26xx_driver = {
+ .probe = sc26xx_probe,
+ .remove = __devexit_p(sc26xx_driver_remove),
+ .driver = {
+ .name = "SC26xx",
+ },
+};
+
+static int __init sc26xx_init(void)
+{
+ return platform_driver_register(&sc26xx_driver);
+}
+
+static void __exit sc26xx_exit(void)
+{
+ platform_driver_unregister(&sc26xx_driver);
+}
+
+module_init(sc26xx_init);
+module_exit(sc26xx_exit);
+
+
+MODULE_AUTHOR("Thomas Bogendörfer");
+MODULE_DESCRIPTION("SC681/SC2692 serial driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 3bb5d241dd40..276da148c57e 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -371,7 +371,8 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
*/
termios->c_cflag &= ~CBAUD;
if (old) {
- termios->c_cflag |= old->c_cflag & CBAUD;
+ baud = tty_termios_baud_rate(old);
+ tty_termios_encode_baud_rate(termios, baud, baud);
old = NULL;
continue;
}
@@ -380,7 +381,7 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
* As a last resort, if the quotient is zero,
* default to 9600 bps
*/
- termios->c_cflag |= B9600;
+ tty_termios_encode_baud_rate(termios, 9600, 9600);
}
return 0;
@@ -1977,6 +1978,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *port)
if (state->info && state->info->flags & UIF_INITIALIZED) {
const struct uart_ops *ops = port->ops;
+ int tries;
state->info->flags = (state->info->flags & ~UIF_INITIALIZED)
| UIF_SUSPENDED;
@@ -1990,9 +1992,14 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *port)
/*
* Wait for the transmitter to empty.
*/
- while (!ops->tx_empty(port)) {
+ for (tries = 3; !ops->tx_empty(port) && tries; tries--) {
msleep(10);
}
+ if (!tries)
+ printk(KERN_ERR "%s%s%s%d: Unable to drain transmitter\n",
+ port->dev ? port->dev->bus_id : "",
+ port->dev ? ": " : "",
+ drv->dev_name, port->line);
ops->shutdown(port);
}
@@ -2029,8 +2036,6 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port)
}
port->suspended = 0;
- uart_change_pm(state, 0);
-
/*
* Re-enable the console device after suspending.
*/
@@ -2049,6 +2054,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port)
if (state->info && state->info->tty && termios.c_cflag == 0)
termios = *state->info->tty->termios;
+ uart_change_pm(state, 0);
port->ops->set_termios(port, &termios, NULL);
console_start(port->cons);
}
@@ -2057,6 +2063,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port)
const struct uart_ops *ops = port->ops;
int ret;
+ uart_change_pm(state, 0);
ops->set_mctrl(port, 0);
ret = ops->startup(port);
if (ret == 0) {
@@ -2150,10 +2157,11 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
/*
* Ensure that the modem control lines are de-activated.
+ * Keep the DTR setting that was established in uart_set_options().
* We probably don't need a spinlock around this, but
*/
spin_lock_irqsave(&port->lock, flags);
- port->ops->set_mctrl(port, 0);
+ port->ops->set_mctrl(port, port->mctrl & TIOCM_DTR);
spin_unlock_irqrestore(&port->lock, flags);
/*
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index d8b660061c13..164d2a42eb59 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -389,7 +389,7 @@ static void serial_detach(struct pcmcia_device *link)
/*====================================================================*/
static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
- kio_addr_t iobase, int irq)
+ unsigned int iobase, int irq)
{
struct uart_port port;
int line;
@@ -456,7 +456,7 @@ next_tuple(struct pcmcia_device *handle, tuple_t * tuple, cisparse_t * parse)
static int simple_config(struct pcmcia_device *link)
{
- static const kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
+ static const unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
static const int size_table[2] = { 8, 16 };
struct serial_info *info = link->priv;
struct serial_cfg_mem *cfg_mem;
@@ -480,7 +480,7 @@ static int simple_config(struct pcmcia_device *link)
/* If the card is already configured, look up the port and irq */
i = pcmcia_get_configuration_info(link, &config);
if ((i == CS_SUCCESS) && (config.Attributes & CONF_VALID_CLIENT)) {
- kio_addr_t port = 0;
+ unsigned int port = 0;
if ((config.BasePort2 != 0) && (config.NumPorts2 == 8)) {
port = config.BasePort2;
info->slave = 1;
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index 80943409edb0..4e06ab6bcb6e 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -17,10 +17,21 @@
#include <linux/tty.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/init.h>
#include <asm/io.h>
#if defined(CONFIG_OF)
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+
+/* Match table for of_platform binding */
+static struct of_device_id ulite_of_match[] __devinitdata = {
+ { .compatible = "xlnx,opb-uartlite-1.00.b", },
+ { .compatible = "xlnx,xps-uartlite-1.00.a", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ulite_of_match);
+
#endif
#define ULITE_NAME "ttyUL"
@@ -142,7 +153,7 @@ static int ulite_transmit(struct uart_port *port, int stat)
static irqreturn_t ulite_isr(int irq, void *dev_id)
{
- struct uart_port *port = (struct uart_port *)dev_id;
+ struct uart_port *port = dev_id;
int busy;
do {
@@ -275,6 +286,9 @@ static void ulite_release_port(struct uart_port *port)
static int ulite_request_port(struct uart_port *port)
{
+ pr_debug("ulite console: port=%p; port->mapbase=%x\n",
+ port, port->mapbase);
+
if (!request_mem_region(port->mapbase, ULITE_REGION, "uartlite")) {
dev_err(port->dev, "Memory region busy\n");
return -EBUSY;
@@ -375,32 +389,6 @@ static void ulite_console_write(struct console *co, const char *s,
spin_unlock_irqrestore(&port->lock, flags);
}
-#if defined(CONFIG_OF)
-static inline void __init ulite_console_of_find_device(int id)
-{
- struct device_node *np;
- struct resource res;
- const unsigned int *of_id;
- int rc;
-
- for_each_compatible_node(np, NULL, "xilinx,uartlite") {
- of_id = of_get_property(np, "port-number", NULL);
- if ((!of_id) || (*of_id != id))
- continue;
-
- rc = of_address_to_resource(np, 0, &res);
- if (rc)
- continue;
-
- ulite_ports[id].mapbase = res.start;
- of_node_put(np);
- return;
- }
-}
-#else /* CONFIG_OF */
-static inline void __init ulite_console_of_find_device(int id) { /* do nothing */ }
-#endif /* CONFIG_OF */
-
static int __init ulite_console_setup(struct console *co, char *options)
{
struct uart_port *port;
@@ -414,11 +402,7 @@ static int __init ulite_console_setup(struct console *co, char *options)
port = &ulite_ports[co->index];
- /* Check if it is an OF device */
- if (!port->mapbase)
- ulite_console_of_find_device(co->index);
-
- /* Do we have a device now? */
+ /* Has the device been initialized yet? */
if (!port->mapbase) {
pr_debug("console on ttyUL%i not present\n", co->index);
return -ENODEV;
@@ -617,13 +601,6 @@ static int __devexit ulite_of_remove(struct of_device *op)
return ulite_release(&op->dev);
}
-/* Match table for of_platform binding */
-static struct of_device_id __devinit ulite_of_match[] = {
- { .type = "serial", .compatible = "xilinx,uartlite", },
- {},
-};
-MODULE_DEVICE_TABLE(of, ulite_of_match);
-
static struct of_platform_driver ulite_of_driver = {
.owner = THIS_MODULE,
.name = "uartlite",
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index aaaea81e412a..d8107890db15 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -144,10 +144,10 @@ config SPI_OMAP_UWIRE
This hooks up to the MicroWire controller on OMAP1 chips.
config SPI_OMAP24XX
- tristate "McSPI driver for OMAP24xx"
- depends on SPI_MASTER && ARCH_OMAP24XX
+ tristate "McSPI driver for OMAP24xx/OMAP34xx"
+ depends on SPI_MASTER && (ARCH_OMAP24XX || ARCH_OMAP34XX)
help
- SPI master controller for OMAP24xx Multichannel SPI
+ SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI
(McSPI) modules.
config SPI_PXA2XX
@@ -176,6 +176,13 @@ config SPI_S3C24XX_GPIO
the inbuilt hardware cannot provide the transfer mode, or
where the board is using non hardware connected pins.
+config SPI_SH_SCI
+ tristate "SuperH SCI SPI controller"
+ depends on SPI_MASTER && SUPERH
+ select SPI_BITBANG
+ help
+ SPI driver for SuperH SCI blocks.
+
config SPI_TXX9
tristate "Toshiba TXx9 SPI controller"
depends on SPI_MASTER && GENERIC_GPIO && CPU_TX49XX
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 41fbac45c323..7fca043ce723 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
+obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
# ... add above this line ...
# SPI protocol drivers (device/link on bus)
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index ff10808183a3..293b7cab3e57 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -51,7 +51,9 @@ struct atmel_spi {
u8 stopping;
struct list_head queue;
struct spi_transfer *current_transfer;
- unsigned long remaining_bytes;
+ unsigned long current_remaining_bytes;
+ struct spi_transfer *next_transfer;
+ unsigned long next_remaining_bytes;
void *buffer;
dma_addr_t buffer_dma;
@@ -121,6 +123,48 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
gpio_set_value(gpio, !active);
}
+static inline int atmel_spi_xfer_is_last(struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ return msg->transfers.prev == &xfer->transfer_list;
+}
+
+static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer)
+{
+ return xfer->delay_usecs == 0 && !xfer->cs_change;
+}
+
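+/*
+ * The PDC "next" pointer/counter registers (RNPR/TNPR, RNCR/TNCR) are
+ * pre-loaded with the following transfer whenever it can be chained (no
+ * inter-transfer delay and no chip-select change), so back-to-back transfers
+ * proceed without waiting for an interrupt in between.
+ */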
+static void atmel_spi_next_xfer_data(struct spi_master *master,
+ struct spi_transfer *xfer,
+ dma_addr_t *tx_dma,
+ dma_addr_t *rx_dma,
+ u32 *plen)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ u32 len = *plen;
+
+ /* use scratch buffer only when rx or tx data is unspecified */
+ if (xfer->rx_buf)
+ *rx_dma = xfer->rx_dma + xfer->len - len;
+ else {
+ *rx_dma = as->buffer_dma;
+ if (len > BUFFER_SIZE)
+ len = BUFFER_SIZE;
+ }
+ if (xfer->tx_buf)
+ *tx_dma = xfer->tx_dma + xfer->len - len;
+ else {
+ *tx_dma = as->buffer_dma;
+ if (len > BUFFER_SIZE)
+ len = BUFFER_SIZE;
+ memset(as->buffer, 0, len);
+ dma_sync_single_for_device(&as->pdev->dev,
+ as->buffer_dma, len, DMA_TO_DEVICE);
+ }
+
+ *plen = len;
+}
+
/*
* Submit next transfer for DMA.
* lock is held, spi irq is blocked
@@ -130,53 +174,78 @@ static void atmel_spi_next_xfer(struct spi_master *master,
{
struct atmel_spi *as = spi_master_get_devdata(master);
struct spi_transfer *xfer;
- u32 len;
+ u32 len, remaining, total;
dma_addr_t tx_dma, rx_dma;
- xfer = as->current_transfer;
- if (!xfer || as->remaining_bytes == 0) {
- if (xfer)
- xfer = list_entry(xfer->transfer_list.next,
- struct spi_transfer, transfer_list);
- else
- xfer = list_entry(msg->transfers.next,
- struct spi_transfer, transfer_list);
- as->remaining_bytes = xfer->len;
- as->current_transfer = xfer;
- }
+ if (!as->current_transfer)
+ xfer = list_entry(msg->transfers.next,
+ struct spi_transfer, transfer_list);
+ else if (!as->next_transfer)
+ xfer = list_entry(as->current_transfer->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ else
+ xfer = NULL;
- len = as->remaining_bytes;
+ if (xfer) {
+ len = xfer->len;
+ atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+ remaining = xfer->len - len;
- tx_dma = xfer->tx_dma + xfer->len - len;
- rx_dma = xfer->rx_dma + xfer->len - len;
+ spi_writel(as, RPR, rx_dma);
+ spi_writel(as, TPR, tx_dma);
- /* use scratch buffer only when rx or tx data is unspecified */
- if (!xfer->rx_buf) {
- rx_dma = as->buffer_dma;
- if (len > BUFFER_SIZE)
- len = BUFFER_SIZE;
- }
- if (!xfer->tx_buf) {
- tx_dma = as->buffer_dma;
- if (len > BUFFER_SIZE)
- len = BUFFER_SIZE;
- memset(as->buffer, 0, len);
- dma_sync_single_for_device(&as->pdev->dev,
- as->buffer_dma, len, DMA_TO_DEVICE);
+ if (msg->spi->bits_per_word > 8)
+ len >>= 1;
+ spi_writel(as, RCR, len);
+ spi_writel(as, TCR, len);
+
+ dev_dbg(&msg->spi->dev,
+ " start xfer %p: len %u tx %p/%08x rx %p/%08x\n",
+ xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
+ xfer->rx_buf, xfer->rx_dma);
+ } else {
+ xfer = as->next_transfer;
+ remaining = as->next_remaining_bytes;
}
- spi_writel(as, RPR, rx_dma);
- spi_writel(as, TPR, tx_dma);
+ as->current_transfer = xfer;
+ as->current_remaining_bytes = remaining;
- as->remaining_bytes -= len;
- if (msg->spi->bits_per_word > 8)
- len >>= 1;
+ if (remaining > 0)
+ len = remaining;
+ else if (!atmel_spi_xfer_is_last(msg, xfer)
+ && atmel_spi_xfer_can_be_chained(xfer)) {
+ xfer = list_entry(xfer->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ len = xfer->len;
+ } else
+ xfer = NULL;
- /* REVISIT: when xfer->delay_usecs == 0, the PDC "next transfer"
- * mechanism might help avoid the IRQ latency between transfers
- * (and improve the nCS0 errata handling on at91rm9200 chips)
- *
- * We're also waiting for ENDRX before we start the next
+ as->next_transfer = xfer;
+
+ if (xfer) {
+ total = len;
+ atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+ as->next_remaining_bytes = total - len;
+
+ spi_writel(as, RNPR, rx_dma);
+ spi_writel(as, TNPR, tx_dma);
+
+ if (msg->spi->bits_per_word > 8)
+ len >>= 1;
+ spi_writel(as, RNCR, len);
+ spi_writel(as, TNCR, len);
+
+ dev_dbg(&msg->spi->dev,
+ " next xfer %p: len %u tx %p/%08x rx %p/%08x\n",
+ xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
+ xfer->rx_buf, xfer->rx_dma);
+ } else {
+ spi_writel(as, RNCR, 0);
+ spi_writel(as, TNCR, 0);
+ }
+
+ /* REVISIT: We're waiting for ENDRX before we start the next
* transfer because we need to handle some difficult timing
* issues otherwise. If we wait for ENDTX in one transfer and
* then starts waiting for ENDRX in the next, it's difficult
@@ -186,17 +255,7 @@ static void atmel_spi_next_xfer(struct spi_master *master,
*
* It should be doable, though. Just not now...
*/
- spi_writel(as, TNCR, 0);
- spi_writel(as, RNCR, 0);
spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
-
- dev_dbg(&msg->spi->dev,
- " start xfer %p: len %u tx %p/%08x rx %p/%08x imr %03x\n",
- xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
- xfer->rx_buf, xfer->rx_dma, spi_readl(as, IMR));
-
- spi_writel(as, RCR, len);
- spi_writel(as, TCR, len);
spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}
@@ -294,6 +353,7 @@ atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
spin_lock(&as->lock);
as->current_transfer = NULL;
+ as->next_transfer = NULL;
/* continue if needed */
if (list_empty(&as->queue) || as->stopping)
@@ -377,7 +437,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
spi_writel(as, IDR, pending);
- if (as->remaining_bytes == 0) {
+ if (as->current_remaining_bytes == 0) {
msg->actual_length += xfer->len;
if (!msg->is_dma_mapped)
@@ -387,7 +447,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
if (xfer->delay_usecs)
udelay(xfer->delay_usecs);
- if (msg->transfers.prev == &xfer->transfer_list) {
+ if (atmel_spi_xfer_is_last(msg, xfer)) {
/* report completed message */
atmel_spi_msg_done(master, as, msg, 0,
xfer->cs_change);
@@ -490,9 +550,14 @@ static int atmel_spi_setup(struct spi_device *spi)
if (!(spi->mode & SPI_CPHA))
csr |= SPI_BIT(NCPHA);
- /* TODO: DLYBS and DLYBCT */
- csr |= SPI_BF(DLYBS, 10);
- csr |= SPI_BF(DLYBCT, 10);
+ /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
+ *
+ * DLYBCT would add delays between words, slowing down transfers.
+ * It could potentially be useful to cope with DMA bottlenecks, but
+ * in those cases it's probably best to just use a lower bitrate.
+ */
+ csr |= SPI_BF(DLYBS, 0);
+ csr |= SPI_BF(DLYBCT, 0);
/* chipselect must have been muxed as GPIO (e.g. in board setup) */
npcs_pin = (unsigned int)spi->controller_data;
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index ea61724ae225..a6ba11afb03f 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -915,6 +915,28 @@ static u8 __initdata spi2_txdma_id[] = {
OMAP24XX_DMA_SPI2_TX1,
};
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
+static u8 __initdata spi3_rxdma_id[] = {
+ OMAP24XX_DMA_SPI3_RX0,
+ OMAP24XX_DMA_SPI3_RX1,
+};
+
+static u8 __initdata spi3_txdma_id[] = {
+ OMAP24XX_DMA_SPI3_TX0,
+ OMAP24XX_DMA_SPI3_TX1,
+};
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+static u8 __initdata spi4_rxdma_id[] = {
+ OMAP34XX_DMA_SPI4_RX0,
+};
+
+static u8 __initdata spi4_txdma_id[] = {
+ OMAP34XX_DMA_SPI4_TX0,
+};
+#endif
+
static int __init omap2_mcspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -935,7 +957,20 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
txdma_id = spi2_txdma_id;
num_chipselect = 2;
break;
- /* REVISIT omap2430 has a third McSPI ... */
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
+ case 3:
+ rxdma_id = spi3_rxdma_id;
+ txdma_id = spi3_txdma_id;
+ num_chipselect = 2;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP3
+ case 4:
+ rxdma_id = spi4_rxdma_id;
+ txdma_id = spi4_txdma_id;
+ num_chipselect = 1;
+ break;
+#endif
default:
return -EINVAL;
}
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index eb817b8eb024..365e0e355aea 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1526,17 +1526,6 @@ static void pxa2xx_spi_shutdown(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int suspend_devices(struct device *dev, void *pm_message)
-{
- pm_message_t *state = pm_message;
-
- if (dev->power.power_state.event != state->event) {
- dev_warn(dev, "pm state does not match request\n");
- return -1;
- }
-
- return 0;
-}
static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
{
@@ -1544,12 +1533,6 @@ static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
struct ssp_device *ssp = drv_data->ssp;
int status = 0;
- /* Check all childern for current power state */
- if (device_for_each_child(&pdev->dev, &state, suspend_devices) != 0) {
- dev_warn(&pdev->dev, "suspend aborted\n");
- return -1;
- }
-
status = stop_queue(drv_data);
if (status != 0)
return status;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 682a6a48fec3..1ad12afc6ba0 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -18,7 +18,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/autoconf.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -77,39 +76,33 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
#ifdef CONFIG_PM
-/*
- * NOTE: the suspend() method for an spi_master controller driver
- * should verify that all its child devices are marked as suspended;
- * suspend requests delivered through sysfs power/state files don't
- * enforce such constraints.
- */
static int spi_suspend(struct device *dev, pm_message_t message)
{
- int value;
+ int value = 0;
struct spi_driver *drv = to_spi_driver(dev->driver);
- if (!drv || !drv->suspend)
- return 0;
-
/* suspend will stop irqs and dma; no more i/o */
- value = drv->suspend(to_spi_device(dev), message);
- if (value == 0)
- dev->power.power_state = message;
+ if (drv) {
+ if (drv->suspend)
+ value = drv->suspend(to_spi_device(dev), message);
+ else
+ dev_dbg(dev, "... can't suspend\n");
+ }
return value;
}
static int spi_resume(struct device *dev)
{
- int value;
+ int value = 0;
struct spi_driver *drv = to_spi_driver(dev->driver);
- if (!drv || !drv->resume)
- return 0;
-
/* resume may restart the i/o queue */
- value = drv->resume(to_spi_device(dev));
- if (value == 0)
- dev->power.power_state = PMSG_ON;
+ if (drv) {
+ if (drv->resume)
+ value = drv->resume(to_spi_device(dev));
+ else
+ dev_dbg(dev, "... can't resume\n");
+ }
return value;
}
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index 7ef39a6e8c06..d853fceb6bf0 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -1,37 +1,11 @@
/*
- * File: drivers/spi/bfin5xx_spi.c
- * Maintainer:
- * Bryan Wu <bryan.wu@analog.com>
- * Original Author:
- * Luke Yang (Analog Devices Inc.)
- *
- * Created: March. 10th 2006
- * Description: SPI controller driver for Blackfin BF5xx
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * Modified:
- * March 10, 2006 bfin5xx_spi.c Created. (Luke Yang)
- * August 7, 2006 added full duplex mode (Axel Weiss & Luke Yang)
- * July 17, 2007 add support for BF54x SPI0 controller (Bryan Wu)
- * July 30, 2007 add platfrom_resource interface to support multi-port
- * SPI controller (Bryan Wu)
+ * Blackfin On-Chip SPI Driver
*
* Copyright 2004-2007 Analog Devices Inc.
*
- * This program is free software ; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation ; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY ; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Enter bugs at http://blackfin.uclinux.org/
*
- * You should have received a copy of the GNU General Public License
- * along with this program ; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Licensed under the GPL-2 or later.
*/
#include <linux/init.h>
@@ -223,10 +197,9 @@ static void cs_deactive(struct driver_data *drv_data, struct chip_data *chip)
#define MAX_SPI_SSEL 7
/* stop controller and re-config current chip*/
-static int restore_state(struct driver_data *drv_data)
+static void restore_state(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
- int ret = 0;
/* Clear status and disable clock */
write_STAT(drv_data, BIT_STAT_CLR);
@@ -239,13 +212,6 @@ static int restore_state(struct driver_data *drv_data)
bfin_spi_enable(drv_data);
cs_active(drv_data, chip);
-
- if (ret)
- dev_dbg(&drv_data->pdev->dev,
- ": request chip select number %d failed\n",
- chip->chip_select_num);
-
- return ret;
}
/* used to kick off transfer in rx mode */
@@ -286,32 +252,30 @@ static void u8_writer(struct driver_data *drv_data)
dev_dbg(&drv_data->pdev->dev,
"cr8-s is 0x%x\n", read_STAT(drv_data));
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
while (drv_data->tx < drv_data->tx_end) {
write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
while (read_STAT(drv_data) & BIT_STAT_TXS)
cpu_relax();
++drv_data->tx;
}
+
+ /* poll for SPI completion before return */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
}
static void u8_cs_chg_writer(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
while (drv_data->tx < drv_data->tx_end) {
cs_active(drv_data, chip);
write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
while (read_STAT(drv_data) & BIT_STAT_TXS)
cpu_relax();
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
cs_deactive(drv_data, chip);
@@ -350,43 +314,28 @@ static void u8_cs_chg_reader(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
- /* clear TDBR buffer before read(else it will be shifted out) */
- write_TDBR(drv_data, 0xFFFF);
+ while (drv_data->rx < drv_data->rx_end) {
+ cs_active(drv_data, chip);
+ read_RDBR(drv_data); /* kick off */
- cs_active(drv_data, chip);
- dummy_read(drv_data);
+ while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ cpu_relax();
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
- while (drv_data->rx < drv_data->rx_end - 1) {
+ *(u8 *) (drv_data->rx) = read_SHAW(drv_data);
cs_deactive(drv_data, chip);
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
- cpu_relax();
- cs_active(drv_data, chip);
- *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
++drv_data->rx;
}
- cs_deactive(drv_data, chip);
-
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
- cpu_relax();
- *(u8 *) (drv_data->rx) = read_SHAW(drv_data);
- ++drv_data->rx;
}
static void u8_duplex(struct driver_data *drv_data)
{
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
/* in duplex mode, clk is triggered by writing of TDBR */
while (drv_data->rx < drv_data->rx_end) {
write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
- while (read_STAT(drv_data) & BIT_STAT_TXS)
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
cpu_relax();
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
@@ -400,15 +349,12 @@ static void u8_cs_chg_duplex(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
while (drv_data->rx < drv_data->rx_end) {
cs_active(drv_data, chip);
write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
- while (read_STAT(drv_data) & BIT_STAT_TXS)
+
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
cpu_relax();
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
@@ -426,32 +372,30 @@ static void u16_writer(struct driver_data *drv_data)
dev_dbg(&drv_data->pdev->dev,
"cr16 is 0x%x\n", read_STAT(drv_data));
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
while (drv_data->tx < drv_data->tx_end) {
write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
while ((read_STAT(drv_data) & BIT_STAT_TXS))
cpu_relax();
drv_data->tx += 2;
}
+
+ /* poll for SPI completion before return */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
}
static void u16_cs_chg_writer(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
while (drv_data->tx < drv_data->tx_end) {
cs_active(drv_data, chip);
write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
while ((read_STAT(drv_data) & BIT_STAT_TXS))
cpu_relax();
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
cs_deactive(drv_data, chip);
@@ -519,14 +463,10 @@ static void u16_cs_chg_reader(struct driver_data *drv_data)
static void u16_duplex(struct driver_data *drv_data)
{
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
/* in duplex mode, clk is triggered by writing of TDBR */
while (drv_data->tx < drv_data->tx_end) {
write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
- while (read_STAT(drv_data) & BIT_STAT_TXS)
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
cpu_relax();
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
@@ -540,15 +480,11 @@ static void u16_cs_chg_duplex(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
while (drv_data->tx < drv_data->tx_end) {
cs_active(drv_data, chip);
write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
- while (read_STAT(drv_data) & BIT_STAT_TXS)
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
cpu_relax();
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
@@ -616,7 +552,7 @@ static void giveback(struct driver_data *drv_data)
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
- struct driver_data *drv_data = (struct driver_data *)dev_id;
+ struct driver_data *drv_data = dev_id;
struct chip_data *chip = drv_data->cur_chip;
struct spi_message *msg = drv_data->cur_msg;
@@ -978,10 +914,7 @@ static void pump_messages(struct work_struct *work)
/* Setup the SSP using the per chip configuration */
drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
- if (restore_state(drv_data)) {
- spin_unlock_irqrestore(&drv_data->lock, flags);
- return;
- };
+ restore_state(drv_data);
list_del_init(&drv_data->cur_msg->queue);
@@ -1187,7 +1120,7 @@ static int setup(struct spi_device *spi)
if ((chip->chip_select_num > 0)
&& (chip->chip_select_num <= spi->master->num_chipselect))
peripheral_request(ssel[spi->master->bus_num]
- [chip->chip_select_num-1], DRV_NAME);
+ [chip->chip_select_num-1], spi->modalias);
cs_deactive(drv_data, chip);
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 2cd8573fb09c..1b0647124933 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -157,7 +157,7 @@
#define SPI_FIFO_BYTE_WIDTH (2)
#define SPI_FIFO_OVERFLOW_MARGIN (2)
-/* DMA burst lenght for half full/empty request trigger */
+/* DMA burst length for half full/empty request trigger */
#define SPI_DMA_BLR (SPI_FIFO_DEPTH * SPI_FIFO_BYTE_WIDTH / 2)
/* Dummy char output to achieve reads.
@@ -1686,17 +1686,6 @@ static void spi_imx_shutdown(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int suspend_devices(struct device *dev, void *pm_message)
-{
- pm_message_t *state = pm_message;
-
- if (dev->power.power_state.event != state->event) {
- dev_warn(dev, "pm state does not match request\n");
- return -1;
- }
-
- return 0;
-}
static int spi_imx_suspend(struct platform_device *pdev, pm_message_t state)
{
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 89d6685a5ca4..6e834b8b9d27 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -237,10 +237,8 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
{
struct s3c24xx_spi *hw;
struct spi_master *master;
- struct spi_board_info *bi;
struct resource *res;
int err = 0;
- int i;
master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
if (master == NULL) {
@@ -348,16 +346,6 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
goto err_register;
}
- /* register all the devices associated */
-
- bi = &hw->pdata->board_info[0];
- for (i = 0; i < hw->pdata->board_size; i++, bi++) {
- dev_info(hw->dev, "registering %s\n", bi->modalias);
-
- bi->controller_data = hw;
- spi_new_device(master, bi);
- }
-
return 0;
err_register:
diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c
index 109d82c1abc0..82ae7d7eca38 100644
--- a/drivers/spi/spi_s3c24xx_gpio.c
+++ b/drivers/spi/spi_s3c24xx_gpio.c
@@ -100,7 +100,6 @@ static int s3c2410_spigpio_probe(struct platform_device *dev)
struct spi_master *master;
struct s3c2410_spigpio *sp;
int ret;
- int i;
master = spi_alloc_master(&dev->dev, sizeof(struct s3c2410_spigpio));
if (master == NULL) {
@@ -143,17 +142,6 @@ static int s3c2410_spigpio_probe(struct platform_device *dev)
if (ret)
goto err_no_bitbang;
- /* register the chips to go with the board */
-
- for (i = 0; i < sp->info->board_size; i++) {
- dev_info(&dev->dev, "registering %p: %s\n",
- &sp->info->board_info[i],
- sp->info->board_info[i].modalias);
-
- sp->info->board_info[i].controller_data = sp;
- spi_new_device(master, sp->info->board_info + i);
- }
-
return 0;
err_no_bitbang:
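
Both removals above drop per-driver registration loops because the SPI core already instantiates child devices from the board tables passed to spi_register_board_info() when the master is registered. A hedged sketch of the board-file side (modalias, bus number and clock rate are placeholders):

#include <linux/init.h>
#include <linux/spi/spi.h>

static struct spi_board_info example_spi_board_info[] __initdata = {
	{
		.modalias	= "spidev",	/* placeholder slave driver */
		.bus_num	= 0,
		.chip_select	= 0,
		.max_speed_hz	= 1000000,
	},
};

static int __init example_board_spi_init(void)
{
	/* core registers these devices when the matching master appears */
	return spi_register_board_info(example_spi_board_info,
				       ARRAY_SIZE(example_spi_board_info));
}
arch_initcall(example_board_spi_init);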
diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi_sh_sci.c
new file mode 100644
index 000000000000..3dbe71b16d60
--- /dev/null
+++ b/drivers/spi/spi_sh_sci.c
@@ -0,0 +1,205 @@
+/*
+ * SH SCI SPI interface
+ *
+ * Copyright (c) 2008 Magnus Damm
+ *
+ * Based on S3C24XX GPIO based SPI driver, which is:
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#include <asm/spi.h>
+#include <asm/io.h>
+
+struct sh_sci_spi {
+ struct spi_bitbang bitbang;
+
+ void __iomem *membase;
+ unsigned char val;
+ struct sh_spi_info *info;
+ struct platform_device *dev;
+};
+
+#define SCSPTR(sp) (sp->membase + 0x1c)
+#define PIN_SCK (1 << 2)
+#define PIN_TXD (1 << 0)
+#define PIN_RXD PIN_TXD
+#define PIN_INIT ((1 << 1) | (1 << 3) | PIN_SCK | PIN_TXD)
+
+static inline void setbits(struct sh_sci_spi *sp, int bits, int on)
+{
+ /*
+ * We are the only user of SCSPTR so no locking is required.
+ * Reading bit 2 and 0 in SCSPTR gives pin state as input.
+ * Writing the same bits sets the output value.
+ * This makes regular read-modify-write difficult so we
+ * use sp->val to keep track of the latest register value.
+ */
+
+ if (on)
+ sp->val |= bits;
+ else
+ sp->val &= ~bits;
+
+ iowrite8(sp->val, SCSPTR(sp));
+}
+
+static inline void setsck(struct spi_device *dev, int on)
+{
+ setbits(spi_master_get_devdata(dev->master), PIN_SCK, on);
+}
+
+static inline void setmosi(struct spi_device *dev, int on)
+{
+ setbits(spi_master_get_devdata(dev->master), PIN_TXD, on);
+}
+
+static inline u32 getmiso(struct spi_device *dev)
+{
+ struct sh_sci_spi *sp = spi_master_get_devdata(dev->master);
+
+ return (ioread8(SCSPTR(sp)) & PIN_RXD) ? 1 : 0;
+}
+
+#define spidelay(x) ndelay(x)
+
+#define EXPAND_BITBANG_TXRX
+#include <linux/spi/spi_bitbang.h>
+
+static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
+}
+
+static u32 sh_sci_spi_txrx_mode1(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits);
+}
+
+static u32 sh_sci_spi_txrx_mode2(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, word, bits);
+}
+
+static u32 sh_sci_spi_txrx_mode3(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, word, bits);
+}
+
+static void sh_sci_spi_chipselect(struct spi_device *dev, int value)
+{
+ struct sh_sci_spi *sp = spi_master_get_devdata(dev->master);
+
+ if (sp->info && sp->info->chip_select)
+ (sp->info->chip_select)(sp->info, dev->chip_select, value);
+}
+
+static int sh_sci_spi_probe(struct platform_device *dev)
+{
+ struct resource *r;
+ struct spi_master *master;
+ struct sh_sci_spi *sp;
+ int ret;
+
+ master = spi_alloc_master(&dev->dev, sizeof(struct sh_sci_spi));
+ if (master == NULL) {
+ dev_err(&dev->dev, "failed to allocate spi master\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ sp = spi_master_get_devdata(master);
+
+ platform_set_drvdata(dev, sp);
+ sp->info = dev->dev.platform_data;
+
+ /* setup spi bitbang adaptor */
+ sp->bitbang.master = spi_master_get(master);
+ sp->bitbang.master->bus_num = sp->info->bus_num;
+ sp->bitbang.master->num_chipselect = sp->info->num_chipselect;
+ sp->bitbang.chipselect = sh_sci_spi_chipselect;
+
+ sp->bitbang.txrx_word[SPI_MODE_0] = sh_sci_spi_txrx_mode0;
+ sp->bitbang.txrx_word[SPI_MODE_1] = sh_sci_spi_txrx_mode1;
+ sp->bitbang.txrx_word[SPI_MODE_2] = sh_sci_spi_txrx_mode2;
+ sp->bitbang.txrx_word[SPI_MODE_3] = sh_sci_spi_txrx_mode3;
+
+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENOENT;
+ goto err1;
+ }
+ sp->membase = ioremap(r->start, r->end - r->start + 1);
+ if (!sp->membase) {
+ ret = -ENXIO;
+ goto err1;
+ }
+ sp->val = ioread8(SCSPTR(sp));
+ setbits(sp, PIN_INIT, 1);
+
+ ret = spi_bitbang_start(&sp->bitbang);
+ if (!ret)
+ return 0;
+
+ setbits(sp, PIN_INIT, 0);
+ iounmap(sp->membase);
+ err1:
+ spi_master_put(sp->bitbang.master);
+ err0:
+ return ret;
+}
+
+static int sh_sci_spi_remove(struct platform_device *dev)
+{
+ struct sh_sci_spi *sp = platform_get_drvdata(dev);
+
+ iounmap(sp->membase);
+ setbits(sp, PIN_INIT, 0);
+ spi_bitbang_stop(&sp->bitbang);
+ spi_master_put(sp->bitbang.master);
+ return 0;
+}
+
+static struct platform_driver sh_sci_spi_drv = {
+ .probe = sh_sci_spi_probe,
+ .remove = sh_sci_spi_remove,
+ .driver = {
+ .name = "spi_sh_sci",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sh_sci_spi_init(void)
+{
+ return platform_driver_register(&sh_sci_spi_drv);
+}
+module_init(sh_sci_spi_init);
+
+static void __exit sh_sci_spi_exit(void)
+{
+ platform_driver_unregister(&sh_sci_spi_drv);
+}
+module_exit(sh_sci_spi_exit);
+
+MODULE_DESCRIPTION("SH SCI SPI Driver");
+MODULE_AUTHOR("Magnus Damm <damm@opensource.se>");
+MODULE_LICENSE("GPL");
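
A board would pair the new "spi_sh_sci" driver with a platform device whose platform_data is a struct sh_spi_info from <asm/spi.h>. The sketch below is hypothetical: the SCI register base and the chip-select handler are placeholders, and only the fields the driver dereferences above (bus_num, num_chipselect, chip_select) are filled in.

#include <linux/platform_device.h>
#include <asm/spi.h>

static void example_spi_cs(struct sh_spi_info *spi, int cs, int state)
{
	/* drive the board's chip-select line here (GPIO, latch, ...) */
}

static struct sh_spi_info example_spi_info = {
	.bus_num	= 0,
	.num_chipselect	= 1,
	.chip_select	= example_spi_cs,
};

static struct resource example_spi_resources[] = {
	{
		.start	= 0xffe00000,		/* placeholder SCI base */
		.end	= 0xffe00000 + 0x20 - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device example_spi_device = {
	.name		= "spi_sh_sci",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(example_spi_resources),
	.resource	= example_spi_resources,
	.dev		= {
		.platform_data	= &example_spi_info,
	},
};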
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index 1a31f7a72848..2d27d6d6d08e 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -1,7 +1,7 @@
/*
* Broadcom 43xx PCI-SSB bridge module
*
- * This technically is a seperate PCI driver module, but
+ * This technically is a separate PCI driver module, but
* because of its small size we include it in the SSB core
* instead of creating a standalone module.
*
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
new file mode 100644
index 000000000000..9b3f61200000
--- /dev/null
+++ b/drivers/thermal/Kconfig
@@ -0,0 +1,15 @@
+#
+# Generic thermal sysfs drivers configuration
+#
+
+menuconfig THERMAL
+ bool "Generic Thermal sysfs driver"
+ default y
+ help
+ Generic Thermal Sysfs driver offers a generic mechanism for
+ thermal management. Usually it is made up of one or more thermal
+ zones and cooling devices.
+ Each thermal zone contains its own temperature, trip points and
+ cooling devices.
+ All platforms with ACPI thermal support can use this driver.
+ If you want this support, you should say Y here.
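
As a rough sketch of what this option provides (derived from the thermal.c driver added later in this patch), a zone with one trip point bound to one cooling device appears in sysfs roughly as:

	/sys/class/thermal/thermal_zone0/
		type                  zone type string
		temp                  current temperature
		mode                  present only when the driver implements get_mode/set_mode
		trip_point_0_type
		trip_point_0_temp
		cdev0 -> ../cooling_device0    (symlink created at bind time)
		cdev0_trip_point
	/sys/class/thermal/cooling_device0/
		type
		max_state
		cur_state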
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
new file mode 100644
index 000000000000..8ef1232de376
--- /dev/null
+++ b/drivers/thermal/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the generic thermal sysfs driver.
+#
+
+obj-$(CONFIG_THERMAL) += thermal.o
diff --git a/drivers/thermal/thermal.c b/drivers/thermal/thermal.c
new file mode 100644
index 000000000000..3273e348fd14
--- /dev/null
+++ b/drivers/thermal/thermal.c
@@ -0,0 +1,714 @@
+/*
+ * thermal.c - Generic Thermal Management Sysfs support.
+ *
+ * Copyright (C) 2008 Intel Corp
+ * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
+ * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kdev_t.h>
+#include <linux/idr.h>
+#include <linux/thermal.h>
+#include <linux/spinlock.h>
+
+MODULE_AUTHOR("Zhang Rui");
+MODULE_DESCRIPTION("Generic thermal management sysfs support");
+MODULE_LICENSE("GPL");
+
+#define PREFIX "Thermal: "
+
+struct thermal_cooling_device_instance {
+ int id;
+ char name[THERMAL_NAME_LENGTH];
+ struct thermal_zone_device *tz;
+ struct thermal_cooling_device *cdev;
+ int trip;
+ char attr_name[THERMAL_NAME_LENGTH];
+ struct device_attribute attr;
+ struct list_head node;
+};
+
+static DEFINE_IDR(thermal_tz_idr);
+static DEFINE_IDR(thermal_cdev_idr);
+static DEFINE_MUTEX(thermal_idr_lock);
+
+static LIST_HEAD(thermal_tz_list);
+static LIST_HEAD(thermal_cdev_list);
+static DEFINE_MUTEX(thermal_list_lock);
+
+static int get_idr(struct idr *idr, struct mutex *lock, int *id)
+{
+ int err;
+
+ again:
+ if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
+ return -ENOMEM;
+
+ if (lock)
+ mutex_lock(lock);
+ err = idr_get_new(idr, NULL, id);
+ if (lock)
+ mutex_unlock(lock);
+ if (unlikely(err == -EAGAIN))
+ goto again;
+ else if (unlikely(err))
+ return err;
+
+ *id = *id & MAX_ID_MASK;
+ return 0;
+}
+
+static void release_idr(struct idr *idr, struct mutex *lock, int id)
+{
+ if (lock)
+ mutex_lock(lock);
+ idr_remove(idr, id);
+ if (lock)
+ mutex_unlock(lock);
+}
+
+/* sys I/F for thermal zone */
+
+#define to_thermal_zone(_dev) \
+ container_of(_dev, struct thermal_zone_device, device)
+
+static ssize_t
+type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+ return sprintf(buf, "%s\n", tz->type);
+}
+
+static ssize_t
+temp_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+ if (!tz->ops->get_temp)
+ return -EPERM;
+
+ return tz->ops->get_temp(tz, buf);
+}
+
+static ssize_t
+mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+ if (!tz->ops->get_mode)
+ return -EPERM;
+
+ return tz->ops->get_mode(tz, buf);
+}
+
+static ssize_t
+mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int result;
+
+ if (!tz->ops->set_mode)
+ return -EPERM;
+
+ result = tz->ops->set_mode(tz, buf);
+ if (result)
+ return result;
+
+ return count;
+}
+
+static ssize_t
+trip_point_type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int trip;
+
+ if (!tz->ops->get_trip_type)
+ return -EPERM;
+
+ if (!sscanf(attr->attr.name, "trip_point_%d_type", &trip))
+ return -EINVAL;
+
+ return tz->ops->get_trip_type(tz, trip, buf);
+}
+
+static ssize_t
+trip_point_temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int trip;
+
+ if (!tz->ops->get_trip_temp)
+ return -EPERM;
+
+ if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
+ return -EINVAL;
+
+ return tz->ops->get_trip_temp(tz, trip, buf);
+}
+
+static DEVICE_ATTR(type, 0444, type_show, NULL);
+static DEVICE_ATTR(temp, 0444, temp_show, NULL);
+static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
+
+static struct device_attribute trip_point_attrs[] = {
+ __ATTR(trip_point_0_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_0_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_1_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_1_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_2_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_2_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_3_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_3_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_4_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_4_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_5_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_5_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_6_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_6_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_7_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_7_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_8_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_8_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_9_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_9_temp, 0444, trip_point_temp_show, NULL),
+};
+
+#define TRIP_POINT_ATTR_ADD(_dev, _index, result) \
+do { \
+ result = device_create_file(_dev, \
+ &trip_point_attrs[_index * 2]); \
+ if (result) \
+ break; \
+ result = device_create_file(_dev, \
+ &trip_point_attrs[_index * 2 + 1]); \
+} while (0)
+
+#define TRIP_POINT_ATTR_REMOVE(_dev, _index) \
+do { \
+ device_remove_file(_dev, &trip_point_attrs[_index * 2]); \
+ device_remove_file(_dev, &trip_point_attrs[_index * 2 + 1]); \
+} while (0)
+
+/* sys I/F for cooling device */
+#define to_cooling_device(_dev) \
+ container_of(_dev, struct thermal_cooling_device, device)
+
+static ssize_t
+thermal_cooling_device_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+ return sprintf(buf, "%s\n", cdev->type);
+}
+
+static ssize_t
+thermal_cooling_device_max_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+ return cdev->ops->get_max_state(cdev, buf);
+}
+
+static ssize_t
+thermal_cooling_device_cur_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+ return cdev->ops->get_cur_state(cdev, buf);
+}
+
+static ssize_t
+thermal_cooling_device_cur_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+ int state;
+ int result;
+
+ if (!sscanf(buf, "%d\n", &state))
+ return -EINVAL;
+
+ if (state < 0)
+ return -EINVAL;
+
+ result = cdev->ops->set_cur_state(cdev, state);
+ if (result)
+ return result;
+ return count;
+}
+
+static struct device_attribute dev_attr_cdev_type =
+ __ATTR(type, 0444, thermal_cooling_device_type_show, NULL);
+static DEVICE_ATTR(max_state, 0444,
+ thermal_cooling_device_max_state_show, NULL);
+static DEVICE_ATTR(cur_state, 0644,
+ thermal_cooling_device_cur_state_show,
+ thermal_cooling_device_cur_state_store);
+
+static ssize_t
+thermal_cooling_device_trip_point_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_cooling_device_instance *instance;
+
+ instance =
+ container_of(attr, struct thermal_cooling_device_instance, attr);
+
+ if (instance->trip == THERMAL_TRIPS_NONE)
+ return sprintf(buf, "-1\n");
+ else
+ return sprintf(buf, "%d\n", instance->trip);
+}
+
+/* Device management */
+
+/**
+ * thermal_zone_bind_cooling_device - bind a cooling device to a thermal zone
+ * This function is usually called in the thermal zone device .bind callback.
+ * @tz: thermal zone device
+ * @trip: indicates which trip point in this thermal zone the cooling
+ * device is associated with.
+ * @cdev: thermal cooling device
+ */
+int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ int trip,
+ struct thermal_cooling_device *cdev)
+{
+ struct thermal_cooling_device_instance *dev;
+ struct thermal_cooling_device_instance *pos;
+ int result;
+
+ if (!tz || !cdev)
+ return -EINVAL;
+
+ if (trip >= tz->trips ||
+ (trip < 0 && trip != THERMAL_TRIPS_NONE))
+ return -EINVAL;
+
+ dev =
+ kzalloc(sizeof(struct thermal_cooling_device_instance), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->tz = tz;
+ dev->cdev = cdev;
+ dev->trip = trip;
+ result = get_idr(&tz->idr, &tz->lock, &dev->id);
+ if (result)
+ goto free_mem;
+
+ sprintf(dev->name, "cdev%d", dev->id);
+ result =
+ sysfs_create_link(&tz->device.kobj, &cdev->device.kobj, dev->name);
+ if (result)
+ goto release_idr;
+
+ sprintf(dev->attr_name, "cdev%d_trip_point", dev->id);
+ dev->attr.attr.name = dev->attr_name;
+ dev->attr.attr.mode = 0444;
+ dev->attr.show = thermal_cooling_device_trip_point_show;
+ result = device_create_file(&tz->device, &dev->attr);
+ if (result)
+ goto remove_symbol_link;
+
+ mutex_lock(&tz->lock);
+ list_for_each_entry(pos, &tz->cooling_devices, node)
+ if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
+ result = -EEXIST;
+ break;
+ }
+ if (!result)
+ list_add_tail(&dev->node, &tz->cooling_devices);
+ mutex_unlock(&tz->lock);
+
+ if (!result)
+ return 0;
+
+ device_remove_file(&tz->device, &dev->attr);
+ remove_symbol_link:
+ sysfs_remove_link(&tz->device.kobj, dev->name);
+ release_idr:
+ release_idr(&tz->idr, &tz->lock, dev->id);
+ free_mem:
+ kfree(dev);
+ return result;
+}
+EXPORT_SYMBOL(thermal_zone_bind_cooling_device);
+
+/**
+ * thermal_zone_unbind_cooling_device - unbind a cooling device from a thermal zone
+ * This function is usually called in the thermal zone device .unbind callback.
+ * @tz: thermal zone device
+ * @trip: indicates which trip point in this thermal zone the cooling
+ * device is associated with.
+ * @cdev: thermal cooling device
+ */
+int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
+ int trip,
+ struct thermal_cooling_device *cdev)
+{
+ struct thermal_cooling_device_instance *pos, *next;
+
+ mutex_lock(&tz->lock);
+ list_for_each_entry_safe(pos, next, &tz->cooling_devices, node) {
+ if (pos->tz == tz && pos->trip == trip
+ && pos->cdev == cdev) {
+ list_del(&pos->node);
+ mutex_unlock(&tz->lock);
+ goto unbind;
+ }
+ }
+ mutex_unlock(&tz->lock);
+
+ return -ENODEV;
+
+ unbind:
+ device_remove_file(&tz->device, &pos->attr);
+ sysfs_remove_link(&tz->device.kobj, pos->name);
+ release_idr(&tz->idr, &tz->lock, pos->id);
+ kfree(pos);
+ return 0;
+}
+EXPORT_SYMBOL(thermal_zone_unbind_cooling_device);
+
+static void thermal_release(struct device *dev)
+{
+ struct thermal_zone_device *tz;
+ struct thermal_cooling_device *cdev;
+
+ if (!strncmp(dev->bus_id, "thermal_zone", sizeof "thermal_zone" - 1)) {
+ tz = to_thermal_zone(dev);
+ kfree(tz);
+ } else {
+ cdev = to_cooling_device(dev);
+ kfree(cdev);
+ }
+}
+
+static struct class thermal_class = {
+ .name = "thermal",
+ .dev_release = thermal_release,
+};
+
+/**
+ * thermal_cooling_device_register - register a new thermal cooling device
+ * @type: the thermal cooling device type.
+ * @devdata: device private data.
+ * @ops: standard thermal cooling devices callbacks.
+ */
+struct thermal_cooling_device *thermal_cooling_device_register(char *type,
+ void *devdata, struct thermal_cooling_device_ops *ops)
+{
+ struct thermal_cooling_device *cdev;
+ struct thermal_zone_device *pos;
+ int result;
+
+ if (strlen(type) >= THERMAL_NAME_LENGTH)
+ return NULL;
+
+ if (!ops || !ops->get_max_state || !ops->get_cur_state ||
+ !ops->set_cur_state)
+ return NULL;
+
+ cdev = kzalloc(sizeof(struct thermal_cooling_device), GFP_KERNEL);
+ if (!cdev)
+ return NULL;
+
+ result = get_idr(&thermal_cdev_idr, &thermal_idr_lock, &cdev->id);
+ if (result) {
+ kfree(cdev);
+ return NULL;
+ }
+
+ strcpy(cdev->type, type);
+ cdev->ops = ops;
+ cdev->device.class = &thermal_class;
+ cdev->devdata = devdata;
+ sprintf(cdev->device.bus_id, "cooling_device%d", cdev->id);
+ result = device_register(&cdev->device);
+ if (result) {
+ release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
+ kfree(cdev);
+ return NULL;
+ }
+
+ /* sys I/F */
+ if (type) {
+ result = device_create_file(&cdev->device,
+ &dev_attr_cdev_type);
+ if (result)
+ goto unregister;
+ }
+
+ result = device_create_file(&cdev->device, &dev_attr_max_state);
+ if (result)
+ goto unregister;
+
+ result = device_create_file(&cdev->device, &dev_attr_cur_state);
+ if (result)
+ goto unregister;
+
+ mutex_lock(&thermal_list_lock);
+ list_add(&cdev->node, &thermal_cdev_list);
+ list_for_each_entry(pos, &thermal_tz_list, node) {
+ if (!pos->ops->bind)
+ continue;
+ result = pos->ops->bind(pos, cdev);
+ if (result)
+ break;
+
+ }
+ mutex_unlock(&thermal_list_lock);
+
+ if (!result)
+ return cdev;
+
+ unregister:
+ release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
+ device_unregister(&cdev->device);
+ return NULL;
+}
+EXPORT_SYMBOL(thermal_cooling_device_register);
+
+/**
+ * thermal_cooling_device_unregister - removes the registered thermal cooling device
+ *
+ * @cdev: the thermal cooling device to remove.
+ *
+ * thermal_cooling_device_unregister() must be called when the device is no
+ * longer needed.
+ */
+void thermal_cooling_device_unregister(struct
+ thermal_cooling_device
+ *cdev)
+{
+ struct thermal_zone_device *tz;
+ struct thermal_cooling_device *pos = NULL;
+
+ if (!cdev)
+ return;
+
+ mutex_lock(&thermal_list_lock);
+ list_for_each_entry(pos, &thermal_cdev_list, node)
+ if (pos == cdev)
+ break;
+ if (pos != cdev) {
+ /* thermal cooling device not found */
+ mutex_unlock(&thermal_list_lock);
+ return;
+ }
+ list_del(&cdev->node);
+ list_for_each_entry(tz, &thermal_tz_list, node) {
+ if (!tz->ops->unbind)
+ continue;
+ tz->ops->unbind(tz, cdev);
+ }
+ mutex_unlock(&thermal_list_lock);
+ if (cdev->type[0])
+ device_remove_file(&cdev->device,
+ &dev_attr_cdev_type);
+ device_remove_file(&cdev->device, &dev_attr_max_state);
+ device_remove_file(&cdev->device, &dev_attr_cur_state);
+
+ release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
+ device_unregister(&cdev->device);
+ return;
+}
+EXPORT_SYMBOL(thermal_cooling_device_unregister);
+
+/**
+ * thermal_zone_device_register - register a new thermal zone device
+ * @type: the thermal zone device type
+ * @trips: the number of trip points the thermal zone supports
+ * @devdata: private device data
+ * @ops: standard thermal zone device callbacks
+ *
+ * thermal_zone_device_unregister() must be called when the device is no
+ * longer needed.
+ */
+struct thermal_zone_device *thermal_zone_device_register(char *type,
+ int trips, void *devdata,
+ struct thermal_zone_device_ops *ops)
+{
+ struct thermal_zone_device *tz;
+ struct thermal_cooling_device *pos;
+ int result;
+ int count;
+
+ if (strlen(type) >= THERMAL_NAME_LENGTH)
+ return NULL;
+
+ if (trips > THERMAL_MAX_TRIPS || trips < 0)
+ return NULL;
+
+ if (!ops || !ops->get_temp)
+ return NULL;
+
+ tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL);
+ if (!tz)
+ return NULL;
+
+ INIT_LIST_HEAD(&tz->cooling_devices);
+ idr_init(&tz->idr);
+ mutex_init(&tz->lock);
+ result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id);
+ if (result) {
+ kfree(tz);
+ return NULL;
+ }
+
+ strcpy(tz->type, type);
+ tz->ops = ops;
+ tz->device.class = &thermal_class;
+ tz->devdata = devdata;
+ tz->trips = trips;
+ sprintf(tz->device.bus_id, "thermal_zone%d", tz->id);
+ result = device_register(&tz->device);
+ if (result) {
+ release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
+ kfree(tz);
+ return NULL;
+ }
+
+ /* sys I/F */
+ if (type) {
+ result = device_create_file(&tz->device, &dev_attr_type);
+ if (result)
+ goto unregister;
+ }
+
+ result = device_create_file(&tz->device, &dev_attr_temp);
+ if (result)
+ goto unregister;
+
+ if (ops->get_mode) {
+ result = device_create_file(&tz->device, &dev_attr_mode);
+ if (result)
+ goto unregister;
+ }
+
+ for (count = 0; count < trips; count++) {
+ TRIP_POINT_ATTR_ADD(&tz->device, count, result);
+ if (result)
+ goto unregister;
+ }
+
+ mutex_lock(&thermal_list_lock);
+ list_add_tail(&tz->node, &thermal_tz_list);
+ if (ops->bind)
+ list_for_each_entry(pos, &thermal_cdev_list, node) {
+ result = ops->bind(tz, pos);
+ if (result)
+ break;
+ }
+ mutex_unlock(&thermal_list_lock);
+
+ if (!result)
+ return tz;
+
+ unregister:
+ release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
+ device_unregister(&tz->device);
+ return NULL;
+}
+EXPORT_SYMBOL(thermal_zone_device_register);
+
+/**
+ * thermal_zone_device_unregister - removes the registered thermal zone device
+ *
+ * @tz: the thermal zone device to remove
+ */
+void thermal_zone_device_unregister(struct thermal_zone_device *tz)
+{
+ struct thermal_cooling_device *cdev;
+ struct thermal_zone_device *pos = NULL;
+ int count;
+
+ if (!tz)
+ return;
+
+ mutex_lock(&thermal_list_lock);
+ list_for_each_entry(pos, &thermal_tz_list, node)
+ if (pos == tz)
+ break;
+ if (pos != tz) {
+ /* thermal zone device not found */
+ mutex_unlock(&thermal_list_lock);
+ return;
+ }
+ list_del(&tz->node);
+ if (tz->ops->unbind)
+ list_for_each_entry(cdev, &thermal_cdev_list, node)
+ tz->ops->unbind(tz, cdev);
+ mutex_unlock(&thermal_list_lock);
+
+ if (tz->type[0])
+ device_remove_file(&tz->device, &dev_attr_type);
+ device_remove_file(&tz->device, &dev_attr_temp);
+ if (tz->ops->get_mode)
+ device_remove_file(&tz->device, &dev_attr_mode);
+
+ for (count = 0; count < tz->trips; count++)
+ TRIP_POINT_ATTR_REMOVE(&tz->device, count);
+
+ release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
+ idr_destroy(&tz->idr);
+ mutex_destroy(&tz->lock);
+ device_unregister(&tz->device);
+ return;
+}
+EXPORT_SYMBOL(thermal_zone_device_unregister);
+
+static int __init thermal_init(void)
+{
+ int result = 0;
+
+ result = class_register(&thermal_class);
+ if (result) {
+ idr_destroy(&thermal_tz_idr);
+ idr_destroy(&thermal_cdev_idr);
+ mutex_destroy(&thermal_idr_lock);
+ mutex_destroy(&thermal_list_lock);
+ }
+ return result;
+}
+
+static void __exit thermal_exit(void)
+{
+ class_unregister(&thermal_class);
+ idr_destroy(&thermal_tz_idr);
+ idr_destroy(&thermal_cdev_idr);
+ mutex_destroy(&thermal_idr_lock);
+ mutex_destroy(&thermal_list_lock);
+}
+
+subsys_initcall(thermal_init);
+module_exit(thermal_exit);
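
A hedged usage sketch of the interface added above (not part of the patch): one thermal zone with a single trip point and no bind/unbind logic. All names and values are invented, and the exact callback prototypes live in the new <linux/thermal.h>, which this diff does not show; the sketch assumes int-returning show-style ops, matching how thermal.c calls them (each one formats directly into the sysfs buffer).

#include <linux/module.h>
#include <linux/thermal.h>

static int ex_get_temp(struct thermal_zone_device *tz, char *buf)
{
	/* a real driver would read a sensor; 42 C in millidegrees here */
	return sprintf(buf, "%d\n", 42000);
}

static int ex_get_trip_type(struct thermal_zone_device *tz, int trip, char *buf)
{
	return sprintf(buf, "passive\n");
}

static int ex_get_trip_temp(struct thermal_zone_device *tz, int trip, char *buf)
{
	return sprintf(buf, "%d\n", 75000);
}

static struct thermal_zone_device_ops ex_tz_ops = {
	.get_temp	= ex_get_temp,
	.get_trip_type	= ex_get_trip_type,
	.get_trip_temp	= ex_get_trip_temp,
};

static struct thermal_zone_device *ex_tz;

static int __init ex_thermal_init(void)
{
	ex_tz = thermal_zone_device_register("example", 1, NULL, &ex_tz_ops);
	return ex_tz ? 0 : -ENODEV;
}
module_init(ex_thermal_init);

static void __exit ex_thermal_exit(void)
{
	thermal_zone_device_unregister(ex_tz);
}
module_exit(ex_thermal_exit);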
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index cc246faa3590..2a77e9d42c68 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -417,30 +417,28 @@ static void uio_vma_close(struct vm_area_struct *vma)
idev->vma_count--;
}
-static struct page *uio_vma_nopage(struct vm_area_struct *vma,
- unsigned long address, int *type)
+static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct uio_device *idev = vma->vm_private_data;
- struct page* page = NOPAGE_SIGBUS;
+ struct page *page;
int mi = uio_find_mem_index(vma);
if (mi < 0)
- return page;
+ return VM_FAULT_SIGBUS;
if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
page = virt_to_page(idev->info->mem[mi].addr);
else
page = vmalloc_to_page((void*)idev->info->mem[mi].addr);
get_page(page);
- if (type)
- *type = VM_FAULT_MINOR;
- return page;
+ vmf->page = page;
+ return 0;
}
static struct vm_operations_struct uio_vm_ops = {
.open = uio_vma_open,
.close = uio_vma_close,
- .nopage = uio_vma_nopage,
+ .fault = uio_vma_fault,
};
static int uio_mmap_physical(struct vm_area_struct *vma)
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index f8e711147501..fc65c02306dd 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -16,6 +16,7 @@
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/backlight.h>
#include <asm/arch/board.h>
#include <asm/arch/cpu.h>
@@ -69,6 +70,107 @@ static void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo,
}
#endif
+static const u32 contrast_ctr = ATMEL_LCDC_PS_DIV8
+ | ATMEL_LCDC_POL_POSITIVE
+ | ATMEL_LCDC_ENA_PWMENABLE;
+
+#ifdef CONFIG_BACKLIGHT_ATMEL_LCDC
+
+/* some bl->props field just changed */
+static int atmel_bl_update_status(struct backlight_device *bl)
+{
+ struct atmel_lcdfb_info *sinfo = bl_get_data(bl);
+ int power = sinfo->bl_power;
+ int brightness = bl->props.brightness;
+
+ /* REVISIT there may be a meaningful difference between
+ * fb_blank and power ... there seem to be some cases
+ * this doesn't handle correctly.
+ */
+ if (bl->props.fb_blank != sinfo->bl_power)
+ power = bl->props.fb_blank;
+ else if (bl->props.power != sinfo->bl_power)
+ power = bl->props.power;
+
+ if (brightness < 0 && power == FB_BLANK_UNBLANK)
+ brightness = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
+ else if (power != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, brightness);
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR,
+ brightness ? contrast_ctr : 0);
+
+ bl->props.fb_blank = bl->props.power = sinfo->bl_power = power;
+
+ return 0;
+}
+
+static int atmel_bl_get_brightness(struct backlight_device *bl)
+{
+ struct atmel_lcdfb_info *sinfo = bl_get_data(bl);
+
+ return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
+}
+
+static struct backlight_ops atmel_lcdc_bl_ops = {
+ .update_status = atmel_bl_update_status,
+ .get_brightness = atmel_bl_get_brightness,
+};
+
+static void init_backlight(struct atmel_lcdfb_info *sinfo)
+{
+ struct backlight_device *bl;
+
+ sinfo->bl_power = FB_BLANK_UNBLANK;
+
+ if (sinfo->backlight)
+ return;
+
+ bl = backlight_device_register("backlight", &sinfo->pdev->dev,
+ sinfo, &atmel_lcdc_bl_ops);
+ if (IS_ERR(bl)) {
+ dev_err(&sinfo->pdev->dev, "error %ld on backlight register\n",
+ PTR_ERR(bl));
+ return;
+ }
+ sinfo->backlight = bl;
+
+ bl->props.power = FB_BLANK_UNBLANK;
+ bl->props.fb_blank = FB_BLANK_UNBLANK;
+ bl->props.max_brightness = 0xff;
+ bl->props.brightness = atmel_bl_get_brightness(bl);
+}
+
+static void exit_backlight(struct atmel_lcdfb_info *sinfo)
+{
+ if (sinfo->backlight)
+ backlight_device_unregister(sinfo->backlight);
+}
+
+#else
+
+static void init_backlight(struct atmel_lcdfb_info *sinfo)
+{
+ dev_warn(&sinfo->pdev->dev, "backlight control is not available\n");
+}
+
+static void exit_backlight(struct atmel_lcdfb_info *sinfo)
+{
+}
+
+#endif
+
+static void init_contrast(struct atmel_lcdfb_info *sinfo)
+{
+ /* have some default contrast/backlight settings */
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, contrast_ctr);
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, ATMEL_LCDC_CVAL_DEFAULT);
+
+ if (sinfo->lcdcon_is_backlight)
+ init_backlight(sinfo);
+}
+
static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = {
.type = FB_TYPE_PACKED_PIXELS,
@@ -203,6 +305,26 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
var->transp.offset = var->transp.length = 0;
var->xoffset = var->yoffset = 0;
+ /* Saturate vertical and horizontal timings at maximum values */
+ var->vsync_len = min_t(u32, var->vsync_len,
+ (ATMEL_LCDC_VPW >> ATMEL_LCDC_VPW_OFFSET) + 1);
+ var->upper_margin = min_t(u32, var->upper_margin,
+ ATMEL_LCDC_VBP >> ATMEL_LCDC_VBP_OFFSET);
+ var->lower_margin = min_t(u32, var->lower_margin,
+ ATMEL_LCDC_VFP);
+ var->right_margin = min_t(u32, var->right_margin,
+ (ATMEL_LCDC_HFP >> ATMEL_LCDC_HFP_OFFSET) + 1);
+ var->hsync_len = min_t(u32, var->hsync_len,
+ (ATMEL_LCDC_HPW >> ATMEL_LCDC_HPW_OFFSET) + 1);
+ var->left_margin = min_t(u32, var->left_margin,
+ ATMEL_LCDC_HBP + 1);
+
+ /* Some parameters can't be zero */
+ var->vsync_len = max_t(u32, var->vsync_len, 1);
+ var->right_margin = max_t(u32, var->right_margin, 1);
+ var->hsync_len = max_t(u32, var->hsync_len, 1);
+ var->left_margin = max_t(u32, var->left_margin, 1);
+
switch (var->bits_per_pixel) {
case 1:
case 2:
@@ -370,10 +492,6 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
/* Disable all interrupts */
lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0UL);
- /* Set contrast */
- value = ATMEL_LCDC_PS_DIV8 | ATMEL_LCDC_POL_POSITIVE | ATMEL_LCDC_ENA_PWMENABLE;
- lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, value);
- lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, ATMEL_LCDC_CVAL_DEFAULT);
/* ...wait for DMA engine to become idle... */
while (lcdc_readl(sinfo, ATMEL_LCDC_DMACON) & ATMEL_LCDC_DMABUSY)
msleep(10);
@@ -577,6 +695,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
sinfo->default_monspecs = pdata_sinfo->default_monspecs;
sinfo->atmel_lcdfb_power_control = pdata_sinfo->atmel_lcdfb_power_control;
sinfo->guard_time = pdata_sinfo->guard_time;
+ sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight;
} else {
dev_err(dev, "cannot get default configuration\n");
goto free_info;
@@ -670,6 +789,9 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
goto release_mem;
}
+ /* Initialize PWM for contrast or backlight ("off") */
+ init_contrast(sinfo);
+
/* interrupt */
ret = request_irq(sinfo->irq_base, atmel_lcdfb_interrupt, 0, pdev->name, info);
if (ret) {
@@ -721,6 +843,7 @@ free_cmap:
unregister_irqs:
free_irq(sinfo->irq_base, info);
unmap_mmio:
+ exit_backlight(sinfo);
iounmap(sinfo->mmio);
release_mem:
release_mem_region(info->fix.mmio_start, info->fix.mmio_len);
@@ -755,6 +878,7 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
if (!sinfo)
return 0;
+ exit_backlight(sinfo);
if (sinfo->atmel_lcdfb_power_control)
sinfo->atmel_lcdfb_power_control(0);
unregister_framebuffer(info);
@@ -781,6 +905,9 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
static struct platform_driver atmel_lcdfb_driver = {
.remove = __exit_p(atmel_lcdfb_remove),
+
+// FIXME need suspend, resume
+
.driver = {
.name = "atmel_lcdfb",
.owner = THIS_MODULE,
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index 83ee3e75386c..675abdafc2d8 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -2561,7 +2561,7 @@ static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend)
pci_read_config_dword(rinfo->pdev, i * 4,
&rinfo->cfg_save[i]);
- /* Switch PCI power managment to D2. */
+ /* Switch PCI power management to D2. */
pci_disable_device(rinfo->pdev);
for (;;) {
pci_read_config_word(
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 9609a6c676be..dcd8073c2369 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -50,6 +50,19 @@ config BACKLIGHT_CLASS_DEVICE
To have support for your specific LCD panel you will have to
select the proper drivers which depend on this option.
+config BACKLIGHT_ATMEL_LCDC
+ bool "Atmel LCDC Contrast-as-Backlight control"
+ depends on BACKLIGHT_CLASS_DEVICE && FB_ATMEL
+ default y if MACH_SAM9261EK || MACH_SAM9263EK
+ help
+ This provides a backlight control internal to the Atmel LCDC
+ driver. If the LCD "contrast control" on your board is wired
+ so it controls the backlight brightness, select this option to
+ export this as a PWM-based backlight control.
+
+ If in doubt, it's safe to enable this option; it doesn't kick
+ in unless the board's description says it's wired that way.
+
config BACKLIGHT_CORGI
tristate "Generic (aka Sharp Corgi) Backlight Driver"
depends on BACKLIGHT_CLASS_DEVICE
@@ -67,6 +80,15 @@ config BACKLIGHT_LOCOMO
If you have a Sharp Zaurus SL-5500 (Collie) or SL-5600 (Poodle) say y to
enable the LCD/backlight driver.
+config BACKLIGHT_OMAP1
+ tristate "OMAP1 PWL-based LCD Backlight"
+ depends on BACKLIGHT_CLASS_DEVICE && ARCH_OMAP1
+ default y
+ help
+ This driver controls the LCD backlight level and power for
+ the PWL module of OMAP1 processors. Say Y if your board
+ uses this hardware.
+
config BACKLIGHT_HP680
tristate "HP Jornada 680 Backlight Driver"
depends on BACKLIGHT_CLASS_DEVICE && SH_HP6XX
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 965a78b18118..33f6c7cecc73 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -7,5 +7,6 @@ obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
obj-$(CONFIG_BACKLIGHT_CORGI) += corgi_bl.o
obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
+obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
obj-$(CONFIG_BACKLIGHT_PROGEAR) += progear_bl.o
obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 4840fe217e4d..39394757679c 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -94,8 +94,10 @@ static ssize_t backlight_store_power(struct device *dev,
mutex_lock(&bd->ops_lock);
if (bd->ops) {
pr_debug("backlight: set power to %d\n", power);
- bd->props.power = power;
- backlight_update_status(bd);
+ if (bd->props.power != power) {
+ bd->props.power = power;
+ backlight_update_status(bd);
+ }
rc = count;
}
mutex_unlock(&bd->ops_lock);
@@ -132,8 +134,10 @@ static ssize_t backlight_store_brightness(struct device *dev,
else {
pr_debug("backlight: set brightness to %d\n",
brightness);
- bd->props.brightness = brightness;
- backlight_update_status(bd);
+ if (bd->props.brightness != brightness) {
+ bd->props.brightness = brightness;
+ backlight_update_status(bd);
+ }
rc = count;
}
}
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
new file mode 100644
index 000000000000..891875d53a49
--- /dev/null
+++ b/drivers/video/backlight/omap1_bl.c
@@ -0,0 +1,210 @@
+/*
+ * Backlight driver for OMAP based boards.
+ *
+ * Copyright (c) 2006 Andrzej Zaborowski <balrog@zabor.org>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This package is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this package; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/arch/board.h>
+#include <asm/arch/mux.h>
+
+#define OMAPBL_MAX_INTENSITY 0xff
+
+struct omap_backlight {
+ int powermode;
+ int current_intensity;
+
+ struct device *dev;
+ struct omap_backlight_config *pdata;
+};
+
+static inline void omapbl_send_intensity(int intensity)
+{
+ omap_writeb(intensity, OMAP_PWL_ENABLE);
+}
+
+static inline void omapbl_send_enable(int enable)
+{
+ omap_writeb(enable, OMAP_PWL_CLK_ENABLE);
+}
+
+static void omapbl_blank(struct omap_backlight *bl, int mode)
+{
+ if (bl->pdata->set_power)
+ bl->pdata->set_power(bl->dev, mode);
+
+ switch (mode) {
+ case FB_BLANK_NORMAL:
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_POWERDOWN:
+ omapbl_send_intensity(0);
+ omapbl_send_enable(0);
+ break;
+
+ case FB_BLANK_UNBLANK:
+ omapbl_send_intensity(bl->current_intensity);
+ omapbl_send_enable(1);
+ break;
+ }
+}
+
+#ifdef CONFIG_PM
+static int omapbl_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct backlight_device *dev = platform_get_drvdata(pdev);
+ struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+
+ omapbl_blank(bl, FB_BLANK_POWERDOWN);
+ return 0;
+}
+
+static int omapbl_resume(struct platform_device *pdev)
+{
+ struct backlight_device *dev = platform_get_drvdata(pdev);
+ struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+
+ omapbl_blank(bl, bl->powermode);
+ return 0;
+}
+#else
+#define omapbl_suspend NULL
+#define omapbl_resume NULL
+#endif
+
+static int omapbl_set_power(struct backlight_device *dev, int state)
+{
+ struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+
+ omapbl_blank(bl, state);
+ bl->powermode = state;
+
+ return 0;
+}
+
+static int omapbl_update_status(struct backlight_device *dev)
+{
+ struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+
+ if (bl->current_intensity != dev->props.brightness) {
+ if (bl->powermode == FB_BLANK_UNBLANK)
+ omapbl_send_intensity(dev->props.brightness);
+ bl->current_intensity = dev->props.brightness;
+ }
+
+ if (dev->props.fb_blank != bl->powermode)
+ omapbl_set_power(dev, dev->props.fb_blank);
+
+ return 0;
+}
+
+static int omapbl_get_intensity(struct backlight_device *dev)
+{
+ struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+ return bl->current_intensity;
+}
+
+static struct backlight_ops omapbl_ops = {
+ .get_brightness = omapbl_get_intensity,
+ .update_status = omapbl_update_status,
+};
+
+static int omapbl_probe(struct platform_device *pdev)
+{
+ struct backlight_device *dev;
+ struct omap_backlight *bl;
+ struct omap_backlight_config *pdata = pdev->dev.platform_data;
+
+ if (!pdata)
+ return -ENXIO;
+
+ omapbl_ops.check_fb = pdata->check_fb;
+
+ bl = kzalloc(sizeof(struct omap_backlight), GFP_KERNEL);
+ if (unlikely(!bl))
+ return -ENOMEM;
+
+ dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops);
+ if (IS_ERR(dev)) {
+ kfree(bl);
+ return PTR_ERR(dev);
+ }
+
+ bl->powermode = FB_BLANK_POWERDOWN;
+ bl->current_intensity = 0;
+
+ bl->pdata = pdata;
+ bl->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, dev);
+
+ omap_cfg_reg(PWL); /* Conflicts with UART3 */
+
+ dev->props.fb_blank = FB_BLANK_UNBLANK;
+ dev->props.max_brightness = OMAPBL_MAX_INTENSITY;
+ dev->props.brightness = pdata->default_intensity;
+ omapbl_update_status(dev);
+
+ printk(KERN_INFO "OMAP LCD backlight initialised\n");
+
+ return 0;
+}
+
+static int omapbl_remove(struct platform_device *pdev)
+{
+ struct backlight_device *dev = platform_get_drvdata(pdev);
+ struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+
+ backlight_device_unregister(dev);
+ kfree(bl);
+
+ return 0;
+}
+
+static struct platform_driver omapbl_driver = {
+ .probe = omapbl_probe,
+ .remove = omapbl_remove,
+ .suspend = omapbl_suspend,
+ .resume = omapbl_resume,
+ .driver = {
+ .name = "omap-bl",
+ },
+};
+
+static int __init omapbl_init(void)
+{
+ return platform_driver_register(&omapbl_driver);
+}
+
+static void __exit omapbl_exit(void)
+{
+ platform_driver_unregister(&omapbl_driver);
+}
+
+module_init(omapbl_init);
+module_exit(omapbl_exit);
+
+MODULE_AUTHOR("Andrzej Zaborowski <balrog@zabor.org>");
+MODULE_DESCRIPTION("OMAP LCD Backlight driver");
+MODULE_LICENSE("GPL");
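
For context, this driver binds to a platform device named "omap-bl" and expects a struct omap_backlight_config (from <asm/arch/board.h>) as platform data; the code above reads default_intensity and optionally calls the set_power and check_fb hooks. A hypothetical board-file sketch under those assumptions:

#include <linux/platform_device.h>
#include <asm/arch/board.h>

static struct omap_backlight_config example_bl_config = {
	.default_intensity	= 0x7f,
	/* .set_power and .check_fb are optional hooks, left unset here */
};

static struct platform_device example_bl_device = {
	.name	= "omap-bl",
	.id	= -1,
	.dev	= {
		.platform_data	= &example_bl_config,
	},
};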
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index c8e7427a0bc8..0ce791e6f79c 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -498,8 +498,7 @@ static struct lcd_device *lcd_dev;
static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id)
{
-
- /*struct bfin_bf54xfb_info *info = (struct bfin_bf54xfb_info *)dev_id;*/
+ /*struct bfin_bf54xfb_info *info = dev_id;*/
u16 status = bfin_read_EPPI0_STATUS();
diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
index 308850df16fe..69864b1b3f9e 100644
--- a/drivers/video/console/bitblit.c
+++ b/drivers/video/console/bitblit.c
@@ -63,7 +63,7 @@ static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy,
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
struct fb_fillrect region;
- region.color = attr_bgcol_ec(bgshift, vc);
+ region.color = attr_bgcol_ec(bgshift, vc, info);
region.dx = sx * vc->vc_font.width;
region.dy = sy * vc->vc_font.height;
region.width = width * vc->vc_font.width;
@@ -213,7 +213,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
unsigned int bs = info->var.yres - bh;
struct fb_fillrect region;
- region.color = attr_bgcol_ec(bgshift, vc);
+ region.color = attr_bgcol_ec(bgshift, vc, info);
region.rop = ROP_COPY;
if (rw && !bottom_only) {
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 0f32f4a00b2d..022282494d3f 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -84,7 +84,7 @@
#ifdef CONFIG_MAC
#include <asm/macints.h>
#endif
-#if defined(__mc68000__) || defined(CONFIG_APUS)
+#if defined(__mc68000__)
#include <asm/machdep.h>
#include <asm/setup.h>
#endif
@@ -147,7 +147,7 @@ static char fontname[40];
static int info_idx = -1;
/* console rotation */
-static int rotate;
+static int initial_rotation;
static int fbcon_has_sysfs;
static const struct consw fb_con;
@@ -334,10 +334,7 @@ static inline int get_color(struct vc_data *vc, struct fb_info *info,
switch (depth) {
case 1:
{
- int col = ~(0xfff << (max(info->var.green.length,
- max(info->var.red.length,
- info->var.blue.length)))) & 0xff;
-
+ int col = mono_col(info);
/* 0 or 1 */
int fg = (info->fix.visual != FB_VISUAL_MONO01) ? col : 0;
int bg = (info->fix.visual != FB_VISUAL_MONO01) ? 0 : col;
@@ -537,9 +534,9 @@ static int __init fb_console_setup(char *this_opt)
if (!strncmp(options, "rotate:", 7)) {
options += 7;
if (*options)
- rotate = simple_strtoul(options, &options, 0);
- if (rotate > 3)
- rotate = 0;
+ initial_rotation = simple_strtoul(options, &options, 0);
+ if (initial_rotation > 3)
+ initial_rotation = 0;
}
}
return 1;
@@ -989,7 +986,7 @@ static const char *fbcon_startup(void)
ops->graphics = 1;
ops->cur_rotate = -1;
info->fbcon_par = ops;
- p->con_rotate = rotate;
+ p->con_rotate = initial_rotation;
set_blitting_type(vc, info);
if (info->fix.type != FB_TYPE_TEXT) {
@@ -1176,7 +1173,7 @@ static void fbcon_init(struct vc_data *vc, int init)
con_copy_unimap(vc, svc);
ops = info->fbcon_par;
- p->con_rotate = rotate;
+ p->con_rotate = initial_rotation;
set_blitting_type(vc, info);
cols = vc->vc_cols;
@@ -2795,7 +2792,7 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
{
struct fb_info *info = registered_fb[con2fb_map[fg_console]];
struct fbcon_ops *ops = info->fbcon_par;
- struct display *p = &fb_display[fg_console];
+ struct display *disp = &fb_display[fg_console];
int offset, limit, scrollback_old;
if (softback_top) {
@@ -2833,7 +2830,7 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
logo_shown = FBCON_LOGO_CANSHOW;
}
fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK);
- fbcon_redraw_softback(vc, p, lines);
+ fbcon_redraw_softback(vc, disp, lines);
fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK);
return 0;
}
@@ -2855,9 +2852,9 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
fbcon_cursor(vc, CM_ERASE);
- offset = p->yscroll - scrollback_current;
- limit = p->vrows;
- switch (p->scrollmode) {
+ offset = disp->yscroll - scrollback_current;
+ limit = disp->vrows;
+ switch (disp->scrollmode) {
case SCROLL_WRAP_MOVE:
info->var.vmode |= FB_VMODE_YWRAP;
break;
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
index 8e6ef4bc7a5c..3706307e70ed 100644
--- a/drivers/video/console/fbcon.h
+++ b/drivers/video/console/fbcon.h
@@ -93,10 +93,6 @@ struct fbcon_ops {
(((s) >> (fgshift)) & 0x0f)
#define attr_bgcol(bgshift,s) \
(((s) >> (bgshift)) & 0x0f)
-#define attr_bgcol_ec(bgshift,vc) \
- ((vc) ? (((vc)->vc_video_erase_char >> (bgshift)) & 0x0f) : 0)
-#define attr_fgcol_ec(fgshift,vc) \
- ((vc) ? (((vc)->vc_video_erase_char >> (fgshift)) & 0x0f) : 0)
/* Monochrome */
#define attr_bold(s) \
@@ -108,6 +104,49 @@ struct fbcon_ops {
#define attr_blink(s) \
((s) & 0x8000)
+#define mono_col(info) \
+ (~(0xfff << (max((info)->var.green.length, \
+ max((info)->var.red.length, \
+ (info)->var.blue.length)))) & 0xff)
+
+static inline int attr_col_ec(int shift, struct vc_data *vc,
+ struct fb_info *info, int is_fg)
+{
+ int is_mono01;
+ int col;
+ int fg;
+ int bg;
+
+ if (!vc)
+ return 0;
+
+ if (vc->vc_can_do_color)
+ return is_fg ? attr_fgcol(shift,vc->vc_video_erase_char)
+ : attr_bgcol(shift,vc->vc_video_erase_char);
+
+ if (!info)
+ return 0;
+
+ col = mono_col(info);
+ is_mono01 = info->fix.visual == FB_VISUAL_MONO01;
+
+ if (attr_reverse(vc->vc_video_erase_char)) {
+ fg = is_mono01 ? col : 0;
+ bg = is_mono01 ? 0 : col;
+ }
+ else {
+ fg = is_mono01 ? 0 : col;
+ bg = is_mono01 ? col : 0;
+ }
+
+ return is_fg ? fg : bg;
+}
+
+#define attr_bgcol_ec(bgshift,vc,info) \
+ attr_col_ec(bgshift,vc,info,0)
+#define attr_fgcol_ec(fgshift,vc,info) \
+ attr_col_ec(fgshift,vc,info,1)
+
/* Font */
#define REFCOUNT(fd) (((int *)(fd))[-1])
#define FNTSIZE(fd) (((int *)(fd))[-2])
diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/console/fbcon_ccw.c
index 825e6d6972a7..bdf913ecf001 100644
--- a/drivers/video/console/fbcon_ccw.c
+++ b/drivers/video/console/fbcon_ccw.c
@@ -84,7 +84,7 @@ static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
u32 vyres = GETVYRES(ops->p->scrollmode, info);
- region.color = attr_bgcol_ec(bgshift,vc);
+ region.color = attr_bgcol_ec(bgshift,vc,info);
region.dx = sy * vc->vc_font.height;
region.dy = vyres - ((sx + width) * vc->vc_font.width);
region.height = width * vc->vc_font.width;
@@ -198,7 +198,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- region.color = attr_bgcol_ec(bgshift,vc);
+ region.color = attr_bgcol_ec(bgshift,vc,info);
region.rop = ROP_COPY;
if (rw && !bottom_only) {
diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/console/fbcon_cw.c
index c637e6318803..a6819b9d1770 100644
--- a/drivers/video/console/fbcon_cw.c
+++ b/drivers/video/console/fbcon_cw.c
@@ -70,7 +70,7 @@ static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
u32 vxres = GETVXRES(ops->p->scrollmode, info);
- region.color = attr_bgcol_ec(bgshift,vc);
+ region.color = attr_bgcol_ec(bgshift,vc,info);
region.dx = vxres - ((sy + height) * vc->vc_font.height);
region.dy = sx * vc->vc_font.width;
region.height = width * vc->vc_font.width;
@@ -182,7 +182,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- region.color = attr_bgcol_ec(bgshift,vc);
+ region.color = attr_bgcol_ec(bgshift,vc,info);
region.rop = ROP_COPY;
if (rw && !bottom_only) {
diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/console/fbcon_ud.c
index 1473506df5d0..d9b5d6eb68a7 100644
--- a/drivers/video/console/fbcon_ud.c
+++ b/drivers/video/console/fbcon_ud.c
@@ -71,7 +71,7 @@ static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy,
u32 vyres = GETVYRES(ops->p->scrollmode, info);
u32 vxres = GETVXRES(ops->p->scrollmode, info);
- region.color = attr_bgcol_ec(bgshift,vc);
+ region.color = attr_bgcol_ec(bgshift,vc,info);
region.dy = vyres - ((sy + height) * vc->vc_font.height);
region.dx = vxres - ((sx + width) * vc->vc_font.width);
region.width = width * vc->vc_font.width;
@@ -228,7 +228,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- region.color = attr_bgcol_ec(bgshift,vc);
+ region.color = attr_bgcol_ec(bgshift,vc,info);
region.rop = ROP_COPY;
if (rw && !bottom_only) {
diff --git a/drivers/video/console/fonts.c b/drivers/video/console/fonts.c
index 96979c377518..d0c03fd70871 100644
--- a/drivers/video/console/fonts.c
+++ b/drivers/video/console/fonts.c
@@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
-#if defined(__mc68000__) || defined(CONFIG_APUS)
+#if defined(__mc68000__)
#include <asm/setup.h>
#endif
#include <linux/font.h>
@@ -120,7 +120,7 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
for(i=0; i<num_fonts; i++) {
f = fonts[i];
c = f->pref;
-#if defined(__mc68000__) || defined(CONFIG_APUS)
+#if defined(__mc68000__)
#ifdef CONFIG_FONT_PEARL_8x8
if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX)
c = 100;
diff --git a/drivers/video/console/tileblit.c b/drivers/video/console/tileblit.c
index d981fe4d86c6..0056a41e5c35 100644
--- a/drivers/video/console/tileblit.c
+++ b/drivers/video/console/tileblit.c
@@ -40,8 +40,8 @@ static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy,
rect.index = vc->vc_video_erase_char &
((vc->vc_hi_font_mask) ? 0x1ff : 0xff);
- rect.fg = attr_fgcol_ec(fgshift, vc);
- rect.bg = attr_bgcol_ec(bgshift, vc);
+ rect.fg = attr_fgcol_ec(fgshift, vc, info);
+ rect.bg = attr_bgcol_ec(bgshift, vc, info);
rect.sx = sx;
rect.sy = sy;
rect.width = width;
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index f65bcd314d54..6df29a62d720 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1153,8 +1153,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
/* if 512 char mode is already enabled don't re-enable it. */
if ((set) && (ch512 != vga_512_chars)) {
- int i;
-
/* attribute controller */
for (i = 0; i < MAX_NR_CONSOLES; i++) {
struct vc_data *c = vc_cons[i].d;
diff --git a/drivers/video/cyblafb.c b/drivers/video/cyblafb.c
index e23324d10be2..9704b73135f5 100644
--- a/drivers/video/cyblafb.c
+++ b/drivers/video/cyblafb.c
@@ -1156,7 +1156,7 @@ static struct fb_ops cyblafb_ops __devinitdata = {
// need altered timings to display correctly. So I decided that it is much
// better to provide a limited optimized set of modes plus the option of
// using the mode in effect at startup time (might be selected using the
-// vga=??? paramter). After that the user might use fbset to select any
+// vga=??? parameter). After that the user might use fbset to select any
// mode he likes, check_var will not try to alter geometry parameters as
// it would be necessary otherwise.
//
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index a0c5d9d90d74..0f8cfb988c90 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -25,8 +25,8 @@
#include <linux/pagemap.h>
/* this is to find and return the vmalloc-ed fb pages */
-static struct page* fb_deferred_io_nopage(struct vm_area_struct *vma,
- unsigned long vaddr, int *type)
+static int fb_deferred_io_fault(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
{
unsigned long offset;
struct page *page;
@@ -34,18 +34,17 @@ static struct page* fb_deferred_io_nopage(struct vm_area_struct *vma,
/* info->screen_base is in System RAM */
void *screen_base = (void __force *) info->screen_base;
- offset = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
+ offset = vmf->pgoff << PAGE_SHIFT;
if (offset >= info->fix.smem_len)
- return NOPAGE_SIGBUS;
+ return VM_FAULT_SIGBUS;
page = vmalloc_to_page(screen_base + offset);
if (!page)
- return NOPAGE_OOM;
+ return VM_FAULT_SIGBUS;
get_page(page);
- if (type)
- *type = VM_FAULT_MINOR;
- return page;
+ vmf->page = page;
+ return 0;
}
int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync)
@@ -84,7 +83,7 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
}
static struct vm_operations_struct fb_deferred_io_vm_ops = {
- .nopage = fb_deferred_io_nopage,
+ .fault = fb_deferred_io_fault,
.page_mkwrite = fb_deferred_io_mkwrite,
};
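
The conversion above leans on the fault API delivering the file-relative page offset directly: the old ->nopage hook had to derive the byte offset from the faulting virtual address, while ->fault receives it pre-computed in vmf->pgoff. A small stand-alone sketch with made-up numbers, assuming the usual relationship pgoff = vm_pgoff + (vaddr - vm_start) / PAGE_SIZE, just to show the two expressions agree:

/* Sketch only: why (vaddr - vm_start) + (vm_pgoff << PAGE_SHIFT)
 * equals vmf->pgoff << PAGE_SHIFT after the conversion. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long vm_start = 0x40000000UL;		/* hypothetical mapping base */
	unsigned long vm_pgoff = 4;			/* mapping starts 4 pages into the buffer */
	unsigned long vaddr    = vm_start + 3 * PAGE_SIZE;	/* faulting address */

	unsigned long old_off = (vaddr - vm_start) + (vm_pgoff << PAGE_SHIFT);
	unsigned long pgoff   = vm_pgoff + (vaddr - vm_start) / PAGE_SIZE;
	unsigned long new_off = pgoff << PAGE_SHIFT;

	printf("old 0x%lx, new 0x%lx\n", old_off, new_off);	/* both 0x7000 */
	return 0;
}
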
diff --git a/drivers/video/fb_draw.h b/drivers/video/fb_draw.h
index cdafbe14ef1f..a2a0618d86a5 100644
--- a/drivers/video/fb_draw.h
+++ b/drivers/video/fb_draw.h
@@ -91,6 +91,7 @@ static inline unsigned long fb_rev_pixels_in_long(unsigned long val,
val = comp(val >> 2, val << 2, REV_PIXELS_MASK2);
if (bswapmask & 3)
val = comp(val >> 4, val << 4, REV_PIXELS_MASK4);
+ return val;
}
static inline u32 fb_shifted_pixels_mask_u32(u32 index, u32 bswapmask)
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 4ba9c0894416..052e18058498 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -4,7 +4,7 @@
* Copyright (C) 2002 James Simmons <jsimmons@users.sf.net>
*
* Credits:
- *
+ *
* The EDID Parser is a conglomeration from the following sources:
*
* 1. SciTech SNAP Graphics Architecture
@@ -12,13 +12,13 @@
*
* 2. XFree86 4.3.0, interpret_edid.c
* Copyright 1998 by Egbert Eich <Egbert.Eich@Physik.TU-Darmstadt.DE>
- *
- * 3. John Fremlin <vii@users.sourceforge.net> and
+ *
+ * 3. John Fremlin <vii@users.sourceforge.net> and
* Ani Joshi <ajoshi@unixbox.com>
- *
+ *
* Generalized Timing Formula is derived from:
*
- * GTF Spreadsheet by Andy Morrish (1/5/97)
+ * GTF Spreadsheet by Andy Morrish (1/5/97)
* available at http://www.vesa.org
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -36,7 +36,7 @@
#endif
#include "edid.h"
-/*
+/*
* EDID parser
*/
@@ -160,8 +160,8 @@ static int check_edid(unsigned char *edid)
for (i = 0; i < ARRAY_SIZE(brokendb); i++) {
if (!strncmp(manufacturer, brokendb[i].manufacturer, 4) &&
brokendb[i].model == model) {
- fix = brokendb[i].fix;
- break;
+ fix = brokendb[i].fix;
+ break;
}
}
@@ -323,7 +323,7 @@ static void get_dpms_capabilities(unsigned char flags,
(flags & DPMS_SUSPEND) ? "yes" : "no",
(flags & DPMS_STANDBY) ? "yes" : "no");
}
-
+
static void get_chroma(unsigned char *block, struct fb_monspecs *specs)
{
int tmp;
@@ -365,7 +365,7 @@ static void get_chroma(unsigned char *block, struct fb_monspecs *specs)
tmp += 512;
specs->chroma.bluey = tmp/1024;
DPRINTK("BlueY: 0.%03d\n", specs->chroma.bluey);
-
+
tmp = ((block[6] & (3 << 2)) >> 2) | (block[0xd] << 2);
tmp *= 1000;
tmp += 512;
@@ -383,7 +383,7 @@ static void calc_mode_timings(int xres, int yres, int refresh,
struct fb_videomode *mode)
{
struct fb_var_screeninfo *var;
-
+
var = kzalloc(sizeof(struct fb_var_screeninfo), GFP_KERNEL);
if (var) {
@@ -451,11 +451,11 @@ static int get_est_timing(unsigned char *block, struct fb_videomode *mode)
c = block[1];
if (c&0x80) {
- mode[num++] = vesa_modes[9];
+ mode[num++] = vesa_modes[9];
DPRINTK(" 800x600@72Hz\n");
}
if (c&0x40) {
- mode[num++] = vesa_modes[10];
+ mode[num++] = vesa_modes[10];
DPRINTK(" 800x600@75Hz\n");
}
if (c&0x20) {
@@ -495,7 +495,7 @@ static int get_est_timing(unsigned char *block, struct fb_videomode *mode)
static int get_std_timing(unsigned char *block, struct fb_videomode *mode)
{
int xres, yres = 0, refresh, ratio, i;
-
+
xres = (block[0] + 31) * 8;
if (xres <= 256)
return 0;
@@ -519,7 +519,7 @@ static int get_std_timing(unsigned char *block, struct fb_videomode *mode)
DPRINTK(" %dx%d@%dHz\n", xres, yres, refresh);
for (i = 0; i < VESA_MODEDB_SIZE; i++) {
- if (vesa_modes[i].xres == xres &&
+ if (vesa_modes[i].xres == xres &&
vesa_modes[i].yres == yres &&
vesa_modes[i].refresh == refresh) {
*mode = vesa_modes[i];
@@ -536,13 +536,13 @@ static int get_dst_timing(unsigned char *block,
{
int j, num = 0;
- for (j = 0; j < 6; j++, block+= STD_TIMING_DESCRIPTION_SIZE)
+ for (j = 0; j < 6; j++, block += STD_TIMING_DESCRIPTION_SIZE)
num += get_std_timing(block, &mode[num]);
return num;
}
-static void get_detailed_timing(unsigned char *block,
+static void get_detailed_timing(unsigned char *block,
struct fb_videomode *mode)
{
mode->xres = H_ACTIVE;
@@ -553,7 +553,7 @@ static void get_detailed_timing(unsigned char *block,
mode->right_margin = H_SYNC_OFFSET;
mode->left_margin = (H_ACTIVE + H_BLANKING) -
(H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH);
- mode->upper_margin = V_BLANKING - V_SYNC_OFFSET -
+ mode->upper_margin = V_BLANKING - V_SYNC_OFFSET -
V_SYNC_WIDTH;
mode->lower_margin = V_SYNC_OFFSET;
mode->hsync_len = H_SYNC_WIDTH;
@@ -597,7 +597,7 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize)
if (mode == NULL)
return NULL;
- if (edid == NULL || !edid_checksum(edid) ||
+ if (edid == NULL || !edid_checksum(edid) ||
!edid_check_header(edid)) {
kfree(mode);
return NULL;
@@ -632,7 +632,7 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize)
if (block[0] == 0x00 && block[1] == 0x00 && block[3] == 0xfa)
num += get_dst_timing(block + 5, &mode[num]);
}
-
+
/* Yikes, EDID data is totally useless */
if (!num) {
kfree(mode);
@@ -686,7 +686,7 @@ static int fb_get_monitor_limits(unsigned char *edid, struct fb_monspecs *specs)
/* estimate monitor limits based on modes supported */
if (retval) {
struct fb_videomode *modes, *mode;
- int num_modes, i, hz, hscan, pixclock;
+ int num_modes, hz, hscan, pixclock;
int vtotal, htotal;
modes = fb_create_modedb(edid, &num_modes);
@@ -713,7 +713,7 @@ static int fb_get_monitor_limits(unsigned char *edid, struct fb_monspecs *specs)
hscan = (pixclock + htotal / 2) / htotal;
hscan = (hscan + 500) / 1000 * 1000;
hz = (hscan + vtotal / 2) / vtotal;
-
+
if (specs->dclkmax == 0 || specs->dclkmax < pixclock)
specs->dclkmax = pixclock;
@@ -966,8 +966,8 @@ void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
DPRINTK("========================================\n");
}
-/*
- * VESA Generalized Timing Formula (GTF)
+/*
+ * VESA Generalized Timing Formula (GTF)
*/
#define FLYBACK 550
@@ -996,7 +996,7 @@ struct __fb_timings {
* @hfreq: horizontal freq
*
* DESCRIPTION:
- * vblank = right_margin + vsync_len + left_margin
+ * vblank = right_margin + vsync_len + left_margin
*
* given: right_margin = 1 (V_FRONTPORCH)
* vsync_len = 3
@@ -1010,12 +1010,12 @@ static u32 fb_get_vblank(u32 hfreq)
{
u32 vblank;
- vblank = (hfreq * FLYBACK)/1000;
+ vblank = (hfreq * FLYBACK)/1000;
vblank = (vblank + 500)/1000;
return (vblank + V_FRONTPORCH);
}
-/**
+/**
* fb_get_hblank_by_freq - get horizontal blank time given hfreq
* @hfreq: horizontal freq
* @xres: horizontal resolution in pixels
@@ -1031,7 +1031,7 @@ static u32 fb_get_vblank(u32 hfreq)
*
* where: C = ((offset - scale factor) * blank_scale)
* -------------------------------------- + scale factor
- * 256
+ * 256
* M = blank_scale * gradient
*
*/
@@ -1039,7 +1039,7 @@ static u32 fb_get_hblank_by_hfreq(u32 hfreq, u32 xres)
{
u32 c_val, m_val, duty_cycle, hblank;
- c_val = (((H_OFFSET - H_SCALEFACTOR) * H_BLANKSCALE)/256 +
+ c_val = (((H_OFFSET - H_SCALEFACTOR) * H_BLANKSCALE)/256 +
H_SCALEFACTOR) * 1000;
m_val = (H_BLANKSCALE * H_GRADIENT)/256;
m_val = (m_val * 1000000)/hfreq;
@@ -1048,7 +1048,7 @@ static u32 fb_get_hblank_by_hfreq(u32 hfreq, u32 xres)
return (hblank);
}
-/**
+/**
* fb_get_hblank_by_dclk - get horizontal blank time given pixelclock
* @dclk: pixelclock in Hz
* @xres: horizontal resolution in pixels
@@ -1061,7 +1061,7 @@ static u32 fb_get_hblank_by_hfreq(u32 hfreq, u32 xres)
*
* duty cycle = percent of htotal assigned to inactive display
* duty cycle = C - (M * h_period)
- *
+ *
* where: h_period = SQRT(100 - C + (0.4 * xres * M)/dclk) + C - 100
* -----------------------------------------------
* 2 * M
@@ -1077,11 +1077,11 @@ static u32 fb_get_hblank_by_dclk(u32 dclk, u32 xres)
h_period = 100 - C_VAL;
h_period *= h_period;
h_period += (M_VAL * xres * 2 * 1000)/(5 * dclk);
- h_period *=10000;
+ h_period *= 10000;
h_period = int_sqrt(h_period);
h_period -= (100 - C_VAL) * 100;
- h_period *= 1000;
+ h_period *= 1000;
h_period /= 2 * M_VAL;
duty_cycle = C_VAL * 1000 - (M_VAL * h_period)/100;
@@ -1089,7 +1089,7 @@ static u32 fb_get_hblank_by_dclk(u32 dclk, u32 xres)
hblank &= ~15;
return (hblank);
}
-
+
/**
* fb_get_hfreq - estimate hsync
* @vfreq: vertical refresh rate
@@ -1100,13 +1100,13 @@ static u32 fb_get_hblank_by_dclk(u32 dclk, u32 xres)
* (yres + front_port) * vfreq * 1000000
* hfreq = -------------------------------------
* (1000000 - (vfreq * FLYBACK)
- *
+ *
*/
static u32 fb_get_hfreq(u32 vfreq, u32 yres)
{
u32 divisor, hfreq;
-
+
divisor = (1000000 - (vfreq * FLYBACK))/1000;
hfreq = (yres + V_FRONTPORCH) * vfreq * 1000;
return (hfreq/divisor);
@@ -1117,7 +1117,7 @@ static void fb_timings_vfreq(struct __fb_timings *timings)
timings->hfreq = fb_get_hfreq(timings->vfreq, timings->vactive);
timings->vblank = fb_get_vblank(timings->hfreq);
timings->vtotal = timings->vactive + timings->vblank;
- timings->hblank = fb_get_hblank_by_hfreq(timings->hfreq,
+ timings->hblank = fb_get_hblank_by_hfreq(timings->hfreq,
timings->hactive);
timings->htotal = timings->hactive + timings->hblank;
timings->dclk = timings->htotal * timings->hfreq;
@@ -1128,7 +1128,7 @@ static void fb_timings_hfreq(struct __fb_timings *timings)
timings->vblank = fb_get_vblank(timings->hfreq);
timings->vtotal = timings->vactive + timings->vblank;
timings->vfreq = timings->hfreq/timings->vtotal;
- timings->hblank = fb_get_hblank_by_hfreq(timings->hfreq,
+ timings->hblank = fb_get_hblank_by_hfreq(timings->hfreq,
timings->hactive);
timings->htotal = timings->hactive + timings->hblank;
timings->dclk = timings->htotal * timings->hfreq;
@@ -1136,7 +1136,7 @@ static void fb_timings_hfreq(struct __fb_timings *timings)
static void fb_timings_dclk(struct __fb_timings *timings)
{
- timings->hblank = fb_get_hblank_by_dclk(timings->dclk,
+ timings->hblank = fb_get_hblank_by_dclk(timings->dclk,
timings->hactive);
timings->htotal = timings->hactive + timings->hblank;
timings->hfreq = timings->dclk/timings->htotal;
@@ -1156,29 +1156,29 @@ static void fb_timings_dclk(struct __fb_timings *timings)
* @info: pointer to fb_info
*
* DESCRIPTION:
- * Calculates video mode based on monitor specs using VESA GTF.
- * The GTF is best for VESA GTF compliant monitors but is
+ * Calculates video mode based on monitor specs using VESA GTF.
+ * The GTF is best for VESA GTF compliant monitors but is
* specifically formulated to work for older monitors as well.
*
- * If @flag==0, the function will attempt to maximize the
+ * If @flag==0, the function will attempt to maximize the
* refresh rate. Otherwise, it will calculate timings based on
- * the flag and accompanying value.
+ * the flag and accompanying value.
*
- * If FB_IGNOREMON bit is set in @flags, monitor specs will be
+ * If FB_IGNOREMON bit is set in @flags, monitor specs will be
* ignored and @var will be filled with the calculated timings.
*
* All calculations are based on the VESA GTF Spreadsheet
* available at VESA's public ftp (http://www.vesa.org).
- *
+ *
* NOTES:
* The timings generated by the GTF will be different from VESA
* DMT. It might be a good idea to keep a table of standard
* VESA modes as well. The GTF may also not work for some displays,
* such as, and especially, analog TV.
- *
+ *
* REQUIRES:
* A valid info->monspecs, otherwise 'safe numbers' will be used.
- */
+ */
int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_info *info)
{
struct __fb_timings *timings;
@@ -1191,7 +1191,7 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
if (!timings)
return -ENOMEM;
- /*
+ /*
* If monspecs are invalid, use values that are enough
* for 640x480@60
*/
@@ -1214,7 +1214,7 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
timings->hactive = var->xres;
timings->vactive = var->yres;
- if (var->vmode & FB_VMODE_INTERLACED) {
+ if (var->vmode & FB_VMODE_INTERLACED) {
timings->vactive /= 2;
interlace = 2;
}
@@ -1250,9 +1250,9 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
break;
default:
err = -EINVAL;
-
- }
-
+
+ }
+
if (err || (!(flags & FB_IGNOREMON) &&
(timings->vfreq < vfmin || timings->vfreq > vfmax ||
timings->hfreq < hfmin || timings->hfreq > hfmax ||
@@ -1269,7 +1269,7 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
var->upper_margin = (timings->vblank * interlace)/dscan -
(var->vsync_len + var->lower_margin);
}
-
+
kfree(timings);
return err;
}
@@ -1291,7 +1291,7 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var,
return -EINVAL;
}
#endif /* CONFIG_FB_MODE_HELPERS */
-
+
/*
* fb_validate_mode - validates var against monitor capabilities
* @var: pointer to fb_var_screeninfo
@@ -1309,7 +1309,7 @@ int fb_validate_mode(const struct fb_var_screeninfo *var, struct fb_info *info)
u32 hfreq, vfreq, htotal, vtotal, pixclock;
u32 hfmin, hfmax, vfmin, vfmax, dclkmin, dclkmax;
- /*
+ /*
* If monspecs are invalid, use values that are enough
* for 640x480@60
*/
@@ -1333,10 +1333,10 @@ int fb_validate_mode(const struct fb_var_screeninfo *var, struct fb_info *info)
if (!var->pixclock)
return -EINVAL;
pixclock = PICOS2KHZ(var->pixclock) * 1000;
-
- htotal = var->xres + var->right_margin + var->hsync_len +
+
+ htotal = var->xres + var->right_margin + var->hsync_len +
var->left_margin;
- vtotal = var->yres + var->lower_margin + var->vsync_len +
+ vtotal = var->yres + var->lower_margin + var->vsync_len +
var->upper_margin;
if (var->vmode & FB_VMODE_INTERLACED)
@@ -1349,7 +1349,7 @@ int fb_validate_mode(const struct fb_var_screeninfo *var, struct fb_info *info)
vfreq = hfreq/vtotal;
- return (vfreq < vfmin || vfreq > vfmax ||
+ return (vfreq < vfmin || vfreq > vfmax ||
hfreq < hfmin || hfreq > hfmax ||
pixclock < dclkmin || pixclock > dclkmax) ?
-EINVAL : 0;
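
For orientation, here is a minimal stand-alone rendering of the vertical-timing arithmetic documented in the comments above (fb_get_hfreq() and fb_get_vblank()), evaluated for 640x480 at 60 Hz with the file's FLYBACK = 550 and V_FRONTPORCH = 1. This is only a sketch of the integer math, not the kernel functions themselves.

/* Sketch of the GTF vertical-timing integer math from fbmon.c. */
#include <stdio.h>

#define FLYBACK      550	/* vertical flyback time in us, value from fbmon.c */
#define V_FRONTPORCH 1

static unsigned int get_hfreq(unsigned int vfreq, unsigned int yres)
{
	unsigned int divisor = (1000000 - (vfreq * FLYBACK)) / 1000;

	return ((yres + V_FRONTPORCH) * vfreq * 1000) / divisor;
}

static unsigned int get_vblank(unsigned int hfreq)
{
	unsigned int vblank = (hfreq * FLYBACK) / 1000;

	vblank = (vblank + 500) / 1000;
	return vblank + V_FRONTPORCH;
}

int main(void)
{
	unsigned int hfreq  = get_hfreq(60, 480);	/* roughly 29.8 kHz */
	unsigned int vblank = get_vblank(hfreq);	/* about 17 lines */

	printf("hfreq = %u Hz, vblank = %u lines, vtotal = %u lines\n",
	       hfreq, vblank, 480 + vblank);
	return 0;
}
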
diff --git a/drivers/video/geode/lxfb_core.c b/drivers/video/geode/lxfb_core.c
index 583185fd7c94..eb6b88171538 100644
--- a/drivers/video/geode/lxfb_core.c
+++ b/drivers/video/geode/lxfb_core.c
@@ -34,7 +34,7 @@ static int fbsize;
* we try to make it something sane - 640x480-60 is sane
*/
-const struct fb_videomode geode_modedb[] __initdata = {
+static const struct fb_videomode geode_modedb[] __initdata = {
/* 640x480-60 */
{ NULL, 60, 640, 480, 39682, 48, 8, 25, 2, 88, 2,
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
diff --git a/drivers/video/hpfb.c b/drivers/video/hpfb.c
index b18486ad8e17..2eb4fb159084 100644
--- a/drivers/video/hpfb.c
+++ b/drivers/video/hpfb.c
@@ -207,7 +207,8 @@ static struct fb_ops hpfb_ops = {
#define HPFB_FBOMSB 0x5d /* Frame buffer offset */
#define HPFB_FBOLSB 0x5f
-static int __init hpfb_init_one(unsigned long phys_base, unsigned long virt_base)
+static int __devinit hpfb_init_one(unsigned long phys_base,
+ unsigned long virt_base)
{
unsigned long fboff, fb_width, fb_height, fb_start;
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 1a7d7789d877..1d13dd099af8 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1476,7 +1476,7 @@ static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
struct i810fb_par *par = info->par;
u8 __iomem *mmio = par->mmio_start_virtual;
- if (!par->dev_flags & LOCKUP)
+ if (!(par->dev_flags & LOCKUP))
return -ENXIO;
if (cursor->image.width > 64 || cursor->image.height > 64)
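
The one-character change above (and the matching fix in sis_main.c further down) is the classic precedence trap: '!' binds tighter than '&', so !par->dev_flags & LOCKUP evaluates (!par->dev_flags) & LOCKUP instead of testing the LOCKUP bit. A tiny stand-alone illustration with a placeholder bit value:

/* Why !flags & BIT needs the extra parentheses. */
#include <stdio.h>

#define LOCKUP 0x04	/* placeholder bit, not the real driver value */

int main(void)
{
	unsigned int with_bit = LOCKUP, without_bit = 0;

	printf("buggy,   bit clear: %d\n", !without_bit & LOCKUP);	/* (1 & 4) -> 0 */
	printf("correct, bit clear: %d\n", !(without_bit & LOCKUP));	/* -> 1 */
	printf("buggy,   bit set:   %d\n", !with_bit & LOCKUP);		/* (0 & 4) -> 0 */
	printf("correct, bit set:   %d\n", !(with_bit & LOCKUP));	/* -> 0 */
	return 0;
}
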
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c
index b87ea21d3d78..3a81060137a2 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/igafb.c
@@ -400,6 +400,7 @@ int __init igafb_init(void)
info = kzalloc(size, GFP_ATOMIC);
if (!info) {
printk("igafb_init: can't alloc fb_info\n");
+ pci_dev_put(pdev);
return -ENOMEM;
}
@@ -409,12 +410,14 @@ int __init igafb_init(void)
if ((addr = pdev->resource[0].start) == 0) {
printk("igafb_init: no memory start\n");
kfree(info);
+ pci_dev_put(pdev);
return -ENXIO;
}
if ((info->screen_base = ioremap(addr, 1024*1024*2)) == 0) {
printk("igafb_init: can't remap %lx[2M]\n", addr);
kfree(info);
+ pci_dev_put(pdev);
return -ENXIO;
}
@@ -449,6 +452,7 @@ int __init igafb_init(void)
printk("igafb_init: can't remap %lx[4K]\n", igafb_fix.mmio_start);
iounmap((void *)info->screen_base);
kfree(info);
+ pci_dev_put(pdev);
return -ENXIO;
}
@@ -466,6 +470,7 @@ int __init igafb_init(void)
iounmap((void *)par->io_base);
iounmap(info->screen_base);
kfree(info);
+ pci_dev_put(pdev);
return -ENOMEM;
}
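
The igafb hunks above add the missing pci_dev_put() to every early-exit path. An equivalent and arguably harder-to-get-wrong shape for this kind of cleanup is the usual goto-unwind ladder; a hedged sketch follows (hypothetical helper, not the driver's actual probe path), where the pdev reference is kept on success and dropped exactly once on any failure:

/* Sketch only: single unwind ladder instead of per-exit cleanup. */
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/io.h>

static int example_init_one(struct pci_dev *pdev)	/* caller passed a held reference */
{
	struct fb_info *info;
	void __iomem *mmio;
	int err;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto err_put_pdev;
	}

	mmio = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	if (!mmio) {
		err = -ENXIO;
		goto err_free_info;
	}

	/* ... further setup would go here; keep the pdev reference on success ... */
	return 0;

err_free_info:
	kfree(info);
err_put_pdev:
	pci_dev_put(pdev);
	return err;
}
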
diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/intelfb/intelfb.h
index 2fe3f7def530..836796177942 100644
--- a/drivers/video/intelfb/intelfb.h
+++ b/drivers/video/intelfb/intelfb.h
@@ -111,7 +111,7 @@
#define FIXED_MODE(d) ((d)->fixed_mode)
-/*** Driver paramters ***/
+/*** Driver parameters ***/
#define RINGBUFFER_SIZE KB(64)
#define HW_CURSOR_SIZE KB(4)
diff --git a/drivers/video/intelfb/intelfbhw.c b/drivers/video/intelfb/intelfbhw.c
index 5f6fb7d2c408..fa1fff553565 100644
--- a/drivers/video/intelfb/intelfbhw.c
+++ b/drivers/video/intelfb/intelfbhw.c
@@ -1971,7 +1971,7 @@ void intelfbhw_cursor_reset(struct intelfb_info *dinfo)
static irqreturn_t intelfbhw_irq(int irq, void *dev_id)
{
u16 tmp;
- struct intelfb_info *dinfo = (struct intelfb_info *)dev_id;
+ struct intelfb_info *dinfo = dev_id;
spin_lock(&dinfo->int_lock);
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index 4b6a99b5be0d..5246b0402d76 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -2066,40 +2066,49 @@ static struct fb_info *__devinit neo_alloc_fb_info(struct pci_dev *dev, const st
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2070:
- sprintf(info->fix.id, "MagicGraph 128");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 128");
break;
case FB_ACCEL_NEOMAGIC_NM2090:
- sprintf(info->fix.id, "MagicGraph 128V");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 128V");
break;
case FB_ACCEL_NEOMAGIC_NM2093:
- sprintf(info->fix.id, "MagicGraph 128ZV");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 128ZV");
break;
case FB_ACCEL_NEOMAGIC_NM2097:
- sprintf(info->fix.id, "MagicGraph 128ZV+");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 128ZV+");
break;
case FB_ACCEL_NEOMAGIC_NM2160:
- sprintf(info->fix.id, "MagicGraph 128XD");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 128XD");
break;
case FB_ACCEL_NEOMAGIC_NM2200:
- sprintf(info->fix.id, "MagicGraph 256AV");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 256AV");
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
break;
case FB_ACCEL_NEOMAGIC_NM2230:
- sprintf(info->fix.id, "MagicGraph 256AV+");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 256AV+");
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
break;
case FB_ACCEL_NEOMAGIC_NM2360:
- sprintf(info->fix.id, "MagicGraph 256ZX");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 256ZX");
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
break;
case FB_ACCEL_NEOMAGIC_NM2380:
- sprintf(info->fix.id, "MagicGraph 256XL+");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 256XL+");
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 30e14eb1f51e..74517b1b26a6 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -849,9 +849,27 @@ static int nvidiafb_check_var(struct fb_var_screeninfo *var,
if (!mode_valid && info->monspecs.modedb_len)
return -EINVAL;
+ /*
+ * If we're on a flat panel, check if the mode is outside of the
+ * panel dimensions. If so, cap it and try for the next best mode
+ * before bailing out.
+ */
if (par->fpWidth && par->fpHeight && (par->fpWidth < var->xres ||
- par->fpHeight < var->yres))
- return -EINVAL;
+ par->fpHeight < var->yres)) {
+ const struct fb_videomode *mode;
+
+ var->xres = par->fpWidth;
+ var->yres = par->fpHeight;
+
+ mode = fb_find_best_mode(var, &info->modelist);
+ if (!mode) {
+ printk(KERN_ERR PFX "mode out of range of flat "
+ "panel dimensions\n");
+ return -EINVAL;
+ }
+
+ fb_videomode_to_var(var, mode);
+ }
if (var->yres_virtual < var->yres)
var->yres_virtual = var->yres;
diff --git a/drivers/video/omap/lcdc.c b/drivers/video/omap/lcdc.c
index 9085188d815e..fb19ed4992db 100644
--- a/drivers/video/omap/lcdc.c
+++ b/drivers/video/omap/lcdc.c
@@ -312,7 +312,7 @@ static irqreturn_t lcdc_irq_handler(int irq, void *dev_id)
/*
* Change to a new video mode. We defer this to a later time to avoid any
* flicker and not to mess up the current LCD DMA context. For this we disable
- * the LCD controler, which will generate a DONE irq after the last frame has
+ * the LCD controller, which will generate a DONE irq after the last frame has
* been transferred. Then it'll be safe to reconfigure both the LCD controller
* as well as the LCD DMA.
*/
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index 5591dfb22b18..30181b593829 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -1159,6 +1159,11 @@ static void pm2fb_imageblit(struct fb_info *info, const struct fb_image *image)
u32 fgx, bgx;
const u32 *src = (const u32 *)image->data;
u32 xres = (info->var.xres + 31) & ~31;
+ int raster_mode = 1; /* invert bits */
+
+#ifdef __LITTLE_ENDIAN
+ raster_mode |= 3 << 7; /* reverse byte order */
+#endif
if (info->state != FBINFO_STATE_RUNNING)
return;
@@ -1208,9 +1213,8 @@ static void pm2fb_imageblit(struct fb_info *info, const struct fb_image *image)
pm2_WR(par, PM2R_RENDER,
PM2F_RENDER_RECTANGLE |
PM2F_INCREASE_X | PM2F_INCREASE_Y);
- /* BitMapPackEachScanline & invert bits and byte order*/
- /* force background */
- pm2_WR(par, PM2R_RASTERIZER_MODE, (1 << 9) | 1 | (3 << 7));
+ /* BitMapPackEachScanline */
+ pm2_WR(par, PM2R_RASTERIZER_MODE, raster_mode | (1 << 9));
pm2_WR(par, PM2R_CONSTANT_COLOR, fgx);
pm2_WR(par, PM2R_RENDER,
PM2F_RENDER_RECTANGLE |
@@ -1224,8 +1228,7 @@ static void pm2fb_imageblit(struct fb_info *info, const struct fb_image *image)
PM2F_RENDER_RECTANGLE |
PM2F_RENDER_FASTFILL |
PM2F_INCREASE_X | PM2F_INCREASE_Y);
- /* invert bits and byte order*/
- pm2_WR(par, PM2R_RASTERIZER_MODE, 1 | (3 << 7));
+ pm2_WR(par, PM2R_RASTERIZER_MODE, raster_mode);
pm2_WR(par, PM2R_FB_BLOCK_COLOR, fgx);
pm2_WR(par, PM2R_RENDER,
PM2F_RENDER_RECTANGLE |
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index 070659992c18..5dba8cdd0517 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -1227,7 +1227,7 @@ static struct fb_ops pm3fb_ops = {
/* mmio register are already mapped when this function is called */
/* the pm3fb_fix.smem_start is also set */
-static unsigned long pm3fb_size_memory(struct pm3_par *par)
+static unsigned long __devinit pm3fb_size_memory(struct pm3_par *par)
{
unsigned long memsize = 0;
unsigned long tempBypass, i, temp1, temp2;
diff --git a/drivers/video/pmag-aa-fb.c b/drivers/video/pmag-aa-fb.c
index a864438b6008..6515ec11c16b 100644
--- a/drivers/video/pmag-aa-fb.c
+++ b/drivers/video/pmag-aa-fb.c
@@ -150,7 +150,7 @@ static int aafbcon_set_font(struct display *disp, int width, int height)
{
struct aafb_info *info = (struct aafb_info *)disp->fb_info;
struct aafb_cursor *c = &info->cursor;
- u8 fgc = ~attr_bgcol_ec(disp, disp->conp);
+ u8 fgc = ~attr_bgcol_ec(disp, disp->conp, &info->info);
if (width > 64 || height > 64 || width < 0 || height < 0)
return -EINVAL;
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 044a423a72cb..dc3af1c78c56 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -57,8 +57,6 @@
#define GPU_ALIGN_UP(x) _ALIGN_UP((x), 64)
#define GPU_MAX_LINE_LENGTH (65536 - 64)
-#define PS3FB_FULL_MODE_BIT 0x80
-
#define GPU_INTR_STATUS_VSYNC_0 0 /* vsync on head A */
#define GPU_INTR_STATUS_VSYNC_1 1 /* vsync on head B */
#define GPU_INTR_STATUS_FLIP_0 3 /* flip head A */
@@ -118,8 +116,6 @@ struct ps3fb_priv {
unsigned int irq_no;
u64 context_handle, memory_handle;
- void *xdr_ea;
- size_t xdr_size;
struct gpu_driver_info *dinfo;
u64 vblank_count; /* frame count */
@@ -136,42 +132,19 @@ static struct ps3fb_priv ps3fb;
struct ps3fb_par {
u32 pseudo_palette[16];
int mode_id, new_mode_id;
- int res_index;
unsigned int num_frames; /* num of frame buffers */
unsigned int width;
unsigned int height;
- unsigned long full_offset; /* start of fullscreen DDR fb */
- unsigned long fb_offset; /* start of actual DDR fb */
- unsigned long pan_offset;
+ unsigned int ddr_line_length;
+ unsigned int ddr_frame_size;
+ unsigned int xdr_frame_size;
+ unsigned int full_offset; /* start of fullscreen DDR fb */
+ unsigned int fb_offset; /* start of actual DDR fb */
+ unsigned int pan_offset;
};
-struct ps3fb_res_table {
- u32 xres;
- u32 yres;
- u32 xoff;
- u32 yoff;
- u32 type;
-};
-#define PS3FB_RES_FULL 1
-static const struct ps3fb_res_table ps3fb_res[] = {
- /* res_x,y margin_x,y full */
- { 720, 480, 72, 48 , 0},
- { 720, 576, 72, 58 , 0},
- { 1280, 720, 78, 38 , 0},
- { 1920, 1080, 116, 58 , 0},
- /* full mode */
- { 720, 480, 0, 0 , PS3FB_RES_FULL},
- { 720, 576, 0, 0 , PS3FB_RES_FULL},
- { 1280, 720, 0, 0 , PS3FB_RES_FULL},
- { 1920, 1080, 0, 0 , PS3FB_RES_FULL},
- /* vesa: normally full mode */
- { 1280, 768, 0, 0 , 0},
- { 1280, 1024, 0, 0 , 0},
- { 1920, 1200, 0, 0 , 0},
- { 0, 0, 0, 0 , 0} };
-
-/* default resolution */
-#define GPU_RES_INDEX 0 /* 720 x 480 */
+
+#define FIRST_NATIVE_MODE_INDEX 10
static const struct fb_videomode ps3fb_modedb[] = {
/* 60 Hz broadcast modes (modes "1" to "5") */
@@ -211,7 +184,7 @@ static const struct fb_videomode ps3fb_modedb[] = {
"720p", 50, 1124, 644, 13468, 298, 478, 57, 44, 80, 5,
FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED
}, {
- /* 1080 */
+ /* 1080i */
"1080i", 50, 1688, 964, 13468, 264, 600, 94, 62, 88, 5,
FB_SYNC_BROADCAST, FB_VMODE_INTERLACED
}, {
@@ -220,24 +193,7 @@ static const struct fb_videomode ps3fb_modedb[] = {
FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED
},
- /* VESA modes (modes "11" to "13") */
- {
- /* WXGA */
- "wxga", 60, 1280, 768, 12924, 160, 24, 29, 3, 136, 6,
- 0, FB_VMODE_NONINTERLACED,
- FB_MODE_IS_VESA
- }, {
- /* SXGA */
- "sxga", 60, 1280, 1024, 9259, 248, 48, 38, 1, 112, 3,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED,
- FB_MODE_IS_VESA
- }, {
- /* WUXGA */
- "wuxga", 60, 1920, 1200, 6494, 80, 48, 26, 3, 32, 6,
- FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED,
- FB_MODE_IS_VESA
- },
-
+ [FIRST_NATIVE_MODE_INDEX] =
/* 60 Hz broadcast modes (full resolution versions of modes "1" to "5") */
{
/* 480if */
@@ -276,12 +232,30 @@ static const struct fb_videomode ps3fb_modedb[] = {
FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED
}, {
/* 1080if */
- "1080f", 50, 1920, 1080, 13468, 148, 484, 36, 4, 88, 5,
+ "1080if", 50, 1920, 1080, 13468, 148, 484, 36, 4, 88, 5,
FB_SYNC_BROADCAST, FB_VMODE_INTERLACED
}, {
/* 1080pf */
"1080pf", 50, 1920, 1080, 6734, 148, 484, 36, 4, 88, 5,
FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED
+ },
+
+ /* VESA modes (modes "11" to "13") */
+ {
+ /* WXGA */
+ "wxga", 60, 1280, 768, 12924, 160, 24, 29, 3, 136, 6,
+ 0, FB_VMODE_NONINTERLACED,
+ FB_MODE_IS_VESA
+ }, {
+ /* SXGA */
+ "sxga", 60, 1280, 1024, 9259, 248, 48, 38, 1, 112, 3,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED,
+ FB_MODE_IS_VESA
+ }, {
+ /* WUXGA */
+ "wuxga", 60, 1920, 1200, 6494, 80, 48, 26, 3, 32, 6,
+ FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED,
+ FB_MODE_IS_VESA
}
};
@@ -289,110 +263,188 @@ static const struct fb_videomode ps3fb_modedb[] = {
#define HEAD_A
#define HEAD_B
-#define X_OFF(i) (ps3fb_res[i].xoff) /* left/right margin (pixel) */
-#define Y_OFF(i) (ps3fb_res[i].yoff) /* top/bottom margin (pixel) */
-#define WIDTH(i) (ps3fb_res[i].xres) /* width of FB */
-#define HEIGHT(i) (ps3fb_res[i].yres) /* height of FB */
#define BPP 4 /* number of bytes per pixel */
-/* Start of the virtual frame buffer (relative to fullscreen ) */
-#define VP_OFF(i) ((WIDTH(i) * Y_OFF(i) + X_OFF(i)) * BPP)
-
static int ps3fb_mode;
module_param(ps3fb_mode, int, 0);
static char *mode_option __devinitdata;
-static int ps3fb_get_res_table(u32 xres, u32 yres, int mode)
+static int ps3fb_cmp_mode(const struct fb_videomode *vmode,
+ const struct fb_var_screeninfo *var)
{
- int full_mode;
- unsigned int i;
- u32 x, y, f;
-
- full_mode = (mode & PS3FB_FULL_MODE_BIT) ? PS3FB_RES_FULL : 0;
- for (i = 0;; i++) {
- x = ps3fb_res[i].xres;
- y = ps3fb_res[i].yres;
- f = ps3fb_res[i].type;
-
- if (!x) {
- pr_debug("ERROR: ps3fb_get_res_table()\n");
- return -1;
- }
+ long xres, yres, left_margin, right_margin, upper_margin, lower_margin;
+ long dx, dy;
+
+ /* maximum values */
+ if (var->xres > vmode->xres || var->yres > vmode->yres ||
+ var->pixclock > vmode->pixclock ||
+ var->hsync_len > vmode->hsync_len ||
+ var->vsync_len > vmode->vsync_len)
+ return -1;
- if (full_mode == PS3FB_RES_FULL && f != PS3FB_RES_FULL)
- continue;
+ /* progressive/interlaced must match */
+ if ((var->vmode & FB_VMODE_MASK) != vmode->vmode)
+ return -1;
- if (x == xres && (yres == 0 || y == yres))
- break;
+ /* minimum resolution */
+ xres = max(var->xres, 1U);
+ yres = max(var->yres, 1U);
+
+ /* minimum margins */
+ left_margin = max(var->left_margin, vmode->left_margin);
+ right_margin = max(var->right_margin, vmode->right_margin);
+ upper_margin = max(var->upper_margin, vmode->upper_margin);
+ lower_margin = max(var->lower_margin, vmode->lower_margin);
+
+ /* resolution + margins may not exceed native parameters */
+ dx = ((long)vmode->left_margin + (long)vmode->xres +
+ (long)vmode->right_margin) -
+ (left_margin + xres + right_margin);
+ if (dx < 0)
+ return -1;
- x = x - 2 * ps3fb_res[i].xoff;
- y = y - 2 * ps3fb_res[i].yoff;
- if (x == xres && (yres == 0 || y == yres))
- break;
+ dy = ((long)vmode->upper_margin + (long)vmode->yres +
+ (long)vmode->lower_margin) -
+ (upper_margin + yres + lower_margin);
+ if (dy < 0)
+ return -1;
+
+ /* exact match */
+ if (!dx && !dy)
+ return 0;
+
+ /* resolution difference */
+ return (vmode->xres - xres) * (vmode->yres - yres);
+}
+
+static const struct fb_videomode *ps3fb_native_vmode(enum ps3av_mode_num id)
+{
+ return &ps3fb_modedb[FIRST_NATIVE_MODE_INDEX + id - 1];
+}
+
+static const struct fb_videomode *ps3fb_vmode(int id)
+{
+ u32 mode = id & PS3AV_MODE_MASK;
+
+ if (mode < PS3AV_MODE_480I || mode > PS3AV_MODE_WUXGA)
+ return NULL;
+
+ if (mode <= PS3AV_MODE_1080P50 && !(id & PS3AV_MODE_FULL)) {
+ /* Non-fullscreen broadcast mode */
+ return &ps3fb_modedb[mode - 1];
}
- return i;
+
+ return ps3fb_native_vmode(mode);
}
-static unsigned int ps3fb_find_mode(const struct fb_var_screeninfo *var,
+static unsigned int ps3fb_find_mode(struct fb_var_screeninfo *var,
u32 *ddr_line_length, u32 *xdr_line_length)
{
- unsigned int i, mode;
-
- for (i = 0; i < ARRAY_SIZE(ps3fb_modedb); i++)
- if (var->xres == ps3fb_modedb[i].xres &&
- var->yres == ps3fb_modedb[i].yres &&
- var->pixclock == ps3fb_modedb[i].pixclock &&
- var->hsync_len == ps3fb_modedb[i].hsync_len &&
- var->vsync_len == ps3fb_modedb[i].vsync_len &&
- var->left_margin == ps3fb_modedb[i].left_margin &&
- var->right_margin == ps3fb_modedb[i].right_margin &&
- var->upper_margin == ps3fb_modedb[i].upper_margin &&
- var->lower_margin == ps3fb_modedb[i].lower_margin &&
- var->sync == ps3fb_modedb[i].sync &&
- (var->vmode & FB_VMODE_MASK) == ps3fb_modedb[i].vmode)
- goto found;
-
- pr_debug("ps3fb_find_mode: mode not found\n");
- return 0;
+ unsigned int id, best_id;
+ int diff, best_diff;
+ const struct fb_videomode *vmode;
+ long gap;
+
+ best_id = 0;
+ best_diff = INT_MAX;
+ pr_debug("%s: wanted %u [%u] %u x %u [%u] %u\n", __func__,
+ var->left_margin, var->xres, var->right_margin,
+ var->upper_margin, var->yres, var->lower_margin);
+ for (id = PS3AV_MODE_480I; id <= PS3AV_MODE_WUXGA; id++) {
+ vmode = ps3fb_native_vmode(id);
+ diff = ps3fb_cmp_mode(vmode, var);
+ pr_debug("%s: mode %u: %u [%u] %u x %u [%u] %u: diff = %d\n",
+ __func__, id, vmode->left_margin, vmode->xres,
+ vmode->right_margin, vmode->upper_margin,
+ vmode->yres, vmode->lower_margin, diff);
+ if (diff < 0)
+ continue;
+ if (diff < best_diff) {
+ best_id = id;
+ if (!diff)
+ break;
+ best_diff = diff;
+ }
+ }
-found:
- /* Cropped broadcast modes use the full line length */
- *ddr_line_length = ps3fb_modedb[i < 10 ? i + 13 : i].xres * BPP;
+ if (!best_id) {
+ pr_debug("%s: no suitable mode found\n", __func__);
+ return 0;
+ }
- if (ps3_compare_firmware_version(1, 9, 0) >= 0) {
- *xdr_line_length = GPU_ALIGN_UP(max(var->xres,
- var->xres_virtual) * BPP);
- if (*xdr_line_length > GPU_MAX_LINE_LENGTH)
- *xdr_line_length = GPU_MAX_LINE_LENGTH;
- } else
- *xdr_line_length = *ddr_line_length;
+ id = best_id;
+ vmode = ps3fb_native_vmode(id);
- /* Full broadcast modes have the full mode bit set */
- mode = i > 12 ? (i - 12) | PS3FB_FULL_MODE_BIT : i + 1;
+ *ddr_line_length = vmode->xres * BPP;
- pr_debug("ps3fb_find_mode: mode %u\n", mode);
+ /* minimum resolution */
+ if (!var->xres)
+ var->xres = 1;
+ if (!var->yres)
+ var->yres = 1;
- return mode;
-}
+ /* minimum virtual resolution */
+ if (var->xres_virtual < var->xres)
+ var->xres_virtual = var->xres;
+ if (var->yres_virtual < var->yres)
+ var->yres_virtual = var->yres;
-static const struct fb_videomode *ps3fb_default_mode(int id)
-{
- u32 mode = id & PS3AV_MODE_MASK;
- u32 flags;
+ /* minimum margins */
+ if (var->left_margin < vmode->left_margin)
+ var->left_margin = vmode->left_margin;
+ if (var->right_margin < vmode->right_margin)
+ var->right_margin = vmode->right_margin;
+ if (var->upper_margin < vmode->upper_margin)
+ var->upper_margin = vmode->upper_margin;
+ if (var->lower_margin < vmode->lower_margin)
+ var->lower_margin = vmode->lower_margin;
+
+ /* extra margins */
+ gap = ((long)vmode->left_margin + (long)vmode->xres +
+ (long)vmode->right_margin) -
+ ((long)var->left_margin + (long)var->xres +
+ (long)var->right_margin);
+ if (gap > 0) {
+ var->left_margin += gap/2;
+ var->right_margin += (gap+1)/2;
+ pr_debug("%s: rounded up H to %u [%u] %u\n", __func__,
+ var->left_margin, var->xres, var->right_margin);
+ }
- if (mode < 1 || mode > 13)
- return NULL;
+ gap = ((long)vmode->upper_margin + (long)vmode->yres +
+ (long)vmode->lower_margin) -
+ ((long)var->upper_margin + (long)var->yres +
+ (long)var->lower_margin);
+ if (gap > 0) {
+ var->upper_margin += gap/2;
+ var->lower_margin += (gap+1)/2;
+ pr_debug("%s: rounded up V to %u [%u] %u\n", __func__,
+ var->upper_margin, var->yres, var->lower_margin);
+ }
+
+ /* fixed fields */
+ var->pixclock = vmode->pixclock;
+ var->hsync_len = vmode->hsync_len;
+ var->vsync_len = vmode->vsync_len;
+ var->sync = vmode->sync;
- flags = id & ~PS3AV_MODE_MASK;
+ if (ps3_compare_firmware_version(1, 9, 0) >= 0) {
+ *xdr_line_length = GPU_ALIGN_UP(var->xres_virtual * BPP);
+ if (*xdr_line_length > GPU_MAX_LINE_LENGTH)
+ *xdr_line_length = GPU_MAX_LINE_LENGTH;
+ } else
+ *xdr_line_length = *ddr_line_length;
- if (mode <= 10 && flags & PS3FB_FULL_MODE_BIT) {
- /* Full broadcast mode */
- return &ps3fb_modedb[mode + 12];
+ if (vmode->sync & FB_SYNC_BROADCAST) {
+ /* Full broadcast modes have the full mode bit set */
+ if (vmode->xres == var->xres && vmode->yres == var->yres)
+ id |= PS3AV_MODE_FULL;
}
- return &ps3fb_modedb[mode - 1];
+ pr_debug("%s: mode %u\n", __func__, id);
+ return id;
}
static void ps3fb_sync_image(struct device *dev, u64 frame_offset,
@@ -439,8 +491,7 @@ static void ps3fb_sync_image(struct device *dev, u64 frame_offset,
static int ps3fb_sync(struct fb_info *info, u32 frame)
{
struct ps3fb_par *par = info->par;
- int i, error = 0;
- u32 ddr_line_length, xdr_line_length;
+ int error = 0;
u64 ddr_base, xdr_base;
if (frame > par->num_frames - 1) {
@@ -450,16 +501,13 @@ static int ps3fb_sync(struct fb_info *info, u32 frame)
goto out;
}
- i = par->res_index;
- xdr_line_length = info->fix.line_length;
- ddr_line_length = ps3fb_res[i].xres * BPP;
- xdr_base = frame * info->var.yres_virtual * xdr_line_length;
- ddr_base = frame * ps3fb_res[i].yres * ddr_line_length;
+ xdr_base = frame * par->xdr_frame_size;
+ ddr_base = frame * par->ddr_frame_size;
ps3fb_sync_image(info->device, ddr_base + par->full_offset,
ddr_base + par->fb_offset, xdr_base + par->pan_offset,
- par->width, par->height, ddr_line_length,
- xdr_line_length);
+ par->width, par->height, par->ddr_line_length,
+ info->fix.line_length);
out:
return error;
@@ -498,22 +546,11 @@ static int ps3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
u32 xdr_line_length, ddr_line_length;
int mode;
- dev_dbg(info->device, "var->xres:%u info->var.xres:%u\n", var->xres,
- info->var.xres);
- dev_dbg(info->device, "var->yres:%u info->var.yres:%u\n", var->yres,
- info->var.yres);
-
- /* FIXME For now we do exact matches only */
mode = ps3fb_find_mode(var, &ddr_line_length, &xdr_line_length);
if (!mode)
return -EINVAL;
/* Virtual screen */
- if (var->xres_virtual < var->xres)
- var->xres_virtual = var->xres;
- if (var->yres_virtual < var->yres)
- var->yres_virtual = var->yres;
-
if (var->xres_virtual > xdr_line_length / BPP) {
dev_dbg(info->device,
"Horizontal virtual screen size too large\n");
@@ -559,7 +596,7 @@ static int ps3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
}
/* Memory limit */
- if (var->yres_virtual * xdr_line_length > ps3fb.xdr_size) {
+ if (var->yres_virtual * xdr_line_length > info->fix.smem_len) {
dev_dbg(info->device, "Not enough memory\n");
return -ENOMEM;
}
@@ -578,39 +615,38 @@ static int ps3fb_set_par(struct fb_info *info)
{
struct ps3fb_par *par = info->par;
unsigned int mode, ddr_line_length, xdr_line_length, lines, maxlines;
- int i;
- unsigned long offset;
+ unsigned int ddr_xoff, ddr_yoff, offset;
+ const struct fb_videomode *vmode;
u64 dst;
- dev_dbg(info->device, "xres:%d xv:%d yres:%d yv:%d clock:%d\n",
- info->var.xres, info->var.xres_virtual,
- info->var.yres, info->var.yres_virtual, info->var.pixclock);
-
mode = ps3fb_find_mode(&info->var, &ddr_line_length, &xdr_line_length);
if (!mode)
return -EINVAL;
- i = ps3fb_get_res_table(info->var.xres, info->var.yres, mode);
- par->res_index = i;
+ vmode = ps3fb_native_vmode(mode & PS3AV_MODE_MASK);
- info->fix.smem_start = virt_to_abs(ps3fb.xdr_ea);
- info->fix.smem_len = ps3fb.xdr_size;
info->fix.xpanstep = info->var.xres_virtual > info->var.xres ? 1 : 0;
info->fix.ypanstep = info->var.yres_virtual > info->var.yres ? 1 : 0;
info->fix.line_length = xdr_line_length;
- info->screen_base = (char __iomem *)ps3fb.xdr_ea;
+ par->ddr_line_length = ddr_line_length;
+ par->ddr_frame_size = vmode->yres * ddr_line_length;
+ par->xdr_frame_size = info->var.yres_virtual * xdr_line_length;
- par->num_frames = ps3fb.xdr_size /
- max(ps3fb_res[i].yres * ddr_line_length,
- info->var.yres_virtual * xdr_line_length);
+ par->num_frames = info->fix.smem_len /
+ max(par->ddr_frame_size, par->xdr_frame_size);
/* Keep the special bits we cannot set using fb_var_screeninfo */
par->new_mode_id = (par->new_mode_id & ~PS3AV_MODE_MASK) | mode;
par->width = info->var.xres;
par->height = info->var.yres;
- offset = VP_OFF(i);
+
+ /* Start of the virtual frame buffer (relative to fullscreen) */
+ ddr_xoff = info->var.left_margin - vmode->left_margin;
+ ddr_yoff = info->var.upper_margin - vmode->upper_margin;
+ offset = ddr_yoff * ddr_line_length + ddr_xoff * BPP;
+
par->fb_offset = GPU_ALIGN_UP(offset);
par->full_offset = par->fb_offset - offset;
par->pan_offset = info->var.yoffset * xdr_line_length +
@@ -625,16 +661,16 @@ static int ps3fb_set_par(struct fb_info *info)
}
/* Clear XDR frame buffer memory */
- memset(ps3fb.xdr_ea, 0, ps3fb.xdr_size);
+ memset((void __force *)info->screen_base, 0, info->fix.smem_len);
/* Clear DDR frame buffer memory */
- lines = ps3fb_res[i].yres * par->num_frames;
+ lines = vmode->yres * par->num_frames;
if (par->full_offset)
lines++;
- maxlines = ps3fb.xdr_size / ddr_line_length;
+ maxlines = info->fix.smem_len / ddr_line_length;
for (dst = 0; lines; dst += maxlines * ddr_line_length) {
unsigned int l = min(lines, maxlines);
- ps3fb_sync_image(info->device, 0, dst, 0, ps3fb_res[i].xres, l,
+ ps3fb_sync_image(info->device, 0, dst, 0, vmode->xres, l,
ddr_line_length, ddr_line_length);
lines -= l;
}
@@ -797,7 +833,7 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
case PS3FB_IOCTL_SETMODE:
{
struct ps3fb_par *par = info->par;
- const struct fb_videomode *mode;
+ const struct fb_videomode *vmode;
struct fb_var_screeninfo var;
if (copy_from_user(&val, argp, sizeof(val)))
@@ -810,10 +846,10 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
}
dev_dbg(info->device, "PS3FB_IOCTL_SETMODE:%x\n", val);
retval = -EINVAL;
- mode = ps3fb_default_mode(val);
- if (mode) {
+ vmode = ps3fb_vmode(val);
+ if (vmode) {
var = info->var;
- fb_videomode_to_var(&var, mode);
+ fb_videomode_to_var(&var, vmode);
acquire_console_sem();
info->flags |= FBINFO_MISC_USEREVENT;
/* Force, in case only special bits changed */
@@ -975,10 +1011,9 @@ static int ps3fb_xdr_settings(u64 xdr_lpar, struct device *dev)
__func__, status);
return -ENXIO;
}
- dev_dbg(dev,
- "video:%p xdr_ea:%p ioif:%lx lpar:%lx phys:%lx size:%lx\n",
- ps3fb_videomemory.address, ps3fb.xdr_ea, GPU_IOIF, xdr_lpar,
- virt_to_abs(ps3fb.xdr_ea), ps3fb_videomemory.size);
+ dev_dbg(dev, "video:%p ioif:%lx lpar:%lx size:%lx\n",
+ ps3fb_videomemory.address, GPU_IOIF, xdr_lpar,
+ ps3fb_videomemory.size);
status = lv1_gpu_context_attribute(ps3fb.context_handle,
L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP,
@@ -1055,14 +1090,14 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
struct fb_info *info;
struct ps3fb_par *par;
int retval = -ENOMEM;
- u32 xres, yres;
u64 ddr_lpar = 0;
u64 lpar_dma_control = 0;
u64 lpar_driver_info = 0;
u64 lpar_reports = 0;
u64 lpar_reports_size = 0;
u64 xdr_lpar;
- int status, res_index;
+ void *fb_start;
+ int status;
struct task_struct *task;
unsigned long max_ps3fb_size;
@@ -1080,14 +1115,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
if (!ps3fb_mode)
ps3fb_mode = ps3av_get_mode();
- dev_dbg(&dev->core, "ps3av_mode:%d\n", ps3fb_mode);
-
- if (ps3fb_mode > 0 &&
- !ps3av_video_mode2res(ps3fb_mode, &xres, &yres)) {
- res_index = ps3fb_get_res_table(xres, yres, ps3fb_mode);
- dev_dbg(&dev->core, "res_index:%d\n", res_index);
- } else
- res_index = GPU_RES_INDEX;
+ dev_dbg(&dev->core, "ps3fb_mode: %d\n", ps3fb_mode);
atomic_set(&ps3fb.f_count, -1); /* fbcon opens ps3fb */
atomic_set(&ps3fb.ext_flip, 0); /* for flip with vsync */
@@ -1124,7 +1152,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
}
/* vsync interrupt */
- ps3fb.dinfo = ioremap(lpar_driver_info, 128 * 1024);
+ ps3fb.dinfo = (void __force *)ioremap(lpar_driver_info, 128 * 1024);
if (!ps3fb.dinfo) {
dev_err(&dev->core, "%s: ioremap failed\n", __func__);
goto err_gpu_context_free;
@@ -1134,22 +1162,10 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
if (retval)
goto err_iounmap_dinfo;
- /* XDR frame buffer */
- ps3fb.xdr_ea = ps3fb_videomemory.address;
- xdr_lpar = ps3_mm_phys_to_lpar(__pa(ps3fb.xdr_ea));
-
/* Clear memory to prevent kernel info leakage into userspace */
- memset(ps3fb.xdr_ea, 0, ps3fb_videomemory.size);
-
- /*
- * The GPU command buffer is at the start of video memory
- * As we don't use the full command buffer, we can put the actual
- * frame buffer at offset GPU_FB_START and save some precious XDR
- * memory
- */
- ps3fb.xdr_ea += GPU_FB_START;
- ps3fb.xdr_size = ps3fb_videomemory.size - GPU_FB_START;
+ memset(ps3fb_videomemory.address, 0, ps3fb_videomemory.size);
+ xdr_lpar = ps3_mm_phys_to_lpar(__pa(ps3fb_videomemory.address));
retval = ps3fb_xdr_settings(xdr_lpar, &dev->core);
if (retval)
goto err_free_irq;
@@ -1161,15 +1177,22 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
par = info->par;
par->mode_id = ~ps3fb_mode; /* != ps3fb_mode, to trigger change */
par->new_mode_id = ps3fb_mode;
- par->res_index = res_index;
par->num_frames = 1;
- info->screen_base = (char __iomem *)ps3fb.xdr_ea;
info->fbops = &ps3fb_ops;
-
info->fix = ps3fb_fix;
- info->fix.smem_start = virt_to_abs(ps3fb.xdr_ea);
- info->fix.smem_len = ps3fb.xdr_size;
+
+ /*
+ * The GPU command buffer is at the start of video memory
+ * As we don't use the full command buffer, we can put the actual
+ * frame buffer at offset GPU_FB_START and save some precious XDR
+ * memory
+ */
+ fb_start = ps3fb_videomemory.address + GPU_FB_START;
+ info->screen_base = (char __force __iomem *)fb_start;
+ info->fix.smem_start = virt_to_abs(fb_start);
+ info->fix.smem_len = ps3fb_videomemory.size - GPU_FB_START;
+
info->pseudo_palette = par->pseudo_palette;
info->flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
@@ -1180,7 +1203,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
if (!fb_find_mode(&info->var, info, mode_option, ps3fb_modedb,
ARRAY_SIZE(ps3fb_modedb),
- ps3fb_default_mode(par->new_mode_id), 32)) {
+ ps3fb_vmode(par->new_mode_id), 32)) {
retval = -EINVAL;
goto err_fb_dealloc;
}
@@ -1194,9 +1217,9 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
dev->core.driver_data = info;
- dev_info(info->device, "%s %s, using %lu KiB of video memory\n",
+ dev_info(info->device, "%s %s, using %u KiB of video memory\n",
dev_driver_string(info->dev), info->dev->bus_id,
- ps3fb.xdr_size >> 10);
+ info->fix.smem_len >> 10);
task = kthread_run(ps3fbd, info, DEVICE_NAME);
if (IS_ERR(task)) {
@@ -1219,7 +1242,7 @@ err_free_irq:
free_irq(ps3fb.irq_no, &dev->core);
ps3_irq_plug_destroy(ps3fb.irq_no);
err_iounmap_dinfo:
- iounmap((u8 __iomem *)ps3fb.dinfo);
+ iounmap((u8 __force __iomem *)ps3fb.dinfo);
err_gpu_context_free:
lv1_gpu_context_free(ps3fb.context_handle);
err_gpu_memory_free:
@@ -1254,7 +1277,7 @@ static int ps3fb_shutdown(struct ps3_system_bus_device *dev)
framebuffer_release(info);
info = dev->core.driver_data = NULL;
}
- iounmap((u8 __iomem *)ps3fb.dinfo);
+ iounmap((u8 __force __iomem *)ps3fb.dinfo);
status = lv1_gpu_context_free(ps3fb.context_handle);
if (status)
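
One detail of the reworked mode selection above that is easy to miss: when the requested resolution is smaller than the chosen native mode, ps3fb_find_mode() keeps the native total blanking and splits the surplus between the two margins, gap/2 on the leading side and (gap+1)/2 on the trailing side. A stand-alone sketch of that rounding, using the horizontal layout of the "1080pf" modedb entry above (148 [1920] 484) and made-up requests:

/* Sketch of the margin rounding done in ps3fb_find_mode(). */
#include <stdio.h>

static void round_up(long nat_left, long nat_res, long nat_right,
		     long left, long res, long right)
{
	long gap = (nat_left + nat_res + nat_right) - (left + res + right);

	if (gap > 0) {
		left  += gap / 2;
		right += (gap + 1) / 2;
	}
	printf("request [%ld] -> %ld [%ld] %ld\n", res, left, res, right);
}

int main(void)
{
	round_up(148, 1920, 484, 148, 1910, 484);	/* gap 10 -> 153 [1910] 489 */
	round_up(148, 1920, 484, 148, 1909, 484);	/* gap 11 -> 153 [1909] 490 */
	return 0;
}
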
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index b3c31d9dc591..71fa6edb5c47 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -110,6 +110,11 @@ static int debug = 0;
/* useful functions */
+static int is_s3c2412(struct s3c2410fb_info *fbi)
+{
+ return (fbi->drv_type == DRV_S3C2412);
+}
+
/* s3c2410fb_set_lcdaddr
*
* initialise lcd controller address pointers
@@ -501,7 +506,7 @@ static void schedule_palette_update(struct s3c2410fb_info *fbi,
{
unsigned long flags;
unsigned long irqen;
- void __iomem *regs = fbi->io;
+ void __iomem *irq_base = fbi->irq_base;
local_irq_save(flags);
@@ -511,9 +516,9 @@ static void schedule_palette_update(struct s3c2410fb_info *fbi,
fbi->palette_ready = 1;
/* enable IRQ */
- irqen = readl(regs + S3C2410_LCDINTMSK);
+ irqen = readl(irq_base + S3C24XX_LCDINTMSK);
irqen &= ~S3C2410_LCDINT_FRSYNC;
- writel(irqen, regs + S3C2410_LCDINTMSK);
+ writel(irqen, irq_base + S3C24XX_LCDINTMSK);
}
local_irq_restore(flags);
@@ -594,15 +599,17 @@ static int s3c2410fb_setcolreg(unsigned regno,
static int s3c2410fb_blank(int blank_mode, struct fb_info *info)
{
struct s3c2410fb_info *fbi = info->par;
- void __iomem *regs = fbi->io;
+ void __iomem *tpal_reg = fbi->io;
dprintk("blank(mode=%d, info=%p)\n", blank_mode, info);
+ tpal_reg += is_s3c2412(fbi) ? S3C2412_TPAL : S3C2410_TPAL;
+
if (blank_mode == FB_BLANK_UNBLANK)
- writel(0x0, regs + S3C2410_TPAL);
+ writel(0x0, tpal_reg);
else {
dprintk("setting TPAL to output 0x000000\n");
- writel(S3C2410_TPAL_EN, regs + S3C2410_TPAL);
+ writel(S3C2410_TPAL_EN, tpal_reg);
}
return 0;
@@ -663,7 +670,7 @@ static int __init s3c2410fb_map_video_memory(struct fb_info *info)
dma_addr_t map_dma;
unsigned map_size = PAGE_ALIGN(info->fix.smem_len);
- dprintk("map_video_memory(fbi=%p)\n", fbi);
+ dprintk("map_video_memory(fbi=%p) map_size %u\n", fbi, map_size);
info->screen_base = dma_alloc_writecombine(fbi->dev, map_size,
&map_dma, GFP_KERNEL);
@@ -672,7 +679,7 @@ static int __init s3c2410fb_map_video_memory(struct fb_info *info)
/* prevent initial garbage on screen */
dprintk("map_video_memory: clear %p:%08x\n",
info->screen_base, map_size);
- memset(info->screen_base, 0xf0, map_size);
+ memset(info->screen_base, 0x00, map_size);
info->fix.smem_start = map_dma;
@@ -709,6 +716,16 @@ static int s3c2410fb_init_registers(struct fb_info *info)
struct s3c2410fb_mach_info *mach_info = fbi->dev->platform_data;
unsigned long flags;
void __iomem *regs = fbi->io;
+ void __iomem *tpal;
+ void __iomem *lpcsel;
+
+ if (is_s3c2412(fbi)) {
+ tpal = regs + S3C2412_TPAL;
+ lpcsel = regs + S3C2412_TCONSEL;
+ } else {
+ tpal = regs + S3C2410_TPAL;
+ lpcsel = regs + S3C2410_LPCSEL;
+ }
/* Initialise LCD with values from haret */
@@ -724,12 +741,12 @@ static int s3c2410fb_init_registers(struct fb_info *info)
local_irq_restore(flags);
dprintk("LPCSEL = 0x%08lx\n", mach_info->lpcsel);
- writel(mach_info->lpcsel, regs + S3C2410_LPCSEL);
+ writel(mach_info->lpcsel, lpcsel);
- dprintk("replacing TPAL %08x\n", readl(regs + S3C2410_TPAL));
+ dprintk("replacing TPAL %08x\n", readl(tpal));
/* ensure temporary palette disabled */
- writel(0x00, regs + S3C2410_TPAL);
+ writel(0x00, tpal);
return 0;
}
@@ -763,15 +780,15 @@ static void s3c2410fb_write_palette(struct s3c2410fb_info *fbi)
static irqreturn_t s3c2410fb_irq(int irq, void *dev_id)
{
struct s3c2410fb_info *fbi = dev_id;
- void __iomem *regs = fbi->io;
- unsigned long lcdirq = readl(regs + S3C2410_LCDINTPND);
+ void __iomem *irq_base = fbi->irq_base;
+ unsigned long lcdirq = readl(irq_base + S3C24XX_LCDINTPND);
if (lcdirq & S3C2410_LCDINT_FRSYNC) {
if (fbi->palette_ready)
s3c2410fb_write_palette(fbi);
- writel(S3C2410_LCDINT_FRSYNC, regs + S3C2410_LCDINTPND);
- writel(S3C2410_LCDINT_FRSYNC, regs + S3C2410_LCDSRCPND);
+ writel(S3C2410_LCDINT_FRSYNC, irq_base + S3C24XX_LCDINTPND);
+ writel(S3C2410_LCDINT_FRSYNC, irq_base + S3C24XX_LCDSRCPND);
}
return IRQ_HANDLED;
@@ -779,7 +796,8 @@ static irqreturn_t s3c2410fb_irq(int irq, void *dev_id)
static char driver_name[] = "s3c2410fb";
-static int __init s3c2410fb_probe(struct platform_device *pdev)
+static int __init s3c24xxfb_probe(struct platform_device *pdev,
+ enum s3c_drv_type drv_type)
{
struct s3c2410fb_info *info;
struct s3c2410fb_display *display;
@@ -799,6 +817,12 @@ static int __init s3c2410fb_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (mach_info->default_display >= mach_info->num_displays) {
+ dev_err(&pdev->dev, "default is %d but only %d displays\n",
+ mach_info->default_display, mach_info->num_displays);
+ return -EINVAL;
+ }
+
display = mach_info->displays + mach_info->default_display;
irq = platform_get_irq(pdev, 0);
@@ -815,6 +839,7 @@ static int __init s3c2410fb_probe(struct platform_device *pdev)
info = fbinfo->par;
info->dev = &pdev->dev;
+ info->drv_type = drv_type;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
@@ -838,6 +863,8 @@ static int __init s3c2410fb_probe(struct platform_device *pdev)
goto release_mem;
}
+ info->irq_base = info->io + ((drv_type == DRV_S3C2412) ? S3C2412_LCDINTBASE : S3C2410_LCDINTBASE);
+
dprintk("devinit\n");
strcpy(fbinfo->fix.id, driver_name);
@@ -946,6 +973,16 @@ dealloc_fb:
return ret;
}
+static int __init s3c2410fb_probe(struct platform_device *pdev)
+{
+ return s3c24xxfb_probe(pdev, DRV_S3C2410);
+}
+
+static int __init s3c2412fb_probe(struct platform_device *pdev)
+{
+ return s3c24xxfb_probe(pdev, DRV_S3C2412);
+}
+
/* s3c2410fb_stop_lcd
*
* shutdown the lcd controller
@@ -1047,14 +1084,31 @@ static struct platform_driver s3c2410fb_driver = {
},
};
+static struct platform_driver s3c2412fb_driver = {
+ .probe = s3c2412fb_probe,
+ .remove = s3c2410fb_remove,
+ .suspend = s3c2410fb_suspend,
+ .resume = s3c2410fb_resume,
+ .driver = {
+ .name = "s3c2412-lcd",
+ .owner = THIS_MODULE,
+ },
+};
+
int __init s3c2410fb_init(void)
{
- return platform_driver_register(&s3c2410fb_driver);
+ int ret = platform_driver_register(&s3c2410fb_driver);
+
+ if (ret == 0)
+ ret = platform_driver_register(&s3c2412fb_driver);
+
+ return ret;
}
static void __exit s3c2410fb_cleanup(void)
{
platform_driver_unregister(&s3c2410fb_driver);
+ platform_driver_unregister(&s3c2412fb_driver);
}
module_init(s3c2410fb_init);
diff --git a/drivers/video/s3c2410fb.h b/drivers/video/s3c2410fb.h
index 6ce5dc26c5f7..dbb73b95e2ef 100644
--- a/drivers/video/s3c2410fb.h
+++ b/drivers/video/s3c2410fb.h
@@ -25,13 +25,20 @@
#ifndef __S3C2410FB_H
#define __S3C2410FB_H
+enum s3c_drv_type {
+ DRV_S3C2410,
+ DRV_S3C2412,
+};
+
struct s3c2410fb_info {
struct device *dev;
struct clk *clk;
struct resource *mem;
void __iomem *io;
+ void __iomem *irq_base;
+ enum s3c_drv_type drv_type;
struct s3c2410fb_hw regs;
unsigned int palette_ready;
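
The new default_display bounds check in s3c24xxfb_probe() relies on boards describing their panels consistently; a minimal sketch of matching platform data, not part of this patch and with all values invented, might look like:

static struct s3c2410fb_display my_lcd_displays[] = {	/* hypothetical board data */
	[0] = {
		/* timing, geometry and bpp for the one panel go here */
	},
};

static struct s3c2410fb_mach_info my_lcd_info = {
	.displays        = my_lcd_displays,
	.num_displays    = ARRAY_SIZE(my_lcd_displays),
	.default_display = 0,	/* must stay below num_displays or probe now fails */
};

An S3C2412 board would register its device under the new "s3c2412-lcd" name so the probe path selects DRV_S3C2412 and the shifted TPAL/TCONSEL and interrupt register layout.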
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 93ae747440cb..73803624c131 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -427,7 +427,7 @@ sisfb_interpret_edid(struct sisfb_monitor *monitor, u8 *buffer)
monitor->feature = buffer[0x18];
- if(!buffer[0x14] & 0x80) {
+ if(!(buffer[0x14] & 0x80)) {
if(!(buffer[0x14] & 0x08)) {
printk(KERN_INFO
"sisfb: WARNING: Monitor does not support separate syncs\n");
@@ -4621,9 +4621,9 @@ sisfb_find_host_bridge(struct sis_video_info *ivideo, struct pci_dev *mypdev,
while((pdev = pci_get_class(PCI_CLASS_BRIDGE_HOST, pdev))) {
temp = pdev->vendor;
- pci_dev_put(pdev);
if(temp == pcivendor) {
ret = 1;
+ pci_dev_put(pdev);
break;
}
}
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 1be95a68d696..e83dfba7e636 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -48,7 +48,7 @@ enum sm501_controller {
HEAD_PANEL = 1,
};
-/* SM501 memory adress */
+/* SM501 memory address */
struct sm501_mem {
unsigned long size;
unsigned long sm_addr;
@@ -641,6 +641,7 @@ static void sm501fb_panel_power(struct sm501fb_info *fbi, int to)
{
unsigned long control;
void __iomem *ctrl_reg = fbi->regs + SM501_DC_PANEL_CONTROL;
+ struct sm501_platdata_fbsub *pd = fbi->pdata->fb_pnl;
control = readl(ctrl_reg);
@@ -657,26 +658,34 @@ static void sm501fb_panel_power(struct sm501fb_info *fbi, int to)
sm501fb_sync_regs(fbi);
mdelay(10);
- control |= SM501_DC_PANEL_CONTROL_BIAS; /* VBIASEN */
- writel(control, ctrl_reg);
- sm501fb_sync_regs(fbi);
- mdelay(10);
-
- control |= SM501_DC_PANEL_CONTROL_FPEN;
- writel(control, ctrl_reg);
+ if (pd->flags & SM501FB_FLAG_PANEL_USE_VBIASEN) {
+ control |= SM501_DC_PANEL_CONTROL_BIAS; /* VBIASEN */
+ writel(control, ctrl_reg);
+ sm501fb_sync_regs(fbi);
+ mdelay(10);
+ }
+ if (pd->flags & SM501FB_FLAG_PANEL_USE_FPEN) {
+ control |= SM501_DC_PANEL_CONTROL_FPEN;
+ writel(control, ctrl_reg);
+ sm501fb_sync_regs(fbi);
+ mdelay(10);
+ }
} else if (!to && (control & SM501_DC_PANEL_CONTROL_VDD) != 0) {
/* disable panel power */
+ if (pd->flags & SM501FB_FLAG_PANEL_USE_FPEN) {
+ control &= ~SM501_DC_PANEL_CONTROL_FPEN;
+ writel(control, ctrl_reg);
+ sm501fb_sync_regs(fbi);
+ mdelay(10);
+ }
- control &= ~SM501_DC_PANEL_CONTROL_FPEN;
- writel(control, ctrl_reg);
- sm501fb_sync_regs(fbi);
- mdelay(10);
-
- control &= ~SM501_DC_PANEL_CONTROL_BIAS;
- writel(control, ctrl_reg);
- sm501fb_sync_regs(fbi);
- mdelay(10);
+ if (pd->flags & SM501FB_FLAG_PANEL_USE_VBIASEN) {
+ control &= ~SM501_DC_PANEL_CONTROL_BIAS;
+ writel(control, ctrl_reg);
+ sm501fb_sync_regs(fbi);
+ mdelay(10);
+ }
control &= ~SM501_DC_PANEL_CONTROL_DATA;
writel(control, ctrl_reg);
@@ -1267,6 +1276,7 @@ static int sm501fb_start(struct sm501fb_info *info,
{
struct resource *res;
struct device *dev;
+ int k;
int ret;
info->dev = dev = &pdev->dev;
@@ -1328,6 +1338,13 @@ static int sm501fb_start(struct sm501fb_info *info,
info->fbmem_len = (res->end - res->start)+1;
+ /* clear framebuffer memory - avoids garbage data on unused fb */
+ memset(info->fbmem, 0, info->fbmem_len);
+
+ /* clear palette ram - undefined at power on */
+ for (k = 0; k < (256 * 3); k++)
+ writel(0, info->regs + SM501_DC_PANEL_PALETTE + (k * 4));
+
/* enable display controller */
sm501_unit_power(dev->parent, SM501_GATE_DISPLAY, 1);
@@ -1681,6 +1698,15 @@ static int sm501fb_suspend_fb(struct sm501fb_info *info,
if (par->screen.size == 0)
return 0;
+ /* blank the relevant interface to ensure unit power minimised */
+ (par->ops.fb_blank)(FB_BLANK_POWERDOWN, fbi);
+
+ /* tell console/fb driver we are suspending */
+
+ acquire_console_sem();
+ fb_set_suspend(fbi, 1);
+ release_console_sem();
+
/* backup copies in case chip is powered down over suspend */
par->store_fb = vmalloc(par->screen.size);
@@ -1700,12 +1726,6 @@ static int sm501fb_suspend_fb(struct sm501fb_info *info,
memcpy_fromio(par->store_fb, par->screen.k_addr, par->screen.size);
memcpy_fromio(par->store_cursor, par->cursor.k_addr, par->cursor.size);
- /* blank the relevant interface to ensure unit power minimised */
- (par->ops.fb_blank)(FB_BLANK_POWERDOWN, fbi);
-
- acquire_console_sem();
- fb_set_suspend(fbi, 1);
- release_console_sem();
return 0;
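
With VBIASEN and FPEN now gated on the panel platform data, a board that needs the full power sequence has to request it explicitly; a hedged sketch of such platform data (struct name taken from the pdata->fb_pnl use above, all other fields omitted):

static struct sm501_platdata_fbsub my_sm501_panel = {	/* hypothetical board data */
	.flags = SM501FB_FLAG_PANEL_USE_VBIASEN |	/* step VBIASEN during power up/down */
		 SM501FB_FLAG_PANEL_USE_FPEN,		/* and FPEN, matching the old behaviour */
	/* ... display mode, bpp and memory fields as the board requires ... */
};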
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index 057bdd593800..71e179ea5f95 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -1342,7 +1342,7 @@ out_err:
}
#ifndef MODULE
-static void tdfxfb_setup(char *options)
+static void __init tdfxfb_setup(char *options)
{
char *this_opt;
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index a14ef894d571..be27b9c1ed72 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -2003,12 +2003,12 @@ static void __devexit uvesafb_exit(void)
module_exit(uvesafb_exit);
-static inline int param_get_scroll(char *buffer, struct kernel_param *kp)
+static int param_get_scroll(char *buffer, struct kernel_param *kp)
{
return 0;
}
-static inline int param_set_scroll(const char *val, struct kernel_param *kp)
+static int param_set_scroll(const char *val, struct kernel_param *kp)
{
ypan = 0;
@@ -2022,11 +2022,11 @@ static inline int param_set_scroll(const char *val, struct kernel_param *kp)
return 0;
}
-#define param_check_scroll(name, p) __param_check(name, p, void);
+#define param_check_scroll(name, p) __param_check(name, p, void)
module_param_named(scroll, ypan, scroll, 0);
MODULE_PARM_DESC(scroll,
- "Scrolling mode, set to 'redraw', ''ypan' or 'ywrap'");
+ "Scrolling mode, set to 'redraw', 'ypan', or 'ywrap'");
module_param_named(vgapal, pmi_setpal, invbool, 0);
MODULE_PARM_DESC(vgapal, "Set palette using VGA registers");
module_param_named(pmipal, pmi_setpal, bool, 0);
diff --git a/drivers/video/vermilion/vermilion.c b/drivers/video/vermilion/vermilion.c
index 1c656667b937..2aa71eb67c2b 100644
--- a/drivers/video/vermilion/vermilion.c
+++ b/drivers/video/vermilion/vermilion.c
@@ -651,7 +651,7 @@ static int vmlfb_check_var_locked(struct fb_var_screeninfo *var,
return -EINVAL;
}
- pitch = __ALIGN_MASK((var->xres * var->bits_per_pixel) >> 3, 0x3F);
+ pitch = ALIGN((var->xres * var->bits_per_pixel) >> 3, 0x40);
mem = pitch * var->yres_virtual;
if (mem > vinfo->vram_contig_size) {
return -ENOMEM;
@@ -785,8 +785,7 @@ static int vmlfb_set_par_locked(struct vml_info *vinfo)
int clock;
vinfo->bytes_per_pixel = var->bits_per_pixel >> 3;
- vinfo->stride =
- __ALIGN_MASK(var->xres_virtual * vinfo->bytes_per_pixel, 0x3F);
+ vinfo->stride = ALIGN(var->xres_virtual * vinfo->bytes_per_pixel, 0x40);
info->fix.line_length = vinfo->stride;
if (!subsys)
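
The vermilion change is behaviour-preserving: ALIGN(x, 0x40) expands to __ALIGN_MASK(x, 0x3F), so for example a 100-byte line still rounds up to (100 + 63) & ~63 = 128 bytes either way; only the more readable macro is used.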
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index e38d3b7c3ad7..7b3a8423f485 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -459,8 +459,8 @@ static int __devexit xilinxfb_of_remove(struct of_device *op)
}
/* Match table for of_platform binding */
-static struct of_device_id __devinit xilinxfb_of_match[] = {
- { .compatible = "xilinx,ml300-fb", },
+static struct of_device_id xilinxfb_of_match[] __devinitdata = {
+ { .compatible = "xlnx,plb-tft-cntlr-ref-1.00.a", },
{},
};
MODULE_DEVICE_TABLE(of, xilinxfb_of_match);
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 9e33fc4da875..3dd6294d10b6 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -1,8 +1,35 @@
# Virtio always gets selected by whoever wants it.
config VIRTIO
- bool
+ tristate
# Similarly the virtio ring implementation.
config VIRTIO_RING
- bool
+ tristate
depends on VIRTIO
+
+config VIRTIO_PCI
+ tristate "PCI driver for virtio devices (EXPERIMENTAL)"
+ depends on PCI && EXPERIMENTAL
+ select VIRTIO
+ select VIRTIO_RING
+ ---help---
+ This driver provides support for virtio-based paravirtual device
+ drivers over PCI. This requires that your VMM has appropriate PCI
+ virtio backends. Most QEMU-based VMMs (like KVM or Xen) should
+ support these devices.
+
+ Currently, the ABI is not considered stable so there is no guarantee
+ that this version of the driver will work with your VMM.
+
+ If unsure, say M.
+
+config VIRTIO_BALLOON
+ tristate "Virtio balloon driver (EXPERIMENTAL)"
+ select VIRTIO
+ select VIRTIO_RING
+ ---help---
+ This driver supports increasing and decreasing the amount
+ of memory within a KVM guest.
+
+ If unsure, say M.
+
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index f70e40971dd9..6738c446c199 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_VIRTIO) += virtio.o
obj-$(CONFIG_VIRTIO_RING) += virtio_ring.o
+obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
+obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 69d7ea02cd48..b535483bc556 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -102,9 +102,13 @@ static int virtio_dev_remove(struct device *_d)
struct virtio_driver *drv = container_of(dev->dev.driver,
struct virtio_driver, driver);
- dev->config->set_status(dev, dev->config->get_status(dev)
- & ~VIRTIO_CONFIG_S_DRIVER);
drv->remove(dev);
+
+ /* Driver should have reset device. */
+ BUG_ON(dev->config->get_status(dev));
+
+ /* Acknowledge the device's existence again. */
+ add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
return 0;
}
@@ -130,6 +134,10 @@ int register_virtio_device(struct virtio_device *dev)
dev->dev.bus = &virtio_bus;
sprintf(dev->dev.bus_id, "%u", dev->index);
+ /* We always start by resetting the device, in case a previous
+ * driver messed it up. This also tests that code path a little. */
+ dev->config->reset(dev);
+
/* Acknowledge that we've seen the device. */
add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
@@ -148,55 +156,18 @@ void unregister_virtio_device(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(unregister_virtio_device);
-int __virtio_config_val(struct virtio_device *vdev,
- u8 type, void *val, size_t size)
-{
- void *token;
- unsigned int len;
-
- token = vdev->config->find(vdev, type, &len);
- if (!token)
- return -ENOENT;
-
- if (len != size)
- return -EIO;
-
- vdev->config->get(vdev, token, val, size);
- return 0;
-}
-EXPORT_SYMBOL_GPL(__virtio_config_val);
-
-int virtio_use_bit(struct virtio_device *vdev,
- void *token, unsigned int len, unsigned int bitnum)
-{
- unsigned long bits[16];
-
- /* This makes it convenient to pass-through find() results. */
- if (!token)
- return 0;
-
- /* bit not in range of this bitfield? */
- if (bitnum * 8 >= len / 2)
- return 0;
-
- /* Giant feature bitfields are silly. */
- BUG_ON(len > sizeof(bits));
- vdev->config->get(vdev, token, bits, len);
-
- if (!test_bit(bitnum, bits))
- return 0;
-
- /* Set acknowledge bit, and write it back. */
- set_bit(bitnum + len * 8 / 2, bits);
- vdev->config->set(vdev, token, bits, len);
- return 1;
-}
-EXPORT_SYMBOL_GPL(virtio_use_bit);
-
static int virtio_init(void)
{
if (bus_register(&virtio_bus) != 0)
panic("virtio bus registration failed");
return 0;
}
+
+static void __exit virtio_exit(void)
+{
+ bus_unregister(&virtio_bus);
+}
core_initcall(virtio_init);
+module_exit(virtio_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
new file mode 100644
index 000000000000..c8a4332d1132
--- /dev/null
+++ b/drivers/virtio/virtio_balloon.c
@@ -0,0 +1,285 @@
+/* Virtio balloon implementation, inspired by Dor Laor and Marcelo
+ * Tosatti's implementations.
+ *
+ * Copyright 2008 Rusty Russell IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+//#define DEBUG
+#include <linux/virtio.h>
+#include <linux/virtio_balloon.h>
+#include <linux/swap.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/delay.h>
+
+struct virtio_balloon
+{
+ struct virtio_device *vdev;
+ struct virtqueue *inflate_vq, *deflate_vq;
+
+ /* Where the ballooning thread waits for config to change. */
+ wait_queue_head_t config_change;
+
+ /* The thread servicing the balloon. */
+ struct task_struct *thread;
+
+ /* Waiting for host to ack the pages we released. */
+ struct completion acked;
+
+ /* Do we have to tell Host *before* we reuse pages? */
+ bool tell_host_first;
+
+ /* The pages we've told the Host we're not using. */
+ unsigned int num_pages;
+ struct list_head pages;
+
+ /* The array of pfns we tell the Host about. */
+ unsigned int num_pfns;
+ u32 pfns[256];
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static void balloon_ack(struct virtqueue *vq)
+{
+ struct virtio_balloon *vb;
+ unsigned int len;
+
+ vb = vq->vq_ops->get_buf(vq, &len);
+ if (vb)
+ complete(&vb->acked);
+}
+
+static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
+{
+ struct scatterlist sg;
+
+ sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
+
+ init_completion(&vb->acked);
+
+ /* We should always be able to add one buffer to an empty queue. */
+ if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) != 0)
+ BUG();
+ vq->vq_ops->kick(vq);
+
+ /* When host has read buffer, this completes via balloon_ack */
+ wait_for_completion(&vb->acked);
+}
+
+static void fill_balloon(struct virtio_balloon *vb, size_t num)
+{
+ /* We can only do one array worth at a time. */
+ num = min(num, ARRAY_SIZE(vb->pfns));
+
+ for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) {
+ struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY);
+ if (!page) {
+ if (printk_ratelimit())
+ dev_printk(KERN_INFO, &vb->vdev->dev,
+ "Out of puff! Can't get %zu pages\n",
+ num);
+ /* Sleep for at least 1/5 of a second before retry. */
+ msleep(200);
+ break;
+ }
+ vb->pfns[vb->num_pfns] = page_to_pfn(page);
+ totalram_pages--;
+ vb->num_pages++;
+ list_add(&page->lru, &vb->pages);
+ }
+
+ /* Didn't get any? Oh well. */
+ if (vb->num_pfns == 0)
+ return;
+
+ tell_host(vb, vb->inflate_vq);
+}
+
+static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ __free_page(pfn_to_page(pfns[i]));
+ totalram_pages++;
+ }
+}
+
+static void leak_balloon(struct virtio_balloon *vb, size_t num)
+{
+ struct page *page;
+
+ /* We can only do one array worth at a time. */
+ num = min(num, ARRAY_SIZE(vb->pfns));
+
+ for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) {
+ page = list_first_entry(&vb->pages, struct page, lru);
+ list_del(&page->lru);
+ vb->pfns[vb->num_pfns] = page_to_pfn(page);
+ vb->num_pages--;
+ }
+
+ if (vb->tell_host_first) {
+ tell_host(vb, vb->deflate_vq);
+ release_pages_by_pfn(vb->pfns, vb->num_pfns);
+ } else {
+ release_pages_by_pfn(vb->pfns, vb->num_pfns);
+ tell_host(vb, vb->deflate_vq);
+ }
+}
+
+static void virtballoon_changed(struct virtio_device *vdev)
+{
+ struct virtio_balloon *vb = vdev->priv;
+
+ wake_up(&vb->config_change);
+}
+
+static inline int towards_target(struct virtio_balloon *vb)
+{
+ u32 v;
+ __virtio_config_val(vb->vdev,
+ offsetof(struct virtio_balloon_config, num_pages),
+ &v);
+ return v - vb->num_pages;
+}
+
+static void update_balloon_size(struct virtio_balloon *vb)
+{
+ __le32 actual = cpu_to_le32(vb->num_pages);
+
+ vb->vdev->config->set(vb->vdev,
+ offsetof(struct virtio_balloon_config, actual),
+ &actual, sizeof(actual));
+}
+
+static int balloon(void *_vballoon)
+{
+ struct virtio_balloon *vb = _vballoon;
+
+ set_freezable();
+ while (!kthread_should_stop()) {
+ int diff;
+
+ try_to_freeze();
+ wait_event_interruptible(vb->config_change,
+ (diff = towards_target(vb)) != 0
+ || kthread_should_stop());
+ if (diff > 0)
+ fill_balloon(vb, diff);
+ else if (diff < 0)
+ leak_balloon(vb, -diff);
+ update_balloon_size(vb);
+ }
+ return 0;
+}
+
+static int virtballoon_probe(struct virtio_device *vdev)
+{
+ struct virtio_balloon *vb;
+ int err;
+
+ vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
+ if (!vb) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&vb->pages);
+ vb->num_pages = 0;
+ init_waitqueue_head(&vb->config_change);
+ vb->vdev = vdev;
+
+ /* We expect two virtqueues. */
+ vb->inflate_vq = vdev->config->find_vq(vdev, 0, balloon_ack);
+ if (IS_ERR(vb->inflate_vq)) {
+ err = PTR_ERR(vb->inflate_vq);
+ goto out_free_vb;
+ }
+
+ vb->deflate_vq = vdev->config->find_vq(vdev, 1, balloon_ack);
+ if (IS_ERR(vb->deflate_vq)) {
+ err = PTR_ERR(vb->deflate_vq);
+ goto out_del_inflate_vq;
+ }
+
+ vb->thread = kthread_run(balloon, vb, "vballoon");
+ if (IS_ERR(vb->thread)) {
+ err = PTR_ERR(vb->thread);
+ goto out_del_deflate_vq;
+ }
+
+ vb->tell_host_first
+ = vdev->config->feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
+
+ return 0;
+
+out_del_deflate_vq:
+ vdev->config->del_vq(vb->deflate_vq);
+out_del_inflate_vq:
+ vdev->config->del_vq(vb->inflate_vq);
+out_free_vb:
+ kfree(vb);
+out:
+ return err;
+}
+
+static void virtballoon_remove(struct virtio_device *vdev)
+{
+ struct virtio_balloon *vb = vdev->priv;
+
+ kthread_stop(vb->thread);
+
+ /* There might be pages left in the balloon: free them. */
+ while (vb->num_pages)
+ leak_balloon(vb, vb->num_pages);
+
+ /* Now we reset the device so we can clean up the queues. */
+ vdev->config->reset(vdev);
+
+ vdev->config->del_vq(vb->deflate_vq);
+ vdev->config->del_vq(vb->inflate_vq);
+ kfree(vb);
+}
+
+static struct virtio_driver virtio_balloon = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtballoon_probe,
+ .remove = __devexit_p(virtballoon_remove),
+ .config_changed = virtballoon_changed,
+};
+
+static int __init init(void)
+{
+ return register_virtio_driver(&virtio_balloon);
+}
+
+static void __exit fini(void)
+{
+ unregister_virtio_driver(&virtio_balloon);
+}
+module_init(init);
+module_exit(fini);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio balloon driver");
+MODULE_LICENSE("GPL");
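
towards_target() and update_balloon_size() index the device config space with offsetof(); the layout they assume, as defined by the virtio_balloon header, is essentially:

struct virtio_balloon_config {
	__le32 num_pages;	/* target size in pages, written by the host */
	__le32 actual;		/* current size, written back by the guest */
};

The balloon thread simply moves the difference between the two in the appropriate direction and then reports the new actual value.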
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
new file mode 100644
index 000000000000..26f787ddd5ff
--- /dev/null
+++ b/drivers/virtio/virtio_pci.c
@@ -0,0 +1,446 @@
+/*
+ * Virtio PCI driver
+ *
+ * This module allows virtio devices to be used over a virtual PCI device.
+ * This can be used with QEMU based VMMs like KVM or Xen.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_pci.h>
+#include <linux/highmem.h>
+#include <linux/spinlock.h>
+
+MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
+MODULE_DESCRIPTION("virtio-pci");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1");
+
+/* Our device structure */
+struct virtio_pci_device
+{
+ struct virtio_device vdev;
+ struct pci_dev *pci_dev;
+
+ /* the IO mapping for the PCI config space */
+ void *ioaddr;
+
+ /* a list of queues so we can dispatch IRQs */
+ spinlock_t lock;
+ struct list_head virtqueues;
+};
+
+struct virtio_pci_vq_info
+{
+ /* the actual virtqueue */
+ struct virtqueue *vq;
+
+ /* the number of entries in the queue */
+ int num;
+
+ /* the index of the queue */
+ int queue_index;
+
+ /* the virtual address of the ring queue */
+ void *queue;
+
+ /* the list node for the virtqueues list */
+ struct list_head node;
+};
+
+/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
+static struct pci_device_id virtio_pci_id_table[] = {
+ { 0x1af4, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
+
+/* A PCI device has its own struct device and so does a virtio device so
+ * we create a place for the virtio devices to show up in sysfs. I think it
+ * would make more sense for virtio to not insist on having its own device. */
+static struct device virtio_pci_root = {
+ .parent = NULL,
+ .bus_id = "virtio-pci",
+};
+
+/* Unique numbering for devices under the kvm root */
+static unsigned int dev_index;
+
+/* Convert a generic virtio device to our structure */
+static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
+{
+ return container_of(vdev, struct virtio_pci_device, vdev);
+}
+
+/* virtio config->feature() implementation */
+static bool vp_feature(struct virtio_device *vdev, unsigned bit)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ u32 mask;
+
+ /* Since this function is supposed to have the side effect of
+ * enabling a queried feature, we simulate that by doing a read
+ * from the host feature bitmask and then writing to the guest
+ * feature bitmask */
+ mask = ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
+ if (mask & (1 << bit)) {
+ mask |= (1 << bit);
+ iowrite32(mask, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);
+ }
+
+ return !!(mask & (1 << bit));
+}
+
+/* virtio config->get() implementation */
+static void vp_get(struct virtio_device *vdev, unsigned offset,
+ void *buf, unsigned len)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ void *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
+ u8 *ptr = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ ptr[i] = ioread8(ioaddr + i);
+}
+
+/* the config->set() implementation. it's symmetric to the config->get()
+ * implementation */
+static void vp_set(struct virtio_device *vdev, unsigned offset,
+ const void *buf, unsigned len)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ void *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
+ const u8 *ptr = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ iowrite8(ptr[i], ioaddr + i);
+}
+
+/* config->{get,set}_status() implementations */
+static u8 vp_get_status(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+}
+
+static void vp_set_status(struct virtio_device *vdev, u8 status)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ /* We should never be setting status to 0. */
+ BUG_ON(status == 0);
+ return iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+}
+
+static void vp_reset(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ /* 0 status means a reset. */
+ return iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+}
+
+/* the notify function used when creating a virt queue */
+static void vp_notify(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_vq_info *info = vq->priv;
+
+ /* we write the queue's selector into the notification register to
+ * signal the other end */
+ iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+}
+
+/* A small wrapper to also acknowledge the interrupt when it's handled.
+ * I really need an EIO hook for the vring so I can ack the interrupt once we
+ * know that we'll be handling the IRQ but before we invoke the callback since
+ * the callback may notify the host which results in the host attempting to
+ * raise an interrupt that we would then mask once we acknowledged the
+ * interrupt. */
+static irqreturn_t vp_interrupt(int irq, void *opaque)
+{
+ struct virtio_pci_device *vp_dev = opaque;
+ struct virtio_pci_vq_info *info;
+ irqreturn_t ret = IRQ_NONE;
+ u8 isr;
+
+ /* reading the ISR has the effect of also clearing it so it's very
+ * important to save off the value. */
+ isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
+
+ /* It's definitely not us if the ISR was not high */
+ if (!isr)
+ return IRQ_NONE;
+
+ /* Configuration change? Tell driver if it wants to know. */
+ if (isr & VIRTIO_PCI_ISR_CONFIG) {
+ struct virtio_driver *drv;
+ drv = container_of(vp_dev->vdev.dev.driver,
+ struct virtio_driver, driver);
+
+ if (drv->config_changed)
+ drv->config_changed(&vp_dev->vdev);
+ }
+
+ spin_lock(&vp_dev->lock);
+ list_for_each_entry(info, &vp_dev->virtqueues, node) {
+ if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ spin_unlock(&vp_dev->lock);
+
+ return ret;
+}
+
+/* the config->find_vq() implementation */
+static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
+ void (*callback)(struct virtqueue *vq))
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_vq_info *info;
+ struct virtqueue *vq;
+ u16 num;
+ int err;
+
+ /* Select the queue we're interested in */
+ iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+
+ /* Check if queue is either not available or already active. */
+ num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
+ if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
+ return ERR_PTR(-ENOENT);
+
+ /* allocate and fill out our structure that represents an active
+ * queue */
+ info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ info->queue_index = index;
+ info->num = num;
+
+ info->queue = kzalloc(PAGE_ALIGN(vring_size(num, PAGE_SIZE)), GFP_KERNEL);
+ if (info->queue == NULL) {
+ err = -ENOMEM;
+ goto out_info;
+ }
+
+ /* activate the queue */
+ iowrite32(virt_to_phys(info->queue) >> PAGE_SHIFT,
+ vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+
+ /* create the vring */
+ vq = vring_new_virtqueue(info->num, vdev, info->queue,
+ vp_notify, callback);
+ if (!vq) {
+ err = -ENOMEM;
+ goto out_activate_queue;
+ }
+
+ vq->priv = info;
+ info->vq = vq;
+
+ spin_lock(&vp_dev->lock);
+ list_add(&info->node, &vp_dev->virtqueues);
+ spin_unlock(&vp_dev->lock);
+
+ return vq;
+
+out_activate_queue:
+ iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+ kfree(info->queue);
+out_info:
+ kfree(info);
+ return ERR_PTR(err);
+}
+
+/* the config->del_vq() implementation */
+static void vp_del_vq(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_vq_info *info = vq->priv;
+
+ spin_lock(&vp_dev->lock);
+ list_del(&info->node);
+ spin_unlock(&vp_dev->lock);
+
+ vring_del_virtqueue(vq);
+
+ /* Select and deactivate the queue */
+ iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+ iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+
+ kfree(info->queue);
+ kfree(info);
+}
+
+static struct virtio_config_ops virtio_pci_config_ops = {
+ .feature = vp_feature,
+ .get = vp_get,
+ .set = vp_set,
+ .get_status = vp_get_status,
+ .set_status = vp_set_status,
+ .reset = vp_reset,
+ .find_vq = vp_find_vq,
+ .del_vq = vp_del_vq,
+};
+
+/* the PCI probing function */
+static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ struct virtio_pci_device *vp_dev;
+ int err;
+
+ /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
+ if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
+ return -ENODEV;
+
+ if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
+ printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
+ VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
+ return -ENODEV;
+ }
+
+ /* allocate our structure and fill it out */
+ vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
+ if (vp_dev == NULL)
+ return -ENOMEM;
+
+ snprintf(vp_dev->vdev.dev.bus_id, BUS_ID_SIZE, "virtio%d", dev_index);
+ vp_dev->vdev.index = dev_index;
+ dev_index++;
+
+ vp_dev->vdev.dev.parent = &virtio_pci_root;
+ vp_dev->vdev.config = &virtio_pci_config_ops;
+ vp_dev->pci_dev = pci_dev;
+ INIT_LIST_HEAD(&vp_dev->virtqueues);
+ spin_lock_init(&vp_dev->lock);
+
+ /* enable the device */
+ err = pci_enable_device(pci_dev);
+ if (err)
+ goto out;
+
+ err = pci_request_regions(pci_dev, "virtio-pci");
+ if (err)
+ goto out_enable_device;
+
+ vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
+ if (vp_dev->ioaddr == NULL) {
+ err = -ENOMEM;
+ goto out_req_regions;
+ }
+
+ pci_set_drvdata(pci_dev, vp_dev);
+
+ /* we use the subsystem vendor/device id as the virtio vendor/device
+ * id. this allows us to use the same PCI vendor/device id for all
+ * virtio devices and to identify the particular virtio driver by
+ * the subsystem ids */
+ vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
+ vp_dev->vdev.id.device = pci_dev->subsystem_device;
+
+ /* register a handler for the queue with the PCI device's interrupt */
+ err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
+ vp_dev->vdev.dev.bus_id, vp_dev);
+ if (err)
+ goto out_set_drvdata;
+
+ /* finally register the virtio device */
+ err = register_virtio_device(&vp_dev->vdev);
+ if (err)
+ goto out_req_irq;
+
+ return 0;
+
+out_req_irq:
+ free_irq(pci_dev->irq, vp_dev);
+out_set_drvdata:
+ pci_set_drvdata(pci_dev, NULL);
+ pci_iounmap(pci_dev, vp_dev->ioaddr);
+out_req_regions:
+ pci_release_regions(pci_dev);
+out_enable_device:
+ pci_disable_device(pci_dev);
+out:
+ kfree(vp_dev);
+ return err;
+}
+
+static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
+{
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+
+ free_irq(pci_dev->irq, vp_dev);
+ pci_set_drvdata(pci_dev, NULL);
+ pci_iounmap(pci_dev, vp_dev->ioaddr);
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+ kfree(vp_dev);
+}
+
+#ifdef CONFIG_PM
+static int virtio_pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+ pci_save_state(pci_dev);
+ pci_set_power_state(pci_dev, PCI_D3hot);
+ return 0;
+}
+
+static int virtio_pci_resume(struct pci_dev *pci_dev)
+{
+ pci_restore_state(pci_dev);
+ pci_set_power_state(pci_dev, PCI_D0);
+ return 0;
+}
+#endif
+
+static struct pci_driver virtio_pci_driver = {
+ .name = "virtio-pci",
+ .id_table = virtio_pci_id_table,
+ .probe = virtio_pci_probe,
+ .remove = virtio_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = virtio_pci_suspend,
+ .resume = virtio_pci_resume,
+#endif
+};
+
+static int __init virtio_pci_init(void)
+{
+ int err;
+
+ err = device_register(&virtio_pci_root);
+ if (err)
+ return err;
+
+ err = pci_register_driver(&virtio_pci_driver);
+ if (err)
+ device_unregister(&virtio_pci_root);
+
+ return err;
+}
+
+module_init(virtio_pci_init);
+
+static void __exit virtio_pci_exit(void)
+{
+ pci_unregister_driver(&virtio_pci_driver);
+ device_unregister(&virtio_pci_root);
+}
+
+module_exit(virtio_pci_exit);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 1dc04b6684e6..3a28c1382131 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -87,6 +87,8 @@ static int vring_add_buf(struct virtqueue *_vq,
if (vq->num_free < out + in) {
pr_debug("Can't add buf len %i - avail = %i\n",
out + in, vq->num_free);
+ /* We notify *even if* VRING_USED_F_NO_NOTIFY is set here. */
+ vq->notify(&vq->vq);
END_USE(vq);
return -ENOSPC;
}
@@ -97,16 +99,14 @@ static int vring_add_buf(struct virtqueue *_vq,
head = vq->free_head;
for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
- vq->vring.desc[i].addr = (page_to_pfn(sg_page(sg))<<PAGE_SHIFT)
- + sg->offset;
+ vq->vring.desc[i].addr = sg_phys(sg);
vq->vring.desc[i].len = sg->length;
prev = i;
sg++;
}
for (; in; i = vq->vring.desc[i].next, in--) {
vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
- vq->vring.desc[i].addr = (page_to_pfn(sg_page(sg))<<PAGE_SHIFT)
- + sg->offset;
+ vq->vring.desc[i].addr = sg_phys(sg);
vq->vring.desc[i].len = sg->length;
prev = i;
sg++;
@@ -171,16 +171,6 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
vq->num_free++;
}
-/* FIXME: We need to tell other side about removal, to synchronize. */
-static void vring_shutdown(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
- unsigned int i;
-
- for (i = 0; i < vq->vring.num; i++)
- detach_buf(vq, i);
-}
-
static inline bool more_used(const struct vring_virtqueue *vq)
{
return vq->last_used_idx != vq->vring.used->idx;
@@ -220,7 +210,17 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
return ret;
}
-static bool vring_restart(struct virtqueue *_vq)
+static void vring_disable_cb(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ START_USE(vq);
+ BUG_ON(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
+ vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+ END_USE(vq);
+}
+
+static bool vring_enable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -253,26 +253,34 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
if (unlikely(vq->broken))
return IRQ_HANDLED;
+ /* Other side may have missed us turning off the interrupt,
+ * but we should preserve disable semantic for virtio users. */
+ if (unlikely(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
+ pr_debug("virtqueue interrupt after disable for %p\n", vq);
+ return IRQ_HANDLED;
+ }
+
pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
- if (vq->vq.callback && !vq->vq.callback(&vq->vq))
- vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+ if (vq->vq.callback)
+ vq->vq.callback(&vq->vq);
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(vring_interrupt);
static struct virtqueue_ops vring_vq_ops = {
.add_buf = vring_add_buf,
.get_buf = vring_get_buf,
.kick = vring_kick,
- .restart = vring_restart,
- .shutdown = vring_shutdown,
+ .disable_cb = vring_disable_cb,
+ .enable_cb = vring_enable_cb,
};
struct virtqueue *vring_new_virtqueue(unsigned int num,
struct virtio_device *vdev,
void *pages,
void (*notify)(struct virtqueue *),
- bool (*callback)(struct virtqueue *))
+ void (*callback)(struct virtqueue *))
{
struct vring_virtqueue *vq;
unsigned int i;
@@ -311,9 +319,12 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
return &vq->vq;
}
+EXPORT_SYMBOL_GPL(vring_new_virtqueue);
void vring_del_virtqueue(struct virtqueue *vq)
{
kfree(to_vvq(vq));
}
+EXPORT_SYMBOL_GPL(vring_del_virtqueue);
+MODULE_LICENSE("GPL");
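
With virtqueue callbacks now returning void, interrupt mitigation is done explicitly through the new disable_cb/enable_cb ops; a minimal sketch, not part of this patch, of how a consumer might use them (consume() and the callback itself are hypothetical, only the vq_ops calls come from this API):

static void my_vq_callback(struct virtqueue *vq)	/* hypothetical driver callback */
{
	unsigned int len;
	void *buf;

	do {
		vq->vq_ops->disable_cb(vq);	/* quiesce callbacks while draining */
		while ((buf = vq->vq_ops->get_buf(vq, &len)) != NULL)
			consume(buf, len);	/* hypothetical per-buffer handler */
	} while (!vq->vq_ops->enable_cb(vq));	/* false: more buffers raced in, drain again */
}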
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 8236d447adf5..c4493091c655 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -42,5 +42,15 @@ config W1_MASTER_DS1WM
in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
hx4700.
+config W1_MASTER_GPIO
+ tristate "GPIO 1-wire busmaster"
+ depends on GENERIC_GPIO
+ help
+ Say Y here if you want to communicate with your 1-wire devices using
+ GPIO pins. This driver uses the GPIO API to control the wire.
+
+ This support is also available as a module. If so, the module
+ will be called w1-gpio.
+
endmenu
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index 11551b328186..1420b5bbdda8 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_W1_MASTER_MATROX) += matrox_w1.o
obj-$(CONFIG_W1_MASTER_DS2490) += ds2490.o
obj-$(CONFIG_W1_MASTER_DS2482) += ds2482.o
obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o
+obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 5747997f8d7d..688e435b4d9a 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -361,11 +361,12 @@ static int ds1wm_probe(struct platform_device *pdev)
goto err1;
}
ds1wm_data->irq = res->start;
- ds1wm_data->active_high = (res->flags & IORESOURCE_IRQ_HIGHEDGE) ?
- 1 : 0;
+ ds1wm_data->active_high = plat->active_high;
- set_irq_type(ds1wm_data->irq, ds1wm_data->active_high ?
- IRQ_TYPE_EDGE_RISING : IRQ_TYPE_EDGE_FALLING);
+ if (res->flags & IORESOURCE_IRQ_HIGHEDGE)
+ set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING);
+ if (res->flags & IORESOURCE_IRQ_LOWEDGE)
+ set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED,
"ds1wm", ds1wm_data);
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
new file mode 100644
index 000000000000..9e1138a75e8b
--- /dev/null
+++ b/drivers/w1/masters/w1-gpio.c
@@ -0,0 +1,124 @@
+/*
+ * w1-gpio - GPIO w1 bus master driver
+ *
+ * Copyright (C) 2007 Ville Syrjala <syrjala@sci.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/w1-gpio.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+
+#include <asm/gpio.h>
+
+static void w1_gpio_write_bit_dir(void *data, u8 bit)
+{
+ struct w1_gpio_platform_data *pdata = data;
+
+ if (bit)
+ gpio_direction_input(pdata->pin);
+ else
+ gpio_direction_output(pdata->pin, 0);
+}
+
+static void w1_gpio_write_bit_val(void *data, u8 bit)
+{
+ struct w1_gpio_platform_data *pdata = data;
+
+ gpio_set_value(pdata->pin, bit);
+}
+
+static u8 w1_gpio_read_bit(void *data)
+{
+ struct w1_gpio_platform_data *pdata = data;
+
+ return gpio_get_value(pdata->pin);
+}
+
+static int __init w1_gpio_probe(struct platform_device *pdev)
+{
+ struct w1_bus_master *master;
+ struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+ int err;
+
+ if (!pdata)
+ return -ENXIO;
+
+ master = kzalloc(sizeof(struct w1_bus_master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ err = gpio_request(pdata->pin, "w1");
+ if (err)
+ goto free_master;
+
+ master->data = pdata;
+ master->read_bit = w1_gpio_read_bit;
+
+ if (pdata->is_open_drain) {
+ gpio_direction_output(pdata->pin, 1);
+ master->write_bit = w1_gpio_write_bit_val;
+ } else {
+ gpio_direction_input(pdata->pin);
+ master->write_bit = w1_gpio_write_bit_dir;
+ }
+
+ err = w1_add_master_device(master);
+ if (err)
+ goto free_gpio;
+
+ platform_set_drvdata(pdev, master);
+
+ return 0;
+
+ free_gpio:
+ gpio_free(pdata->pin);
+ free_master:
+ kfree(master);
+
+ return err;
+}
+
+static int __exit w1_gpio_remove(struct platform_device *pdev)
+{
+ struct w1_bus_master *master = platform_get_drvdata(pdev);
+ struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+
+ w1_remove_master_device(master);
+ gpio_free(pdata->pin);
+ kfree(master);
+
+ return 0;
+}
+
+static struct platform_driver w1_gpio_driver = {
+ .driver = {
+ .name = "w1-gpio",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(w1_gpio_remove),
+};
+
+static int __init w1_gpio_init(void)
+{
+ return platform_driver_probe(&w1_gpio_driver, w1_gpio_probe);
+}
+
+static void __exit w1_gpio_exit(void)
+{
+ platform_driver_unregister(&w1_gpio_driver);
+}
+
+module_init(w1_gpio_init);
+module_exit(w1_gpio_exit);
+
+MODULE_DESCRIPTION("GPIO w1 bus master driver");
+MODULE_AUTHOR("Ville Syrjala <syrjala@sci.fi>");
+MODULE_LICENSE("GPL");
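
w1-gpio only binds when a board registers a matching platform device; a minimal board-code sketch, with the GPIO number invented and the field names taken from the driver above:

#include <linux/platform_device.h>
#include <linux/w1-gpio.h>

static struct w1_gpio_platform_data my_w1_pdata = {
	.pin           = 42,	/* hypothetical GPIO number */
	.is_open_drain = 0,	/* drive the bus by switching pin direction */
};

static struct platform_device my_w1_device = {
	.name = "w1-gpio",
	.id   = -1,
	.dev  = {
		.platform_data = &my_w1_pdata,
	},
};

/* registered from board init code: platform_device_register(&my_w1_device); */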
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 112f4ec59035..fb28acaeed6c 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -92,6 +92,7 @@ struct w1_therm_family_converter
int (*convert)(u8 rom[9]);
};
+/* The return value is millidegrees Centigrade. */
static inline int w1_DS18B20_convert_temp(u8 rom[9]);
static inline int w1_DS18S20_convert_temp(u8 rom[9]);
@@ -113,7 +114,7 @@ static struct w1_therm_family_converter w1_therm_families[] = {
static inline int w1_DS18B20_convert_temp(u8 rom[9])
{
s16 t = (rom[1] << 8) | rom[0];
- t /= 16;
+ t = t*1000/16;
return t;
}
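
With the corrected scaling the DS18B20 path really does return millidegrees as the new comment states: a scratchpad of rom[1] = 0x07, rom[0] = 0xD0 is the raw value 0x07D0 = 2000, which now yields 2000 * 1000 / 16 = 125000 (125.000 degrees C), where the old code returned a truncated 125.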
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 33e50310e9e0..7293c9b11f91 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -675,7 +675,6 @@ static void w1_slave_found(void *data, u64 rn)
struct w1_slave *sl;
struct list_head *ent;
struct w1_reg_num *tmp;
- int family_found = 0;
struct w1_master *dev;
u64 rn_le = cpu_to_le64(rn);
@@ -698,9 +697,6 @@ static void w1_slave_found(void *data, u64 rn)
sl->reg_num.crc == tmp->crc) {
set_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags);
break;
- } else if (sl->reg_num.family == tmp->family) {
- family_found = 1;
- break;
}
slave_count++;
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index cecbedd473a4..61dde863bd40 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -52,7 +52,7 @@
* overflow periods respectively.
*
* Also, since we can't really expect userspace to be responsive enough
- * before the overflow happens, we maintain two seperate timers .. One in
+ * before the overflow happens, we maintain two separate timers .. One in
* the kernel for clearing out WOVF every 2ms or so (again, this depends on
* HZ == 1000), and another for monitoring userspace writes to the WDT device.
*