summaryrefslogtreecommitdiff
path: root/drivers/staging/iio
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/staging/iio')
-rw-r--r--drivers/staging/iio/Documentation/device.txt49
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h159
-rw-r--r--drivers/staging/iio/Documentation/lis3l02dqbuffersimple.c171
-rw-r--r--drivers/staging/iio/Documentation/overview.txt62
-rw-r--r--drivers/staging/iio/Documentation/ring.txt61
-rw-r--r--drivers/staging/iio/Documentation/trigger.txt38
-rw-r--r--drivers/staging/iio/Documentation/userspace.txt60
-rw-r--r--drivers/staging/iio/Kconfig47
-rw-r--r--drivers/staging/iio/Makefile16
-rw-r--r--drivers/staging/iio/TODO69
-rw-r--r--drivers/staging/iio/accel/Kconfig27
-rw-r--r--drivers/staging/iio/accel/Makefile11
-rw-r--r--drivers/staging/iio/accel/accel.h167
-rw-r--r--drivers/staging/iio/accel/kxsd9.c395
-rw-r--r--drivers/staging/iio/accel/lis3l02dq.h232
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c926
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c600
-rw-r--r--drivers/staging/iio/accel/sca3000.h298
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c1509
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c331
-rw-r--r--drivers/staging/iio/adc/Kconfig13
-rw-r--r--drivers/staging/iio/adc/Makefile8
-rw-r--r--drivers/staging/iio/adc/adc.h13
-rw-r--r--drivers/staging/iio/adc/max1363.h269
-rw-r--r--drivers/staging/iio/adc/max1363_core.c623
-rw-r--r--drivers/staging/iio/adc/max1363_ring.c241
-rw-r--r--drivers/staging/iio/chrdev.h118
-rw-r--r--drivers/staging/iio/iio.h411
-rw-r--r--drivers/staging/iio/industrialio-core.c851
-rw-r--r--drivers/staging/iio/industrialio-ring.c568
-rw-r--r--drivers/staging/iio/industrialio-trigger.c399
-rw-r--r--drivers/staging/iio/light/Kconfig13
-rw-r--r--drivers/staging/iio/light/Makefile5
-rw-r--r--drivers/staging/iio/light/light.h12
-rw-r--r--drivers/staging/iio/light/tsl2561.c276
-rw-r--r--drivers/staging/iio/ring_generic.h283
-rw-r--r--drivers/staging/iio/ring_hw.h22
-rw-r--r--drivers/staging/iio/ring_sw.c433
-rw-r--r--drivers/staging/iio/ring_sw.h189
-rw-r--r--drivers/staging/iio/sysfs.h293
-rw-r--r--drivers/staging/iio/trigger.h151
-rw-r--r--drivers/staging/iio/trigger/Kconfig21
-rw-r--r--drivers/staging/iio/trigger/Makefile5
-rw-r--r--drivers/staging/iio/trigger/iio-trig-gpio.c202
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c228
-rw-r--r--drivers/staging/iio/trigger_consumer.h45
46 files changed, 10920 insertions, 0 deletions
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
new file mode 100644
index 0000000..6916cd3
--- /dev/null
+++ b/drivers/staging/iio/Documentation/device.txt
@@ -0,0 +1,49 @@
+IIO Device drivers
+
+This is not intended to provide a comprehensive guide to writing an
+IIO device driver. For further information see the drivers within the
+subsystem.
+
+The crucial structure for device drivers in iio is iio_dev.
+
+First allocate one using:
+
+struct iio_dev *indio_dev = iio_allocate_device();
+
+Then fill in the following.
+
+indio_dev->dev.parent
+ the struct device associated with the underlying hardware.
+
+indio_dev->num_interrupt_lines
+ number of event triggering hardware lines the device has.
+
+indio_dev->event_attrs
+ attributes used to enable / disable hardware events - note the
+ attributes are embedded in iio_event_attr structures with an
+ associated iio_event_handler which may or may not be shared.
+ If num_interrupt_lines = 0, then no need to fill this in.
+
+indio_dev->attrs
+ general attributes such as polled access to device channels.
+
+indio_dev->dev_data
+ private device specific data.
+
+indio_dev->driver_module
+ typically set to THIS_MODULE. Used to specify ownership of some
+ iio created resources.
+
+indio_dev->modes
+ whether direct access and / or ring buffer access is supported.
+
+Once these are set up, a call to iio_device_register(indio_dev),
+will register the device with the iio core.
+
+Worth noting here is that, if a ring buffer is to be used, it can be
+allocated prior to registering the device with the iio-core, but must
+be registered afterwards (otherwise the whole parentage of devices
+gets confused)
+
+On remove iio_device_unregister(indio_dev) will remove the device from
+the core, and iio_free_device will clean up.
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
new file mode 100644
index 0000000..74d3124
--- /dev/null
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -0,0 +1,159 @@
+/* IIO - useful set of util functionality
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+/* Ring buffer fill-level event codes delivered through the event chrdev;
+ * these values must stay in sync with the kernel-side definitions.
+ * NOTE(review): this header has no include guard and defines non-static
+ * functions below - it can only be included from one translation unit. */
+#define IIO_EVENT_CODE_RING_50_FULL 200
+#define IIO_EVENT_CODE_RING_75_FULL 201
+#define IIO_EVENT_CODE_RING_100_FULL 202
+
+/* One event record as read from an IIO event chrdev.
+ * NOTE(review): assumed to mirror the kernel struct; any padding between
+ * the int 'id' and the 64-bit 'timestamp' is ABI dependent - confirm the
+ * layout matches what the kernel writes. */
+struct iio_event_data {
+ int id;
+ __s64 timestamp;
+};
+
+
+/**
+ * find_ring_subelement() - build the path of a ring-buffer sysfs subdir
+ * @directory: parent sysfs directory to scan (expected to end in '/')
+ * @subelement: directory-entry name prefix to match (e.g. "ring_buffer")
+ *
+ * Returns a heap-allocated string "<directory><entry>/" for the first
+ * matching entry; the caller owns and must free it.
+ *
+ * NOTE(review): returns 0 rather than NULL when no entry matches; the DIR
+ * handle is never closedir()'d on any path (resource leak); 'pos' is
+ * unused; malloc() result is used without a NULL check; the error message
+ * is garbled ("could not directory") - string left unchanged here.
+ */
+inline char *find_ring_subelement(const char *directory, const char *subelement)
+{
+ DIR *dp;
+ const struct dirent *ent;
+ int pos;
+ char temp[100];
+ char *returnstring;
+ dp = opendir(directory);
+ if (dp == NULL) {
+ printf("could not directory: %s\n", directory);
+ return NULL;
+ }
+ while (ent = readdir(dp), ent != NULL) {
+ if (strcmp(ent->d_name, ".") != 0 &&
+ strcmp(ent->d_name, "..") != 0) {
+ if (strncmp(ent->d_name, subelement, strlen(subelement)) == 0) {
+ int length = sprintf(temp, "%s%s%s", directory, ent->d_name, "/");
+ returnstring = malloc(length+1);
+ strncpy(returnstring, temp, length+1);
+ return returnstring;
+
+ }
+ }
+ }
+ return 0;
+}
+
+
+/**
+ * find_type_by_name() - locate a /sys/class/iio entry by its name file
+ * @name: contents expected in the entry's "name" (or "modalias") file
+ * @type: sysfs entry-name prefix to consider ("device", "trigger", ...)
+ *
+ * Returns a heap-allocated sysfs path (with trailing '/') for the first
+ * matching entry; the caller must free it.
+ *
+ * NOTE(review): 'cnt' is incremented without ever being initialized
+ * (undefined behaviour); the "modalias" sprintf passes an extra argument
+ * with no matching conversion in the format; when nothing matches,
+ * control falls off the end of this non-void function (undefined
+ * behaviour for the caller's NULL check); on the matching path
+ * 'nameFile' is returned without fclose() and 'dp' is never
+ * closedir()'d; 'Stat' and 'returnstring's initializer are unused.
+ */
+char *find_type_by_name(const char *name, const char *type)
+{
+ const char *iio_dir = "/sys/class/iio/";
+ const struct dirent *ent;
+ int cnt, pos, pos2;
+
+ FILE *nameFile;
+ DIR *dp;
+ char thisname[100];
+ char temp[100];
+
+ char *returnstring = NULL;
+ struct stat Stat;
+ pos = sprintf(temp, "%s", iio_dir);
+ dp = opendir(iio_dir);
+ if (dp == NULL) {
+ printf("No industrialio devices available");
+ return NULL;
+ }
+ while (ent = readdir(dp), ent != NULL) {
+ cnt++;
+ /*reject . and .. */
+ if (strcmp(ent->d_name, ".") != 0 &&
+ strcmp(ent->d_name, "..") != 0) {
+ /*make sure it isn't a trigger!*/
+ if (strncmp(ent->d_name, type, strlen(type)) == 0) {
+ /* build full path to new file */
+ pos2 = pos + sprintf(temp + pos, "%s/", ent->d_name);
+ sprintf(temp + pos2, "name");
+ printf("search location %s\n", temp);
+ nameFile = fopen(temp, "r");
+ if (!nameFile) {
+ sprintf(temp + pos2, "modalias", ent->d_name);
+ nameFile = fopen(temp, "r");
+ if (!nameFile) {
+ printf("Failed to find a name for device\n");
+ return NULL;
+ }
+ }
+ fscanf(nameFile, "%s", thisname);
+ if (strcmp(name, thisname) == 0) {
+ returnstring = malloc(strlen(temp) + 1);
+ sprintf(temp + pos2, "");
+ strcpy(returnstring, temp);
+ return returnstring;
+ }
+ fclose(nameFile);
+
+ }
+ }
+ }
+}
+
+/**
+ * write_sysfs_int() - write a decimal integer to a sysfs file
+ * @filename: file name relative to @basedir
+ * @basedir: sysfs directory containing the file (expected to end in '/')
+ * @val: value to write
+ *
+ * Returns 0 on success, -1 if the file could not be opened for writing.
+ * NOTE(review): 'ret' is unused and the fprintf()/fclose() results are
+ * not checked, so a failed sysfs store can be reported as success.
+ */
+int write_sysfs_int(char *filename, char *basedir, int val)
+{
+ int ret;
+ FILE *sysfsfp;
+ char temp[100];
+ sprintf(temp, "%s%s", basedir, filename);
+ sysfsfp = fopen(temp, "w");
+ if (sysfsfp == NULL)
+ return -1;
+ fprintf(sysfsfp, "%d", val);
+ fclose(sysfsfp);
+ return 0;
+}
+
+/**
+ * write_sysfs_string_and_verify() - string write, readback and verify
+ * @filename: name of file to write to
+ * @basedir: the sysfs directory in which the file is to be found
+ * @val: the string to write
+ *
+ * Returns 0 when the readback matches @val, -1 on open failure or
+ * mismatch.
+ *
+ * NOTE(review): the readback FILE* is never fclose()'d on either exit
+ * path (leak); the readback is scanned into 'temp', clobbering the path
+ * it holds; fscanf("%s") stops at whitespace, so multi-word values would
+ * spuriously fail the compare; 'ret' is unused.
+ **/
+int write_sysfs_string_and_verify(char *filename, char *basedir, char *val)
+{
+ int ret;
+ FILE *sysfsfp;
+ char temp[100];
+ sprintf(temp, "%s%s", basedir, filename);
+ sysfsfp = fopen(temp, "w");
+ if (sysfsfp == NULL)
+ return -1;
+ fprintf(sysfsfp, "%s", val);
+ fclose(sysfsfp);
+
+ sysfsfp = fopen(temp, "r");
+ if (sysfsfp == NULL)
+ return -1;
+ fscanf(sysfsfp, "%s", temp);
+ if (strcmp(temp, val) != 0) {
+ printf("Possible failure in string write %s to %s%s \n",
+ val,
+ basedir,
+ filename);
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * read_sysfs_posint() - read a positive integer from a sysfs file
+ * @filename: file name relative to @basedir
+ * @basedir: sysfs directory containing the file (expected to end in '/')
+ *
+ * Returns the value read, or -1 if the file could not be opened.
+ * NOTE(review): the fscanf() result is not checked - if the scan fails,
+ * 'ret' is returned uninitialized (undefined behaviour). -1 is also
+ * ambiguous with a stored value of -1, though the "posint" contract
+ * presumably excludes that.
+ */
+int read_sysfs_posint(char *filename, char *basedir)
+{
+ int ret;
+ FILE *sysfsfp;
+ char temp[100];
+ sprintf(temp, "%s%s", basedir, filename);
+ sysfsfp = fopen(temp, "r");
+ if (sysfsfp == NULL)
+ return -1;
+ fscanf(sysfsfp, "%d\n", &ret);
+ fclose(sysfsfp);
+ return ret;
+}
diff --git a/drivers/staging/iio/Documentation/lis3l02dqbuffersimple.c b/drivers/staging/iio/Documentation/lis3l02dqbuffersimple.c
new file mode 100644
index 0000000..2b5cfc5
--- /dev/null
+++ b/drivers/staging/iio/Documentation/lis3l02dqbuffersimple.c
@@ -0,0 +1,171 @@
+/* Industrialio test ring buffer with a lis3l02dq accelerometer
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Assumes suitable udev rules are used to create the dev nodes as named here.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/dir.h>
+
+#include <linux/types.h>
+#include <dirent.h>
+/* NOTE(review): the companion header added by this same patch is named
+ * "iio_utils.h", but it is included here as "iio_util.h" - one of the
+ * two names is wrong and this example will not compile as shipped.
+ * Also note <dirent.h> is included twice (harmless but redundant). */
+#include "iio_util.h"
+
+static const char *ring_access = "/dev/iio/lis3l02dq_ring_access";
+static const char *ring_event = "/dev/iio/lis3l02dq_ring_event";
+static const char *device_name = "lis3l02dq";
+static const char *trigger_name = "lis3l02dq-dev0";
+static int NumVals = 3;
+static int scan_ts = 1;
+static int RingLength = 128;
+
+/*
+ * Could get this from ring bps, but only after starting the ring
+ * which is a bit late for it to be useful
+ */
+/* Bytes occupied by one sample set for the given scan configuration.
+ * NOTE(review): the hard-coded 16 assumes exactly three 2-byte channels
+ * padded up to an 8-byte-aligned timestamp - it is only correct for this
+ * device's NumVals == 3 case, not for arbitrary 'numVals'. */
+int size_from_scanmode(int numVals, int timestamp)
+{
+ if (numVals && timestamp)
+ return 16;
+ else if (timestamp)
+ return 8;
+ else
+ return numVals*2;
+}
+
+/* Example: attach a trigger, enable the software ring buffer, then wait
+ * for fill-level events and print the captured sample sets.
+ * NOTE(review): the directory-name strings and 'data' leak on every
+ * early-return error path, and neither 'fp' nor 'fp_ev' is ever closed;
+ * acceptable for a short-lived example, but worth noting. */
+int main(int argc, char **argv)
+{
+ int i, j, k, toread;
+ FILE *fp_ev;
+ int fp;
+ char *data;
+ size_t read_size;
+ struct iio_event_data dat;
+
+ char *BaseDirectoryName,
+ *TriggerDirectoryName,
+ *RingBufferDirectoryName;
+
+ /* Resolve the sysfs directories for the device, its data-ready
+ * trigger, and the ring buffer beneath the device. */
+ BaseDirectoryName = find_type_by_name(device_name, "device");
+ if (BaseDirectoryName == NULL) {
+ printf("Failed to find the %s \n", device_name);
+ return -1;
+ }
+ TriggerDirectoryName = find_type_by_name(trigger_name, "trigger");
+ if (TriggerDirectoryName == NULL) {
+ printf("Failed to find the %s\n", trigger_name);
+ return -1;
+ }
+ RingBufferDirectoryName = find_ring_subelement(BaseDirectoryName,
+ "ring_buffer");
+ if (RingBufferDirectoryName == NULL) {
+ printf("Failed to find ring buffer\n");
+ return -1;
+ }
+
+ /* Associate the chosen trigger with this device. */
+ if (write_sysfs_string_and_verify("trigger/current_trigger",
+ BaseDirectoryName,
+ (char *)trigger_name) < 0) {
+ printf("Failed to write current_trigger file \n");
+ return -1;
+ }
+
+ /* Setup ring buffer parameters */
+ if (write_sysfs_int("length", RingBufferDirectoryName,
+ RingLength) < 0) {
+ printf("Failed to open the ring buffer length file \n");
+ return -1;
+ }
+
+ /* Enable the ring buffer */
+ /* NOTE(review): stray ';' after the if-block below (harmless). */
+ if (write_sysfs_int("ring_enable", RingBufferDirectoryName, 1) < 0) {
+ printf("Failed to open the ring buffer control file \n");
+ return -1;
+ };
+
+ data = malloc(size_from_scanmode(NumVals, scan_ts)*RingLength);
+ if (!data) {
+ printf("Could not allocate space for usespace data store\n");
+ return -1;
+ }
+
+ /* Attempt to open non blocking the access dev */
+ fp = open(ring_access, O_RDONLY | O_NONBLOCK);
+ if (fp == -1) { /*If it isn't there make the node */
+ printf("Failed to open %s\n", ring_access);
+ return -1;
+ }
+ /* Attempt to open the event access dev (blocking this time) */
+ fp_ev = fopen(ring_event, "rb");
+ if (fp_ev == NULL) {
+ printf("Failed to open %s\n", ring_event);
+ return -1;
+ }
+
+ /* Wait for events 10 times */
+ for (j = 0; j < 10; j++) {
+ /* Block until a fill-level event arrives.
+ * NOTE(review): fread's return and dat's validity are not
+ * checked before switching on dat.id. */
+ read_size = fread(&dat, 1, sizeof(struct iio_event_data),
+ fp_ev);
+ switch (dat.id) {
+ case IIO_EVENT_CODE_RING_100_FULL:
+ toread = RingLength;
+ break;
+ case IIO_EVENT_CODE_RING_75_FULL:
+ toread = RingLength*3/4;
+ break;
+ case IIO_EVENT_CODE_RING_50_FULL:
+ toread = RingLength/2;
+ break;
+ default:
+ printf("Unexpecteded event code\n");
+ continue;
+ }
+ /* Drain the indicated number of sample sets from the ring.
+ * NOTE(review): read() reports failure by returning -1 and
+ * setting errno (e.g. errno == EAGAIN); comparing the result
+ * to -EAGAIN is almost certainly a bug, made worse by
+ * read_size being an unsigned size_t. */
+ read_size = read(fp,
+ data,
+ toread*size_from_scanmode(NumVals, scan_ts));
+ if (read_size == -EAGAIN) {
+ printf("nothing available \n");
+ continue;
+ }
+
+ /* Print each sample set: NumVals 16-bit channels followed by
+ * a trailing 64-bit timestamp at the end of the set. */
+ for (i = 0;
+ i < read_size/size_from_scanmode(NumVals, scan_ts);
+ i++) {
+ for (k = 0; k < NumVals; k++) {
+ __s16 val = *(__s16 *)(&data[i*size_from_scanmode(NumVals, scan_ts)
+ + (k)*2]);
+ printf("%05d ", val);
+ }
+ printf(" %lld\n",
+ *(__s64 *)(&data[(i+1)*size_from_scanmode(NumVals, scan_ts)
+ - sizeof(__s64)]));
+ }
+ }
+
+ /* Stop the ring buffer */
+ if (write_sysfs_int("ring_enable", RingBufferDirectoryName, 0) < 0) {
+ printf("Failed to open the ring buffer control file \n");
+ return -1;
+ };
+
+ /* Disconnect from the trigger - writing something that doesn't exist.*/
+ write_sysfs_string_and_verify("trigger/current_trigger",
+ BaseDirectoryName, "NULL");
+ free(BaseDirectoryName);
+ free(TriggerDirectoryName);
+ free(RingBufferDirectoryName);
+ free(data);
+
+ return 0;
+}
diff --git a/drivers/staging/iio/Documentation/overview.txt b/drivers/staging/iio/Documentation/overview.txt
new file mode 100644
index 0000000..64584ad
--- /dev/null
+++ b/drivers/staging/iio/Documentation/overview.txt
@@ -0,0 +1,62 @@
+Overview of IIO
+
+The Industrial I/O subsystem is intended to provide support for devices
+that in some sense are analog to digital converters (ADCs). As many
+actual devices combine some ADCs with digital to analog converters
+(DACs) the intention is to add that functionality at a future date
+(hence the name).
+
+The aim is to fill the gap between the somewhat similar hwmon and
+input subsystems. Hwmon is very much directed at low sample rate
+sensors used in applications such as fan speed control and temperature
+measurement. Input is, as its name suggests, focused on input
+devices. In some cases there is considerable overlap between these and
+IIO.
+
+A typical device falling into this category would be connected via SPI
+or I2C.
+
+Functionality of IIO
+
+* Basic device registration and handling. This is very similar to
+hwmon with simple polled access to device channels via sysfs.
+
+* Event chrdevs. These are similar to input in that they provide a
+route to user space for hardware triggered events. Such events include
+threshold detectors, free-fall detectors and more complex action
+detection. The events themselves are currently very simple with
+merely an event code and a timestamp. Any data associated with the
+event must be accessed via polling. Note a given device may have one
+or more event channel. These events are turned on or off (if possible)
+via sysfs interfaces.
+
+* Hardware ring buffer support. Some recent sensors have included
+fifo / ring buffers on the sensor chip. These greatly reduce the load
+on the host CPU by buffering relatively large numbers of data samples
+based on an internal sampling clock. Examples include VTI SCA3000
+series and Analog Device ADXL345 accelerometers. Each ring buffer
+typically has an event chrdev (similar to the more general ones above)
+to pass on events such as buffer 50% full and an access chrdev via
+which the raw data itself may be read back.
+
+* Trigger and software ring buffer support. In many data analysis
+applications it is useful to be able to capture data based on some
+external signal (trigger). These triggers might be a data ready
+signal, a gpio line connected to some external system or an on
+processor periodic interrupt. A single trigger may initialize data
+capture or reading from a number of sensors. These triggers are
+used in iio to fill software ring buffers acting in a very similar
+fashion to the hardware buffers described above.
+
+Other documentation:
+
+userspace.txt - overview of ring buffer reading from userspace
+
+device.txt - elements of a typical device driver.
+
+trigger.txt - elements of a typical trigger driver.
+
+ring.txt - additional elements required for ring buffer support
+
+
+
diff --git a/drivers/staging/iio/Documentation/ring.txt b/drivers/staging/iio/Documentation/ring.txt
new file mode 100644
index 0000000..d2ca683
--- /dev/null
+++ b/drivers/staging/iio/Documentation/ring.txt
@@ -0,0 +1,61 @@
+Ring buffer support within IIO
+
+This document is intended as a general overview of the functionality
+a ring buffer may supply and how it is specified within IIO. For more
+specific information on a given ring buffer implementation, see the
+comments in the source code. Note that the intention is to allow
+some drivers to specify ring buffers choice at probe or runtime, but
+for now the selection is hard coded within a given driver.
+
+A given ring buffer implementation typically embeds a struct
+iio_ring_buffer and it is a pointer to this that is provided to the
+IIO core. Access to the embedding structure is typically done via
+container_of functions.
+
+struct iio_ring_buffer contains 4 function pointers
+(preenable, postenable, predisable, postdisable).
+These are used to perform implementation specific steps on either side
+of the core changing its current mode to indicate that the ring buffer
+is enabled or disabled (along with enabling triggering etc as appropriate).
+
+Also in struct iio_ring_buffer is a struct iio_ring_access_funcs.
+The function pointers within here are used to allow the core to handle
+as much ring buffer functionality as possible. Note almost all of these
+are optional.
+
+mark_in_use, unmark_in_use
+ Basically indicate that no changes should be made to the ring
+ buffer state that will affect the form of the data being captured
+ (e.g. scan elements or length)
+
+store_to
+ If possible, push data to ring buffer.
+
+read_last
+ If possible get the most recent entry from the buffer (without removal).
+ This provides polling like functionality whilst the ring buffering is in
+ use without a separate read from the device.
+
+rip_lots
+ The primary ring buffer reading function. Note that it may well not return
+ as much data as requested. The deadoffset is used to indicate that some
+ initial data in the data array is not guaranteed to be valid.
+
+mark_param_changed
+ Used to indicate that something has changed. Used in conjunction with
+request_update
+ If parameters have changed that require reinitialization or configuration of
+ the ring buffer this will trigger it.
+
+get_bpd, set_bpd
+ Get/set the number of bytes for a given reading (single element, not sample set)
+ The value of bps (bytes per set) is created from a combination of this and the
+ enabled scan elements.
+
+get_length / set_length
+ Get/set the number of sample sets that may be held by the buffer.
+
+is_enabled
+ Query if ring buffer is in use
+enable
+ Start the ring buffer.
diff --git a/drivers/staging/iio/Documentation/trigger.txt b/drivers/staging/iio/Documentation/trigger.txt
new file mode 100644
index 0000000..650157f
--- /dev/null
+++ b/drivers/staging/iio/Documentation/trigger.txt
@@ -0,0 +1,38 @@
+IIO trigger drivers.
+
+Many triggers are provided by hardware that will also be registered as
+an IIO device. Whilst this can create device specific complexities
+such triggers are registered with the core in the same way as
+stand-alone triggers.
+
+struct iio_trig *trig = iio_allocate_trigger();
+
+allocates a trigger structure. The key elements to then fill in within
+a driver are:
+
+trig->control_attrs
+ Any sysfs attributes needed to control parameters of the trigger
+
+trig->private_data
+ Device specific private data.
+
+trig->owner
+ Typically set to THIS_MODULE. Used to ensure correct
+ ownership of core allocated resources.
+
+trig->name
+ A unique name for the trigger.
+
+When these have been set call:
+
+iio_trigger_register(trig);
+
+to register the trigger with the core, making it available to trigger
+consumers.
+
+
+Trigger Consumers
+
+Currently triggers are only used for the filling of software ring
+buffers and as such any device supporting INDIO_RING_TRIGGERED has the
+consumer interface automatically created.
diff --git a/drivers/staging/iio/Documentation/userspace.txt b/drivers/staging/iio/Documentation/userspace.txt
new file mode 100644
index 0000000..661015a
--- /dev/null
+++ b/drivers/staging/iio/Documentation/userspace.txt
@@ -0,0 +1,60 @@
+Userspace access to IIO
+
+Example, ST Microelectronics LIS3L02DQ accelerometer.
+
+Typical sysfs entries (pruned for clarity)
+
+/sys/class/iio
+ device0 - iio_dev related elements
+ name - driver specific identifier (here lis3l02dq)
+ accel_x - polled (or from ring) raw readout of acceleration
+ accel_x_gain - hardware gain (calibration)
+ accel_x_offset - hardware offset (calibration)
+ available_sampling_frequency
+
+ available_sampling_frequency - what options are there
+ sampling_frequency - control of internal sampling frequency
+ scan_elements - controls which channels will be stored in the ring buffer
+ scan_en_accel_x
+ scan_en_accel_y
+ scan_en_timestamp
+ device - link to underlying hardware device
+ uevent - udev related element
+
+ thresh - unified threshold used for detection on all axis
+ event_line0_sources - which events are enabled
+ accel_x_high - enable x axis high threshold event
+ accel_x_low - enable x axis low threshold event
+
+ event_line0 - event interface
+ dev - major:minor for the chrdev (note major allocation dynamic)
+ trigger - consumer attachment
+ current_trigger - name based association with a trigger
+ ring_buffer0 - ring buffer interface
+ bps - bytes per sample (read only), dependent on scan element selection
+ length - (rw) specify length of software ring buffer (typically ro in hw case)
+ ring_enable - turn the ring on. If it is the first to be enabled attached to this
+ trigger will also enable the trigger.
+ ring_access0
+ dev - major:minor for ring buffer access chrdev
+ ring_event_line0
+ dev - major:minor for ring buffer event chrdev
+
+ trigger0 - data ready trigger elements
+ name - unique name of trigger
+
+Udev will create the following entries under /dev by default:
+
+ring_access0 - ring access chrdev
+ring_event0 - ring event chrdev
+event_line0 - general event chrdev.
+
+For the example code we assume the following rules have been used to ensure
+unique and consistent naming of these for the lis3l02dq in question:
+
+KERNEL="ring_event_line*", ID="spi1.0", DRIVER="lis3l02dq", NAME="iio/lis3l02dq_ring_event"
+KERNEL="event_line*", ID="spi1.0", DRIVER="lis3l02dq", NAME="iio/lis3l02dq_event"
+KERNEL="ring_access*", ID="spi1.0", DRIVER="lis3l02dq", NAME="iio/lis3l02dq_ring_access"
+
+The files, lis3l02dqbuffersimple.c and iio_utils.h in this directory provide an example
+of how to use the ring buffer and event interfaces.
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
new file mode 100644
index 0000000..beb99a5
--- /dev/null
+++ b/drivers/staging/iio/Kconfig
@@ -0,0 +1,47 @@
+#
+# Industrial I/O subsystem configuration
+#
+
+menuconfig IIO
+ tristate "Industrial I/O support"
+ ---help---
+ The industrial I/O subsystem provides a unified framework for
+ drivers for many different types of embedded sensors using a
+ number of different physical interfaces (i2c, spi etc). See
+ Documentation/industrialio for more information.
+if IIO
+
+config IIO_RING_BUFFER
+ bool "Enable ring buffer support within IIO"
+ help
+ Provide core support for various ring buffer based data
+ acquisition methods.
+
+if IIO_RING_BUFFER
+
+config IIO_SW_RING
+ tristate "Industrial I/O lock free software ring"
+ help
+ example software ring buffer implementation. The design aim
+ of this particular realization was to minimize write locking
+ with the intention that some devices would be able to write
+ in interrupt context.
+
+endif # IIO_RING_BUFFER
+
+config IIO_TRIGGER
+ boolean "Enable triggered sampling support"
+ help
+ Provides IIO core support for triggers. Currently these
+ are used to initialize capture of samples to push into
+ ring buffers. The triggers are effectively a 'capture
+ data now' interrupt.
+
+
+source "drivers/staging/iio/accel/Kconfig"
+source "drivers/staging/iio/adc/Kconfig"
+source "drivers/staging/iio/light/Kconfig"
+
+source "drivers/staging/iio/trigger/Kconfig"
+
+endif # IIO
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
new file mode 100644
index 0000000..7ec0218
--- /dev/null
+++ b/drivers/staging/iio/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for the industrial I/O core.
+#
+
+obj-$(CONFIG_IIO) += industrialio.o
+industrialio-y := industrialio-core.o
+industrialio-$(CONFIG_IIO_RING_BUFFER) += industrialio-ring.o
+industrialio-$(CONFIG_IIO_TRIGGER) += industrialio-trigger.o
+
+obj-$(CONFIG_IIO_SW_RING) += ring_sw.o
+
+obj-y += accel/
+obj-y += adc/
+obj-y += light/
+
+obj-y += trigger/ \ No newline at end of file
diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO
new file mode 100644
index 0000000..15da0c2
--- /dev/null
+++ b/drivers/staging/iio/TODO
@@ -0,0 +1,69 @@
+2009 8/18
+
+Core:
+1) Get reviews
+2) Additional testing
+3) Ensure all desirable features present by adding more devices.
+ Major changes not expected except in response to comments
+
+Max1363 core:
+1) Possibly add sysfs exports of constant useful to userspace.
+Would be nice
+2) Support hardware generated interrupts
+3) Expand device set. Lots of other maxim adc's have very
+ similar interfaces.
+
+TSL2561
+Would be nice
+1) Open question of userspace vs kernel space balance when
+converting to useful light measurements from device ones.
+2) Add sysfs elements necessary to allow device agnostic
+unit conversion.
+
+LIS3L02DQ core
+
+LIS3L02DQ ring
+
+KXSD9
+Currently minimal driver, would be nice to add:
+1) Support for all chip generated interrupts (events),
+basically get support up to level of lis3l02dq driver.
+
+Ring buffer core
+
+SCA3000
+Would be nice
+1) Testing on devices other than sca3000-e05
+
+Trigger core support
+1) Discussion of approach. Is it general enough?
+
+Ring Buffer:
+1) Discussion of approach.
+There are probably better ways of doing this. The
+intention is to allow for more than one software ring
+buffer implementation as different users will have
+different requirements. This one suits mid range
+frequencies (100Hz - 4kHz).
+2) Lots of testing
+
+Periodic Timer trigger
+1) Move to a more general hardware periodic timer request
+subsystem. Current approach is abusing purpose of RTC.
+Initial discussions have taken place, but no actual code
+is in place as yet. This topic will be reopened on lkml
+shortly. I don't really envision this patch being merged
+in anything like its current form.
+
+GPIO trigger
+1) Add control over the type of interrupt etc. This will
+necessitate a header that is also visible from arch board
+files. (avoided at the moment to keep the driver set
+contained in staging).
+
+Documentation
+1) Lots of cleanup and expansion.
+2) Some devices require individual docs.
+
+Contact: Jonathan Cameron <jic23@cam.ac.uk>.
+Mailing list: LKML.
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
new file mode 100644
index 0000000..fef3da4
--- /dev/null
+++ b/drivers/staging/iio/accel/Kconfig
@@ -0,0 +1,27 @@
+#
+# Accelerometer drivers
+#
+comment "Accelerometers"
+
+config KXSD9
+ tristate "Kionix KXSD9 Accelerometer Driver"
+ depends on SPI
+ help
+ Say yes here to build support for the Kionix KXSD9 accelerometer.
+ Currently this only supports the device via an SPI interface.
+
+config LIS3L02DQ
+ tristate "ST Microelectronics LIS3L02DQ Accelerometer Driver"
+ depends on SPI
+ help
+ Say yes here to build SPI support for the ST microelectronics
+ accelerometer. The driver supplies direct access via sysfs files
+ and an event interface via a character device.
+
+config SCA3000
+ depends on IIO_RING_BUFFER
+ depends on SPI
+ tristate "VTI SCA3000 series accelerometers"
+ help
+ Say yes here to build support for the VTI SCA3000 series of SPI
+ accelerometers. These devices use a hardware ring buffer. \ No newline at end of file
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
new file mode 100644
index 0000000..d5335f9
--- /dev/null
+++ b/drivers/staging/iio/accel/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for industrial I/O accelerometer drivers
+#
+obj-$(CONFIG_KXSD9) += kxsd9.o
+
+lis3l02dq-y := lis3l02dq_core.o
+lis3l02dq-$(CONFIG_IIO_RING_BUFFER) += lis3l02dq_ring.o
+obj-$(CONFIG_LIS3L02DQ) += lis3l02dq.o
+
+sca3000-y := sca3000_core.o sca3000_ring.o
+obj-$(CONFIG_SCA3000) += sca3000.o \ No newline at end of file
diff --git a/drivers/staging/iio/accel/accel.h b/drivers/staging/iio/accel/accel.h
new file mode 100644
index 0000000..811fa05
--- /dev/null
+++ b/drivers/staging/iio/accel/accel.h
@@ -0,0 +1,167 @@
+
+#include "../sysfs.h"
+
+/* Accelerometer types of attribute */
+
+#define IIO_DEV_ATTR_ACCEL_X_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(accel_x_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_Y_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(accel_y_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_Z_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(accel_z_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_X_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(accel_x_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_Y_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(accel_y_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_Z_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(accel_z_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_X(_show, _addr) \
+ IIO_DEVICE_ATTR(accel_x, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_Y(_show, _addr) \
+ IIO_DEVICE_ATTR(accel_y, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_Z(_show, _addr) \
+ IIO_DEVICE_ATTR(accel_z, S_IRUGO, _show, NULL, _addr)
+
+/* Thresholds are somewhat chip dependent - may need quite a few defs here */
+/* For unified thresholds (shared across all directions) */
+
+/**
+ * IIO_DEV_ATTR_ACCEL_THRESH: unified threshold
+ * @_mode: read/write
+ * @_show: read detector threshold value
+ * @_store: write detector threshold value
+ * @_addr: driver specific data, typically a register address
+ *
+ * This one is for cases where a single threshold covers all directions
+ **/
+#define IIO_DEV_ATTR_ACCEL_THRESH(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(thresh, _mode, _show, _store, _addr)
+
+/**
+ * IIO_DEV_ATTR_ACCEL_THRESH_X: independent direction threshold, x axis
+ * @_mode: readable / writable
+ * @_show: read x axis detector threshold value
+ * @_store: write x axis detector threshold value
+ * @_addr: device driver dependent, typically a register address
+ **/
+#define IIO_DEV_ATTR_ACCEL_THRESH_X(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(thresh_accel_x, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_THRESH_Y(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(thresh_accel_y, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_THRESH_Z(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(thresh_accel_z, _mode, _show, _store, _addr)
+
+
+/**
+ * IIO_EVENT_ATTR_ACCEL_X_HIGH: threshold event, x acceleration
+ * @_show: read x acceleration high threshold
+ * @_store: write x acceleration high threshold
+ * @_mask: device dependent, typically a bit mask
+ * @_handler: the iio_handler associated with this attribute
+ **/
+#define IIO_EVENT_ATTR_ACCEL_X_HIGH(_show, _store, _mask, _handler) \
+ IIO_EVENT_ATTR(accel_x_high, _show, _store, _mask, _handler)
+
+/**
+ * IIO_EVENT_ATTR_ACCEL_X_HIGH_SH: threshold event, x accel high, shared handler
+ * @_evlist: event list used to share the handler
+ * @_show: attribute read
+ * @_store: attribute write
+ * @_mask: driver specific data, typically a bit mask
+ **/
+#define IIO_EVENT_ATTR_ACCEL_X_HIGH_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(accel_x_high, _evlist, _show, _store, _mask)
+
+/**
+ * IIO_EVENT_CODE_ACCEL_X_HIGH - event code for x axis high accel threshold
+ **/
+#define IIO_EVENT_CODE_ACCEL_X_HIGH IIO_EVENT_CODE_ACCEL_BASE
+
+#define IIO_EVENT_ATTR_ACCEL_Y_HIGH(_show, _store, _mask, _handler) \
+ IIO_EVENT_ATTR(accel_y_high, _show, _store, _mask, _handler)
+
+#define IIO_EVENT_ATTR_ACCEL_Y_HIGH_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(accel_y_high, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_CODE_ACCEL_Y_HIGH (IIO_EVENT_CODE_ACCEL_BASE + 1)
+
+#define IIO_EVENT_ATTR_ACCEL_Z_HIGH(_show, _store, _mask, _handler) \
+ IIO_EVENT_ATTR(accel_z_high, _show, _store, _mask, _handler)
+
+#define IIO_EVENT_ATTR_ACCEL_Z_HIGH_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(accel_z_high, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_CODE_ACCEL_Z_HIGH (IIO_EVENT_CODE_ACCEL_BASE + 2)
+
+#define IIO_EVENT_ATTR_ACCEL_X_LOW(_show, _store, _mask, _handler) \
+ IIO_EVENT_ATTR(accel_x_low, _show, _store, _mask, _handler)
+
+#define IIO_EVENT_ATTR_ACCEL_X_LOW_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(accel_x_low, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_CODE_ACCEL_X_LOW (IIO_EVENT_CODE_ACCEL_BASE + 3)
+
+#define IIO_EVENT_ATTR_ACCEL_Y_LOW(_show, _store, _mask, _handler) \
+ IIO_EVENT_ATTR(accel_y_low, _show, _store, _mask, _handler)
+
+#define IIO_EVENT_ATTR_ACCEL_Y_LOW_SH(_evlist, _show, _store, _mask)\
+ IIO_EVENT_ATTR_SH(accel_y_low, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_CODE_ACCEL_Y_LOW (IIO_EVENT_CODE_ACCEL_BASE + 4)
+
+#define IIO_EVENT_ATTR_ACCEL_Z_LOW(_show, _store, _mask, _handler) \
+ IIO_EVENT_ATTR(accel_z_low, _show, _store, _mask, _handler)
+
+#define IIO_EVENT_ATTR_ACCEL_Z_LOW_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(accel_z_low, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_CODE_ACCEL_Z_LOW (IIO_EVENT_CODE_ACCEL_BASE + 5)
+
+#define IIO_EVENT_ATTR_FREE_FALL_DETECT(_show, _store, _mask, _handler) \
+ IIO_EVENT_ATTR(free_fall, _show, _store, _mask, _handler)
+
+#define IIO_EVENT_ATTR_FREE_FALL_DETECT_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(free_fall, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_CODE_FREE_FALL (IIO_EVENT_CODE_ACCEL_BASE + 6)
+
+
+#define IIO_EVENT_ATTR_ACCEL_X_ROC_HIGH_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(accel_x_roc_high, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_CODE_ACCEL_X_ROC_HIGH (IIO_EVENT_CODE_ACCEL_BASE + 10)
+
+#define IIO_EVENT_ATTR_ACCEL_X_ROC_LOW_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(accel_x_roc_low, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_CODE_ACCEL_X_ROC_LOW (IIO_EVENT_CODE_ACCEL_BASE + 11)
+
+#define IIO_EVENT_ATTR_ACCEL_Y_ROC_HIGH_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(accel_y_roc_high, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_CODE_ACCEL_Y_ROC_HIGH (IIO_EVENT_CODE_ACCEL_BASE + 12)
+
+#define IIO_EVENT_ATTR_ACCEL_Y_ROC_LOW_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(accel_y_roc_low, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_CODE_ACCEL_Y_ROC_LOW (IIO_EVENT_CODE_ACCEL_BASE + 13)
+
+#define IIO_EVENT_ATTR_ACCEL_Z_ROC_HIGH_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(accel_z_roc_high, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_CODE_ACCEL_Z_ROC_HIGH (IIO_EVENT_CODE_ACCEL_BASE + 14)
+
+#define IIO_EVENT_ATTR_ACCEL_Z_ROC_LOW_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(accel_z_roc_low, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_CODE_ACCEL_Z_ROC_LOW (IIO_EVENT_CODE_ACCEL_BASE + 15)
diff --git a/drivers/staging/iio/accel/kxsd9.c b/drivers/staging/iio/accel/kxsd9.c
new file mode 100644
index 0000000..33d16b6
--- /dev/null
+++ b/drivers/staging/iio/accel/kxsd9.c
@@ -0,0 +1,395 @@
+/*
+ * kxsd9.c simple support for the Kionix KXSD9 3D
+ * accelerometer.
+ *
+ * Copyright (c) 2008-2009 Jonathan Cameron <jic23@cam.ac.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The i2c interface is very similar, so shouldn't be a problem once
+ * I have a suitable wire made up.
+ *
+ * TODO: Support the motion detector
+ * Uses register address incrementing so could have a
+ * heavily optimized ring buffer access function.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/rtc.h>
+#include <linux/delay.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../adc/adc.h"
+#include "accel.h"
+
+#define KXSD9_REG_X 0x00
+#define KXSD9_REG_Y 0x02
+#define KXSD9_REG_Z 0x04
+#define KXSD9_REG_AUX 0x06
+#define KXSD9_REG_RESET 0x0a
+#define KXSD9_REG_CTRL_C 0x0c
+
+#define KXSD9_FS_8 0x00
+#define KXSD9_FS_6 0x01
+#define KXSD9_FS_4 0x02
+#define KXSD9_FS_2 0x03
+#define KXSD9_FS_MASK 0x03
+
+#define KXSD9_REG_CTRL_B 0x0d
+#define KXSD9_REG_CTRL_A 0x0e
+
+#define KXSD9_READ(a) (0x80 | (a))
+#define KXSD9_WRITE(a) (a)
+
+#define IIO_DEV_ATTR_ACCEL_SET_RANGE(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(accel_range, _mode, _show, _store, 0)
+
+#define KXSD9_STATE_RX_SIZE 2
+#define KXSD9_STATE_TX_SIZE 4
+/**
+ * struct kxsd9_state - device related storage
+ * @buf_lock: protect the rx and tx buffers.
+ * @indio_dev: associated industrial IO device
+ * @us: spi device
+ * @rx: single rx buffer storage
+ * @tx: single tx buffer storage
+ **/
+struct kxsd9_state {
+ struct mutex buf_lock;
+ struct iio_dev *indio_dev;
+ struct spi_device *us;
+ u8 *rx;
+ u8 *tx;
+};
+
+/* This may want to move to milli g to allow for non-integer ranges */
+static ssize_t kxsd9_read_accel_range(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ ssize_t len = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct kxsd9_state *st = indio_dev->dev_data;
+ struct spi_transfer xfer = {
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ };
+ struct spi_message msg;
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = KXSD9_READ(KXSD9_REG_CTRL_C);
+ st->tx[1] = 0;
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ goto error_ret;
+
+ switch (st->rx[1] & KXSD9_FS_MASK) {
+ case KXSD9_FS_8:
+ len += sprintf(buf, "8\n");
+ break;
+ case KXSD9_FS_6:
+ len += sprintf(buf, "6\n");
+ break;
+ case KXSD9_FS_4:
+ len += sprintf(buf, "4\n");
+ break;
+ case KXSD9_FS_2:
+ len += sprintf(buf, "2\n");
+ break;
+ }
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+
+ return ret ? ret : len;
+}
+static ssize_t kxsd9_write_accel_range(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ long readin;
+ struct spi_message msg;
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct kxsd9_state *st = indio_dev->dev_data;
+ u8 val;
+ struct spi_transfer xfers[] = {
+ {
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ }, {
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .tx_buf = st->tx,
+ },
+ };
+
+ ret = strict_strtol(buf, 10, &readin);
+ if (ret)
+ return ret;
+ switch (readin) {
+ case 8:
+ val = KXSD9_FS_8;
+ break;
+ case 6:
+ val = KXSD9_FS_6;
+ break;
+ case 4:
+ val = KXSD9_FS_4;
+ break;
+ case 2:
+ val = KXSD9_FS_2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = KXSD9_READ(KXSD9_REG_CTRL_C);
+ st->tx[1] = 0;
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ goto error_ret;
+ st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
+ st->tx[1] = (st->rx[1] & ~KXSD9_FS_MASK) | val;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret ? ret : len;
+}
+static ssize_t kxsd9_read_accel(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct spi_message msg;
+ int ret;
+ ssize_t len = 0;
+ u16 val;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct kxsd9_state *st = indio_dev->dev_data;
+ struct spi_transfer xfers[] = {
+ {
+ .bits_per_word = 8,
+ .len = 1,
+ .cs_change = 0,
+ .delay_usecs = 200,
+ .tx_buf = st->tx,
+ }, {
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .rx_buf = st->rx,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = KXSD9_READ(this_attr->address);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ goto error_ret;
+ val = (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
+ len = sprintf(buf, "%d\n", val);
+error_ret:
+ mutex_unlock(&st->buf_lock);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEV_ATTR_ACCEL_X(kxsd9_read_accel, KXSD9_REG_X);
+static IIO_DEV_ATTR_ACCEL_Y(kxsd9_read_accel, KXSD9_REG_Y);
+static IIO_DEV_ATTR_ACCEL_Z(kxsd9_read_accel, KXSD9_REG_Z);
+static IIO_DEV_ATTR_ADC(0, kxsd9_read_accel, KXSD9_REG_AUX);
+static IIO_DEV_ATTR_ACCEL_SET_RANGE(S_IRUGO | S_IWUSR,
+ kxsd9_read_accel_range,
+ kxsd9_write_accel_range);
+
+static struct attribute *kxsd9_attributes[] = {
+ &iio_dev_attr_accel_x.dev_attr.attr,
+ &iio_dev_attr_accel_y.dev_attr.attr,
+ &iio_dev_attr_accel_z.dev_attr.attr,
+ &iio_dev_attr_adc_0.dev_attr.attr,
+ &iio_dev_attr_accel_range.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group kxsd9_attribute_group = {
+ .attrs = kxsd9_attributes,
+};
+
+static int __devinit kxsd9_power_up(struct spi_device *spi)
+{
+	int ret;
+	struct spi_transfer xfers[2] = {
+		{
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+		}, {
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+		},
+	};
+	struct spi_message msg;
+	u8 *tx2;
+	u8 *tx = kmalloc(2, GFP_KERNEL);
+
+	if (tx == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	tx2 = kmalloc(2, GFP_KERNEL);
+	if (tx2 == NULL) {
+		ret = -ENOMEM;
+		goto error_free_tx;
+	}
+	tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_B);
+	tx[1] = 0x40;
+
+	tx2[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
+	tx2[1] = 0x9b;
+
+	xfers[0].tx_buf = tx;
+	xfers[1].tx_buf = tx2;
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfers[0], &msg);
+	spi_message_add_tail(&xfers[1], &msg);
+	ret = spi_sync(spi, &msg);
+
+	kfree(tx2);
+error_free_tx:
+	kfree(tx);
+error_ret:
+	return ret;
+
+}
+
+static int __devinit kxsd9_probe(struct spi_device *spi)
+{
+
+	struct kxsd9_state *st;
+	int ret = 0;
+
+	st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	spi_set_drvdata(spi, st);
+
+	st->rx = kmalloc(sizeof(*st->rx)*KXSD9_STATE_RX_SIZE,
+			 GFP_KERNEL);
+	if (st->rx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->tx = kmalloc(sizeof(*st->tx)*KXSD9_STATE_TX_SIZE,
+			 GFP_KERNEL);
+	if (st->tx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_rx;
+	}
+
+	st->us = spi;
+	mutex_init(&st->buf_lock);
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_tx;
+	}
+	st->indio_dev->dev.parent = &spi->dev;
+	/* no event support yet */
+	st->indio_dev->num_interrupt_lines = 0;
+	st->indio_dev->event_attrs = NULL;
+
+	st->indio_dev->attrs = &kxsd9_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	/* Configure the bus and power up the chip before registration
+	 * so userspace never sees an unconfigured device. */
+	spi->mode = SPI_MODE_0;
+	spi_setup(spi);
+	kxsd9_power_up(spi);
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	return 0;
+
+error_free_dev:
+	iio_free_device(st->indio_dev);
+error_free_tx:
+	kfree(st->tx);
+error_free_rx:
+	kfree(st->rx);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int __devexit kxsd9_remove(struct spi_device *spi)
+{
+ struct kxsd9_state *st = spi_get_drvdata(spi);
+
+ iio_device_unregister(st->indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver kxsd9_driver = {
+ .driver = {
+ .name = "kxsd9",
+ .owner = THIS_MODULE,
+ },
+ .probe = kxsd9_probe,
+ .remove = __devexit_p(kxsd9_remove),
+};
+
+static __init int kxsd9_spi_init(void)
+{
+ return spi_register_driver(&kxsd9_driver);
+}
+module_init(kxsd9_spi_init);
+
+static __exit void kxsd9_spi_exit(void)
+{
+ spi_unregister_driver(&kxsd9_driver);
+}
+module_exit(kxsd9_spi_exit);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
+MODULE_DESCRIPTION("Kionix KXSD9 SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
new file mode 100644
index 0000000..91a5375
--- /dev/null
+++ b/drivers/staging/iio/accel/lis3l02dq.h
@@ -0,0 +1,232 @@
+/*
+ * LISL02DQ.h -- support STMicroelectronics LISD02DQ
+ * 3d 2g Linear Accelerometers via SPI
+ *
+ * Copyright (c) 2007 Jonathan Cameron <jic23@cam.ac.uk>
+ *
+ * Loosely based upon tle62x0.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SPI_LIS3L02DQ_H_
+#define SPI_LIS3L02DQ_H_
+#define LIS3L02DQ_READ_REG(a) ((a) | 0x80)
+#define LIS3L02DQ_WRITE_REG(a) a
+
+/* Calibration parameters */
+#define LIS3L02DQ_REG_OFFSET_X_ADDR 0x16
+#define LIS3L02DQ_REG_OFFSET_Y_ADDR 0x17
+#define LIS3L02DQ_REG_OFFSET_Z_ADDR 0x18
+
+#define LIS3L02DQ_REG_GAIN_X_ADDR 0x19
+#define LIS3L02DQ_REG_GAIN_Y_ADDR 0x1A
+#define LIS3L02DQ_REG_GAIN_Z_ADDR 0x1B
+
+/* Control Register (1 of 2) */
+#define LIS3L02DQ_REG_CTRL_1_ADDR 0x20
+/* Power ctrl - either bit set corresponds to on*/
+#define LIS3L02DQ_REG_CTRL_1_PD_ON 0xC0
+
+/* Decimation Factor */
+#define LIS3L02DQ_DEC_MASK 0x30
+#define LIS3L02DQ_REG_CTRL_1_DF_128 0x00
+#define LIS3L02DQ_REG_CTRL_1_DF_64 0x10
+#define LIS3L02DQ_REG_CTRL_1_DF_32 0x20
+#define LIS3L02DQ_REG_CTRL_1_DF_8 (0x10 | 0x20)
+
+/* Self Test Enable */
+#define LIS3L02DQ_REG_CTRL_1_SELF_TEST_ON 0x08
+
+/* Axes enable ctrls */
+#define LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE 0x04
+#define LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE 0x02
+#define LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE 0x01
+
+/* Control Register (2 of 2) */
+#define LIS3L02DQ_REG_CTRL_2_ADDR 0x21
+
+/* Block Data Update only after MSB and LSB read */
+#define LIS3L02DQ_REG_CTRL_2_BLOCK_UPDATE 0x40
+
+/* Set to big endian output */
+#define LIS3L02DQ_REG_CTRL_2_BIG_ENDIAN 0x20
+
+/* Reboot memory content */
+#define LIS3L02DQ_REG_CTRL_2_REBOOT_MEMORY 0x10
+
+/* Interrupt Enable - applies data ready to the RDY pad */
+#define LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT 0x08
+
+/* Enable Data Ready Generation - relationship with previous unclear in docs */
+#define LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION 0x04
+
+/* SPI 3 wire mode */
+#define LIS3L02DQ_REG_CTRL_2_THREE_WIRE_SPI_MODE 0x02
+
+/* Data alignment, default is 12 bit right justified
+ * - option for 16 bit left justified */
+#define LIS3L02DQ_REG_CTRL_2_DATA_ALIGNMENT_16_BIT_LEFT_JUSTIFIED 0x01
+
+/* Interrupt related stuff */
+#define LIS3L02DQ_REG_WAKE_UP_CFG_ADDR 0x23
+
+/* Switch from OR combination of conditions to AND */
+#define LIS3L02DQ_REG_WAKE_UP_CFG_BOOLEAN_AND 0x80
+
+/* Latch interrupt request,
+ * if on ack must be given by reading the ack register */
+#define LIS3L02DQ_REG_WAKE_UP_CFG_LATCH_SRC 0x40
+
+/* Z Interrupt on High (above threshold) */
+#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Z_HIGH 0x20
+/* Z Interrupt on Low */
+#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Z_LOW 0x10
+/* Y Interrupt on High */
+#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Y_HIGH 0x08
+/* Y Interrupt on Low */
+#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Y_LOW 0x04
+/* X Interrupt on High */
+#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_HIGH 0x02
+/* X Interrupt on Low */
+#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_LOW 0x01
+
+/* Register that gives description of what caused interrupt
+ * - latched if set in CFG_ADDRESS */
+#define LIS3L02DQ_REG_WAKE_UP_SRC_ADDR 0x24
+/* top bit ignored */
+/* Interrupt Active */
+#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_ACTIVATED 0x40
+/* Interrupts that have been triggered */
+#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_HIGH 0x20
+#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_LOW 0x10
+#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_HIGH 0x08
+#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_LOW 0x04
+#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_HIGH 0x02
+#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_LOW 0x01
+
+#define LIS3L02DQ_REG_WAKE_UP_ACK_ADDR 0x25
+
+/* Status register */
+#define LIS3L02DQ_REG_STATUS_ADDR 0x27
+/* XYZ axis data overrun - first is all overrun? */
+#define LIS3L02DQ_REG_STATUS_XYZ_OVERRUN 0x80
+#define LIS3L02DQ_REG_STATUS_Z_OVERRUN 0x40
+#define LIS3L02DQ_REG_STATUS_Y_OVERRUN 0x20
+#define LIS3L02DQ_REG_STATUS_X_OVERRUN 0x10
+/* XYZ new data available - first is all 3 available? */
+#define LIS3L02DQ_REG_STATUS_XYZ_NEW_DATA 0x08
+#define LIS3L02DQ_REG_STATUS_Z_NEW_DATA 0x04
+#define LIS3L02DQ_REG_STATUS_Y_NEW_DATA 0x02
+#define LIS3L02DQ_REG_STATUS_X_NEW_DATA 0x01
+
+/* The accelerometer readings - low and high bytes.
+Form of high byte dependent on justification set in ctrl reg */
+#define LIS3L02DQ_REG_OUT_X_L_ADDR 0x28
+#define LIS3L02DQ_REG_OUT_X_H_ADDR 0x29
+#define LIS3L02DQ_REG_OUT_Y_L_ADDR 0x2A
+#define LIS3L02DQ_REG_OUT_Y_H_ADDR 0x2B
+#define LIS3L02DQ_REG_OUT_Z_L_ADDR 0x2C
+#define LIS3L02DQ_REG_OUT_Z_H_ADDR 0x2D
+
+/* Threshold values for all axes and both above and below thresholds
+ * - i.e. there is only one value */
+#define LIS3L02DQ_REG_THS_L_ADDR 0x2E
+#define LIS3L02DQ_REG_THS_H_ADDR 0x2F
+
+#define LIS3L02DQ_DEFAULT_CTRL1 (LIS3L02DQ_REG_CTRL_1_PD_ON \
+ | LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE \
+ | LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE \
+ | LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE \
+ | LIS3L02DQ_REG_CTRL_1_DF_128)
+
+#define LIS3L02DQ_DEFAULT_CTRL2 0
+
+#define LIS3L02DQ_MAX_TX 12
+#define LIS3L02DQ_MAX_RX 12
+/**
+ * struct lis3l02dq_state - device instance specific data
+ * @us: actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @work_cont_thresh: work continuation for threshold events (TODO: document)
+ * @inter: used to check if new interrupt has been triggered
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct lis3l02dq_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ struct iio_work_cont work_cont_thresh;
+ bool inter;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+
+int lis3l02dq_spi_read_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 *val);
+
+int lis3l02dq_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 *val);
+#define LIS3L02DQ_SCAN_ACC_X 0
+#define LIS3L02DQ_SCAN_ACC_Y 1
+#define LIS3L02DQ_SCAN_ACC_Z 2
+
+
+#ifdef CONFIG_IIO_RING_BUFFER
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+void lis3l02dq_remove_trigger(struct iio_dev *indio_dev);
+int lis3l02dq_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t lis3l02dq_read_accel_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int lis3l02dq_configure_ring(struct iio_dev *indio_dev);
+void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev);
+
+int lis3l02dq_initialize_ring(struct iio_ring_buffer *ring);
+void lis3l02dq_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void lis3l02dq_remove_trigger(struct iio_dev *indio_dev) {};
+static inline int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+};
+
+static inline ssize_t
+lis3l02dq_read_accel_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+};
+
+/* Must be static inline: defined in a header, like the sibling stubs. */
+static inline int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
+{
+	return 0;
+};
+static inline void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
+{};
+static inline int lis3l02dq_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+};
+static inline void lis3l02dq_uninitialize_ring(struct iio_ring_buffer *ring) {};
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_LIS3L02DQ_H_ */
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
new file mode 100644
index 0000000..f008837
--- /dev/null
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -0,0 +1,926 @@
+/*
+ * lis3l02dq.c support STMicroelectronics LISD02DQ
+ * 3d 2g Linear Accelerometers via SPI
+ *
+ * Copyright (c) 2007 Jonathan Cameron <jic23@cam.ac.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Settings:
+ * 16 bit left justified mode used.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "accel.h"
+
+#include "lis3l02dq.h"
+
+/* At the moment the spi framework doesn't allow global setting of cs_change.
+ * It's in the likely to be added comment at the top of spi.h.
+ * This means that use cannot be made of spi_write etc.
+ */
+
+/**
+ * lis3l02dq_spi_read_reg_8() - read single byte from a single register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be read
+ * @val: pass back the resulting value
+ **/
+int lis3l02dq_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct lis3l02dq_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfer = {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = LIS3L02DQ_READ_REG(reg_address);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->us, &msg);
+ *val = st->rx[1];
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * lis3l02dq_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+int lis3l02dq_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 *val)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct lis3l02dq_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfer = {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = LIS3L02DQ_WRITE_REG(reg_address);
+ st->tx[1] = *val;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * lis3l02dq_spi_write_reg_s16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers.
+ * Second register is assumed to have address one greater.
+ * @value: value to be written
+ **/
+static int lis3l02dq_spi_write_reg_s16(struct device *dev,
+ u8 lower_reg_address,
+ s16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct lis3l02dq_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = { {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .tx_buf = st->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = LIS3L02DQ_WRITE_REG(lower_reg_address);
+ st->tx[1] = value & 0xFF;
+ st->tx[2] = LIS3L02DQ_WRITE_REG(lower_reg_address + 1);
+ st->tx[3] = (value >> 8) & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * lis3l02dq_spi_read_reg_s16() - read 2 bytes from a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers.
+ * Second register is assumed to have address one greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int lis3l02dq_spi_read_reg_s16(struct device *dev,
+ u8 lower_reg_address,
+ s16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct lis3l02dq_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = { {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .tx_buf = st->tx + 2,
+ .rx_buf = st->rx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = LIS3L02DQ_READ_REG(lower_reg_address);
+ st->tx[1] = 0;
+ st->tx[2] = LIS3L02DQ_READ_REG(lower_reg_address+1);
+ st->tx[3] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 16 bit register");
+ goto error_ret;
+ }
+ *val = (s16)(st->rx[1]) | ((s16)(st->rx[3]) << 8);
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+/**
+ * lis3l02dq_read_signed() - attribute function used for 8 bit signed values
+ * @dev: the child device associated with the iio_dev or iio_trigger
+ * @attr: the attribute being processed
+ * @buf: buffer into which put the output string
+ **/
+static ssize_t lis3l02dq_read_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ s8 val;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = lis3l02dq_spi_read_reg_8(dev, this_attr->address, (u8 *)&val);
+
+ return ret ? ret : sprintf(buf, "%d\n", val);
+}
+
+static ssize_t lis3l02dq_read_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u8 val;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = lis3l02dq_spi_read_reg_8(dev, this_attr->address, &val);
+
+ return ret ? ret : sprintf(buf, "%d\n", val);
+}
+
+static ssize_t lis3l02dq_write_signed(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ long valin;
+ s8 val;
+ int ret;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = strict_strtol(buf, 10, &valin);
+ if (ret)
+ goto error_ret;
+ val = valin;
+ ret = lis3l02dq_spi_write_reg_8(dev, this_attr->address, (u8 *)&val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static ssize_t lis3l02dq_write_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret;
+ ulong valin;
+ u8 val;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = strict_strtoul(buf, 10, &valin);
+ if (ret)
+ goto err_ret;
+ val = valin;
+ ret = lis3l02dq_spi_write_reg_8(dev, this_attr->address, &val);
+
+err_ret:
+ return ret ? ret : len;
+}
+
+static ssize_t lis3l02dq_read_16bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ s16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = lis3l02dq_spi_read_reg_s16(dev, this_attr->address, &val);
+
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t lis3l02dq_read_accel(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ if (indio_dev->currentmode == INDIO_RING_TRIGGERED)
+ ret = lis3l02dq_read_accel_from_ring(dev, attr, buf);
+ else
+ ret = lis3l02dq_read_16bit_signed(dev, attr, buf);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+/* Sysfs store: parse a signed decimal value and write it to the 16-bit
+ * register pair starting at the attribute's address.
+ */
+static ssize_t lis3l02dq_write_16bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = lis3l02dq_spi_write_reg_s16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+/* Sysfs show for the sampling frequency.  The decimation-factor bits of
+ * control register 1 are mapped back to the corresponding rate in Hz.
+ * An unrecognised field value yields an empty string (len stays 0).
+ */
+static ssize_t lis3l02dq_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret, len = 0;
+ s8 t;
+ ret = lis3l02dq_spi_read_reg_8(dev,
+ LIS3L02DQ_REG_CTRL_1_ADDR,
+ (u8 *)&t);
+ if (ret)
+ return ret;
+ /* Only the decimation-factor bits select the rate */
+ t &= LIS3L02DQ_DEC_MASK;
+ switch (t) {
+ case LIS3L02DQ_REG_CTRL_1_DF_128:
+ len = sprintf(buf, "280\n");
+ break;
+ case LIS3L02DQ_REG_CTRL_1_DF_64:
+ len = sprintf(buf, "560\n");
+ break;
+ case LIS3L02DQ_REG_CTRL_1_DF_32:
+ len = sprintf(buf, "1120\n");
+ break;
+ case LIS3L02DQ_REG_CTRL_1_DF_8:
+ len = sprintf(buf, "4480\n");
+ break;
+ }
+ return len;
+}
+
+/* Sysfs store for the sampling frequency.  Only the four rates advertised
+ * in available_sampling_frequency are accepted; anything else is -EINVAL.
+ * The read-modify-write of control register 1 is serialised against other
+ * state changes with the iio_dev mlock.
+ */
+static ssize_t lis3l02dq_write_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ long val;
+ int ret;
+ u8 t;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = lis3l02dq_spi_read_reg_8(dev,
+ LIS3L02DQ_REG_CTRL_1_ADDR,
+ &t);
+ if (ret)
+ goto error_ret_mutex;
+ /* Wipe the decimation bits clean before or-ing in the new rate */
+ t &= ~LIS3L02DQ_DEC_MASK;
+ switch (val) {
+ case 280:
+ t |= LIS3L02DQ_REG_CTRL_1_DF_128;
+ break;
+ case 560:
+ t |= LIS3L02DQ_REG_CTRL_1_DF_64;
+ break;
+ case 1120:
+ t |= LIS3L02DQ_REG_CTRL_1_DF_32;
+ break;
+ case 4480:
+ t |= LIS3L02DQ_REG_CTRL_1_DF_8;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error_ret_mutex;
+ }
+
+ ret = lis3l02dq_spi_write_reg_8(dev,
+ LIS3L02DQ_REG_CTRL_1_ADDR,
+ &t);
+
+error_ret_mutex:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+/* Bring the device into a sane, known state: program default values into
+ * control registers 1 and 2 and latch the interrupt source register.
+ * The read-back of ctrl1 doubles as a loose "is this the right chip" test.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int lis3l02dq_initial_setup(struct lis3l02dq_state *st)
+{
+ int ret;
+ u8 val, valtest;
+
+ st->us->mode = SPI_MODE_3;
+
+ spi_setup(st->us);
+
+ val = LIS3L02DQ_DEFAULT_CTRL1;
+ /* Write suitable defaults to ctrl1 */
+ ret = lis3l02dq_spi_write_reg_8(&st->indio_dev->dev,
+ LIS3L02DQ_REG_CTRL_1_ADDR,
+ &val);
+ if (ret) {
+ dev_err(&st->us->dev, "problem with setup control register 1");
+ goto err_ret;
+ }
+ /* Repeat as sometimes doesn't work first time?*/
+ ret = lis3l02dq_spi_write_reg_8(&st->indio_dev->dev,
+ LIS3L02DQ_REG_CTRL_1_ADDR,
+ &val);
+ if (ret) {
+ dev_err(&st->us->dev, "problem with setup control register 1");
+ goto err_ret;
+ }
+
+ /* Read back to check this has worked acts as loose test of correct
+ * chip */
+ ret = lis3l02dq_spi_read_reg_8(&st->indio_dev->dev,
+ LIS3L02DQ_REG_CTRL_1_ADDR,
+ &valtest);
+ /* NOTE(review): this error path logs against indio_dev->dev while the
+ * others use us->dev - probably worth making consistent */
+ if (ret || (valtest != val)) {
+ dev_err(&st->indio_dev->dev, "device not playing ball");
+ ret = -EINVAL;
+ goto err_ret;
+ }
+
+ val = LIS3L02DQ_DEFAULT_CTRL2;
+ ret = lis3l02dq_spi_write_reg_8(&st->indio_dev->dev,
+ LIS3L02DQ_REG_CTRL_2_ADDR,
+ &val);
+ if (ret) {
+ dev_err(&st->us->dev, "problem with setup control register 2");
+ goto err_ret;
+ }
+
+ /* Latch interrupt sources until the src register is read */
+ val = LIS3L02DQ_REG_WAKE_UP_CFG_LATCH_SRC;
+ ret = lis3l02dq_spi_write_reg_8(&st->indio_dev->dev,
+ LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
+ &val);
+ if (ret)
+ dev_err(&st->us->dev, "problem with interrupt cfg register");
+err_ret:
+
+ return ret;
+}
+
+/* Per-axis offset registers: signed 8-bit, read/write */
+static IIO_DEV_ATTR_ACCEL_X_OFFSET(S_IWUSR | S_IRUGO,
+ lis3l02dq_read_signed,
+ lis3l02dq_write_signed,
+ LIS3L02DQ_REG_OFFSET_X_ADDR);
+
+static IIO_DEV_ATTR_ACCEL_Y_OFFSET(S_IWUSR | S_IRUGO,
+ lis3l02dq_read_signed,
+ lis3l02dq_write_signed,
+ LIS3L02DQ_REG_OFFSET_Y_ADDR);
+
+static IIO_DEV_ATTR_ACCEL_Z_OFFSET(S_IWUSR | S_IRUGO,
+ lis3l02dq_read_signed,
+ lis3l02dq_write_signed,
+ LIS3L02DQ_REG_OFFSET_Z_ADDR);
+
+/* Per-axis gain registers: unsigned 8-bit, read/write */
+static IIO_DEV_ATTR_ACCEL_X_GAIN(S_IWUSR | S_IRUGO,
+ lis3l02dq_read_unsigned,
+ lis3l02dq_write_unsigned,
+ LIS3L02DQ_REG_GAIN_X_ADDR);
+
+static IIO_DEV_ATTR_ACCEL_Y_GAIN(S_IWUSR | S_IRUGO,
+ lis3l02dq_read_unsigned,
+ lis3l02dq_write_unsigned,
+ LIS3L02DQ_REG_GAIN_Y_ADDR);
+
+static IIO_DEV_ATTR_ACCEL_Z_GAIN(S_IWUSR | S_IRUGO,
+ lis3l02dq_read_unsigned,
+ lis3l02dq_write_unsigned,
+ LIS3L02DQ_REG_GAIN_Z_ADDR);
+
+/* Event threshold: signed 16-bit register pair */
+static IIO_DEV_ATTR_ACCEL_THRESH(S_IWUSR | S_IRUGO,
+ lis3l02dq_read_16bit_signed,
+ lis3l02dq_write_16bit_signed,
+ LIS3L02DQ_REG_THS_L_ADDR);
+
+/* RFC The reading method for these will change depending on whether
+ * ring buffer capture is in use. Is it worth making these take two
+ * functions and let the core handle which to call, or leave as in this
+ * driver where it is the driver's problem to manage this?
+ */
+
+static IIO_DEV_ATTR_ACCEL_X(lis3l02dq_read_accel,
+ LIS3L02DQ_REG_OUT_X_L_ADDR);
+
+static IIO_DEV_ATTR_ACCEL_Y(lis3l02dq_read_accel,
+ LIS3L02DQ_REG_OUT_Y_L_ADDR);
+
+static IIO_DEV_ATTR_ACCEL_Z(lis3l02dq_read_accel,
+ LIS3L02DQ_REG_OUT_Z_L_ADDR);
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ lis3l02dq_read_frequency,
+ lis3l02dq_write_frequency);
+
+static IIO_CONST_ATTR_AVAIL_SAMP_FREQ("280 560 1120 4480");
+
+/* Sysfs show for an event enable: report 1 if the bit selected by the
+ * event attribute's mask is set in the wake-up configuration register,
+ * 0 otherwise.  (Fixed a stray double semicolon in the return statement.)
+ */
+static ssize_t lis3l02dq_read_interrupt_config(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ s8 val;
+ struct iio_event_attr *this_attr = to_iio_event_attr(attr);
+
+ ret = lis3l02dq_spi_read_reg_8(dev,
+ LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
+ (u8 *)&val);
+
+ return ret ? ret : sprintf(buf, "%d\n",
+ (val & this_attr->mask) ? 1 : 0);
+}
+
+/* Sysfs store for an event enable.  Toggles the relevant bit in the
+ * wake-up configuration register and keeps the shared event handler's
+ * listener list in sync.  The global interrupt enable in ctrl2 follows
+ * the handler's refcount so it is only on while someone is listening.
+ * Everything runs under the iio_dev mlock.
+ */
+static ssize_t lis3l02dq_write_interrupt_config(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_event_attr *this_attr = to_iio_event_attr(attr);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ int ret, currentlyset, changed = 0;
+ u8 valold, controlold;
+ bool val;
+
+ /* Any first character other than '0' counts as "enable" */
+ val = !(buf[0] == '0');
+
+ mutex_lock(&indio_dev->mlock);
+ /* read current value */
+ ret = lis3l02dq_spi_read_reg_8(dev,
+ LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
+ &valold);
+ if (ret)
+ goto error_mutex_unlock;
+
+ /* read current control */
+ ret = lis3l02dq_spi_read_reg_8(dev,
+ LIS3L02DQ_REG_CTRL_2_ADDR,
+ &controlold);
+ if (ret)
+ goto error_mutex_unlock;
+ currentlyset = !!(valold & this_attr->mask);
+ if (val == false && currentlyset) {
+ /* Disable: clear the bit and drop off the listener list */
+ valold &= ~this_attr->mask;
+ changed = 1;
+ iio_remove_event_from_list(this_attr->listel,
+ &indio_dev->interrupts[0]
+ ->ev_list);
+ } else if (val == true && !currentlyset) {
+ /* Enable: set the bit and join the listener list */
+ changed = 1;
+ valold |= this_attr->mask;
+ iio_add_event_to_list(this_attr->listel,
+ &indio_dev->interrupts[0]->ev_list);
+ }
+
+ if (changed) {
+ ret = lis3l02dq_spi_write_reg_8(dev,
+ LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
+ &valold);
+ if (ret)
+ goto error_mutex_unlock;
+ /* This always enables the interrupt, even if we've remove the
+ * last thing using it. For this device we can use the reference
+ * count on the handler to tell us if anyone wants the interrupt
+ */
+ controlold = this_attr->listel->refcount ?
+ (controlold | LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT) :
+ (controlold & ~LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT);
+ ret = lis3l02dq_spi_write_reg_8(dev,
+ LIS3L02DQ_REG_CTRL_2_ADDR,
+ &controlold);
+ if (ret)
+ goto error_mutex_unlock;
+ }
+error_mutex_unlock:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+
+/* Top-half threshold interrupt handler: record the timestamp and defer the
+ * SPI work (which may sleep) to the workqueue bottom half.
+ */
+static int lis3l02dq_thresh_handler_th(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct lis3l02dq_state *st = dev_info->dev_data;
+
+ /* Stash the timestamp somewhere convenient for the bh */
+ st->last_timestamp = timestamp;
+ schedule_work(&st->work_cont_thresh.ws);
+
+ return 0;
+}
+
+
+/* Unfortunately it appears the interrupt won't clear unless you read from the
+ * src register.
+ */
+/* Bottom-half threshold handler: read the wake-up source register (which is
+ * also what clears the interrupt), push an IIO event for every axis/direction
+ * bit that is set, then re-enable the irq and ack the device.
+ */
+static void lis3l02dq_thresh_handler_bh_no_check(struct work_struct *work_s)
+{
+ struct iio_work_cont *wc
+ = container_of(work_s, struct iio_work_cont, ws_nocheck);
+ struct lis3l02dq_state *st = wc->st;
+ u8 t;
+
+ lis3l02dq_spi_read_reg_8(&st->indio_dev->dev,
+ LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
+ &t);
+
+ if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_HIGH)
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_ACCEL_Z_HIGH,
+ st->last_timestamp);
+
+ if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_LOW)
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_ACCEL_Z_LOW,
+ st->last_timestamp);
+
+ if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_HIGH)
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_ACCEL_Y_HIGH,
+ st->last_timestamp);
+
+ if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_LOW)
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_ACCEL_Y_LOW,
+ st->last_timestamp);
+
+ if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_HIGH)
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_ACCEL_X_HIGH,
+ st->last_timestamp);
+
+ if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_LOW)
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_ACCEL_X_LOW,
+ st->last_timestamp);
+ /* reenable the irq */
+ enable_irq(st->us->irq);
+ /* Ack and allow for new interrupts */
+ lis3l02dq_spi_read_reg_8(&st->indio_dev->dev,
+ LIS3L02DQ_REG_WAKE_UP_ACK_ADDR,
+ &t);
+
+ return;
+}
+
+/* A shared handler for a number of threshold types */
+IIO_EVENT_SH(threshold, &lis3l02dq_thresh_handler_th);
+
+/* One event attribute per axis/direction, all routed through the shared
+ * threshold handler; the mask selects the relevant wake-up cfg bit. */
+IIO_EVENT_ATTR_ACCEL_X_HIGH_SH(iio_event_threshold,
+ lis3l02dq_read_interrupt_config,
+ lis3l02dq_write_interrupt_config,
+ LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_HIGH);
+
+IIO_EVENT_ATTR_ACCEL_Y_HIGH_SH(iio_event_threshold,
+ lis3l02dq_read_interrupt_config,
+ lis3l02dq_write_interrupt_config,
+ LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Y_HIGH);
+
+IIO_EVENT_ATTR_ACCEL_Z_HIGH_SH(iio_event_threshold,
+ lis3l02dq_read_interrupt_config,
+ lis3l02dq_write_interrupt_config,
+ LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Z_HIGH);
+
+IIO_EVENT_ATTR_ACCEL_X_LOW_SH(iio_event_threshold,
+ lis3l02dq_read_interrupt_config,
+ lis3l02dq_write_interrupt_config,
+ LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_LOW);
+
+IIO_EVENT_ATTR_ACCEL_Y_LOW_SH(iio_event_threshold,
+ lis3l02dq_read_interrupt_config,
+ lis3l02dq_write_interrupt_config,
+ LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Y_LOW);
+
+IIO_EVENT_ATTR_ACCEL_Z_LOW_SH(iio_event_threshold,
+ lis3l02dq_read_interrupt_config,
+ lis3l02dq_write_interrupt_config,
+ LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Z_LOW);
+
+/* Sysfs group exposing the six per-axis threshold event enables */
+static struct attribute *lis3l02dq_event_attributes[] = {
+ &iio_event_attr_accel_x_high.dev_attr.attr,
+ &iio_event_attr_accel_y_high.dev_attr.attr,
+ &iio_event_attr_accel_z_high.dev_attr.attr,
+ &iio_event_attr_accel_x_low.dev_attr.attr,
+ &iio_event_attr_accel_y_low.dev_attr.attr,
+ &iio_event_attr_accel_z_low.dev_attr.attr,
+ NULL
+};
+
+static struct attribute_group lis3l02dq_event_attribute_group = {
+ .attrs = lis3l02dq_event_attributes,
+};
+
+static IIO_CONST_ATTR(name, "lis3l02dq");
+
+/* Main sysfs group: offsets, gains, threshold, raw axis values,
+ * sampling frequency and the device name */
+static struct attribute *lis3l02dq_attributes[] = {
+ &iio_dev_attr_accel_x_offset.dev_attr.attr,
+ &iio_dev_attr_accel_y_offset.dev_attr.attr,
+ &iio_dev_attr_accel_z_offset.dev_attr.attr,
+ &iio_dev_attr_accel_x_gain.dev_attr.attr,
+ &iio_dev_attr_accel_y_gain.dev_attr.attr,
+ &iio_dev_attr_accel_z_gain.dev_attr.attr,
+ &iio_dev_attr_thresh.dev_attr.attr,
+ &iio_dev_attr_accel_x.dev_attr.attr,
+ &iio_dev_attr_accel_y.dev_attr.attr,
+ &iio_dev_attr_accel_z.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_available_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group lis3l02dq_attribute_group = {
+ .attrs = lis3l02dq_attributes,
+};
+
+/* SPI probe: allocate state and DMA-safe comms buffers, register the IIO
+ * device, configure the ring buffer, and (when an interrupt line with a
+ * readable GPIO is available) hook up the event line and data-ready
+ * trigger.  The error unwind mirrors the setup order exactly; take care
+ * to keep the two in sync when modifying.
+ */
+static int __devinit lis3l02dq_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct lis3l02dq_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used tor removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*LIS3L02DQ_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*LIS3L02DQ_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &lis3l02dq_event_attribute_group;
+ st->indio_dev->attrs = &lis3l02dq_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = lis3l02dq_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ /* From here on the device must be unregistered, not just freed */
+ regdone = 1;
+
+ ret = lis3l02dq_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
+ /* This is a little unusual, in that the device seems
+ to need a full read of the interrupt source reg before
+ the interrupt will reset.
+ Hence the two handlers are the same */
+ iio_init_work_cont(&st->work_cont_thresh,
+ lis3l02dq_thresh_handler_bh_no_check,
+ lis3l02dq_thresh_handler_bh_no_check,
+ LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
+ 0,
+ st);
+ st->inter = 0;
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ "lis3l02dq");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = lis3l02dq_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ /* Get the device into a sane initial state */
+ ret = lis3l02dq_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+ return 0;
+
+error_remove_trigger:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ lis3l02dq_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ lis3l02dq_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ lis3l02dq_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+/* Power down the device by zeroing both control registers.
+ * Returns 0 on success, negative errno on SPI failure. */
+static int lis3l02dq_stop_device(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct lis3l02dq_state *st = indio_dev->dev_data;
+ u8 val = 0;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = lis3l02dq_spi_write_reg_8(&indio_dev->dev,
+ LIS3L02DQ_REG_CTRL_1_ADDR,
+ &val);
+ if (ret) {
+ dev_err(&st->us->dev, "problem with turning device off: ctrl1");
+ goto err_ret;
+ }
+
+ ret = lis3l02dq_spi_write_reg_8(&indio_dev->dev,
+ LIS3L02DQ_REG_CTRL_2_ADDR,
+ &val);
+ if (ret)
+ dev_err(&st->us->dev, "problem with turning device off: ctrl2");
+err_ret:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+/* fixme, confirm ordering in this function */
+/* SPI remove: power the chip down, then tear everything down in roughly
+ * the reverse order of probe.  NOTE(review): if lis3l02dq_stop_device()
+ * fails we bail out early and leak everything else - verify intended. */
+static int lis3l02dq_remove(struct spi_device *spi)
+{
+ int ret;
+ struct lis3l02dq_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ ret = lis3l02dq_stop_device(indio_dev);
+ if (ret)
+ goto err_ret;
+
+ /* Make sure no bottom-half work is still pending */
+ flush_scheduled_work();
+
+ lis3l02dq_remove_trigger(indio_dev);
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ lis3l02dq_uninitialize_ring(indio_dev->ring);
+ lis3l02dq_unconfigure_ring(indio_dev);
+ iio_device_unregister(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+
+err_ret:
+ return ret;
+}
+
+/* SPI driver binding for the lis3l02dq accelerometer */
+static struct spi_driver lis3l02dq_driver = {
+ .driver = {
+ .name = "lis3l02dq",
+ .owner = THIS_MODULE,
+ },
+ .probe = lis3l02dq_probe,
+ .remove = __devexit_p(lis3l02dq_remove),
+};
+
+/* Module entry point: register the SPI driver */
+static __init int lis3l02dq_init(void)
+{
+ return spi_register_driver(&lis3l02dq_driver);
+}
+module_init(lis3l02dq_init);
+
+/* Module exit point: unregister the SPI driver */
+static __exit void lis3l02dq_exit(void)
+{
+ spi_unregister_driver(&lis3l02dq_driver);
+}
+module_exit(lis3l02dq_exit);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
+MODULE_DESCRIPTION("ST LIS3L02DQ Accelerometer SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
new file mode 100644
index 0000000..a6b7c72
--- /dev/null
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -0,0 +1,600 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_sw.h"
+#include "accel.h"
+#include "../trigger.h"
+#include "lis3l02dq.h"
+
+/**
+ * combine_8_to_16() combine two u8s into a little-endian u16
+ * @lower: least significant byte
+ * @upper: most significant byte
+ **/
+static inline u16 combine_8_to_16(u8 lower, u8 upper)
+{
+ return (u16)lower | ((u16)upper << 8);
+}
+
+/**
+ * lis3l02dq_scan_el_set_state() set whether a scan contains a given channel
+ * @scan_el: associated iio scan element attribute
+ * @indio_dev: the device structure
+ * @state: desired state
+ *
+ * The scan element's label field holds the register address of the channel
+ * and is mapped to the per-axis enable bit in control register 1.
+ * mlock already held when this is called.
+ **/
+static int lis3l02dq_scan_el_set_state(struct iio_scan_el *scan_el,
+ struct iio_dev *indio_dev,
+ bool state)
+{
+ u8 t, mask;
+ int ret;
+
+ ret = lis3l02dq_spi_read_reg_8(&indio_dev->dev,
+ LIS3L02DQ_REG_CTRL_1_ADDR,
+ &t);
+ if (ret)
+ goto error_ret;
+ switch (scan_el->label) {
+ case LIS3L02DQ_REG_OUT_X_L_ADDR:
+ mask = LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
+ break;
+ case LIS3L02DQ_REG_OUT_Y_L_ADDR:
+ mask = LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
+ break;
+ case LIS3L02DQ_REG_OUT_Z_L_ADDR:
+ mask = LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error_ret;
+ }
+
+ /* !(mask & t) is 1 when the axis is currently disabled, so this
+ * condition is true exactly when current state != requested state;
+ * only then do we touch the register. */
+ if (!(mask & t) == state) {
+ if (state)
+ t |= mask;
+ else
+ t &= ~mask;
+ ret = lis3l02dq_spi_write_reg_8(&indio_dev->dev,
+ LIS3L02DQ_REG_CTRL_1_ADDR,
+ &t);
+ }
+error_ret:
+ return ret;
+
+}
+/* Scan elements: three signed 16-bit axes (label doubles as the register
+ * address) plus the capture timestamp */
+static IIO_SCAN_EL_C(accel_x, LIS3L02DQ_SCAN_ACC_X, IIO_SIGNED(16),
+ LIS3L02DQ_REG_OUT_X_L_ADDR,
+ &lis3l02dq_scan_el_set_state);
+static IIO_SCAN_EL_C(accel_y, LIS3L02DQ_SCAN_ACC_Y, IIO_SIGNED(16),
+ LIS3L02DQ_REG_OUT_Y_L_ADDR,
+ &lis3l02dq_scan_el_set_state);
+static IIO_SCAN_EL_C(accel_z, LIS3L02DQ_SCAN_ACC_Z, IIO_SIGNED(16),
+ LIS3L02DQ_REG_OUT_Z_L_ADDR,
+ &lis3l02dq_scan_el_set_state);
+static IIO_SCAN_EL_TIMESTAMP;
+
+static struct attribute *lis3l02dq_scan_el_attrs[] = {
+ &iio_scan_el_accel_x.dev_attr.attr,
+ &iio_scan_el_accel_y.dev_attr.attr,
+ &iio_scan_el_accel_z.dev_attr.attr,
+ &iio_scan_el_timestamp.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group lis3l02dq_scan_el_group = {
+ .attrs = lis3l02dq_scan_el_attrs,
+ .name = "scan_elements",
+};
+
+/**
+ * lis3l02dq_poll_func_th() top half interrupt handler called by trigger
+ * @private_data: iio_dev
+ **/
+static void lis3l02dq_poll_func_th(struct iio_dev *indio_dev)
+{
+ struct lis3l02dq_state *st = iio_dev_get_devdata(indio_dev);
+ st->last_timestamp = indio_dev->trig->timestamp;
+ schedule_work(&st->work_trigger_to_ring);
+ /* Indicate that this interrupt is being handled */
+
+ /* Technically this is trigger related, but without this
+ * handler running there is currently no way for the interrupt
+ * to clear.
+ */
+ st->inter = 1;
+}
+
+/**
+ * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
+ *
+ * Records the event timestamp on the trigger and kicks off a trigger poll.
+ **/
+static int lis3l02dq_data_rdy_trig_poll(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct lis3l02dq_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_trigger *trig = st->trig;
+
+ trig->timestamp = timestamp;
+ iio_trigger_poll(trig);
+
+ return IRQ_HANDLED;
+}
+
+/* This is an event as it is a response to a physical interrupt */
+IIO_EVENT_SH(data_rdy_trig, &lis3l02dq_data_rdy_trig_poll);
+
+/**
+ * lis3l02dq_read_accel_from_ring() individual acceleration read from ring
+ *
+ * Looks up the scan element whose label matches the attribute's register
+ * address, checks the channel is in the current scan mask, pulls the most
+ * recent sample set from the ring and prints the value for this channel.
+ * Returns -EINVAL if the channel is not currently being scanned.
+ **/
+ssize_t lis3l02dq_read_accel_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_scan_el *el = NULL;
+ int ret, len = 0, i = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ s16 *data;
+
+ /* Find the scan element for this channel */
+ while (dev_info->scan_el_attrs->attrs[i]) {
+ el = to_iio_scan_el((struct device_attribute *)
+ (dev_info->scan_el_attrs->attrs[i]));
+ /* label is in fact the address */
+ if (el->label == this_attr->address)
+ break;
+ i++;
+ }
+ if (!dev_info->scan_el_attrs->attrs[i]) {
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ /* If this element is in the scan mask */
+ ret = iio_scan_mask_query(dev_info, el->number);
+ if (ret < 0)
+ goto error_ret;
+ if (ret) {
+ data = kmalloc(dev_info->ring->access.get_bpd(dev_info->ring),
+ GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+ ret = dev_info->ring->access.read_last(dev_info->ring,
+ (u8 *)data);
+ if (ret)
+ goto error_free_data;
+ } else {
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ /* Position of this channel within the sample set */
+ len = iio_scan_mask_count_to_right(dev_info, el->number);
+ if (len < 0) {
+ ret = len;
+ goto error_free_data;
+ }
+ len = sprintf(buf, "ring %d\n", data[len]);
+error_free_data:
+ kfree(data);
+error_ret:
+ return ret ? ret : len;
+
+}
+
+/* Read commands for all six output registers, interleaved with dummy
+ * bytes to clock the replies out; indexed in steps of 4 per channel */
+static const u8 read_all_tx_array[] =
+{
+ LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
+ LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
+ LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
+ LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
+ LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
+ LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
+};
+
+/**
+ * lis3l02dq_read_all() Reads all channels currently selected
+ * @st: device specific state
+ * @rx_array: (dma capable) receive array, must be at least
+ * 4*number of channels; may be NULL for a dummy read that
+ * only clears the device's data-ready state
+ *
+ * Builds one two-byte transfer per register (low then high byte of each
+ * enabled axis) and fires them as a single SPI message under buf_lock.
+ **/
+int lis3l02dq_read_all(struct lis3l02dq_state *st, u8 *rx_array)
+{
+ struct spi_transfer *xfers;
+ struct spi_message msg;
+ int ret, i, j = 0;
+
+ xfers = kzalloc((st->indio_dev->scan_count) * 2
+ * sizeof(*xfers), GFP_KERNEL);
+ if (!xfers)
+ return -ENOMEM;
+
+ mutex_lock(&st->buf_lock);
+
+ for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++) {
+ if (st->indio_dev->scan_mask & (1 << i)) {
+ /* lower byte */
+ xfers[j].tx_buf = st->tx + 2*j;
+ st->tx[2*j] = read_all_tx_array[i*4];
+ st->tx[2*j + 1] = 0;
+ if (rx_array)
+ xfers[j].rx_buf = rx_array + j*2;
+ xfers[j].bits_per_word = 8;
+ xfers[j].len = 2;
+ xfers[j].cs_change = 1;
+ j++;
+
+ /* upper byte */
+ xfers[j].tx_buf = st->tx + 2*j;
+ st->tx[2*j] = read_all_tx_array[i*4 + 2];
+ st->tx[2*j + 1] = 0;
+ if (rx_array)
+ xfers[j].rx_buf = rx_array + j*2;
+ xfers[j].bits_per_word = 8;
+ xfers[j].len = 2;
+ xfers[j].cs_change = 1;
+ j++;
+ }
+ }
+ /* After these are transmitted, the rx_buff should have
+ * values in alternate bytes
+ */
+ spi_message_init(&msg);
+ for (j = 0; j < st->indio_dev->scan_count * 2; j++)
+ spi_message_add_tail(&xfers[j], &msg);
+
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+ kfree(xfers);
+
+ return ret;
+}
+
+
+/* Whilst this makes a lot of calls to iio_sw_ring functions - it is to device
+ * specific to be rolled into the core.
+ */
+/* Workqueue bottom half for the data-ready trigger: read the enabled
+ * channels, repack the interleaved rx bytes into 16-bit samples, append
+ * the timestamp (8-byte aligned) and push the whole record into the ring.
+ */
+static void lis3l02dq_trigger_bh_to_ring(struct work_struct *work_s)
+{
+ struct lis3l02dq_state *st
+ = container_of(work_s, struct lis3l02dq_state,
+ work_trigger_to_ring);
+
+ u8 *rx_array;
+ int i = 0;
+ u16 *data;
+ size_t datasize = st->indio_dev
+ ->ring->access.get_bpd(st->indio_dev->ring);
+
+ data = kmalloc(datasize , GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&st->us->dev, "memory alloc failed in ring bh");
+ return;
+ }
+ /* Due to interleaved nature of transmission this buffer must be
+ * twice the number of bytes, or 4 times the number of channels
+ */
+ rx_array = kmalloc(4 * (st->indio_dev->scan_count), GFP_KERNEL);
+ if (rx_array == NULL) {
+ dev_err(&st->us->dev, "memory alloc failed in ring bh");
+ kfree(data);
+ return;
+ }
+
+ /* whilst trigger specific, if this read does not occur the data
+ ready interrupt will not be cleared. Need to add a mechanism
+ to provide a dummy read function if this is not triggering on
+ the data ready function but something else is.
+ */
+ st->inter = 0;
+
+ if (st->indio_dev->scan_count)
+ if (lis3l02dq_read_all(st, rx_array) >= 0)
+ for (; i < st->indio_dev->scan_count; i++)
+ data[i] = combine_8_to_16(rx_array[i*4+1],
+ rx_array[i*4+3]);
+ /* Guaranteed to be aligned with 8 byte boundary */
+ if (st->indio_dev->scan_timestamp)
+ *((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;
+
+ st->indio_dev->ring->access.store_to(st->indio_dev->ring,
+ (u8 *)data,
+ st->last_timestamp);
+
+ iio_trigger_notify_done(st->indio_dev->trig);
+ kfree(rx_array);
+ kfree(data);
+
+ return;
+}
+/* in these circumstances is it better to go with unaligned packing and
+ * deal with the cost?*/
+/* Ring preenable: verify at least one scan element is enabled and program
+ * the ring's bytes-per-datum.  With a timestamp present the record is
+ * padded to 16 bytes so the s64 timestamp stays 8-byte aligned.
+ */
+static int lis3l02dq_data_rdy_ring_preenable(struct iio_dev *indio_dev)
+{
+ size_t size;
+ /* Check if there are any scan elements enabled, if not fail*/
+ if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
+ return -EINVAL;
+
+ /* NB braceless nested if/else: each else binds to the nearest if,
+ * exactly as the indentation suggests */
+ if (indio_dev->ring->access.set_bpd) {
+ if (indio_dev->scan_timestamp)
+ if (indio_dev->scan_count) /* Timestamp and data */
+ size = 2*sizeof(s64);
+ else /* Timestamp only */
+ size = sizeof(s64);
+ else /* Data only */
+ size = indio_dev->scan_count*sizeof(s16);
+ indio_dev->ring->access.set_bpd(indio_dev->ring, size);
+ }
+
+ return 0;
+}
+
+/* Ring postenable: attach the poll function to the trigger, if one is
+ * currently set.  No trigger means nothing to do. */
+static int lis3l02dq_data_rdy_ring_postenable(struct iio_dev *indio_dev)
+{
+ if (!indio_dev->trig)
+ return 0;
+ return iio_trigger_attach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc);
+}
+
+/* Ring predisable: detach the poll function from the trigger, if one is
+ * currently set.  No trigger means nothing to do. */
+static int lis3l02dq_data_rdy_ring_predisable(struct iio_dev *indio_dev)
+{
+ if (!indio_dev->trig)
+ return 0;
+ return iio_trigger_dettach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc);
+}
+
+
+/* Enable or disable data-ready generation in control register 2 and keep
+ * the interrupt line's event listener list in step.  The write is a no-op
+ * when the hardware already matches the requested state.
+ * Caller responsible for locking as necessary. */
+static int __lis3l02dq_write_data_ready_config(struct device *dev,
+ struct
+ iio_event_handler_list *list,
+ bool state)
+{
+ int ret;
+ u8 valold;
+ bool currentlyset;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+
+/* Get the current event mask register */
+ ret = lis3l02dq_spi_read_reg_8(dev,
+ LIS3L02DQ_REG_CTRL_2_ADDR,
+ &valold);
+ if (ret)
+ goto error_ret;
+/* Find out if data ready is already on */
+ currentlyset
+ = valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
+
+/* Disable requested */
+ if (!state && currentlyset) {
+
+ valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
+ /* The double write is to overcome a hardware bug?*/
+ ret = lis3l02dq_spi_write_reg_8(dev,
+ LIS3L02DQ_REG_CTRL_2_ADDR,
+ &valold);
+ if (ret)
+ goto error_ret;
+ ret = lis3l02dq_spi_write_reg_8(dev,
+ LIS3L02DQ_REG_CTRL_2_ADDR,
+ &valold);
+ if (ret)
+ goto error_ret;
+
+ iio_remove_event_from_list(list,
+ &indio_dev->interrupts[0]
+ ->ev_list);
+
+/* Enable requested */
+ } else if (state && !currentlyset) {
+ /* if not set, enable requested */
+ valold |= LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
+ iio_add_event_to_list(list, &indio_dev->interrupts[0]->ev_list);
+ ret = lis3l02dq_spi_write_reg_8(dev,
+ LIS3L02DQ_REG_CTRL_2_ADDR,
+ &valold);
+ if (ret)
+ goto error_ret;
+ }
+
+ return 0;
+error_ret:
+ return ret;
+}
+
+/**
+ * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
+ *
+ * If disabling the interrupt also does a final read to ensure it is clear.
+ * This is only important in some cases where the scan enable elements are
+ * switched before the ring is reenabled.
+ **/
+static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct lis3l02dq_state *st = trig->private_data;
+ int ret = 0;
+ u8 t;
+
+ /* Previously the result of this call was silently discarded, so a
+ * failed register write left the trigger state inconsistent. */
+ ret = __lis3l02dq_write_data_ready_config(&st->indio_dev->dev,
+ &iio_event_data_rdy_trig,
+ state);
+ if (ret)
+ return ret;
+ if (state == false) {
+ /* possible quirk with handler currently worked around
+ by ensuring the work queue is empty */
+ flush_scheduled_work();
+ /* Clear any outstanding ready events */
+ ret = lis3l02dq_read_all(st, NULL);
+ }
+ /* Dummy read of the wake up source register to clear any latched
+ * interrupt; the result is intentionally ignored. */
+ lis3l02dq_spi_read_reg_8(&st->indio_dev->dev,
+ LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
+ &t);
+ return ret;
+}
+/* Trigger control attributes: currently just the read-only name */
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *lis3l02dq_trigger_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+
+static const struct attribute_group lis3l02dq_trigger_attr_group = {
+ .attrs = lis3l02dq_trigger_attrs,
+};
+
+/**
+ * lis3l02dq_trig_try_reen() try re-enabling irq for data rdy trigger
+ * @trig: the datardy trigger
+ *
+ * As the trigger may occur on any data element being updated it is
+ * really rather likely to occur during the read from the previous
+ * trigger event. The only way to discover if this has occurred on
+ * boards not supporting level interrupts is to take a look at the line.
+ * If it is indicating another interrupt and we don't seem to have a
+ * handler looking at it, then we need to notify the core that we need
+ * to tell the triggering core to try reading all these again.
+ **/
+static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
+{
+ struct lis3l02dq_state *st = trig->private_data;
+ enable_irq(st->us->irq);
+ /* If gpio still high (or high again) */
+ if (gpio_get_value(irq_to_gpio(st->us->irq)))
+ if (st->inter == 0) {
+ /* already interrupt handler dealing with it */
+ disable_irq_nosync(st->us->irq);
+ if (st->inter == 1) {
+ /* interrupt handler snuck in between test
+ * and disable */
+ enable_irq(st->us->irq);
+ return 0;
+ }
+ /* Ask the core to retry - line is stuck high */
+ return -EAGAIN;
+ }
+ /* irq reenabled so success! */
+ return 0;
+}
+
+/* Allocate, name and register the data-ready trigger for this device.
+ * Returns 0 on success, negative errno on failure.
+ */
+int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct lis3l02dq_state *state = indio_dev->dev_data;
+
+ state->trig = iio_allocate_trigger();
+ /* Previously unchecked: a failed allocation led straight to a
+ * NULL dereference on state->trig->name below. */
+ if (!state->trig)
+ return -ENOMEM;
+ state->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ if (!state->trig->name) {
+ ret = -ENOMEM;
+ goto error_free_trig;
+ }
+ snprintf((char *)state->trig->name,
+ IIO_TRIGGER_NAME_LENGTH,
+ "lis3l02dq-dev%d", indio_dev->id);
+ state->trig->dev.parent = &state->us->dev;
+ state->trig->owner = THIS_MODULE;
+ state->trig->private_data = state;
+ state->trig->set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state;
+ state->trig->try_reenable = &lis3l02dq_trig_try_reen;
+ state->trig->control_attrs = &lis3l02dq_trigger_attr_group;
+ ret = iio_trigger_register(state->trig);
+ if (ret)
+ goto error_free_trig_name;
+
+ return 0;
+
+error_free_trig_name:
+ kfree(state->trig->name);
+error_free_trig:
+ iio_free_trigger(state->trig);
+
+ return ret;
+}
+
+/* Unregister and free the data-ready trigger (reverse of probe_trigger) */
+void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct lis3l02dq_state *state = indio_dev->dev_data;
+
+ iio_trigger_unregister(state->trig);
+ kfree(state->trig->name);
+ iio_free_trigger(state->trig);
+}
+
+/* Free the poll function and software ring (reverse of configure_ring) */
+void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->pollfunc);
+ iio_sw_rb_free(indio_dev->ring);
+}
+
+/* Set up the software ring buffer: default scan mask (all three axes plus
+ * timestamp), ring callbacks and the trigger poll function, then flag the
+ * device as supporting triggered ring capture.
+ * (Fixed a stray double semicolon on the error goto.)
+ */
+int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
+{
+ int ret = 0;
+ struct lis3l02dq_state *st = indio_dev->dev_data;
+ struct iio_ring_buffer *ring;
+ INIT_WORK(&st->work_trigger_to_ring, lis3l02dq_trigger_bh_to_ring);
+ /* Set default scan mode */
+
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_x.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_y.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_z.number);
+ indio_dev->scan_timestamp = true;
+
+ indio_dev->scan_el_attrs = &lis3l02dq_scan_el_group;
+
+ ring = iio_sw_rb_allocate(indio_dev);
+ if (!ring) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ indio_dev->ring = ring;
+ /* Effectively select the ring buffer implementation */
+ iio_ring_sw_register_funcs(&ring->access);
+ ring->preenable = &lis3l02dq_data_rdy_ring_preenable;
+ ring->postenable = &lis3l02dq_data_rdy_ring_postenable;
+ ring->predisable = &lis3l02dq_data_rdy_ring_predisable;
+ ring->owner = THIS_MODULE;
+
+ indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
+ if (indio_dev->pollfunc == NULL) {
+ ret = -ENOMEM;
+ goto error_iio_sw_rb_free;
+ }
+ indio_dev->pollfunc->poll_func_main = &lis3l02dq_poll_func_th;
+ indio_dev->pollfunc->private_data = indio_dev;
+ indio_dev->modes |= INDIO_RING_TRIGGERED;
+ return 0;
+
+error_iio_sw_rb_free:
+ iio_sw_rb_free(indio_dev->ring);
+ return ret;
+}
+
+/* Thin wrappers: register/unregister the ring buffer with the IIO core */
+int lis3l02dq_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return iio_ring_buffer_register(ring);
+}
+
+void lis3l02dq_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+ iio_ring_buffer_unregister(ring);
+}
+
+
+/* NOTE(review): the @length parameter is ignored and 500 is always used -
+ * presumably intended as a default; confirm whether the hardcoded value
+ * should be @length instead. */
+int lis3l02dq_set_ring_length(struct iio_dev *indio_dev, int length)
+{
+ /* Set sensible defaults for the ring buffer */
+ if (indio_dev->ring->access.set_length)
+ return indio_dev->ring->access.set_length(indio_dev->ring, 500);
+ return 0;
+}
+
+
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
new file mode 100644
index 0000000..29e11da
--- /dev/null
+++ b/drivers/staging/iio/accel/sca3000.h
@@ -0,0 +1,298 @@
+/*
+ * sca3000.c -- support VTI sca3000 series accelerometers
+ * via SPI
+ *
+ * Copyright (c) 2007 Jonathan Cameron <jic23@cam.ac.uk>
+ *
+ * Partly based upon tle62x0.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Initial mode is direct measurement.
+ *
+ * Untested things
+ *
+ * Temperature reading (the e05 I'm testing with doesn't have a sensor)
+ *
+ * Free fall detection mode - supported but untested as I'm not dropping my
+ * dubious wire rig far enough to test it.
+ *
+ * Unsupported as yet
+ *
+ * Time stamping of data from ring. Various ideas on how to do this but none
+ * are remotely simple. Suggestions welcome.
+ *
+ * Individual enabling disabling of channels going into ring buffer
+ *
+ * Overflow handling (this is signaled for all but 8 bit ring buffer mode.)
+ *
+ * Motion detector using AND combinations of signals.
+ *
+ * Note: Be very careful about not touching any register bytes marked
+ * as reserved on the data sheet. They really mean it, as changing the
+ * contents of some will cause the device to lock up.
+ *
+ * Known issues - on rare occasions the interrupts lock up. Not sure why as yet.
+ * Can probably alleviate this by reading the interrupt register on start, but
+ * that is really just brushing the problem under the carpet.
+ */
+#define SCA3000_WRITE_REG(a) (((a) << 2) | 0x02)
+#define SCA3000_READ_REG(a) ((a) << 2)
+
+#define SCA3000_REG_ADDR_REVID 0x00
+#define SCA3000_REVID_MAJOR_MASK 0xf0
+#define SCA3000_REVID_MINOR_MASK 0x0f
+
+#define SCA3000_REG_ADDR_STATUS 0x02
+#define SCA3000_LOCKED 0x20
+#define SCA3000_EEPROM_CS_ERROR 0x02
+#define SCA3000_SPI_FRAME_ERROR 0x01
+
+/* All reads done using register decrement so no need to directly access LSBs */
+#define SCA3000_REG_ADDR_X_MSB 0x05
+#define SCA3000_REG_ADDR_Y_MSB 0x07
+#define SCA3000_REG_ADDR_Z_MSB 0x09
+
+#define SCA3000_REG_ADDR_RING_OUT 0x0f
+
+/* Temp read untested - the e05 doesn't have the sensor */
+#define SCA3000_REG_ADDR_TEMP_MSB 0x13
+
+#define SCA3000_REG_ADDR_MODE 0x14
+#define SCA3000_MODE_PROT_MASK 0x28
+
+#define SCA3000_RING_BUF_ENABLE 0x80
+#define SCA3000_RING_BUF_8BIT 0x40
+/* Free fall detection triggers an interrupt if the acceleration
+ * is below a threshold for equivalent of 25cm drop
+ */
+#define SCA3000_FREE_FALL_DETECT 0x10
+#define SCA3000_MEAS_MODE_NORMAL 0x00
+#define SCA3000_MEAS_MODE_OP_1 0x01
+#define SCA3000_MEAS_MODE_OP_2 0x02
+
+/* In motion detection mode the accelerations are band pass filtered
+ * (approx 1 - 25Hz) and a programmable threshold is then used to trigger
+ * an interrupt.
+ */
+#define SCA3000_MEAS_MODE_MOT_DET 0x03
+
+#define SCA3000_REG_ADDR_BUF_COUNT 0x15
+
+#define SCA3000_REG_ADDR_INT_STATUS 0x16
+
+#define SCA3000_INT_STATUS_THREE_QUARTERS 0x80
+#define SCA3000_INT_STATUS_HALF 0x40
+
+#define SCA3000_INT_STATUS_FREE_FALL 0x08
+#define SCA3000_INT_STATUS_Y_TRIGGER 0x04
+#define SCA3000_INT_STATUS_X_TRIGGER 0x02
+#define SCA3000_INT_STATUS_Z_TRIGGER 0x01
+
+/* Used to allow access to multiplexed registers */
+#define SCA3000_REG_ADDR_CTRL_SEL 0x18
+/* Only available for SCA3000-D03 and SCA3000-D01 */
+#define SCA3000_REG_CTRL_SEL_I2C_DISABLE 0x01
+#define SCA3000_REG_CTRL_SEL_MD_CTRL 0x02
+#define SCA3000_REG_CTRL_SEL_MD_Y_TH 0x03
+#define SCA3000_REG_CTRL_SEL_MD_X_TH 0x04
+#define SCA3000_REG_CTRL_SEL_MD_Z_TH 0x05
+/* BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device
+ will not function */
+#define SCA3000_REG_CTRL_SEL_OUT_CTRL 0x0B
+#define SCA3000_OUT_CTRL_PROT_MASK 0xE0
+#define SCA3000_OUT_CTRL_BUF_X_EN 0x10
+#define SCA3000_OUT_CTRL_BUF_Y_EN 0x08
+#define SCA3000_OUT_CTRL_BUF_Z_EN 0x04
+#define SCA3000_OUT_CTRL_BUF_DIV_4 0x02
+#define SCA3000_OUT_CTRL_BUF_DIV_2 0x01
+
+/* Control which motion detector interrupts are on.
+ * For now only OR combinations are supported.
+ */
+#define SCA3000_MD_CTRL_PROT_MASK 0xC0
+#define SCA3000_MD_CTRL_OR_Y 0x01
+#define SCA3000_MD_CTRL_OR_X 0x02
+#define SCA3000_MD_CTRL_OR_Z 0x04
+/* Currently unsupported */
+#define SCA3000_MD_CTRL_AND_Y 0x08
+#define SCA3000_MD_CTRL_AND_X 0x10
+#define SAC3000_MD_CTRL_AND_Z 0x20
+
+/* Some control registers of complex access methods requiring this register to
+ * be used to remove a lock.
+ */
+#define SCA3000_REG_ADDR_UNLOCK 0x1e
+
+#define SCA3000_REG_ADDR_INT_MASK 0x21
+#define SCA3000_INT_MASK_PROT_MASK 0x1C
+
+#define SCA3000_INT_MASK_RING_THREE_QUARTER 0x80
+#define SCA3000_INT_MASK_RING_HALF 0x40
+
+#define SCA3000_INT_MASK_ALL_INTS 0x02
+#define SCA3000_INT_MASK_ACTIVE_HIGH 0x01
+#define SCA3000_INT_MASK_ACTIVE_LOW 0x00
+
+/* Values of multiplexed registers (write to ctrl_data after select) */
+#define SCA3000_REG_ADDR_CTRL_DATA 0x22
+
+/* Measurement modes available on some sca3000 series chips. Code assumes others
+ * may become available in the future.
+ *
+ * Bypass - Bypass the low-pass filter in the signal channel so as to increase
+ * signal bandwidth.
+ *
+ * Narrow - Narrow low-pass filtering of the signal channel and half output
+ * data rate by decimation.
+ *
+ * Wide - Widen low-pass filtering of signal channel to increase bandwidth
+ */
+#define SCA3000_OP_MODE_BYPASS 0x01
+#define SCA3000_OP_MODE_NARROW 0x02
+#define SCA3000_OP_MODE_WIDE 0x04
+#define SCA3000_MAX_TX 6
+#define SCA3000_MAX_RX 2
+
+/**
+ * struct sca3000_state - device instance state information
+ * @us: the associated spi device
+ * @info: chip variant information
+ * @indio_dev: device information used by the IIO core
+ * @interrupt_handler_ws: event interrupt handler for all events
+ * @last_timestamp: the timestamp of the last event
+ * @mo_det_use_count: reference counter for the motion detection unit
+ * @lock: lock used to protect elements of sca3000_state
+ * and the underlying device state.
+ * @bpse: number of bits per scan element
+ * @tx: dma-able transmit buffer (presumably SCA3000_MAX_TX bytes;
+ * the unlock sequence writes tx[0]..tx[5] - confirm at allocation
+ * site, which is outside this chunk)
+ * @rx: dma-able receive buffer (presumably SCA3000_MAX_RX bytes)
+ **/
+struct sca3000_state {
+ struct spi_device *us;
+ const struct sca3000_chip_info *info;
+ struct iio_dev *indio_dev;
+ struct work_struct interrupt_handler_ws;
+ s64 last_timestamp;
+ int mo_det_use_count;
+ struct mutex lock;
+ int bpse;
+ u8 *tx;
+ /* not used during a ring buffer read */
+ u8 *rx;
+};
+
+/**
+ * struct sca3000_chip_info - model dependent parameters
+ * @name: model identification
+ * @temp_output: some devices have temperature sensors.
+ * @measurement_mode_freq: normal mode sampling frequency
+ * (presumably Hz - confirm against datasheet)
+ * @option_mode_1: first optional mode. Not all models have one
+ * @option_mode_1_freq: option mode 1 sampling frequency
+ * @option_mode_2: second optional mode. Not all chips have one
+ * @option_mode_2_freq: option mode 2 sampling frequency
+ *
+ * This structure is used to hold information about the functionality of a given
+ * sca3000 variant. Zeroed option_mode fields mean the mode is absent.
+ **/
+struct sca3000_chip_info {
+ const char *name;
+ bool temp_output;
+ int measurement_mode_freq;
+ int option_mode_1;
+ int option_mode_1_freq;
+ int option_mode_2;
+ int option_mode_2_freq;
+};
+
+/**
+ * sca3000_read_data() read a series of values from the device
+ * @st: device instance state
+ * @reg_address_high: start address (decremented read)
+ * @rx_p: pointer where the received buffer address is placed. Caller
+ * responsible for freeing this on success. The buffer is
+ * len + 1 bytes long and the payload starts at index 1.
+ * @len: number of bytes to read
+ *
+ * The main lock must be held.
+ **/
+int sca3000_read_data(struct sca3000_state *st,
+ u8 reg_address_high,
+ u8 **rx_p,
+ int len);
+
+/**
+ * sca3000_write_reg() write a single register
+ * @st: device instance state
+ * @address: address of register on chip
+ * @val: value to be written to register
+ *
+ * The main lock must be held.
+ **/
+int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val);
+
/* Conversion function for use with the ring buffer when in 11bit mode.
 *
 * NOTE(review): the 0x1C mask discards the two lowest bits of (lsb >> 3),
 * and the sign test uses bit 12 with fill 0xE000 - i.e. 13-bit parameters.
 * This looks copied from the 13-bit variant; verify against the datasheet's
 * 8/11-bit ring layouts. Behavior deliberately left unchanged here; only
 * the stray ';' after the function body was removed.
 */
static inline int sca3000_11bit_convert(uint8_t msb, uint8_t lsb)
{
	int16_t val;

	val = ((lsb >> 3) & 0x1C) | (msb << 5);
	val |= (val & (1 << 12)) ? 0xE000 : 0;

	return val;
}
+
/* Conversion function for 13-bit samples: 5 LSBs live in lsb[7:3], the
 * 8 MSBs in msb; the result is sign-extended two's complement.
 * Uses int16_t (as the 11-bit variant does) instead of the kernel s16
 * typedef for consistency within this header; stray ';' after the
 * function body removed.
 */
static inline int sca3000_13bit_convert(uint8_t msb, uint8_t lsb)
{
	int16_t val;

	val = ((lsb >> 3) & 0x1F) | (msb << 5);
	/* sign fill from bit 12 upwards */
	val |= (val & (1 << 12)) ? 0xE000 : 0;

	return val;
}
+
+
+#ifdef CONFIG_IIO_RING_BUFFER
+/**
+ * sca3000_register_ring_funcs() setup the ring state change functions
+ **/
+void sca3000_register_ring_funcs(struct iio_dev *indio_dev);
+
+/**
+ * sca3000_configure_ring() - allocate and configure ring buffer
+ * @indio_dev: iio-core device whose ring is to be configured
+ *
+ * The hardware ring buffer needs far fewer ring buffer functions than
+ * a software one as a lot of things are handled automatically.
+ * This function also tells the iio core that our device supports a
+ * hardware ring buffer mode.
+ **/
+int sca3000_configure_ring(struct iio_dev *indio_dev);
+
+/**
+ * sca3000_unconfigure_ring() - deallocate the ring buffer
+ * @indio_dev: iio-core device whose ring we are freeing
+ **/
+void sca3000_unconfigure_ring(struct iio_dev *indio_dev);
+
+/**
+ * sca3000_ring_int_process() handles ring related event pushing and escalation
+ * @val: the event code
+ **/
+void sca3000_ring_int_process(u8 val, struct iio_ring_buffer *ring);
+
+#else
+static inline void sca3000_register_ring_funcs(struct iio_dev *indio_dev) {};
+
+static inline
+int sca3000_register_ring_access_and_init(struct iio_dev *indio_dev)
+{
+ return 0;
+};
+
+static inline void sca3000_ring_int_process(u8 val, void *ring) {};
+
+#endif
+
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
new file mode 100644
index 0000000..e27e3b7
--- /dev/null
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -0,0 +1,1509 @@
+/*
+ * sca3000_core.c -- support VTI sca3000 series accelerometers via SPI
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Copyright (c) 2009 Jonathan Cameron <jic23@cam.ac.uk>
+ *
+ * See industrialio/accels/sca3000.h for comments.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_generic.h"
+
+#include "accel.h"
+#include "sca3000.h"
+
+/* Known SPI variants of the sca3000 family; presumably used as indices
+ * into sca3000_spi_chip_info_tbl via the spi device id table - TODO
+ * confirm against the probe code, which is outside this chunk. */
+enum sca3000_variant {
+ d01,
+ d03,
+ e02,
+ e04,
+ e05,
+ l01,
+};
+
+/* Note where option modes are not defined, the chip simply does not
+ * support any.
+ * Other chips in the sca3000 series use i2c and are not included here.
+ *
+ * Some of these devices are only listed in the family data sheet and
+ * do not actually appear to be available.
+ *
+ * Entries must remain in the same order as enum sca3000_variant.
+ */
+static const struct sca3000_chip_info sca3000_spi_chip_info_tbl[] = {
+ {
+ .name = "sca3000-d01",
+ .temp_output = true,
+ .measurement_mode_freq = 250,
+ .option_mode_1 = SCA3000_OP_MODE_BYPASS,
+ .option_mode_1_freq = 250,
+ }, {
+ /* No data sheet available - may be the same as the 3100-d03?*/
+ .name = "sca3000-d03",
+ .temp_output = true,
+ }, {
+ .name = "sca3000-e02",
+ .measurement_mode_freq = 125,
+ .option_mode_1 = SCA3000_OP_MODE_NARROW,
+ .option_mode_1_freq = 63,
+ }, {
+ .name = "sca3000-e04",
+ .measurement_mode_freq = 100,
+ .option_mode_1 = SCA3000_OP_MODE_NARROW,
+ .option_mode_1_freq = 50,
+ .option_mode_2 = SCA3000_OP_MODE_WIDE,
+ .option_mode_2_freq = 400,
+ }, {
+ .name = "sca3000-e05",
+ .measurement_mode_freq = 200,
+ .option_mode_1 = SCA3000_OP_MODE_NARROW,
+ .option_mode_1_freq = 50,
+ .option_mode_2 = SCA3000_OP_MODE_WIDE,
+ .option_mode_2_freq = 400,
+ }, {
+ /* No data sheet available.
+ * Frequencies are unknown.
+ */
+ .name = "sca3000-l01",
+ .temp_output = true,
+ .option_mode_1 = SCA3000_OP_MODE_BYPASS,
+ },
+};
+
+
+/**
+ * sca3000_write_reg() write a single register
+ * @st: device instance state
+ * @address: address of register on chip
+ * @val: value to be written to register
+ *
+ * Builds a 2 byte message (address opcode, then value) in st->tx and
+ * sends it synchronously. The main lock must be held (see sca3000.h).
+ **/
+int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val)
+{
+ struct spi_transfer xfer = {
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .tx_buf = st->tx,
+ };
+ struct spi_message msg;
+
+ st->tx[0] = SCA3000_WRITE_REG(address);
+ st->tx[1] = val;
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(st->us, &msg);
+}
+
+int sca3000_read_data(struct sca3000_state *st,
+ uint8_t reg_address_high,
+ u8 **rx_p,
+ int len)
+{
+ int ret;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .bits_per_word = 8,
+ .len = len + 1,
+ .cs_change = 1,
+ .tx_buf = st->tx,
+ };
+
+ *rx_p = kmalloc(len + 1, GFP_KERNEL);
+ if (*rx_p == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ xfer.rx_buf = *rx_p;
+ st->tx[0] = SCA3000_READ_REG(reg_address_high);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ ret = spi_sync(st->us, &msg);
+
+ if (ret) {
+ dev_err(get_device(&st->us->dev), "problem reading register");
+ goto error_free_rx;
+ }
+
+ return 0;
+error_free_rx:
+ kfree(*rx_p);
+error_ret:
+ return ret;
+
+}
+/**
+ * sca3000_reg_lock_on() test if the ctrl register lock is on
+ *
+ * Returns 0 or 1 on success, negative errno on failure; callers run the
+ * unlock sequence when this returns 1.
+ * NOTE(review): non-zero is returned when the SCA3000_LOCKED bit is
+ * CLEAR, which looks inverted relative to the bit name - confirm the
+ * bit's polarity against the datasheet before changing anything.
+ *
+ * Lock must be held.
+ **/
+static int sca3000_reg_lock_on(struct sca3000_state *st)
+{
+ u8 *rx;
+ int ret;
+
+ ret = sca3000_read_data(st, SCA3000_REG_ADDR_STATUS, &rx, 1);
+
+ if (ret < 0)
+ return ret;
+ /* payload is at rx[1]; buffer from sca3000_read_data is len + 1 bytes */
+ ret = !(rx[1] & SCA3000_LOCKED);
+ kfree(rx);
+
+ return ret;
+}
+
+/**
+ * __sca3000_unlock_reg_lock() unlock the control registers
+ *
+ * Note the device does not appear to support doing this in a single transfer.
+ * This should only ever be used as part of ctrl reg read.
+ * Lock must be held before calling this
+ **/
+static int __sca3000_unlock_reg_lock(struct sca3000_state *st)
+{
+ struct spi_message msg;
+ struct spi_transfer xfer[3] = {
+ {
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .tx_buf = st->tx,
+ }, {
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .tx_buf = st->tx + 2,
+ }, {
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .tx_buf = st->tx + 4,
+ },
+ };
+ /* three-write unlock sequence 0x00, 0x50, 0xA0 to the UNLOCK register -
+ * presumably mandated by the datasheet; confirm values/order there */
+ st->tx[0] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
+ st->tx[1] = 0x00;
+ st->tx[2] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
+ st->tx[3] = 0x50;
+ st->tx[4] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
+ st->tx[5] = 0xA0;
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+ spi_message_add_tail(&xfer[1], &msg);
+ spi_message_add_tail(&xfer[2], &msg);
+
+ return spi_sync(st->us, &msg);
+}
+
+/**
+ * sca3000_write_ctrl_reg() write to a lock protect ctrl register
+ * @sel: selects which registers we wish to write to
+ * @val: the value to be written
+ *
+ * Certain control registers are protected against overwriting by the lock
+ * register and use a shared write address. This function allows writing of
+ * these registers.
+ * Lock must be held.
+ **/
+static int sca3000_write_ctrl_reg(struct sca3000_state *st,
+ uint8_t sel,
+ uint8_t val)
+{
+
+ int ret;
+
+ ret = sca3000_reg_lock_on(st);
+ if (ret < 0)
+ goto error_ret;
+ if (ret) {
+ ret = __sca3000_unlock_reg_lock(st);
+ if (ret)
+ goto error_ret;
+ }
+
+ /* Set the control select register */
+ ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, sel);
+ if (ret)
+ goto error_ret;
+
+ /* Write the actual value into the register */
+ ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_DATA, val);
+
+error_ret:
+ return ret;
+}
+
+/* Crucial that lock is called before calling this */
+/**
+ * sca3000_read_ctrl_reg() read from lock protected control register.
+ *
+ * Lock must be held.
+ **/
+static int sca3000_read_ctrl_reg(struct sca3000_state *st,
+ u8 ctrl_reg,
+ u8 **rx_p)
+{
+ int ret;
+
+ ret = sca3000_reg_lock_on(st);
+ if (ret < 0)
+ goto error_ret;
+ if (ret) {
+ ret = __sca3000_unlock_reg_lock(st);
+ if (ret)
+ goto error_ret;
+ }
+ /* Set the control select register */
+ ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, ctrl_reg);
+ if (ret)
+ goto error_ret;
+ ret = sca3000_read_data(st, SCA3000_REG_ADDR_CTRL_DATA, rx_p, 1);
+
+error_ret:
+ return ret;
+}
+
+#ifdef SCA3000_DEBUG
+/**
+ * sca3000_check_status() check the status register
+ *
+ * Only used for debugging purposes: logs eeprom checksum and SPI frame
+ * errors reported by the device.
+ **/
+static int sca3000_check_status(struct device *dev)
+{
+ u8 *rx;
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct sca3000_state *st = indio_dev->dev_data;
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data(st, SCA3000_REG_ADDR_STATUS, &rx, 1);
+ if (ret < 0)
+ /* on failure sca3000_read_data has already freed rx */
+ goto error_ret;
+ /* status byte is rx[1]; rx[0] is clocked out during the address */
+ if (rx[1] & SCA3000_EEPROM_CS_ERROR)
+ dev_err(dev, "eeprom error \n");
+ if (rx[1] & SCA3000_SPI_FRAME_ERROR)
+ dev_err(dev, "Previous SPI Frame was corrupt\n");
+ kfree(rx);
+
+error_ret:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+#endif /* SCA3000_DEBUG */
+
+/**
+ * sca3000_read_13bit_signed() sysfs interface to read 13 bit signed registers
+ *
+ * These are described as signed 12 bit on the data sheet, which appears
+ * to be a conventional 2's complement 13 bit.
+ *
+ * The register to read comes from the attribute's address field (set to
+ * the relevant axis MSB register when the attribute is declared below).
+ **/
+static ssize_t sca3000_read_13bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int len = 0, ret;
+ int val;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ u8 *rx;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct sca3000_state *st = indio_dev->dev_data;
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data(st, this_attr->address, &rx, 2);
+ if (ret < 0)
+ goto error_ret;
+ /* payload starts at rx[1] (rx[0] clocks out during the address) */
+ val = sca3000_13bit_convert(rx[1], rx[2]);
+ len += sprintf(buf + len, "%d\n", val);
+ kfree(rx);
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+
+static ssize_t sca3000_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct sca3000_state *st = dev_info->dev_data;
+ return sprintf(buf, "%s\n", st->info->name);
+}
+/**
+ * sca3000_show_reg() - sysfs interface to read the chip revision number
+ **/
+static ssize_t sca3000_show_rev(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int len = 0, ret;
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct sca3000_state *st = dev_info->dev_data;
+
+ u8 *rx;
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data(st, SCA3000_REG_ADDR_REVID, &rx, 1);
+ if (ret < 0)
+ goto error_ret;
+ len += sprintf(buf + len,
+ "major=%d, minor=%d\n",
+ rx[1] & SCA3000_REVID_MAJOR_MASK,
+ rx[1] & SCA3000_REVID_MINOR_MASK);
+ kfree(rx);
+
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+/**
+ * sca3000_show_available_measurement_modes() display available modes
+ *
+ * This is all read from chip specific data in the driver. Not all
+ * of the sca3000 series support modes other than normal.
+ * Output is a single line listing mode numbers and names; motion
+ * detection (mode 3) is always supported.
+ **/
+static ssize_t
+sca3000_show_available_measurement_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct sca3000_state *st = dev_info->dev_data;
+ int len = 0;
+
+ len += sprintf(buf + len, "0 - normal mode");
+ /* a zeroed option_mode field matches no case: mode simply not listed */
+ switch (st->info->option_mode_1) {
+ case SCA3000_OP_MODE_NARROW:
+ len += sprintf(buf + len, ", 1 - narrow mode");
+ break;
+ case SCA3000_OP_MODE_BYPASS:
+ len += sprintf(buf + len, ", 1 - bypass mode");
+ break;
+ };
+ switch (st->info->option_mode_2) {
+ case SCA3000_OP_MODE_WIDE:
+ len += sprintf(buf + len, ", 2 - wide mode");
+ break;
+ }
+ /* always supported */
+ len += sprintf(buf + len, " 3 - motion detection \n");
+
+ return len;
+}
+
+/**
+ * sca3000_show_measurmenet_mode() sysfs read of current mode
+ **/
+static ssize_t
+sca3000_show_measurement_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct sca3000_state *st = dev_info->dev_data;
+ int len = 0, ret;
+ u8 *rx;
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1);
+ if (ret)
+ goto error_ret;
+ /* mask bottom 2 bits - only ones that are relevant */
+ rx[1] &= 0x03;
+ switch (rx[1]) {
+ case SCA3000_MEAS_MODE_NORMAL:
+ len += sprintf(buf + len, "0 - normal mode\n");
+ break;
+ case SCA3000_MEAS_MODE_MOT_DET:
+ len += sprintf(buf + len, "3 - motion detection\n");
+ break;
+ case SCA3000_MEAS_MODE_OP_1:
+ switch (st->info->option_mode_1) {
+ case SCA3000_OP_MODE_NARROW:
+ len += sprintf(buf + len, "1 - narrow mode\n");
+ break;
+ case SCA3000_OP_MODE_BYPASS:
+ len += sprintf(buf + len, "1 - bypass mode\n");
+ break;
+ };
+ break;
+ case SCA3000_MEAS_MODE_OP_2:
+ switch (st->info->option_mode_2) {
+ case SCA3000_OP_MODE_WIDE:
+ len += sprintf(buf + len, "2 - wide mode\n");
+ break;
+ }
+ break;
+ };
+
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+/**
+ * sca3000_store_measurement_mode() set the current mode
+ **/
+static ssize_t
+sca3000_store_measurement_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct sca3000_state *st = dev_info->dev_data;
+ int ret;
+ u8 *rx;
+ int mask = 0x03;
+ long val;
+
+ mutex_lock(&st->lock);
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1);
+ if (ret)
+ goto error_ret;
+ rx[1] &= ~mask;
+ rx[1] |= (val & mask);
+ ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, rx[1]);
+ if (ret)
+ goto error_free_rx;
+ mutex_unlock(&st->lock);
+
+ return len;
+
+error_free_rx:
+ kfree(rx);
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+
+/* Not even vaguely standard attributes so defined here rather than
+ * in the relevant IIO core headers
+ */
+static IIO_DEVICE_ATTR(available_measurement_modes, S_IRUGO,
+ sca3000_show_available_measurement_modes,
+ NULL, 0);
+
+static IIO_DEVICE_ATTR(measurement_mode, S_IRUGO | S_IWUSR,
+ sca3000_show_measurement_mode,
+ sca3000_store_measurement_mode,
+ 0);
+
+/* More standard attributes */
+
+static IIO_DEV_ATTR_NAME(sca3000_show_name);
+static IIO_DEV_ATTR_REV(sca3000_show_rev);
+
+/* Raw axis readings; the final macro argument is the MSB register address
+ * that sca3000_read_13bit_signed receives through attr->address. */
+static IIO_DEV_ATTR_ACCEL_X(sca3000_read_13bit_signed,
+ SCA3000_REG_ADDR_X_MSB);
+static IIO_DEV_ATTR_ACCEL_Y(sca3000_read_13bit_signed,
+ SCA3000_REG_ADDR_Y_MSB);
+static IIO_DEV_ATTR_ACCEL_Z(sca3000_read_13bit_signed,
+ SCA3000_REG_ADDR_Z_MSB);
+
+
+/**
+ * sca3000_read_av_freq() sysfs function to get available frequencies
+ *
+ * The later modes are only relevant to the ring buffer - and depend on current
+ * mode. Note that data sheet gives rather wide tolerances for these so integer
+ * division will give good enough answer and not all chips have them specified
+ * at all.
+ *
+ * NOTE(review): in motion detection mode (0x03) no case matches, so an
+ * empty string is returned (len stays 0).
+ **/
+static ssize_t sca3000_read_av_freq(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct sca3000_state *st = indio_dev->dev_data;
+ int len = 0, ret;
+ u8 *rx;
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1);
+ mutex_unlock(&st->lock);
+ if (ret)
+ /* rx already freed by sca3000_read_data on failure */
+ goto error_ret;
+ rx[1] &= 0x03;
+ switch (rx[1]) {
+ case SCA3000_MEAS_MODE_NORMAL:
+ len += sprintf(buf + len, "%d %d %d\n",
+ st->info->measurement_mode_freq,
+ st->info->measurement_mode_freq/2,
+ st->info->measurement_mode_freq/4);
+ break;
+ case SCA3000_MEAS_MODE_OP_1:
+ len += sprintf(buf + len, "%d %d %d\n",
+ st->info->option_mode_1_freq,
+ st->info->option_mode_1_freq/2,
+ st->info->option_mode_1_freq/4);
+ break;
+ case SCA3000_MEAS_MODE_OP_2:
+ len += sprintf(buf + len, "%d %d %d\n",
+ st->info->option_mode_2_freq,
+ st->info->option_mode_2_freq/2,
+ st->info->option_mode_2_freq/4);
+ break;
+ };
+ kfree(rx);
+ return len;
+error_ret:
+ return ret;
+}
+/**
+ * __sca3000_get_base_frequency() obtain mode specific base frequency
+ * @st: device instance state
+ * @info: chip variant information
+ * @base_freq: output; untouched for modes not listed below (e.g. motion
+ * detection), so callers initialise it - see sca3000_read_frequency
+ *
+ * lock must be held
+ **/
+static inline int __sca3000_get_base_freq(struct sca3000_state *st,
+ const struct sca3000_chip_info *info,
+ int *base_freq)
+{
+ int ret;
+ u8 *rx;
+
+ ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1);
+ if (ret)
+ /* rx already freed by sca3000_read_data on failure */
+ goto error_ret;
+ /* bottom two bits of MODE select the measurement mode */
+ switch (0x03 & rx[1]) {
+ case SCA3000_MEAS_MODE_NORMAL:
+ *base_freq = info->measurement_mode_freq;
+ break;
+ case SCA3000_MEAS_MODE_OP_1:
+ *base_freq = info->option_mode_1_freq;
+ break;
+ case SCA3000_MEAS_MODE_OP_2:
+ *base_freq = info->option_mode_2_freq;
+ break;
+ };
+ kfree(rx);
+error_ret:
+ return ret;
+}
+
+/**
+ * sca3000_read_frequency() sysfs interface to get the current frequency
+ *
+ * Combines the mode-specific base frequency with the OUT_CTRL divider
+ * bits (0x00 and 0x03 both mean undivided; 0x01 = /2, 0x02 = /4).
+ **/
+static ssize_t sca3000_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct sca3000_state *st = indio_dev->dev_data;
+ int ret, len = 0, base_freq = 0;
+ u8 *rx;
+ mutex_lock(&st->lock);
+ ret = __sca3000_get_base_freq(st, st->info, &base_freq);
+ if (ret)
+ goto error_ret_mut;
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL, &rx);
+ mutex_unlock(&st->lock);
+ if (ret)
+ /* no buffer to free: read path releases it on failure */
+ goto error_ret;
+ /* base_freq stays 0 (empty output) for modes with no known frequency */
+ if (base_freq > 0)
+ switch (rx[1]&0x03) {
+ case 0x00:
+ case 0x03:
+ len = sprintf(buf, "%d\n", base_freq);
+ break;
+ case 0x01:
+ len = sprintf(buf, "%d\n", base_freq/2);
+ break;
+ case 0x02:
+ len = sprintf(buf, "%d\n", base_freq/4);
+ break;
+ };
+ kfree(rx);
+ return len;
+error_ret_mut:
+ mutex_unlock(&st->lock);
+error_ret:
+ return ret;
+}
+
+/**
+ * sca3000_set_frequency() sysfs interface to set the current frequency
+ **/
+static ssize_t sca3000_set_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct sca3000_state *st = indio_dev->dev_data;
+ int ret, base_freq = 0;
+ u8 *rx;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&st->lock);
+ /* What mode are we in? */
+ ret = __sca3000_get_base_freq(st, st->info, &base_freq);
+ if (ret)
+ goto error_free_lock;
+
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL, &rx);
+ if (ret)
+ goto error_free_lock;
+ /* clear the bits */
+ rx[1] &= ~0x03;
+
+ if (val == base_freq/2) {
+ rx[1] |= SCA3000_OUT_CTRL_BUF_DIV_2;
+ } else if (val == base_freq/4) {
+ rx[1] |= SCA3000_OUT_CTRL_BUF_DIV_4;
+ } else if (val != base_freq) {
+ ret = -EINVAL;
+ goto error_free_lock;
+ }
+ ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL, rx[1]);
+error_free_lock:
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+/* Should only really be registered if ring buffer support is compiled in.
+ * Does no harm however and doing it right would add a fair bit of complexity
+ */
+static IIO_DEV_ATTR_AVAIL_SAMP_FREQ(sca3000_read_av_freq);
+
+/* sampling_frequency: read/write, backed by the OUT_CTRL divider bits */
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ sca3000_read_frequency,
+ sca3000_set_frequency);
+
+
+/**
+ * sca3000_read_temp() sysfs interface to get the temperature when available
+ *
+* The alignment of data in here is downright odd. See data sheet.
+* Converting this into a meaningful value is left to inline functions in
+* userspace part of header.
+**/
+static ssize_t sca3000_read_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct sca3000_state *st = indio_dev->dev_data;
+ int len = 0, ret;
+ int val;
+ u8 *rx;
+ ret = sca3000_read_data(st, SCA3000_REG_ADDR_TEMP_MSB, &rx, 2);
+ if (ret < 0)
+ goto error_ret;
+ val = ((rx[1]&0x3F) << 3) | ((rx[2] & 0xE0) >> 5);
+ len += sprintf(buf + len, "%d\n", val);
+ kfree(rx);
+
+ return len;
+
+error_ret:
+ return ret;
+}
+static IIO_DEV_ATTR_TEMP(sca3000_read_temp);
+
+/**
+ * sca3000_show_thresh() sysfs query of a motion detection threshold
+ *
+ * The attribute's address field holds the multiplexed MD_*_TH control
+ * register selector for the relevant axis.
+ **/
+static ssize_t sca3000_show_thresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct sca3000_state *st = indio_dev->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int len = 0, ret;
+ u8 *rx;
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_ctrl_reg(st,
+ this_attr->address,
+ &rx);
+ mutex_unlock(&st->lock);
+ if (ret)
+ return ret;
+ /* payload at rx[1]; buffer from sca3000_read_data is len + 1 bytes */
+ len += sprintf(buf + len, "%d\n", rx[1]);
+ kfree(rx);
+
+ return len;
+}
+
+/**
+ * sca3000_write_thresh() sysfs control of threshold
+ **/
+static ssize_t sca3000_write_thresh(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct sca3000_state *st = indio_dev->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
+ ret = sca3000_write_ctrl_reg(st, this_attr->address, val);
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+/* Per-axis motion-detector threshold attributes; the final argument is the
+ * multiplexed control register selector passed via attr->address. */
+static IIO_DEV_ATTR_ACCEL_THRESH_X(S_IRUGO | S_IWUSR,
+ sca3000_show_thresh,
+ sca3000_write_thresh,
+ SCA3000_REG_CTRL_SEL_MD_X_TH);
+static IIO_DEV_ATTR_ACCEL_THRESH_Y(S_IRUGO | S_IWUSR,
+ sca3000_show_thresh,
+ sca3000_write_thresh,
+ SCA3000_REG_CTRL_SEL_MD_Y_TH);
+static IIO_DEV_ATTR_ACCEL_THRESH_Z(S_IRUGO | S_IWUSR,
+ sca3000_show_thresh,
+ sca3000_write_thresh,
+ SCA3000_REG_CTRL_SEL_MD_Z_TH);
+
+/* The two tables below must be kept in sync: the _with_temp variant is
+ * identical except for the extra temp attribute at the end, and is used
+ * for chip variants with info->temp_output set. */
+static struct attribute *sca3000_attributes[] = {
+ &iio_dev_attr_name.dev_attr.attr,
+ &iio_dev_attr_revision.dev_attr.attr,
+ &iio_dev_attr_accel_x.dev_attr.attr,
+ &iio_dev_attr_accel_y.dev_attr.attr,
+ &iio_dev_attr_accel_z.dev_attr.attr,
+ &iio_dev_attr_thresh_accel_x.dev_attr.attr,
+ &iio_dev_attr_thresh_accel_y.dev_attr.attr,
+ &iio_dev_attr_thresh_accel_z.dev_attr.attr,
+ &iio_dev_attr_available_measurement_modes.dev_attr.attr,
+ &iio_dev_attr_measurement_mode.dev_attr.attr,
+ &iio_dev_attr_available_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute *sca3000_attributes_with_temp[] = {
+ &iio_dev_attr_name.dev_attr.attr,
+ &iio_dev_attr_revision.dev_attr.attr,
+ &iio_dev_attr_accel_x.dev_attr.attr,
+ &iio_dev_attr_accel_y.dev_attr.attr,
+ &iio_dev_attr_accel_z.dev_attr.attr,
+ &iio_dev_attr_thresh_accel_x.dev_attr.attr,
+ &iio_dev_attr_thresh_accel_y.dev_attr.attr,
+ &iio_dev_attr_thresh_accel_z.dev_attr.attr,
+ &iio_dev_attr_available_measurement_modes.dev_attr.attr,
+ &iio_dev_attr_measurement_mode.dev_attr.attr,
+ &iio_dev_attr_available_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ /* Only present if temp sensor is */
+ &iio_dev_attr_temp.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group sca3000_attribute_group = {
+ .attrs = sca3000_attributes,
+};
+
+static const struct attribute_group sca3000_attribute_group_with_temp = {
+ .attrs = sca3000_attributes_with_temp,
+};
+
+/* RING RELATED interrupt handler */
+/* depending on event, push to the ring buffer event chrdev or the event one */
+
+/**
+ * sca3000_interrupt_handler_bh() - handling ring and non ring events
+ *
+ * This function is complicated by the fact that the devices can signify ring
+ * and non ring events via the same interrupt line and they can only
+ * be distinguished via a read of the relevant status register.
+ **/
+static void sca3000_interrupt_handler_bh(struct work_struct *work_s)
+{
+ struct sca3000_state *st
+ = container_of(work_s, struct sca3000_state,
+ interrupt_handler_ws);
+ u8 *rx;
+ int ret;
+
+ /* Could lead if badly timed to an extra read of status reg,
+ * but ensures no interrupt is missed.
+ */
+ enable_irq(st->us->irq);
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data(st, SCA3000_REG_ADDR_INT_STATUS,
+ &rx, 1);
+ mutex_unlock(&st->lock);
+ if (ret)
+ goto done;
+
+ sca3000_ring_int_process(rx[1], st->indio_dev->ring);
+
+ if (rx[1] & SCA3000_INT_STATUS_FREE_FALL)
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_FREE_FALL,
+ st->last_timestamp);
+
+ if (rx[1] & SCA3000_INT_STATUS_Y_TRIGGER)
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_ACCEL_Y_HIGH,
+ st->last_timestamp);
+
+ if (rx[1] & SCA3000_INT_STATUS_X_TRIGGER)
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_ACCEL_X_HIGH,
+ st->last_timestamp);
+
+ if (rx[1] & SCA3000_INT_STATUS_Z_TRIGGER)
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_ACCEL_Z_HIGH,
+ st->last_timestamp);
+
+done:
+ kfree(rx);
+ return;
+}
+
+/**
+ * sca3000_handler_th() handles all interrupt events from device
+ *
+ * Top half only: record the timestamp and defer the register reads to
+ * the workqueue (sca3000_interrupt_handler_bh) - presumably because the
+ * SPI status read sleeps. @index and @no_test are unused here.
+ *
+ * These devices deploy unified interrupt status registers meaning
+ * all interrupts must be handled together
+ **/
+static int sca3000_handler_th(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct sca3000_state *st = dev_info->dev_data;
+
+ st->last_timestamp = timestamp;
+ schedule_work(&st->interrupt_handler_ws);
+
+ return 0;
+}
+
+/**
+ * sca3000_query_mo_det() - is motion detection enabled for this axis
+ * @dev:	device the attribute belongs to
+ * @attr:	event attribute; ->mask selects the axis bit in MD_CTRL
+ * @buf:	output buffer, receives "0\n" or "1\n"
+ *
+ * First queries if motion detection is enabled and then if this axis is
+ * on.
+ **/
+static ssize_t sca3000_query_mo_det(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct sca3000_state *st = indio_dev->dev_data;
+	struct iio_event_attr *this_attr = to_iio_event_attr(attr);
+	int ret, len = 0;
+	u8 *rx;
+	u8 protect_mask = 0x03;	/* mode bits of the mode register */
+
+	/* read current value of mode register */
+	mutex_lock(&st->lock);
+	ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1);
+	if (ret)
+		goto error_ret;
+
+	/* Not in motion detection mode at all -> the axis is off */
+	if ((rx[1]&protect_mask) != SCA3000_MEAS_MODE_MOT_DET)
+		len += sprintf(buf + len, "0\n");
+	else {
+		kfree(rx);
+		ret = sca3000_read_ctrl_reg(st,
+					    SCA3000_REG_CTRL_SEL_MD_CTRL,
+					    &rx);
+		if (ret)
+			goto error_ret;
+		/* only supporting logical or's for now */
+		len += sprintf(buf + len, "%d\n",
+			       (rx[1] & this_attr->mask) ? 1 : 0);
+	}
+	kfree(rx);
+error_ret:
+	/* NOTE(review): error paths reach here without freeing rx -
+	 * assumes the read helpers do not allocate on failure; verify. */
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+/**
+ * sca3000_query_free_fall_mode() - is free fall mode enabled
+ * @dev:	device the attribute belongs to
+ * @attr:	the free fall event attribute (unused)
+ * @buf:	output buffer, receives "0\n" or "1\n"
+ *
+ * Reports the SCA3000_FREE_FALL_DETECT bit of the mode register.
+ **/
+static ssize_t sca3000_query_free_fall_mode(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	int ret, len;
+	u8 *rx;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct sca3000_state *st = indio_dev->dev_data;
+
+	mutex_lock(&st->lock);
+	ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1);
+	mutex_unlock(&st->lock);
+	if (ret)
+		return ret;
+	len = sprintf(buf, "%d\n",
+		      !!(rx[1] & SCA3000_FREE_FALL_DETECT));
+	kfree(rx);
+
+	return len;
+}
+/**
+ * sca3000_query_ring_int() - is the hardware ring status interrupt enabled
+ * @dev:	device the attribute belongs to
+ * @attr:	event attribute; ->mask selects 50% or 75% full interrupt
+ * @buf:	output buffer, receives "0\n" or "1\n"
+ *
+ * Reports the relevant bit of the interrupt mask register.
+ **/
+static ssize_t sca3000_query_ring_int(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct iio_event_attr *this_attr = to_iio_event_attr(attr);
+	int ret, len;
+	u8 *rx;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct sca3000_state *st = indio_dev->dev_data;
+	mutex_lock(&st->lock);
+	ret = sca3000_read_data(st, SCA3000_REG_ADDR_INT_MASK, &rx, 1);
+	mutex_unlock(&st->lock);
+	if (ret)
+		return ret;
+	len = sprintf(buf, "%d\n", (rx[1] & this_attr->mask) ? 1 : 0);
+	kfree(rx);
+
+	return len;
+}
+/**
+ * sca3000_set_ring_int() - set state of ring status interrupt
+ * @dev:	device the attribute belongs to
+ * @attr:	event attribute; ->mask selects 50% or 75% full interrupt
+ * @buf:	"0" disables, any other number enables
+ * @len:	length of input
+ *
+ * Read-modify-write of the relevant bit in the interrupt mask register.
+ **/
+static ssize_t sca3000_set_ring_int(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t len)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct sca3000_state *st = indio_dev->dev_data;
+	struct iio_event_attr *this_attr = to_iio_event_attr(attr);
+
+	long val;
+	int ret;
+	u8 *rx;
+
+	/* Parse before taking the lock; it needs no device state
+	 * (matches sca3000_set_mo_det()). */
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&st->lock);
+	ret = sca3000_read_data(st, SCA3000_REG_ADDR_INT_MASK, &rx, 1);
+	if (ret)
+		goto error_ret;
+	if (val)
+		ret = sca3000_write_reg(st,
+					SCA3000_REG_ADDR_INT_MASK,
+					rx[1] | this_attr->mask);
+	else
+		ret = sca3000_write_reg(st,
+					SCA3000_REG_ADDR_INT_MASK,
+					rx[1] & ~this_attr->mask);
+	kfree(rx);
+error_ret:
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+
+/**
+ * sca3000_set_free_fall_mode() - simple on off control for free fall int
+ * @dev:	device the attribute belongs to
+ * @attr:	the free fall event attribute (unused)
+ * @buf:	"0" disables, any other number enables
+ * @len:	length of input
+ *
+ * In these chips the free fall detector should send an interrupt if
+ * the device falls more than 25cm. This has not been tested due
+ * to fragile wiring.
+ **/
+static ssize_t sca3000_set_free_fall_mode(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t len)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct sca3000_state *st = indio_dev->dev_data;
+	long val;
+	int ret;
+	u8 *rx;
+	u8 protect_mask = SCA3000_FREE_FALL_DETECT;
+
+	/* Parse before taking the lock; it needs no device state
+	 * (matches sca3000_set_mo_det()). */
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&st->lock);
+	/* read current value of mode register */
+	ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1);
+	if (ret)
+		goto error_ret;
+
+	/* if off and should be on */
+	if (val && !(rx[1] & protect_mask))
+		ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
+					(rx[1] | SCA3000_FREE_FALL_DETECT));
+	/* if on and should be off */
+	else if (!val && (rx[1] & protect_mask))
+		ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
+					(rx[1] & ~protect_mask));
+
+	kfree(rx);
+error_ret:
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+
+/**
+ * sca3000_set_mo_det() - simple on off control for motion detector
+ * @dev:	device the attribute belongs to
+ * @attr:	event attribute; ->mask selects the axis bit in MD_CTRL
+ * @buf:	"0" disables this axis, any other number enables it
+ * @len:	length of input
+ *
+ * This is a per axis control, but enabling any will result in the
+ * motion detector unit being enabled.
+ * N.B. enabling motion detector stops normal data acquisition.
+ * There is a complexity in knowing which mode to return to when
+ * this mode is disabled. Currently normal mode is assumed.
+ **/
+static ssize_t sca3000_set_mo_det(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t len)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct sca3000_state *st = indio_dev->dev_data;
+	struct iio_event_attr *this_attr = to_iio_event_attr(attr);
+	long val;
+	int ret;
+	u8 *rx;
+	u8 protect_mask = 0x03;	/* mode bits of the mode register */
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&st->lock);
+	/* First read the motion detector config to find out if
+	 * this axis is on*/
+	ret = sca3000_read_ctrl_reg(st,
+				    SCA3000_REG_CTRL_SEL_MD_CTRL,
+				    &rx);
+	if (ret)
+		goto exit_point;
+	/* Off and should be on */
+	if (val && !(rx[1] & this_attr->mask)) {
+		ret = sca3000_write_ctrl_reg(st,
+					     SCA3000_REG_CTRL_SEL_MD_CTRL,
+					     rx[1] | this_attr->mask);
+		if (ret)
+			goto exit_point_free_rx;
+		/* mo_det_use_count tracks how many axes are enabled */
+		st->mo_det_use_count++;
+	} else if (!val && (rx[1]&this_attr->mask)) {
+		ret = sca3000_write_ctrl_reg(st,
+					     SCA3000_REG_CTRL_SEL_MD_CTRL,
+					     rx[1] & ~(this_attr->mask));
+		if (ret)
+			goto exit_point_free_rx;
+		st->mo_det_use_count--;
+	} else /* relies on clean state for device on boot */
+		goto exit_point_free_rx;
+	kfree(rx);
+	/* read current value of mode register */
+	ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1);
+	if (ret)
+		goto exit_point;
+	/* if off and should be on: first axis enabled switches the chip
+	 * into motion detection measurement mode */
+	if ((st->mo_det_use_count)
+	    && ((rx[1]&protect_mask) != SCA3000_MEAS_MODE_MOT_DET))
+		ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
+					(rx[1] & ~protect_mask)
+					| SCA3000_MEAS_MODE_MOT_DET);
+	/* if on and should be off: last axis disabled returns the chip
+	 * to normal measurement mode */
+	else if (!(st->mo_det_use_count)
+		 && ((rx[1]&protect_mask) == SCA3000_MEAS_MODE_MOT_DET))
+		ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
+					(rx[1] & ~protect_mask));
+exit_point_free_rx:
+	kfree(rx);
+exit_point:
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+
+/* Shared event handler for all events as single event status register.
+ * Every attribute below routes through sca3000_handler_th(). */
+IIO_EVENT_SH(all, &sca3000_handler_th);
+
+/* Free fall detector related event attribute */
+IIO_EVENT_ATTR_FREE_FALL_DETECT_SH(iio_event_all,
+				   sca3000_query_free_fall_mode,
+				   sca3000_set_free_fall_mode,
+				   0)
+
+/* Motion detector related event attributes; the final argument is the
+ * per-axis bit used as ->mask in the MD_CTRL register accessors */
+IIO_EVENT_ATTR_ACCEL_X_HIGH_SH(iio_event_all,
+			       sca3000_query_mo_det,
+			       sca3000_set_mo_det,
+			       SCA3000_MD_CTRL_OR_X);
+
+IIO_EVENT_ATTR_ACCEL_Y_HIGH_SH(iio_event_all,
+			       sca3000_query_mo_det,
+			       sca3000_set_mo_det,
+			       SCA3000_MD_CTRL_OR_Y);
+
+IIO_EVENT_ATTR_ACCEL_Z_HIGH_SH(iio_event_all,
+			       sca3000_query_mo_det,
+			       sca3000_set_mo_det,
+			       SCA3000_MD_CTRL_OR_Z);
+
+/* Hardware ring buffer related event attributes */
+IIO_EVENT_ATTR_RING_50_FULL_SH(iio_event_all,
+			       sca3000_query_ring_int,
+			       sca3000_set_ring_int,
+			       SCA3000_INT_MASK_RING_HALF);
+
+IIO_EVENT_ATTR_RING_75_FULL_SH(iio_event_all,
+			       sca3000_query_ring_int,
+			       sca3000_set_ring_int,
+			       SCA3000_INT_MASK_RING_THREE_QUARTER);
+
+/* All event attributes exposed under the device's event chrdev */
+static struct attribute *sca3000_event_attributes[] = {
+	&iio_event_attr_free_fall.dev_attr.attr,
+	&iio_event_attr_accel_x_high.dev_attr.attr,
+	&iio_event_attr_accel_y_high.dev_attr.attr,
+	&iio_event_attr_accel_z_high.dev_attr.attr,
+	&iio_event_attr_ring_50_full.dev_attr.attr,
+	&iio_event_attr_ring_75_full.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group sca3000_event_attribute_group = {
+	.attrs = sca3000_event_attributes,
+};
+
+/**
+ * sca3000_clean_setup() - get the device into a predictable state
+ * @st:	driver state
+ *
+ * Devices use flash memory to store many of the register values
+ * and hence can come up in somewhat unpredictable states.
+ * Hence reset everything on driver load.
+ *
+ * Sequence: ack pending interrupts, disable motion detection, enable
+ * the ring on all three axes at 1/4 rate, set interrupts active low,
+ * then select normal measurement mode with the ring in 11 bit mode.
+ **/
+static int sca3000_clean_setup(struct sca3000_state *st)
+{
+	int ret;
+	u8 *rx;
+
+	mutex_lock(&st->lock);
+	/* Ensure all interrupts have been acknowledged */
+	ret = sca3000_read_data(st, SCA3000_REG_ADDR_INT_STATUS, &rx, 1);
+	if (ret)
+		goto error_ret;
+	kfree(rx);
+
+	/* Turn off all motion detection channels */
+	ret = sca3000_read_ctrl_reg(st,
+				    SCA3000_REG_CTRL_SEL_MD_CTRL,
+				    &rx);
+	if (ret)
+		goto error_ret;
+	ret = sca3000_write_ctrl_reg(st,
+				     SCA3000_REG_CTRL_SEL_MD_CTRL,
+				     rx[1] & SCA3000_MD_CTRL_PROT_MASK);
+	kfree(rx);
+	if (ret)
+		goto error_ret;
+
+	/* Disable ring buffer */
+	ret = sca3000_read_ctrl_reg(st,
+				    SCA3000_REG_CTRL_SEL_OUT_CTRL,
+				    &rx);
+	/* Previously unchecked: on failure rx would be used uninitialized */
+	if (ret)
+		goto error_ret;
+	/* Frequency of ring buffer sampling deliberately restricted to make
+	 * debugging easier - add control of this later */
+	ret = sca3000_write_ctrl_reg(st,
+				     SCA3000_REG_CTRL_SEL_OUT_CTRL,
+				     (rx[1] & SCA3000_OUT_CTRL_PROT_MASK)
+				     | SCA3000_OUT_CTRL_BUF_X_EN
+				     | SCA3000_OUT_CTRL_BUF_Y_EN
+				     | SCA3000_OUT_CTRL_BUF_Z_EN
+				     | SCA3000_OUT_CTRL_BUF_DIV_4);
+	kfree(rx);
+	if (ret)
+		goto error_ret;
+	/* Enable interrupts, relevant to mode and set up as active low */
+	ret = sca3000_read_data(st,
+				SCA3000_REG_ADDR_INT_MASK,
+				&rx, 1);
+	if (ret)
+		goto error_ret;
+	ret = sca3000_write_reg(st,
+				SCA3000_REG_ADDR_INT_MASK,
+				(rx[1] & SCA3000_INT_MASK_PROT_MASK)
+				| SCA3000_INT_MASK_ACTIVE_LOW);
+	kfree(rx);
+	if (ret)
+		goto error_ret;
+	/* Select normal measurement mode, free fall off, ring off */
+	/* Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5
+	 * as that occurs in one of the example on the datasheet */
+	ret = sca3000_read_data(st,
+				SCA3000_REG_ADDR_MODE,
+				&rx, 1);
+	if (ret)
+		goto error_ret;
+	ret = sca3000_write_reg(st,
+				SCA3000_REG_ADDR_MODE,
+				(rx[1] & SCA3000_MODE_PROT_MASK));
+	kfree(rx);
+	st->bpse = 11;
+
+error_ret:
+	mutex_unlock(&st->lock);
+	return ret;
+}
+
+/**
+ * __sca3000_probe() - common probe for all sca3000 variants
+ * @spi:	the spi device being probed
+ * @variant:	index into sca3000_spi_chip_info_tbl
+ *
+ * Allocates state and bus buffers, registers the IIO device and its
+ * hardware ring, hooks up the (optional) interrupt line and finally
+ * resets the chip into a known state.  Unwinds in reverse order via
+ * the error label ladder on failure.
+ **/
+static int __devinit __sca3000_probe(struct spi_device *spi,
+				     enum sca3000_variant variant)
+{
+	/* regdone tracks whether iio_device_register() succeeded so the
+	 * error path knows to unregister rather than just free */
+	int ret, regdone = 0;
+	struct sca3000_state *st;
+
+	st = kzalloc(sizeof(struct sca3000_state), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	spi_set_drvdata(spi, st);
+
+	/* scratch buffers for spi transfers */
+	st->tx = kmalloc(sizeof(*st->tx)*6, GFP_KERNEL);
+	if (st->tx == NULL) {
+		ret = -ENOMEM;
+		goto error_clear_st;
+	}
+	st->rx = kmalloc(sizeof(*st->rx)*3, GFP_KERNEL);
+	if (st->rx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_tx;
+	}
+	st->us = spi;
+	mutex_init(&st->lock);
+	st->info = &sca3000_spi_chip_info_tbl[variant];
+
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_rx;
+	}
+
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->num_interrupt_lines = 1;
+	st->indio_dev->event_attrs = &sca3000_event_attribute_group;
+	/* variants with a temp sensor get the extended attribute set */
+	if (st->info->temp_output)
+		st->indio_dev->attrs = &sca3000_attribute_group_with_temp;
+	else
+		st->indio_dev->attrs = &sca3000_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	sca3000_configure_ring(st->indio_dev);
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret < 0)
+		goto error_free_dev;
+	regdone = 1;
+	ret = iio_ring_buffer_register(st->indio_dev->ring);
+	if (ret < 0)
+		goto error_unregister_dev;
+	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
+		INIT_WORK(&st->interrupt_handler_ws,
+			  sca3000_interrupt_handler_bh);
+		ret = iio_register_interrupt_line(spi->irq,
+						  st->indio_dev,
+						  0,
+						  IRQF_TRIGGER_FALLING,
+						  "sca3000");
+		if (ret)
+			goto error_unregister_ring;
+		/* RFC
+		 * Probably a common situation. All interrupts need an ack
+		 * and there is only one handler so the complicated list system
+		 * is overkill. At very least a simpler registration method
+		 * might be worthwhile.
+		 */
+		iio_add_event_to_list(iio_event_attr_accel_z_high.listel,
+				      &st->indio_dev
+				      ->interrupts[0]->ev_list);
+	}
+	sca3000_register_ring_funcs(st->indio_dev);
+	ret = sca3000_clean_setup(st);
+	if (ret)
+		goto error_unregister_interrupt_line;
+	return 0;
+
+error_unregister_interrupt_line:
+	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+		iio_unregister_interrupt_line(st->indio_dev, 0);
+error_unregister_ring:
+	iio_ring_buffer_unregister(st->indio_dev->ring);
+error_unregister_dev:
+error_free_dev:
+	if (regdone)
+		iio_device_unregister(st->indio_dev);
+	else
+		iio_free_device(st->indio_dev);
+error_free_rx:
+	kfree(st->rx);
+error_free_tx:
+	kfree(st->tx);
+error_clear_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+/**
+ * sca3000_stop_all_interrupts() - mask every interrupt source
+ * @st:	driver state
+ *
+ * Clears the ring and general interrupt enable bits in INT_MASK so the
+ * device can raise no further interrupts (used on driver removal).
+ **/
+static int sca3000_stop_all_interrupts(struct sca3000_state *st)
+{
+	int ret;
+	u8 *rx;
+
+	mutex_lock(&st->lock);
+	ret = sca3000_read_data(st, SCA3000_REG_ADDR_INT_MASK, &rx, 1);
+	if (ret)
+		goto error_ret;
+	ret = sca3000_write_reg(st, SCA3000_REG_ADDR_INT_MASK,
+				(rx[1] & ~(SCA3000_INT_MASK_RING_THREE_QUARTER
+					   | SCA3000_INT_MASK_RING_HALF
+					   | SCA3000_INT_MASK_ALL_INTS)));
+	/* only free rx once the read has actually populated it */
+	kfree(rx);
+error_ret:
+	/* previously the mutex was never released here */
+	mutex_unlock(&st->lock);
+	return ret;
+}
+
+/**
+ * sca3000_remove() - undo everything done in __sca3000_probe()
+ * @spi:	the spi device being removed
+ *
+ * Tear down in reverse of probe: silence the chip first so no new
+ * interrupts fire, then unhook the irq, ring and IIO registration.
+ **/
+static int sca3000_remove(struct spi_device *spi)
+{
+	struct sca3000_state *st = spi_get_drvdata(spi);
+	struct iio_dev *indio_dev = st->indio_dev;
+	int ret;
+	/* Must ensure no interrupts can be generated after this!*/
+	ret = sca3000_stop_all_interrupts(st);
+	if (ret)
+		return ret;
+	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+		iio_unregister_interrupt_line(indio_dev, 0);
+	iio_ring_buffer_unregister(indio_dev->ring);
+	sca3000_unconfigure_ring(indio_dev);
+	iio_device_unregister(indio_dev);
+
+	kfree(st->tx);
+	kfree(st->rx);
+	kfree(st);
+
+	return 0;
+}
+
+/* These macros save on an awful lot of repeated code */
+
+/* Generate a probe function hard wired to one enum sca3000_variant */
+#define SCA3000_VARIANT_PROBE(_name) \
+	static int __devinit \
+	sca3000_##_name##_probe(struct spi_device *spi) \
+	{ \
+		return __sca3000_probe(spi, _name); \
+	}
+
+/* Generate the matching spi_driver named "sca3000_<variant>" */
+#define SCA3000_VARIANT_SPI_DRIVER(_name) \
+	struct spi_driver sca3000_##_name##_driver = { \
+		.driver = { \
+			.name = "sca3000_" #_name, \
+			.owner = THIS_MODULE, \
+		}, \
+		.probe = sca3000_##_name##_probe, \
+		.remove = __devexit_p(sca3000_remove), \
+	}
+
+/* Instantiate one probe function and one spi_driver per supported variant */
+SCA3000_VARIANT_PROBE(d01);
+static SCA3000_VARIANT_SPI_DRIVER(d01);
+
+SCA3000_VARIANT_PROBE(d03);
+static SCA3000_VARIANT_SPI_DRIVER(d03);
+
+SCA3000_VARIANT_PROBE(e02);
+static SCA3000_VARIANT_SPI_DRIVER(e02);
+
+SCA3000_VARIANT_PROBE(e04);
+static SCA3000_VARIANT_SPI_DRIVER(e04);
+
+SCA3000_VARIANT_PROBE(e05);
+static SCA3000_VARIANT_SPI_DRIVER(e05);
+
+SCA3000_VARIANT_PROBE(l01);
+static SCA3000_VARIANT_SPI_DRIVER(l01);
+
+/* Register every variant driver; on failure unwind the ones already
+ * registered in reverse order via the label ladder */
+static __init int sca3000_init(void)
+{
+	int ret;
+
+	ret = spi_register_driver(&sca3000_d01_driver);
+	if (ret)
+		goto error_ret;
+	ret = spi_register_driver(&sca3000_d03_driver);
+	if (ret)
+		goto error_unreg_d01;
+	ret = spi_register_driver(&sca3000_e02_driver);
+	if (ret)
+		goto error_unreg_d03;
+	ret = spi_register_driver(&sca3000_e04_driver);
+	if (ret)
+		goto error_unreg_e02;
+	ret = spi_register_driver(&sca3000_e05_driver);
+	if (ret)
+		goto error_unreg_e04;
+	ret = spi_register_driver(&sca3000_l01_driver);
+	if (ret)
+		goto error_unreg_e05;
+
+	return 0;
+
+error_unreg_e05:
+	spi_unregister_driver(&sca3000_e05_driver);
+error_unreg_e04:
+	spi_unregister_driver(&sca3000_e04_driver);
+error_unreg_e02:
+	spi_unregister_driver(&sca3000_e02_driver);
+error_unreg_d03:
+	spi_unregister_driver(&sca3000_d03_driver);
+error_unreg_d01:
+	spi_unregister_driver(&sca3000_d01_driver);
+error_ret:
+
+	return ret;
+}
+
+/* Unregister all variant drivers, reverse of registration order */
+static __exit void sca3000_exit(void)
+{
+	spi_unregister_driver(&sca3000_l01_driver);
+	spi_unregister_driver(&sca3000_e05_driver);
+	spi_unregister_driver(&sca3000_e04_driver);
+	spi_unregister_driver(&sca3000_e02_driver);
+	spi_unregister_driver(&sca3000_d03_driver);
+	spi_unregister_driver(&sca3000_d01_driver);
+}
+
+module_init(sca3000_init);
+module_exit(sca3000_exit);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
+MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
new file mode 100644
index 0000000..d5ea237
--- /dev/null
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -0,0 +1,331 @@
+/*
+ * sca3000_ring.c -- support VTI sca3000 series accelerometers via SPI
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Copyright (c) 2009 Jonathan Cameron <jic23@cam.ac.uk>
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_generic.h"
+#include "../ring_hw.h"
+#include "accel.h"
+#include "sca3000.h"
+
+/* RFC / future work
+ *
+ * The internal ring buffer doesn't actually change what it holds depending
+ * on which signals are enabled etc, merely whether you can read them.
+ * As such the scan mode selection is somewhat different than for a software
+ * ring buffer and changing it actually covers any data already in the buffer.
+ * Currently scan elements aren't configured so it doesn't matter.
+ */
+
+/**
+ * sca3000_rip_hw_rb() - main ring access function, pulls data from ring
+ * @r:		the ring
+ * @count:	number of samples to try and pull
+ * @data:	output the actual samples pulled from the hw ring
+ * @dead_offset: cheating a bit here: Set to 1 so as to allow for the
+ *		leading byte used in bus comms.
+ *
+ * Currently does not provide timestamps. As the hardware doesn't add them they
+ * can only be inferred approximately from ring buffer events such as 50% full
+ * and knowledge of when buffer was last emptied. This is left to userspace.
+ *
+ * Returns the number of bytes read, or a negative error code.
+ **/
+static int sca3000_rip_hw_rb(struct iio_ring_buffer *r,
+			     size_t count, u8 **data, int *dead_offset)
+{
+	struct iio_hw_ring_buffer *hw_ring = iio_to_hw_ring_buf(r);
+	struct iio_dev *indio_dev = hw_ring->private;
+	struct sca3000_state *st = indio_dev->dev_data;
+	u8 *rx;
+	int ret, num_available, num_read = 0;
+	/* 11 bit samples take two bytes on the wire, 8 bit ones take one */
+	int bytes_per_sample = 1;
+
+	if (st->bpse == 11)
+		bytes_per_sample = 2;
+
+	mutex_lock(&st->lock);
+	/* Check how much data is available:
+	 * RFC: Implement an ioctl to not bother checking whether there
+	 * is enough data in the ring? Afterall, if we are responding
+	 * to an interrupt we have a minimum content guaranteed so it
+	 * seems slight silly to waste time checking it is there.
+	 */
+	ret = sca3000_read_data(st,
+				SCA3000_REG_ADDR_BUF_COUNT,
+				&rx, 1);
+	if (ret)
+		goto error_ret;
+	else
+		num_available = rx[1];
+	/* num_available is the total number of samples available
+	 * i.e. number of time points * number of channels.
+	 */
+	kfree(rx);
+	/* Clamp to what is actually in the hardware ring, and round
+	 * the request down to a whole number of samples */
+	if (count > num_available * bytes_per_sample)
+		num_read = num_available*bytes_per_sample;
+	else
+		num_read = count - (count % (bytes_per_sample));
+
+	/* Avoid the read request byte */
+	*dead_offset = 1;
+	ret = sca3000_read_data(st,
+				SCA3000_REG_ADDR_RING_OUT,
+				data, num_read);
+error_ret:
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : num_read;
+}
+
+/* Fixed hardware ring length in scans.
+ * This is only valid with all 3 elements enabled. */
+static int sca3000_ring_get_length(struct iio_ring_buffer *r)
+{
+	return 64;
+}
+
+/* Bytes per datum: 3 axes x 2 bytes.
+ * Only valid if resolution is kept at 11bits. */
+static int sca3000_ring_get_bpd(struct iio_ring_buffer *r)
+{
+	return 6;
+}
+/* Device release callback: frees the containing iio_hw_ring_buffer
+ * allocated in sca3000_rb_allocate() once the last reference drops */
+static void sca3000_ring_release(struct device *dev)
+{
+	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
+	kfree(iio_to_hw_ring_buf(r));
+}
+
+static IIO_RING_ENABLE_ATTR;
+static IIO_RING_BPS_ATTR;
+static IIO_RING_LENGTH_ATTR;
+
+/**
+ * sca3000_show_ring_bpse() - sysfs function to query bits per sample from ring
+ * @dev:	ring buffer device
+ * @attr:	this device attribute
+ * @buf:	buffer to write to; receives "8\n" or "11\n"
+ *
+ * Derived from the SCA3000_RING_BUF_8BIT bit of the mode register.
+ **/
+static ssize_t sca3000_show_ring_bpse(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	int len = 0, ret;
+	u8 *rx;
+	struct iio_ring_buffer *r = dev_get_drvdata(dev);
+	struct sca3000_state *st = r->indio_dev->dev_data;
+
+	mutex_lock(&st->lock);
+	ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1);
+	if (ret)
+		goto error_ret;
+	len = sprintf(buf, "%d\n", (rx[1] & SCA3000_RING_BUF_8BIT) ? 8 : 11);
+	kfree(rx);
+error_ret:
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+
+/**
+ * sca3000_store_ring_bpse() - bits per scan element
+ * @dev:	ring buffer device
+ * @attr:	attribute called from
+ * @buf:	input from userspace, "8" or "11"
+ * @len:	length of input
+ *
+ * Read-modify-write of the SCA3000_RING_BUF_8BIT flag in the mode
+ * register; the cached st->bpse is only updated once the register
+ * write has succeeded.
+ **/
+static ssize_t sca3000_store_ring_bpse(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t len)
+{
+	struct iio_ring_buffer *r = dev_get_drvdata(dev);
+	struct sca3000_state *st = r->indio_dev->dev_data;
+	int ret;
+	u8 *rx;
+	long val;
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&st->lock);
+
+	ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1);
+	if (!ret) {
+		switch (val) {
+		case 8:
+			ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
+						rx[1] | SCA3000_RING_BUF_8BIT);
+			if (!ret)
+				st->bpse = 8;
+			break;
+		case 11:
+			ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
+						rx[1] & ~SCA3000_RING_BUF_8BIT);
+			if (!ret)
+				st->bpse = 11;
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+		/* rx was allocated by sca3000_read_data(); don't leak it */
+		kfree(rx);
+	}
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+
+static IIO_CONST_ATTR(bpse_available, "8 11");
+
+static IIO_DEV_ATTR_BPSE(S_IRUGO | S_IWUSR,
+ sca3000_show_ring_bpse,
+ sca3000_store_ring_bpse);
+
+/*
+ * Ring buffer attributes
+ * This device is a bit unusual in that the sampling frequency and bpse
+ * only apply to the ring buffer. At all times full rate and accuracy
+ * is available via direct reading from registers.
+ */
+static struct attribute *iio_ring_attributes[] = {
+	&dev_attr_length.attr,
+	&dev_attr_bps.attr,
+	&dev_attr_ring_enable.attr,
+	&iio_dev_attr_bpse.dev_attr.attr,
+	&iio_const_attr_bpse_available.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group sca3000_ring_attr = {
+	.attrs = iio_ring_attributes,
+};
+
+static const struct attribute_group *sca3000_ring_attr_groups[] = {
+	&sca3000_ring_attr,
+	NULL
+};
+
+/* Ties the sysfs attributes and release callback to the ring device */
+static struct device_type sca3000_ring_type = {
+	.release = sca3000_ring_release,
+	.groups = sca3000_ring_attr_groups,
+};
+
+/**
+ * sca3000_rb_allocate() - allocate and initialise the hardware ring buffer
+ * @indio_dev:	device the ring belongs to
+ *
+ * Returns the embedded iio_ring_buffer, or NULL on allocation failure.
+ * The wrapping iio_hw_ring_buffer is freed by sca3000_ring_release().
+ **/
+static struct iio_ring_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
+{
+	struct iio_ring_buffer *buf;
+	struct iio_hw_ring_buffer *ring;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring)
+		return NULL;	/* was "return 0" - this is a pointer */
+	ring->private = indio_dev;
+	buf = &ring->buf;
+	iio_ring_buffer_init(buf, indio_dev);
+	buf->dev.type = &sca3000_ring_type;
+	device_initialize(&buf->dev);
+	buf->dev.parent = &indio_dev->dev;
+	dev_set_drvdata(&buf->dev, (void *)buf);
+
+	return buf;
+}
+
+/* Drop our reference to the ring; actual freeing happens via the
+ * device release callback (sca3000_ring_release) */
+static inline void sca3000_rb_free(struct iio_ring_buffer *r)
+{
+	if (r)
+		iio_put_ring_buffer(r);
+}
+
+/**
+ * sca3000_configure_ring() - allocate the hw ring and wire up its accessors
+ * @indio_dev:	device to attach the ring to
+ *
+ * Called from __sca3000_probe(); marks the device as having a hardware
+ * ring buffer mode.  Returns 0 or -ENOMEM.
+ **/
+int sca3000_configure_ring(struct iio_dev *indio_dev)
+{
+	indio_dev->ring = sca3000_rb_allocate(indio_dev);
+	if (indio_dev->ring == NULL)
+		return -ENOMEM;
+	indio_dev->modes |= INDIO_RING_HARDWARE_BUFFER;
+
+	indio_dev->ring->access.rip_lots = &sca3000_rip_hw_rb;
+	indio_dev->ring->access.get_length = &sca3000_ring_get_length;
+	indio_dev->ring->access.get_bpd = &sca3000_ring_get_bpd;
+
+	return 0;
+}
+
+/* Counterpart to sca3000_configure_ring(), called from sca3000_remove() */
+void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
+{
+	sca3000_rb_free(indio_dev->ring);
+}
+
+/**
+ * __sca3000_hw_ring_state_set() - switch the hardware ring on or off
+ * @indio_dev:	device whose ring is being controlled
+ * @state:	true to set SCA3000_RING_BUF_ENABLE, false to clear it
+ *
+ * Read-modify-write of the ring enable bit in the mode register.
+ **/
+static inline
+int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
+{
+	struct sca3000_state *st = indio_dev->dev_data;
+	int ret;
+	u8 *rx;
+
+	mutex_lock(&st->lock);
+	ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1);
+	if (ret)
+		goto error_ret;
+	/* leftover debug printk removed here */
+	if (state)
+		ret = sca3000_write_reg(st,
+					SCA3000_REG_ADDR_MODE,
+					(rx[1] | SCA3000_RING_BUF_ENABLE));
+	else
+		ret = sca3000_write_reg(st,
+					SCA3000_REG_ADDR_MODE,
+					(rx[1] & ~SCA3000_RING_BUF_ENABLE));
+	kfree(rx);
+error_ret:
+	mutex_unlock(&st->lock);
+
+	return ret;
+}
+/**
+ * sca3000_hw_ring_preenable() - hw ring buffer preenable function
+ *
+ * Very simple enable function as the chip allows normal reads
+ * during ring buffer operation so as long as it is indeed running
+ * before we notify the core, the precise ordering does not matter.
+ **/
+static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev)
+{
+	return __sca3000_hw_ring_state_set(indio_dev, 1);
+}
+
+/* Mirror of preenable: clear the ring enable bit after the core stops */
+static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
+{
+	return __sca3000_hw_ring_state_set(indio_dev, 0);
+}
+
+/* Hook the ring enable/disable callbacks; called from __sca3000_probe()
+ * after sca3000_configure_ring() has allocated indio_dev->ring */
+void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
+{
+	indio_dev->ring->preenable = &sca3000_hw_ring_preenable;
+	indio_dev->ring->postdisable = &sca3000_hw_ring_postdisable;
+}
+
+/**
+ * sca3000_ring_int_process() - ring specific interrupt handling
+ * @val:	contents of the INT_STATUS register
+ * @ring:	the hardware ring buffer
+ *
+ * This is only split from the main interrupt handler so as to
+ * reduce the amount of code if the ring buffer is not enabled.
+ * The 75% event takes precedence (and may escalate a pending 50%
+ * event); otherwise a plain 50% full event is pushed.
+ **/
+void sca3000_ring_int_process(u8 val, struct iio_ring_buffer *ring)
+{
+	if (val & SCA3000_INT_STATUS_THREE_QUARTERS)
+		iio_push_or_escallate_ring_event(ring,
+						 IIO_EVENT_CODE_RING_75_FULL,
+						 0);
+	else if (val & SCA3000_INT_STATUS_HALF)
+		iio_push_ring_event(ring,
+				    IIO_EVENT_CODE_RING_50_FULL, 0);
+}
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
new file mode 100644
index 0000000..b8c2858
--- /dev/null
+++ b/drivers/staging/iio/adc/Kconfig
@@ -0,0 +1,13 @@
+#
+# ADC drivers
+#
+comment "Analog to digital converters"
+
+config MAX1363
+ tristate "MAXIM max1363 ADC driver"
+ depends on I2C
+	help
+	  Say yes here to build support for many MAXIM i2c analog to digital
+	  converters (ADC): max1361, max1362, max1363, max1364, max1136,
+	  max1137, max1138, max1139, max1236, max1237, max1238 and
+	  max1239. Provides direct access via sysfs.
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
new file mode 100644
index 0000000..0c2b6f3
--- /dev/null
+++ b/drivers/staging/iio/adc/Makefile
@@ -0,0 +1,8 @@
+
+# Makefile for industrial I/O ADC drivers
+#
+
+max1363-y := max1363_core.o
+max1363-$(CONFIG_IIO_RING_BUFFER) += max1363_ring.o
+
+obj-$(CONFIG_MAX1363) += max1363.o
diff --git a/drivers/staging/iio/adc/adc.h b/drivers/staging/iio/adc/adc.h
new file mode 100644
index 0000000..d925b2c
--- /dev/null
+++ b/drivers/staging/iio/adc/adc.h
@@ -0,0 +1,13 @@
+/*
+ * adc.h - sysfs attributes associated with ADCs
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Copyright (c) 2008 Jonathan Cameron <jic23@cam.ac.uk>
+ *
+ */
+
+#define IIO_DEV_ATTR_ADC(_num, _show, _addr) \
+ IIO_DEVICE_ATTR(adc_##_num, S_IRUGO, _show, NULL, _addr)
diff --git a/drivers/staging/iio/adc/max1363.h b/drivers/staging/iio/adc/max1363.h
new file mode 100644
index 0000000..8aca81f
--- /dev/null
+++ b/drivers/staging/iio/adc/max1363.h
@@ -0,0 +1,269 @@
+#ifndef _MAX1363_H_
+#define _MAX1363_H_
+
+#define MAX1363_SETUP_BYTE(a) ((a) | 0x80)
+
+/* There is a fair bit more defined here than currently
+ * used, but the intention is to support everything these
+ * chips do in the long run */
+
+/* see data sheets */
+/* max1363 and max1236, max1237, max1238, max1239 */
+#define MAX1363_SETUP_AIN3_IS_AIN3_REF_IS_VDD 0x00
+#define MAX1363_SETUP_AIN3_IS_REF_EXT_TO_REF 0x20
+#define MAX1363_SETUP_AIN3_IS_AIN3_REF_IS_INT 0x40
+#define MAX1363_SETUP_AIN3_IS_REF_REF_IS_INT 0x60
+#define MAX1363_SETUP_POWER_UP_INT_REF 0x10
+#define MAX1363_SETUP_POWER_DOWN_INT_REF 0x00
+
+/* think about including max11600 etc - more settings */
+#define MAX1363_SETUP_EXT_CLOCK 0x08
+#define MAX1363_SETUP_INT_CLOCK 0x00
+#define MAX1363_SETUP_UNIPOLAR 0x00
+#define MAX1363_SETUP_BIPOLAR 0x04
+#define MAX1363_SETUP_RESET 0x00
+#define MAX1363_SETUP_NORESET 0x02
+/* max1363 only - though don't care on others.
+ * For now monitor modes are not implemented as the relevant
+ * line is not connected on my test board.
+ * The definitions are here as I intend to add this soon.
+ */
+#define MAX1363_SETUP_MONITOR_SETUP 0x01
+
+/* Specific to the max1363 */
+#define MAX1363_MON_RESET_CHAN(a) (1 << ((a) + 4))
+#define MAX1363_MON_CONV_RATE_133ksps 0
+#define MAX1363_MON_CONV_RATE_66_5ksps 0x02
+#define MAX1363_MON_CONV_RATE_33_3ksps 0x04
+#define MAX1363_MON_CONV_RATE_16_6ksps 0x06
+#define MAX1363_MON_CONV_RATE_8_3ksps 0x08
+#define MAX1363_MON_CONV_RATE_4_2ksps 0x0A
+#define MAX1363_MON_CONV_RATE_2_0ksps 0x0C
+#define MAX1363_MON_CONV_RATE_1_0ksps 0x0E
+#define MAX1363_MON_INT_ENABLE 0x01
+
+/* defined for readability reasons */
+/* All chips */
+#define MAX1363_CONFIG_BYTE(a) ((a))
+
+#define MAX1363_CONFIG_SE 0x01
+#define MAX1363_CONFIG_DE 0x00
+#define MAX1363_CONFIG_SCAN_TO_CS 0x00
+#define MAX1363_CONFIG_SCAN_SINGLE_8 0x20
+#define MAX1363_CONFIG_SCAN_MONITOR_MODE 0x40
+#define MAX1363_CONFIG_SCAN_SINGLE_1 0x60
+/* max123{6-9} only */
+#define MAX1236_SCAN_MID_TO_CHANNEL 0x40
+
+/* max1363 only - merely part of channel selects or don't care for others*/
+#define MAX1363_CONFIG_EN_MON_MODE_READ 0x18
+
+#define MAX1363_CHANNEL_SEL(a) ((a) << 1)
+
+/* max1363 strictly 0x06 - but doesn't matter */
+#define MAX1363_CHANNEL_SEL_MASK 0x1E
+#define MAX1363_SCAN_MASK 0x60
+#define MAX1363_SE_DE_MASK 0x01
+
+/**
+ * struct max1363_mode - scan mode information
+ * @name: Name used to identify the scan mode.
+ * @conf: The corresponding value of the configuration register
+ * @numvals: The number of values returned by a single scan
+ */
+struct max1363_mode {
+ const char *name;
+ int8_t conf;
+ int numvals;
+};
+
+#define MAX1363_MODE_SINGLE(_num) { \
+ .name = #_num, \
+ .conf = MAX1363_CHANNEL_SEL(_num) \
+ | MAX1363_CONFIG_SCAN_SINGLE_1 \
+ | MAX1363_CONFIG_SE, \
+ .numvals = 1, \
+ }
+
+#define MAX1363_MODE_SINGLE_TIMES_8(_num) { \
+ .name = #_num"x8", \
+ .conf = MAX1363_CHANNEL_SEL(_num) \
+ | MAX1363_CONFIG_SCAN_SINGLE_8 \
+ | MAX1363_CONFIG_SE, \
+ .numvals = 8, \
+ }
+
+#define MAX1363_MODE_SCAN_TO_CHANNEL(_num) { \
+ .name = "0..."#_num, \
+ .conf = MAX1363_CHANNEL_SEL(_num) \
+ | MAX1363_CONFIG_SCAN_TO_CS \
+ | MAX1363_CONFIG_SE, \
+ .numvals = _num + 1, \
+ }
+
+
+/* note not available for max1363 hence naming */
+#define MAX1236_MODE_SCAN_MID_TO_CHANNEL(_mid, _num) { \
+ .name = #_mid"..."#_num, \
+ .conf = MAX1363_CHANNEL_SEL(_num) \
+ | MAX1236_SCAN_MID_TO_CHANNEL \
+ | MAX1363_CONFIG_SE, \
+ .numvals = _num - _mid + 1 \
+}
+
+#define MAX1363_MODE_DIFF_SINGLE(_nump, _numm) { \
+ .name = #_nump"-"#_numm, \
+ .conf = MAX1363_CHANNEL_SEL(_nump) \
+ | MAX1363_CONFIG_SCAN_SINGLE_1 \
+ | MAX1363_CONFIG_DE, \
+ .numvals = 1, \
+ }
+
+#define MAX1363_MODE_DIFF_SINGLE_TIMES_8(_nump, _numm) { \
+ .name = #_nump"-"#_numm, \
+ .conf = MAX1363_CHANNEL_SEL(_nump) \
+ | MAX1363_CONFIG_SCAN_SINGLE_8 \
+ | MAX1363_CONFIG_DE, \
+ .numvals = 1, \
+ }
+
+/* Can't think how to automate naming so specify for now */
+#define MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(_name, _num, _numvals) { \
+ .name = #_name, \
+ .conf = MAX1363_CHANNEL_SEL(_num) \
+ | MAX1363_CONFIG_SCAN_TO_CS \
+ | MAX1363_CONFIG_DE, \
+ .numvals = _numvals, \
+ }
+
+/* note not available for max1363 hence naming */
+#define MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL_NAMED(_name, _num, _numvals) { \
+ .name = #_name, \
+ .conf = MAX1363_CHANNEL_SEL(_num) \
+ | MAX1236_SCAN_MID_TO_CHANNEL \
+ | MAX1363_CONFIG_SE, \
+ .numvals = _numvals, \
+}
+
+/* Not currently handled - hardware window monitoring of channels 0..3.
+ * numvals = 10 reflects the monitor-mode result block size; listed only
+ * for completeness until monitor interrupts are implemented.
+ */
+#define MAX1363_MODE_MONITOR { \
+	.name = "monitor", \
+	.conf = MAX1363_CHANNEL_SEL(3) \
+		| MAX1363_CONFIG_SCAN_MONITOR_MODE \
+		| MAX1363_CONFIG_SE, \
+	.numvals = 10, \
+	}
+
+/* This may seem an overly long winded way to do this, but at least it makes
+ * clear what all the various options actually do. Alternative suggestions
+ * that don't require user to have intimate knowledge of the chip welcomed.
+ */
+
+/* This must be maintained along side the max1363_mode_table in max1363_core:
+ * each enumerator is the index of the corresponding table entry, so the
+ * ordering here and there must match exactly.
+ */
+enum max1363_modes {
+	/* Single read of a single channel */
+	_s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7, _s8, _s9, _s10, _s11,
+	/* Eight reads of a single channel */
+	se0, se1, se2, se3, se4, se5, se6, se7, se8, se9, se10, se11,
+	/* Scan to channel */
+	s0to1, s0to2, s0to3, s0to4, s0to5, s0to6,
+	s0to7, s0to8, s0to9, s0to10, s0to11,
+	/* Differential single read */
+	d0m1, d2m3, d4m5, d6m7, d8m9, d10m11,
+	d1m0, d3m2, d5m4, d7m6, d9m8, d11m10,
+	/* Differential single read 8 times */
+	de0m1, de2m3, de4m5, de6m7, de8m9, de10m11,
+	de1m0, de3m2, de5m4, de7m6, de9m8, de11m10,
+	/* Differential scan to channel */
+	d0m1to2m3, d0m1to4m5, d0m1to6m7, d0m1to8m9, d0m1to10m11,
+	d1m0to3m2, d1m0to5m4, d1m0to7m6, d1m0to9m8, d1m0to11m10,
+	/* Scan mid to channel max123{6-9} only */
+	s2to3, s6to7, s6to8, s6to9, s6to10, s6to11,
+	/* Differential scan mid to channel */
+	s6m7to8m9, s6m7to10m11, s7m6to9m8, s7m6to11m10,
+};
+
+/**
+ * struct max1363_chip_info - chip specific information
+ * @name:		identification string for chip
+ * @num_inputs:		number of physical inputs on chip
+ * @int_vref_mv:	the internal reference voltage (0 if none)
+ * @monitor_mode:	whether the chip supports monitor interrupts
+ * @mode_list:		array of available scan modes (indices into
+ *			max1363_mode_table)
+ * @num_modes:		the number of scan modes available
+ * @default_mode:	the scan mode in which the chip starts up
+ */
+struct max1363_chip_info {
+	const char *name;
+	u8 num_inputs;
+	u16 int_vref_mv;
+	bool monitor_mode;
+	const enum max1363_modes *mode_list;
+	int num_modes;
+	enum max1363_modes default_mode;
+};
+
+
+/**
+ * struct max1363_state - driver instance specific data
+ * @indio_dev:		the industrial I/O device
+ * @client:		i2c_client
+ * @setupbyte:		cache of current device setup byte
+ * @configbyte:		cache of current device config byte
+ * @chip_info:		chip model specific constants, available modes etc
+ * @current_mode:	the scan mode of this chip
+ * @poll_work:		bottom half of polling interrupt handler
+ * @protect_ring:	used to ensure only one polling bh running at a time
+ * @trig:		trigger used for ring buffer capture
+ * @reg:		supply regulator (may hold an ERR_PTR if absent)
+ */
+struct max1363_state {
+	struct iio_dev *indio_dev;
+	struct i2c_client *client;
+	char setupbyte;
+	char configbyte;
+	const struct max1363_chip_info *chip_info;
+	const struct max1363_mode *current_mode;
+	struct work_struct poll_work;
+	atomic_t protect_ring;
+	struct iio_trigger *trig;
+	struct regulator *reg;
+};
+#ifdef CONFIG_IIO_RING_BUFFER
+/* Ring buffer support - implemented in max1363_ring.c */
+ssize_t max1363_scan_from_ring(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf);
+int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev);
+void max1363_ring_cleanup(struct iio_dev *indio_dev);
+
+int max1363_initialize_ring(struct iio_ring_buffer *ring);
+void max1363_uninitialize_ring(struct iio_ring_buffer *ring);
+
+#else /* CONFIG_IIO_RING_BUFFER */
+
+/* No-op stubs so the core driver builds without ring buffer support.
+ * Note no semicolon after an inline function definition - the original
+ * stray semicolons were extraneous null declarations (ISO C forbids them
+ * at file scope).
+ */
+static inline void max1363_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+static inline int max1363_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return 0;
+}
+
+
+static inline ssize_t max1363_scan_from_ring(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	return 0;
+}
+
+static inline int
+max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline void max1363_ring_cleanup(struct iio_dev *indio_dev) {}
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* _MAX1363_H_ */
diff --git a/drivers/staging/iio/adc/max1363_core.c b/drivers/staging/iio/adc/max1363_core.c
new file mode 100644
index 0000000..9703881
--- /dev/null
+++ b/drivers/staging/iio/adc/max1363_core.c
@@ -0,0 +1,623 @@
+ /*
+ * linux/drivers/industrialio/adc/max1363.c
+ * Copyright (C) 2008 Jonathan Cameron
+ *
+ * based on linux/drivers/i2c/chips/max123x
+ * Copyright (C) 2002-2004 Stefan Eletzhofer
+ *
+ * based on linux/drivers/acorn/char/pcf8583.c
+ * Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * max1363.c
+ *
+ * Partial support for max1363 and similar chips.
+ *
+ * Not currently implemented.
+ *
+ * - Monitor interrupt generation.
+ * - Control of internal reference.
+ * - Sysfs scan interface currently assumes unipolar mode.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+#include <linux/regulator/consumer.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#include "max1363.h"
+
+/* Available scan modes.
+ * Awkwardly the associated enum is in the header so it is available to
+ * the ring buffer code.  Entry order here must match enum max1363_modes
+ * exactly, since the enum values are used as indices into this table.
+ */
+static const struct max1363_mode max1363_mode_table[] = {
+	/* Single read of a single channel (_s0.._s11) */
+	MAX1363_MODE_SINGLE(0),
+	MAX1363_MODE_SINGLE(1),
+	MAX1363_MODE_SINGLE(2),
+	MAX1363_MODE_SINGLE(3),
+	MAX1363_MODE_SINGLE(4),
+	MAX1363_MODE_SINGLE(5),
+	MAX1363_MODE_SINGLE(6),
+	MAX1363_MODE_SINGLE(7),
+	MAX1363_MODE_SINGLE(8),
+	MAX1363_MODE_SINGLE(9),
+	MAX1363_MODE_SINGLE(10),
+	MAX1363_MODE_SINGLE(11),
+
+	/* Eight reads of a single channel (se0..se11) */
+	MAX1363_MODE_SINGLE_TIMES_8(0),
+	MAX1363_MODE_SINGLE_TIMES_8(1),
+	MAX1363_MODE_SINGLE_TIMES_8(2),
+	MAX1363_MODE_SINGLE_TIMES_8(3),
+	MAX1363_MODE_SINGLE_TIMES_8(4),
+	MAX1363_MODE_SINGLE_TIMES_8(5),
+	MAX1363_MODE_SINGLE_TIMES_8(6),
+	MAX1363_MODE_SINGLE_TIMES_8(7),
+	MAX1363_MODE_SINGLE_TIMES_8(8),
+	MAX1363_MODE_SINGLE_TIMES_8(9),
+	MAX1363_MODE_SINGLE_TIMES_8(10),
+	MAX1363_MODE_SINGLE_TIMES_8(11),
+
+	/* Scan to channel (s0to1..s0to11) */
+	MAX1363_MODE_SCAN_TO_CHANNEL(1),
+	MAX1363_MODE_SCAN_TO_CHANNEL(2),
+	MAX1363_MODE_SCAN_TO_CHANNEL(3),
+	MAX1363_MODE_SCAN_TO_CHANNEL(4),
+	MAX1363_MODE_SCAN_TO_CHANNEL(5),
+	MAX1363_MODE_SCAN_TO_CHANNEL(6),
+	MAX1363_MODE_SCAN_TO_CHANNEL(7),
+	MAX1363_MODE_SCAN_TO_CHANNEL(8),
+	MAX1363_MODE_SCAN_TO_CHANNEL(9),
+	MAX1363_MODE_SCAN_TO_CHANNEL(10),
+	MAX1363_MODE_SCAN_TO_CHANNEL(11),
+
+	/* Differential single read (d0m1..d11m10) */
+	MAX1363_MODE_DIFF_SINGLE(0, 1),
+	MAX1363_MODE_DIFF_SINGLE(2, 3),
+	MAX1363_MODE_DIFF_SINGLE(4, 5),
+	MAX1363_MODE_DIFF_SINGLE(6, 7),
+	MAX1363_MODE_DIFF_SINGLE(8, 9),
+	MAX1363_MODE_DIFF_SINGLE(10, 11),
+	MAX1363_MODE_DIFF_SINGLE(1, 0),
+	MAX1363_MODE_DIFF_SINGLE(3, 2),
+	MAX1363_MODE_DIFF_SINGLE(5, 4),
+	MAX1363_MODE_DIFF_SINGLE(7, 6),
+	MAX1363_MODE_DIFF_SINGLE(9, 8),
+	MAX1363_MODE_DIFF_SINGLE(11, 10),
+
+	/* Differential single read 8 times (de0m1..de11m10) */
+	MAX1363_MODE_DIFF_SINGLE_TIMES_8(0, 1),
+	MAX1363_MODE_DIFF_SINGLE_TIMES_8(2, 3),
+	MAX1363_MODE_DIFF_SINGLE_TIMES_8(4, 5),
+	MAX1363_MODE_DIFF_SINGLE_TIMES_8(6, 7),
+	MAX1363_MODE_DIFF_SINGLE_TIMES_8(8, 9),
+	MAX1363_MODE_DIFF_SINGLE_TIMES_8(10, 11),
+	MAX1363_MODE_DIFF_SINGLE_TIMES_8(1, 0),
+	MAX1363_MODE_DIFF_SINGLE_TIMES_8(3, 2),
+	MAX1363_MODE_DIFF_SINGLE_TIMES_8(5, 4),
+	MAX1363_MODE_DIFF_SINGLE_TIMES_8(7, 6),
+	MAX1363_MODE_DIFF_SINGLE_TIMES_8(9, 8),
+	MAX1363_MODE_DIFF_SINGLE_TIMES_8(11, 10),
+
+	/* Differential scan to channel (d0m1to2m3..d1m0to11m10) */
+	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(0-1...2-3, 2, 2),
+	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(0-1...4-5, 4, 3),
+	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(0-1...6-7, 6, 4),
+	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(0-1...8-9, 8, 5),
+	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(0-1...10-11, 10, 6),
+	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(1-0...3-2, 3, 2),
+	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(1-0...5-4, 5, 3),
+	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(1-0...7-6, 7, 4),
+	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(1-0...9-8, 9, 5),
+	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(1-0...11-10, 11, 6),
+
+	/* Scan mid to channel, max123{6-9} only (s2to3..s6to11) */
+	MAX1236_MODE_SCAN_MID_TO_CHANNEL(2, 3),
+	MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 7),
+	MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 8),
+	MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 9),
+	MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 10),
+	MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 11),
+
+	/* Differential scan mid to channel (s6m7to8m9..s7m6to11m10) */
+	MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL_NAMED(6-7...8-9, 8, 2),
+	MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL_NAMED(6-7...10-11, 10, 3),
+	MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL_NAMED(7-6...9-8, 9, 2),
+	MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL_NAMED(7-6...11-10, 11, 3),
+};
+
+/* Applies to max1363 - 4 input parts without the mid-to-channel modes */
+static const enum max1363_modes max1363_mode_list[] = {
+	_s0, _s1, _s2, _s3,
+	se0, se1, se2, se3,
+	s0to1, s0to2, s0to3,
+	d0m1, d2m3, d1m0, d3m2,
+	de0m1, de2m3, de1m0, de3m2,
+	d0m1to2m3, d1m0to3m2,
+};
+
+/* Applies to max1236, max1237 - as max1363 plus the s2to3 mid scan */
+static const enum max1363_modes max1236_mode_list[] = {
+	_s0, _s1, _s2, _s3,
+	se0, se1, se2, se3,
+	s0to1, s0to2, s0to3,
+	d0m1, d2m3, d1m0, d3m2,
+	de0m1, de2m3, de1m0, de3m2,
+	d0m1to2m3, d1m0to3m2,
+	s2to3,
+};
+
+/* Applies to max1238, max1239 (and the max1138/max1139 12 input parts) */
+static const enum max1363_modes max1238_mode_list[] = {
+	_s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7, _s8, _s9, _s10, _s11,
+	se0, se1, se2, se3, se4, se5, se6, se7, se8, se9, se10, se11,
+	s0to1, s0to2, s0to3, s0to4, s0to5, s0to6,
+	s0to7, s0to8, s0to9, s0to10, s0to11,
+	d0m1, d2m3, d4m5, d6m7, d8m9, d10m11,
+	d1m0, d3m2, d5m4, d7m6, d9m8, d11m10,
+	de0m1, de2m3, de4m5, de6m7, de8m9, de10m11,
+	de1m0, de3m2, de5m4, de7m6, de9m8, de11m10,
+	d0m1to2m3, d0m1to4m5, d0m1to6m7, d0m1to8m9, d0m1to10m11,
+	d1m0to3m2, d1m0to5m4, d1m0to7m6, d1m0to9m8, d1m0to11m10,
+	s6to7, s6to8, s6to9, s6to10, s6to11,
+	s6m7to8m9, s6m7to10m11, s7m6to9m8, s7m6to11m10,
+};
+
+
+/* Indices into max1363_chip_info_tbl; order must match that table and
+ * the .driver_data values in max1363_id.
+ */
+enum { max1361,
+	max1362,
+	max1363,
+	max1364,
+	max1136,
+	max1137,
+	max1138,
+	max1139,
+	max1236,
+	max1237,
+	max1238,
+	max1239,
+};
+
+/* max1363 and max1238 tested - rest from data sheet.
+ * (Original comment said "max1368", a part not in max1363_id; presumed
+ * typo - TODO confirm which second part was actually tested.)
+ */
+static const struct max1363_chip_info max1363_chip_info_tbl[] = {
+	{
+		.name = "max1361",
+		.num_inputs = 4,
+		.monitor_mode = 1,
+		.mode_list = max1363_mode_list,
+		.num_modes = ARRAY_SIZE(max1363_mode_list),
+		.default_mode = s0to3,
+	}, {
+		.name = "max1362",
+		.num_inputs = 4,
+		.monitor_mode = 1,
+		.mode_list = max1363_mode_list,
+		.num_modes = ARRAY_SIZE(max1363_mode_list),
+		.default_mode = s0to3,
+	}, {
+		.name = "max1363",
+		.num_inputs = 4,
+		.monitor_mode = 1,
+		.mode_list = max1363_mode_list,
+		.num_modes = ARRAY_SIZE(max1363_mode_list),
+		.default_mode = s0to3,
+	}, {
+		.name = "max1364",
+		.num_inputs = 4,
+		.monitor_mode = 1,
+		.mode_list = max1363_mode_list,
+		.num_modes = ARRAY_SIZE(max1363_mode_list),
+		.default_mode = s0to3,
+	}, {
+		.name = "max1136",
+		.num_inputs = 4,
+		.int_vref_mv = 4096,
+		.mode_list = max1236_mode_list,
+		.num_modes = ARRAY_SIZE(max1236_mode_list),
+		.default_mode = s0to3,
+	}, {
+		.name = "max1137",
+		.num_inputs = 4,
+		.int_vref_mv = 2048,
+		.mode_list = max1236_mode_list,
+		.num_modes = ARRAY_SIZE(max1236_mode_list),
+		.default_mode = s0to3,
+	}, {
+		.name = "max1138",
+		.num_inputs = 12,
+		.int_vref_mv = 4096,
+		.mode_list = max1238_mode_list,
+		.num_modes = ARRAY_SIZE(max1238_mode_list),
+		.default_mode = s0to11,
+	}, {
+		.name = "max1139",
+		.num_inputs = 12,
+		.int_vref_mv = 2048,
+		.mode_list = max1238_mode_list,
+		.num_modes = ARRAY_SIZE(max1238_mode_list),
+		.default_mode = s0to11,
+	}, {
+		.name = "max1236",
+		.num_inputs = 4,
+		.int_vref_mv = 4096,
+		.mode_list = max1236_mode_list,
+		.num_modes = ARRAY_SIZE(max1236_mode_list),
+		.default_mode = s0to3,
+	}, {
+		.name = "max1237",
+		.num_inputs = 4,
+		.int_vref_mv = 2048,
+		.mode_list = max1236_mode_list,
+		.num_modes = ARRAY_SIZE(max1236_mode_list),
+		.default_mode = s0to3,
+	}, {
+		.name = "max1238",
+		.num_inputs = 12,
+		.int_vref_mv = 4096,
+		.mode_list = max1238_mode_list,
+		.num_modes = ARRAY_SIZE(max1238_mode_list),
+		.default_mode = s0to11,
+	}, {
+		.name = "max1239",
+		.num_inputs = 12,
+		.int_vref_mv = 2048,
+		.mode_list = max1238_mode_list,
+		.num_modes = ARRAY_SIZE(max1238_mode_list),
+		.default_mode = s0to11,
+	},
+};
+
+/* Write the setup byte (d1) then the config byte (d2) to the device.
+ * Returns 0 on success or a negative error code from the i2c layer.
+ * A two byte stack buffer suffices here - the previous kmalloc (and its
+ * attendant -ENOMEM failure path) was needless overhead on every write.
+ */
+static int max1363_write_basic_config(struct i2c_client *client,
+				      unsigned char d1,
+				      unsigned char d2)
+{
+	int ret;
+	u8 tx_buf[2];
+
+	tx_buf[0] = d1;
+	tx_buf[1] = d2;
+
+	ret = i2c_master_send(client, tx_buf, 2);
+	return (ret > 0) ? 0 : ret;
+}
+
+/* Merge the current mode's channel / scan / SE-DE bits into the cached
+ * config byte, then push both setup and config bytes to the device.
+ */
+static int max1363_set_scan_mode(struct max1363_state *st)
+{
+	unsigned char config = st->configbyte;
+
+	config &= ~(MAX1363_CHANNEL_SEL_MASK
+		    | MAX1363_SCAN_MASK
+		    | MAX1363_SE_DE_MASK);
+	config |= st->current_mode->conf;
+	st->configbyte = config;
+
+	return max1363_write_basic_config(st->client, st->setupbyte,
+					  st->configbyte);
+}
+
+/* Bring the chip to a known state: internal reference powered, internal
+ * clock, unipolar, no reset, and the chip's default scan mode selected.
+ * Returns the result of the config write.
+ */
+static int max1363_initial_setup(struct max1363_state *st)
+{
+	st->setupbyte = MAX1363_SETUP_AIN3_IS_AIN3_REF_IS_VDD
+		| MAX1363_SETUP_POWER_UP_INT_REF
+		| MAX1363_SETUP_INT_CLOCK
+		| MAX1363_SETUP_UNIPOLAR
+		| MAX1363_SETUP_NORESET;
+
+	/* Set scan mode writes the config anyway so wait until then*/
+	st->setupbyte = MAX1363_SETUP_BYTE(st->setupbyte);
+	st->current_mode = &max1363_mode_table[st->chip_info->default_mode];
+	st->configbyte = MAX1363_CONFIG_BYTE(st->configbyte);
+
+	return max1363_set_scan_mode(st);
+}
+
+/* sysfs read: space separated list of scan mode names this chip supports */
+static ssize_t max1363_show_av_scan_modes(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct max1363_state *st = dev_info->dev_data;
+	const struct max1363_chip_info *ci = st->chip_info;
+	int len = 0;
+	int i;
+
+	for (i = 0; i < ci->num_modes; i++) {
+		const struct max1363_mode *mode
+			= &max1363_mode_table[ci->mode_list[i]];
+		len += sprintf(buf + len, "%s ", mode->name);
+	}
+	len += sprintf(buf + len, "\n");
+
+	return len;
+}
+
+
+/* The dev here is the sysfs related one, not the underlying i2c one.
+ * Reads numvals 12 bit results straight from the device and formats them
+ * as space separated decimal values.
+ * Fixes over the original: rxbuf was leaked when i2c_master_recv failed,
+ * and the low data byte was sign extended through the (possibly signed)
+ * char buffer - mask with 0xFF to treat it as unsigned.
+ */
+static ssize_t max1363_scan_direct(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct max1363_state *st = dev_info->dev_data;
+	int len = 0, ret, i;
+	struct i2c_client *client = st->client;
+	char *rxbuf;
+
+	if (st->current_mode->numvals == 0)
+		return 0;
+	rxbuf = kmalloc(st->current_mode->numvals*2, GFP_KERNEL);
+	if (rxbuf == NULL)
+		return -ENOMEM;
+
+	/* Interpretation depends on whether these are signed or not!*/
+	/* Assume not for now */
+	ret = i2c_master_recv(client, rxbuf, st->current_mode->numvals*2);
+	if (ret < 0) {
+		kfree(rxbuf);
+		return ret;
+	}
+	for (i = 0; i < st->current_mode->numvals; i++)
+		len += sprintf(buf + len, "%d ",
+			       ((int)(rxbuf[i*2 + 0] & 0x0F) << 8)
+			       + ((int)(rxbuf[i*2 + 1] & 0xFF)));
+	kfree(rxbuf);
+	len += sprintf(buf + len, "\n");
+
+	return len;
+}
+
+/* sysfs read entry point: dispatch to the ring buffer when triggered
+ * capture is running, otherwise read directly from the device.  The
+ * device mutex serialises against mode changes.
+ */
+static ssize_t max1363_scan(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	int ret;
+
+	mutex_lock(&dev_info->mlock);
+	ret = (dev_info->currentmode == INDIO_RING_TRIGGERED)
+		? max1363_scan_from_ring(dev, attr, buf)
+		: max1363_scan_direct(dev, attr, buf);
+	mutex_unlock(&dev_info->mlock);
+
+	return ret;
+}
+
+/* Cannot query the device, so use local copy of state.
+ * sysfs read: name of the currently selected scan mode.
+ */
+static ssize_t max1363_show_scan_mode(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct max1363_state *st = dev_info->dev_data;
+
+	return sprintf(buf, "%s\n", st->current_mode->name);
+}
+
+/* Look a scan mode up by name within a particular chip's supported set.
+ * Returns NULL when no supported mode has the requested name.
+ */
+static const struct max1363_mode
+*__max1363_find_mode_in_ci(const struct max1363_chip_info *info,
+			   const char *buf)
+{
+	int i;
+
+	for (i = 0; i < info->num_modes; i++) {
+		const struct max1363_mode *mode
+			= &max1363_mode_table[info->mode_list[i]];
+		if (!strcmp(mode->name, buf))
+			return mode;
+	}
+	return NULL;
+}
+
+/* sysfs write: select a new scan mode by name.
+ * Returns -EBUSY while a ring buffer is enabled (no state changes then),
+ * -EINVAL if the name is not supported by this chip, or the error from
+ * writing the new configuration to the device.
+ */
+static ssize_t max1363_store_scan_mode(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct max1363_state *st = dev_info->dev_data;
+	const struct max1363_mode *new_mode;
+	int ret;
+
+	mutex_lock(&dev_info->mlock);
+	/* Avoid state changes if a ring buffer is enabled */
+	if (iio_ring_enabled(dev_info)) {
+		ret = -EBUSY;
+		goto error_ret;
+	}
+	new_mode = __max1363_find_mode_in_ci(st->chip_info, buf);
+	if (new_mode == NULL) {
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	st->current_mode = new_mode;
+	ret = max1363_set_scan_mode(st);
+	if (ret)
+		goto error_ret;
+	mutex_unlock(&dev_info->mlock);
+
+	return len;
+
+error_ret:
+	mutex_unlock(&dev_info->mlock);
+
+	return ret;
+}
+
+/* sysfs attribute declarations */
+IIO_DEV_ATTR_AVAIL_SCAN_MODES(max1363_show_av_scan_modes);
+IIO_DEV_ATTR_SCAN_MODE(S_IRUGO | S_IWUSR,
+		       max1363_show_scan_mode,
+		       max1363_store_scan_mode);
+
+IIO_DEV_ATTR_SCAN(max1363_scan);
+
+/* sysfs read: the chip model name */
+static ssize_t max1363_show_name(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct max1363_state *st = dev_info->dev_data;
+	return sprintf(buf, "%s\n", st->chip_info->name);
+}
+
+IIO_DEVICE_ATTR(name, S_IRUGO, max1363_show_name, NULL, 0);
+
+/*name export */
+
+static struct attribute *max1363_attributes[] = {
+	&iio_dev_attr_available_scan_modes.dev_attr.attr,
+	&iio_dev_attr_scan_mode.dev_attr.attr,
+	&iio_dev_attr_scan.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group max1363_attribute_group = {
+	.attrs = max1363_attributes,
+};
+
+/* Probe: allocate state, identify the chip variant, take the (optional)
+ * supply regulator, register the IIO device and set up ring buffering.
+ * Error unwinding mirrors acquisition order; regdone tracks whether the
+ * iio device has been registered so the right release call is made.
+ * (Also drops a stray semicolon that followed the chip-lookup for loop.)
+ */
+static int __devinit max1363_probe(struct i2c_client *client,
+				   const struct i2c_device_id *id)
+{
+	int ret, i, regdone = 0;
+	struct max1363_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	/* this is only used for device removal purposes */
+	i2c_set_clientdata(client, st);
+
+	atomic_set(&st->protect_ring, 0);
+
+	/* Find the chip model specific data */
+	for (i = 0; i < ARRAY_SIZE(max1363_chip_info_tbl); i++)
+		if (!strcmp(max1363_chip_info_tbl[i].name, id->name)) {
+			st->chip_info = &max1363_chip_info_tbl[i];
+			break;
+		}
+	/* Unsupported chip */
+	if (!st->chip_info) {
+		dev_err(&client->dev, "%s is not supported\n", id->name);
+		ret = -ENODEV;
+		goto error_free_st;
+	}
+	/* Supply regulator is optional - tolerate an ERR_PTR from here on */
+	st->reg = regulator_get(&client->dev, "vcc");
+	if (!IS_ERR(st->reg)) {
+		ret = regulator_enable(st->reg);
+		if (ret)
+			goto error_put_reg;
+	}
+	st->client = client;
+
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_disable_reg;
+	}
+
+	/* Establish that the iio_dev is a child of the i2c device */
+	st->indio_dev->dev.parent = &client->dev;
+	st->indio_dev->attrs = &max1363_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = max1363_initial_setup(st);
+	if (ret)
+		goto error_free_device;
+
+	ret = max1363_register_ring_funcs_and_init(st->indio_dev);
+	if (ret)
+		goto error_free_device;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_cleanup_ring;
+	regdone = 1;
+	ret = max1363_initialize_ring(st->indio_dev->ring);
+	if (ret)
+		goto error_cleanup_ring;
+	return 0;
+error_cleanup_ring:
+	max1363_ring_cleanup(st->indio_dev);
+error_free_device:
+	if (!regdone)
+		iio_free_device(st->indio_dev);
+	else
+		iio_device_unregister(st->indio_dev);
+error_disable_reg:
+	if (!IS_ERR(st->reg))
+		regulator_disable(st->reg);
+error_put_reg:
+	if (!IS_ERR(st->reg))
+		regulator_put(st->reg);
+error_free_st:
+	kfree(st);
+
+error_ret:
+	return ret;
+}
+
+/* Remove: tear down in the reverse order of probe - ring buffer first,
+ * then the iio device, then the (optional) regulator and the state.
+ */
+static int max1363_remove(struct i2c_client *client)
+{
+	struct max1363_state *st = i2c_get_clientdata(client);
+	struct iio_dev *indio_dev = st->indio_dev;
+	max1363_uninitialize_ring(indio_dev->ring);
+	max1363_ring_cleanup(indio_dev);
+	iio_device_unregister(indio_dev);
+	/* st->reg holds an ERR_PTR when no supply regulator was found */
+	if (!IS_ERR(st->reg)) {
+		regulator_disable(st->reg);
+		regulator_put(st->reg);
+	}
+	kfree(st);
+
+	return 0;
+}
+
+/* i2c device ids; the data values index max1363_chip_info_tbl, though the
+ * probe routine currently matches by name rather than by this index.
+ */
+static const struct i2c_device_id max1363_id[] = {
+	{ "max1361", max1361 },
+	{ "max1362", max1362 },
+	{ "max1363", max1363 },
+	{ "max1364", max1364 },
+	{ "max1136", max1136 },
+	{ "max1137", max1137 },
+	{ "max1138", max1138 },
+	{ "max1139", max1139 },
+	{ "max1236", max1236 },
+	{ "max1237", max1237 },
+	{ "max1238", max1238 },
+	{ "max1239", max1239 },
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, max1363_id);
+
+static struct i2c_driver max1363_driver = {
+	.driver = {
+		.name = "max1363",
+	},
+	.probe = max1363_probe,
+	.remove = max1363_remove,
+	.id_table = max1363_id,
+};
+
+/* Module init/exit simply register/unregister the i2c driver */
+static __init int max1363_init(void)
+{
+	return i2c_add_driver(&max1363_driver);
+}
+
+static __exit void max1363_exit(void)
+{
+	i2c_del_driver(&max1363_driver);
+}
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
+MODULE_DESCRIPTION("Maxim 1363 ADC");
+MODULE_LICENSE("GPL v2");
+
+module_init(max1363_init);
+module_exit(max1363_exit);
diff --git a/drivers/staging/iio/adc/max1363_ring.c b/drivers/staging/iio/adc/max1363_ring.c
new file mode 100644
index 0000000..a953eac
--- /dev/null
+++ b/drivers/staging/iio/adc/max1363_ring.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * max1363_ring.c
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+
+#include "../iio.h"
+#include "../ring_generic.h"
+#include "../ring_sw.h"
+#include "../trigger.h"
+#include "../sysfs.h"
+
+#include "max1363.h"
+
+/* Format the most recent ring buffer datum as a sysfs read.
+ * Fix over the original: the low data byte was sign extended through the
+ * (possibly signed) char buffer; mask with 0xFF so values above 127 are
+ * not rendered negative.
+ */
+ssize_t max1363_scan_from_ring(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct max1363_state *info = dev_info->dev_data;
+	int i, ret, len = 0;
+	char *ring_data;
+
+	ring_data = kmalloc(info->current_mode->numvals*2, GFP_KERNEL);
+	if (ring_data == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	ret = dev_info->ring->access.read_last(dev_info->ring, ring_data);
+	if (ret)
+		goto error_free_ring_data;
+	len += sprintf(buf+len, "ring ");
+	for (i = 0; i < info->current_mode->numvals; i++)
+		len += sprintf(buf + len, "%d ",
+			       ((int)(ring_data[i*2 + 0] & 0x0F) << 8)
+			       + ((int)(ring_data[i*2 + 1] & 0xFF)));
+	len += sprintf(buf + len, "\n");
+	kfree(ring_data);
+
+	return len;
+
+error_free_ring_data:
+	kfree(ring_data);
+error_ret:
+	return ret;
+}
+
+/**
+ * max1363_ring_preenable() setup the parameters of the ring before enabling
+ *
+ * The complex nature of the setting of the number of bytes per datum is due
+ * to this driver currently ensuring that the timestamp is stored at an 8
+ * byte boundary.
+ **/
+static int max1363_ring_preenable(struct iio_dev *indio_dev)
+{
+	struct max1363_state *st = indio_dev->dev_data;
+	size_t d_size;
+
+	if (indio_dev->ring->access.set_bpd) {
+		/* scan data plus a trailing s64 timestamp, padded to 8 bytes */
+		d_size = st->current_mode->numvals*2 + sizeof(s64);
+		if (d_size % 8)
+			d_size += 8 - (d_size % 8);
+		indio_dev->ring->access.set_bpd(indio_dev->ring, d_size);
+	}
+
+	return 0;
+}
+
+/**
+ * max1363_ring_postenable() typical ring post enable
+ *
+ * Attaches the poll function to the trigger (if any) once the ring is up.
+ * Only not moved into the core for the hardware ring buffer cases
+ * that are more sophisticated.
+ **/
+static int max1363_ring_postenable(struct iio_dev *indio_dev)
+{
+	return indio_dev->trig
+		? iio_trigger_attach_poll_func(indio_dev->trig,
+					       indio_dev->pollfunc)
+		: 0;
+}
+
+/**
+ * max1363_ring_predisable() runs just prior to ring buffer being disabled
+ *
+ * Typical predisable function which ensures that no trigger events can
+ * occur before we disable the ring buffer (and hence would have no idea
+ * what to do with them)
+ **/
+static int max1363_ring_predisable(struct iio_dev *indio_dev)
+{
+	if (indio_dev->trig == NULL)
+		return 0;
+	return iio_trigger_dettach_poll_func(indio_dev->trig,
+					     indio_dev->pollfunc);
+}
+
+/**
+ * max1363_poll_func_th() th of trigger launched polling to ring buffer
+ *
+ * As sampling only occurs on i2c comms occurring, leave timestamping until
+ * then. Some triggers will generate their own time stamp. Currently
+ * there is no way of notifying them when no one cares.
+ *
+ * Runs in the trigger's (interrupt-ish) context, so the i2c transfer is
+ * deferred to the work queue bottom half.
+ **/
+void max1363_poll_func_th(struct iio_dev *indio_dev)
+{
+	struct max1363_state *st = indio_dev->dev_data;
+
+	schedule_work(&st->poll_work);
+
+	return;
+}
+/**
+ * max1363_poll_bh_to_ring() bh of trigger launched polling to ring buffer
+ * @work_s:	the work struct through which this was scheduled
+ *
+ * Currently there is no option in this driver to disable the saving of
+ * timestamps within the ring.
+ * The one-copy-at-a-time guard avoids problems if the trigger rate is set
+ * far too high and the reads then lock up the computer.
+ *
+ * Fix over the original: every early exit (guard tripped, monitor mode,
+ * allocation failure) returned without decrementing protect_ring, leaving
+ * the counter permanently non-zero and silently disabling all future
+ * polls.  All paths now balance the atomic_inc_return.
+ **/
+static void max1363_poll_bh_to_ring(struct work_struct *work_s)
+{
+	struct max1363_state *st = container_of(work_s, struct max1363_state,
+						poll_work);
+	struct iio_dev *indio_dev = st->indio_dev;
+	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(indio_dev->ring);
+	s64 time_ns;
+	__u8 *rxbuf;
+	int b_sent;
+	size_t d_size;
+
+	/* Ensure only one copy of this function running at a time */
+	if (atomic_inc_return(&st->protect_ring) > 1)
+		goto done_unlock;
+
+	/* Monitor mode prevents reading. Whilst not currently implemented
+	 * might as well have this test in here in the meantime as it does
+	 * no harm.
+	 */
+	if (st->current_mode->numvals == 0)
+		goto done_unlock;
+
+	/* Ensure the timestamp is 8 byte aligned */
+	d_size = st->current_mode->numvals*2 + sizeof(s64);
+	if (d_size % sizeof(s64))
+		d_size += sizeof(s64) - (d_size % sizeof(s64));
+
+	rxbuf = kmalloc(d_size, GFP_KERNEL);
+	if (rxbuf == NULL)
+		goto done_unlock;
+
+	b_sent = i2c_master_recv(st->client,
+				 rxbuf,
+				 st->current_mode->numvals*2);
+	if (b_sent < 0)
+		goto done_free;
+
+	time_ns = iio_get_time_ns();
+
+	memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));
+
+	indio_dev->ring->access.store_to(&ring->buf, rxbuf, time_ns);
+done_free:
+	kfree(rxbuf);
+done_unlock:
+	atomic_dec(&st->protect_ring);
+}
+
+
+/* Allocate a software ring buffer and wire up its callbacks, the trigger
+ * poll function and the bottom-half work item.  Flags the device as able
+ * to run triggered ring capture.  Returns 0 or a negative error code.
+ */
+int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
+{
+	struct max1363_state *st = indio_dev->dev_data;
+	int ret = 0;
+
+	indio_dev->ring = iio_sw_rb_allocate(indio_dev);
+	if (!indio_dev->ring) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	/* Effectively select the ring buffer implementation */
+	/* NOTE(review): st->indio_dev is the same device as the indio_dev
+	 * parameter here - using the parameter directly would be clearer. */
+	iio_ring_sw_register_funcs(&st->indio_dev->ring->access);
+	indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
+	if (indio_dev->pollfunc == NULL) {
+		ret = -ENOMEM;
+		goto error_deallocate_sw_rb;
+	}
+	/* Configure the polling function called on trigger interrupts */
+	indio_dev->pollfunc->poll_func_main = &max1363_poll_func_th;
+	indio_dev->pollfunc->private_data = indio_dev;
+
+	/* Ring buffer functions - here trigger setup related */
+	indio_dev->ring->postenable = &max1363_ring_postenable;
+	indio_dev->ring->preenable = &max1363_ring_preenable;
+	indio_dev->ring->predisable = &max1363_ring_predisable;
+	INIT_WORK(&st->poll_work, &max1363_poll_bh_to_ring);
+
+	/* Flag that polled ring buffering is possible */
+	indio_dev->modes |= INDIO_RING_TRIGGERED;
+	return 0;
+error_deallocate_sw_rb:
+	iio_sw_rb_free(indio_dev->ring);
+error_ret:
+	return ret;
+}
+
+/* Reverse of max1363_register_ring_funcs_and_init: detach any trigger,
+ * then free the poll function and the software ring buffer.
+ */
+void max1363_ring_cleanup(struct iio_dev *indio_dev)
+{
+	/* ensure that the trigger has been detached */
+	if (indio_dev->trig) {
+		iio_put_trigger(indio_dev->trig);
+		iio_trigger_dettach_poll_func(indio_dev->trig,
+					      indio_dev->pollfunc);
+	}
+	kfree(indio_dev->pollfunc);
+	iio_sw_rb_free(indio_dev->ring);
+}
+
+/* Unregister the ring buffer's chrdev/sysfs interface.
+ * (Stray semicolon after the function body removed - ISO C forbids it.)
+ */
+void max1363_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+	iio_ring_buffer_unregister(ring);
+}
+
+/* Register the ring buffer's chrdev/sysfs interface.
+ * (Stray semicolon after the function body removed - ISO C forbids it.)
+ */
+int max1363_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return iio_ring_buffer_register(ring);
+}
diff --git a/drivers/staging/iio/chrdev.h b/drivers/staging/iio/chrdev.h
new file mode 100644
index 0000000..8bc64bf
--- /dev/null
+++ b/drivers/staging/iio/chrdev.h
@@ -0,0 +1,118 @@
+/* The industrial I/O core - character device related
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _IIO_CHRDEV_H_
+#define _IIO_CHRDEV_H_
+struct iio_dev;
+
+/**
+ * struct iio_handler - Structure used to specify file operations
+ *			for a particular chrdev
+ * @chrdev:	character device structure
+ * @id:		the location in the handler table - used for deallocation.
+ * @flags:	file operations related flags including busy flag
+ *		(see IIO_BUSY_BIT_POS).
+ * @private:	handler specific data used by the fileops registered with
+ *		the chrdev.
+ */
+struct iio_handler {
+	struct cdev	chrdev;
+	int		id;
+	unsigned long	flags;
+	void		*private;
+};
+
+/* Recover the iio_handler from its embedded cdev */
+#define iio_cdev_to_handler(cd)				\
+	container_of(cd, struct iio_handler, chrdev)
+
+/**
+ * struct iio_event_data - The actual event being pushed to userspace
+ * @id:		event identifier
+ * @timestamp:	best estimate of time of event occurrence (often from
+ *		the interrupt handler)
+ */
+struct iio_event_data {
+	int	id;
+	s64	timestamp;
+};
+
+/**
+ * struct iio_detected_event_list - list element for events that have occurred
+ * @list:		linked list header
+ * @ev:			the event itself
+ * @shared_pointer:	used when the event is shared - i.e. can be escalated
+ *			on demand (eg ring buffer 50%->100% full)
+ */
+struct iio_detected_event_list {
+	struct list_head		list;
+	struct iio_event_data		ev;
+	struct iio_shared_ev_pointer	*shared_pointer;
+};
+/**
+ * struct iio_shared_ev_pointer - allows shared events to identify if currently
+ *				in the detected event list
+ * @ev_p:	pointer to detected event list element (null if not in list)
+ * @lock:	protect this element to prevent simultaneous edit and remove
+ */
+struct iio_shared_ev_pointer {
+	struct iio_detected_event_list	*ev_p;
+	spinlock_t			lock;
+};
+
+/**
+ * struct iio_event_interface - chrdev interface for an event line
+ * @dev:		device associated with event interface
+ * @handler:		fileoperations and related control for the chrdev
+ * @wait:		wait queue to allow blocking reads of events
+ * @event_list_lock:	mutex to protect the list of detected events
+ * @det_events:		list of detected events
+ * @max_events:		maximum number of events before new ones are dropped
+ * @current_events:	number of events in detected list
+ * @id:			identifier to allow the event interface to know which
+ *			physical line it corresponds to
+ * @attr:		sysfs attribute exposing the chrdev minor number
+ * @owner:		ensure the driver module owns the file, not iio
+ * @private:		driver specific data
+ * @_name:		used internally to store the sysfs name for minor id
+ *			attribute
+ * @_attrname:		backing storage for the minor id attribute name
+ */
+struct iio_event_interface {
+	struct device				dev;
+	struct iio_handler			handler;
+	wait_queue_head_t			wait;
+	struct mutex				event_list_lock;
+	struct iio_detected_event_list		det_events;
+	int					max_events;
+	int					current_events;
+	int					id;
+	struct iio_chrdev_minor_attr		attr;
+	struct module				*owner;
+	void					*private;
+	char					_name[20];
+	char					_attrname[20];
+};
+
+/**
+ * struct iio_event_handler_list - element in list of handlers for events
+ * @list:	list header
+ * @refcount:	as the handler may be shared between multiple device
+ *		side events, reference counting ensures clean removal
+ * @exist_lock:	prevents race conditions related to refcount usage.
+ * @handler:	event handler function - called on event if this
+ *		event_handler is enabled.
+ *
+ * Each device has one list of these per interrupt line
+ **/
+struct iio_event_handler_list {
+	struct list_head	list;
+	int			refcount;
+	struct mutex		exist_lock;
+	int (*handler)(struct iio_dev *dev_info, int index, s64 timestamp,
+		       int no_test);
+};
+
+#endif
diff --git a/drivers/staging/iio/iio.h b/drivers/staging/iio/iio.h
new file mode 100644
index 0000000..25ccb80
--- /dev/null
+++ b/drivers/staging/iio/iio.h
@@ -0,0 +1,411 @@
+/* The industrial I/O core
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _INDUSTRIAL_IO_H_
+#define _INDUSTRIAL_IO_H_
+
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include "sysfs.h"
+#include "chrdev.h"
+
+/* IIO TODO LIST */
+/* Static device specific elements (conversion factors etc)
+ * should be exported via sysfs
+ *
+ * Provide means of adjusting timer accuracy.
+ * Currently assumes nano seconds.
+ */
+
+/* Event interface flags */
+#define IIO_BUSY_BIT_POS 1
+
+struct iio_dev;
+
+/**
+ * iio_get_time_ns() - utility function to get a time stamp for events etc
+ *
+ * Based on the realtime clock (getnstimeofday): nanosecond resolution
+ * when hrtimers are available, microsecond otherwise.
+ **/
+static inline s64 iio_get_time_ns(void)
+{
+	struct timespec current_ts;
+
+	ktime_get_real_ts(&current_ts);
+	return timespec_to_ns(&current_ts);
+}
+
+/**
+ * iio_add_event_to_list() - Wraps adding to event lists
+ * @el: the list element of the event to be handled.
+ * @head: the list associated with the event handler being used.
+ *
+ * Does reference counting to allow shared handlers.
+ **/
+void iio_add_event_to_list(struct iio_event_handler_list *el,
+ struct list_head *head);
+
+/**
+ * iio_remove_event_from_list() - Wraps removing from event list
+ * @el: element to be removed
+ * @head:	associated list head for the interrupt handler.
+ *
+ * Does reference counting to allow shared handlers.
+ **/
+void iio_remove_event_from_list(struct iio_event_handler_list *el,
+ struct list_head *head);
+
+/* Device operating modes */
+#define INDIO_DIRECT_MODE 0x01
+#define INDIO_RING_TRIGGERED 0x02
+#define INDIO_RING_HARDWARE_BUFFER 0x08
+
+#define INDIO_ALL_RING_MODES (INDIO_RING_TRIGGERED | INDIO_RING_HARDWARE_BUFFER)
+
+/* Vast majority of this is set by the industrialio subsystem on a
+ * call to iio_device_register. */
+
+/**
+ * struct iio_dev - industrial I/O device
+ * @id:		[INTERN] used to identify device internally
+ * @dev_data:	[DRIVER] device specific data
+ * @modes:	[DRIVER] operating modes supported by device
+ * @currentmode: [DRIVER] current operating mode
+ * @dev:	[DRIVER] device structure, should be assigned a parent
+ *		and owner
+ * @attrs:	[DRIVER] general purpose device attributes
+ * @driver_module: [DRIVER] module structure used to ensure correct
+ *		ownership of chrdevs etc
+ * @num_interrupt_lines: [DRIVER] number of physical interrupt lines from device
+ * @interrupts:	[INTERN] interrupt line specific event lists etc
+ * @event_attrs: [DRIVER] event control attributes
+ * @event_conf_attrs: [DRIVER] event configuration attributes
+ * @event_interfaces: [INTERN] event chrdevs associated with interrupt lines
+ * @ring:	[DRIVER] any ring buffer present
+ * @mlock:	[INTERN] lock used to prevent simultaneous device state
+ *		changes
+ * @scan_el_attrs: [DRIVER] control of scan elements if that scan mode
+ *		control method is used
+ * @scan_count:	[INTERN] the number of elements in the current scan mode
+ * @scan_mask:	[INTERN] bitmask used in masking scan mode elements
+ *		(u16, so at most 16 scan elements - see IIO_MAX_SCAN_LENGTH)
+ * @scan_timestamp: [INTERN] does the scan mode include a timestamp
+ * @trig:	[INTERN] current device trigger (ring buffer modes)
+ * @pollfunc:	[DRIVER] function run on trigger being received
+ **/
+struct iio_dev {
+	int id;
+	void *dev_data;
+	int modes;
+	int currentmode;
+	struct device dev;
+	const struct attribute_group *attrs;
+	struct module *driver_module;
+
+	int num_interrupt_lines;
+	struct iio_interrupt **interrupts;
+	struct attribute_group *event_attrs;
+	struct attribute_group *event_conf_attrs;
+
+	struct iio_event_interface *event_interfaces;
+
+	struct iio_ring_buffer *ring;
+	struct mutex mlock;
+
+	struct attribute_group *scan_el_attrs;
+	int scan_count;
+
+	u16 scan_mask;
+	bool scan_timestamp;
+	struct iio_trigger *trig;
+	struct iio_poll_func *pollfunc;
+};
+
+/*
+ * These are mainly provided to allow for a change of implementation if a device
+ * has a large number of scan elements
+ */
+#define IIO_MAX_SCAN_LENGTH 15
+
+/**
+ * iio_scan_mask_query() - test whether a scan element is enabled
+ * @dev_info:	iio device whose scan mode is queried
+ * @bit:	scan element number
+ *
+ * Returns 1 if the element is in the current scan mask, 0 if not,
+ * -EINVAL if @bit is out of range.
+ **/
+static inline int iio_scan_mask_query(struct iio_dev *dev_info, int bit)
+{
+	/* Reject negative element numbers too - a negative shift count
+	 * is undefined behaviour. */
+	if (bit < 0 || bit > IIO_MAX_SCAN_LENGTH)
+		return -EINVAL;
+	return !!(dev_info->scan_mask & (1 << bit));
+}
+
+/**
+ * iio_scan_mask_set() - add a scan element to the current scan mode
+ * @dev_info:	iio device whose scan mode is updated
+ * @bit:	scan element number
+ *
+ * Returns 0 on success, -EINVAL if @bit is out of range.
+ **/
+static inline int iio_scan_mask_set(struct iio_dev *dev_info, int bit)
+{
+	/* Reject negative element numbers too - a negative shift count
+	 * is undefined behaviour. */
+	if (bit < 0 || bit > IIO_MAX_SCAN_LENGTH)
+		return -EINVAL;
+	dev_info->scan_mask |= (1 << bit);
+	dev_info->scan_count++;
+	return 0;
+}
+
+/**
+ * iio_scan_mask_clear() - remove a scan element from the current scan mode
+ * @dev_info:	iio device whose scan mode is updated
+ * @bit:	scan element number
+ *
+ * Returns 0 on success, -EINVAL if @bit is out of range.
+ * Note: scan_count is decremented unconditionally, even if the bit was
+ * already clear, matching iio_scan_mask_set()'s unconditional increment.
+ **/
+static inline int iio_scan_mask_clear(struct iio_dev *dev_info, int bit)
+{
+	/* Reject negative element numbers too - a negative shift count
+	 * is undefined behaviour. */
+	if (bit < 0 || bit > IIO_MAX_SCAN_LENGTH)
+		return -EINVAL;
+	dev_info->scan_mask &= ~(1 << bit);
+	dev_info->scan_count--;
+	return 0;
+}
+
+/**
+ * iio_scan_mask_count_to_right() - how many scan elements occur before here
+ * @dev_info:	the iio_device whose scan mode we are querying
+ * @bit:	which number scan element is this
+ *
+ * Counts the enabled scan elements at positions strictly below @bit.
+ **/
+static inline int iio_scan_mask_count_to_right(struct iio_dev *dev_info,
+					       int bit)
+{
+	int pos, found = 0;
+
+	if (bit > IIO_MAX_SCAN_LENGTH)
+		return -EINVAL;
+	for (pos = 0; pos < bit; pos++)
+		if (dev_info->scan_mask & (1 << pos))
+			found++;
+
+	return found;
+}
+
+/**
+ * iio_device_register() - register a device with the IIO subsystem
+ * @dev_info: Device structure filled by the device driver
+ **/
+int iio_device_register(struct iio_dev *dev_info);
+
+/**
+ * iio_device_unregister() - unregister a device from the IIO subsystem
+ * @dev_info: Device structure representing the device.
+ **/
+void iio_device_unregister(struct iio_dev *dev_info);
+
+/**
+ * struct iio_interrupt - wrapper used to allow easy handling of multiple
+ *			physical interrupt lines
+ * @dev_info:	the iio device to which this interrupt line belongs
+ * @line_number: associated line number
+ * @id:		idr allocated unique id number
+ * @irq:	associated interrupt number
+ * @ev_list:	event handler list for associated events
+ * @ev_list_lock: ensure only one access to list at a time
+ **/
+struct iio_interrupt {
+	struct iio_dev *dev_info;
+	int line_number;
+	int id;
+	int irq;
+	struct list_head ev_list;
+	spinlock_t ev_list_lock;
+};
+
+#define to_iio_interrupt(i) container_of(i, struct iio_interrupt, ev_list)
+
+/**
+ * iio_register_interrupt_line() - Tell IIO about interrupt lines
+ *
+ * @irq: Typically provided via platform data
+ * @dev_info: IIO device info structure for device
+ * @line_number: Which interrupt line of the device is this?
+ * @type: Interrupt type (e.g. edge triggered etc)
+ * @name: Identifying name.
+ **/
+int iio_register_interrupt_line(unsigned int irq,
+ struct iio_dev *dev_info,
+ int line_number,
+ unsigned long type,
+ const char *name);
+
+void iio_unregister_interrupt_line(struct iio_dev *dev_info,
+ int line_number);
+
+
+
+/**
+ * iio_push_event() - try to add event to the list for userspace reading
+ * @dev_info: IIO device structure
+ * @ev_line: Which event line (hardware interrupt)
+ * @ev_code: What event
+ * @timestamp: When the event occurred
+ **/
+int iio_push_event(struct iio_dev *dev_info,
+ int ev_line,
+ int ev_code,
+ s64 timestamp);
+
+/**
+ * struct iio_work_cont - container for when singleton handler case matters
+ * @ws:		[DEVICE] work_struct used when this is not the only possible
+ *		event source
+ * @ws_nocheck:	[DEVICE] work_struct used when this is the only possible
+ *		event source
+ * @address:	[DEVICE] associated register address
+ * @mask:	[DEVICE] associated mask for identifying event source
+ * @st:		[DEVICE] device specific state information
+ **/
+struct iio_work_cont {
+	struct work_struct ws;
+	struct work_struct ws_nocheck;
+	int address;
+	int mask;
+	void *st;
+};
+
+#define to_iio_work_cont_check(_ws) \
+ container_of(_ws, struct iio_work_cont, ws)
+
+#define to_iio_work_cont_no_check(_ws) \
+ container_of(_ws, struct iio_work_cont, ws_nocheck)
+
+/**
+ * iio_init_work_cont() - initialize the elements of a work container
+ * @cont: the work container
+ * @_checkfunc: function called when there are multiple possible int sources
+ * @_nocheckfunc: function for when there is only one int source
+ * @_add: driver dependent, typically a register address
+ * @_mask: driver dependent, typically a bit mask for a register
+ * @_st: driver dependent, typically pointer to a device state structure
+ **/
+static inline void
+iio_init_work_cont(struct iio_work_cont *cont,
+		   void (*_checkfunc)(struct work_struct *),
+		   void (*_nocheckfunc)(struct work_struct *),
+		   int _add, int _mask, void *_st)
+{
+	INIT_WORK(&(cont)->ws, _checkfunc);
+	INIT_WORK(&(cont)->ws_nocheck, _nocheckfunc);
+	cont->address = _add;
+	cont->mask = _mask;
+	cont->st = _st;
+}
+/**
+ * __iio_push_event() tries to add an event to the list associated with a chrdev
+ * @ev_int: the event interface to which we are pushing the event
+ * @ev_code: the outgoing event code
+ * @timestamp: timestamp of the event
+ * @shared_pointer_p: the shared event pointer
+ **/
+int __iio_push_event(struct iio_event_interface *ev_int,
+ int ev_code,
+ s64 timestamp,
+ struct iio_shared_ev_pointer*
+ shared_pointer_p);
+/**
+ * __iio_change_event() - change an event code in case of event escalation
+ * @ev: the event to be changed
+ * @ev_code: new event code
+ * @timestamp: new timestamp
+ **/
+void __iio_change_event(struct iio_detected_event_list *ev,
+ int ev_code,
+ s64 timestamp);
+
+/**
+ * iio_setup_ev_int() Configure an event interface (chrdev)
+ * @name: name used for resulting sysfs directory etc.
+ * @ev_int: interface we are configuring
+ * @owner: module that is responsible for registering this ev_int
+ * @dev: device whose ev_int this is
+ **/
+int iio_setup_ev_int(struct iio_event_interface *ev_int,
+ const char *name,
+ struct module *owner,
+ struct device *dev);
+
+void iio_free_ev_int(struct iio_event_interface *ev_int);
+
+/**
+ * iio_allocate_chrdev() - Allocate a chrdev
+ * @handler: struct that contains relevant file handling for chrdev
+ * @dev_info: iio_dev for which chrdev is being created
+ **/
+int iio_allocate_chrdev(struct iio_handler *handler, struct iio_dev *dev_info);
+void iio_deallocate_chrdev(struct iio_handler *handler);
+
+/* Used to distinguish between bipolar and unipolar scan elemenents.
+ * Whilst this may seem obvious, we may well want to change the representation
+ * in the future!*/
+#define IIO_SIGNED(a) -(a)
+#define IIO_UNSIGNED(a) (a)
+
+extern dev_t iio_devt;
+extern struct class iio_class;
+
+/**
+ * iio_put_device() - reference counted deallocated of struct device
+ * @dev: the iio_device containing the device
+ **/
+static inline void iio_put_device(struct iio_dev *dev)
+{
+ if (dev)
+ put_device(&dev->dev);
+};
+
+/**
+ * to_iio_dev() - get iio_dev for which we have have the struct device
+ * @d: the struct device
+ **/
+static inline struct iio_dev *to_iio_dev(struct device *d)
+{
+ return container_of(d, struct iio_dev, dev);
+};
+
+/**
+ * iio_dev_get_devdata() - helper function gets device specific data
+ * @d: the iio_dev associated with the device
+ **/
+static inline void *iio_dev_get_devdata(struct iio_dev *d)
+{
+ return d->dev_data;
+}
+
+/**
+ * iio_allocate_device() - allocate an iio_dev from a driver
+ **/
+struct iio_dev *iio_allocate_device(void);
+
+/**
+ * iio_free_device() - free an iio_dev from a driver
+ **/
+void iio_free_device(struct iio_dev *dev);
+
+/**
+ * iio_put() - internal module reference count reduce
+ **/
+void iio_put(void);
+
+/**
+ * iio_get() - internal module reference count increase
+ **/
+void iio_get(void);
+
+/* Ring buffer related */
+int iio_device_get_chrdev_minor(void);
+void iio_device_free_chrdev_minor(int val);
+
+/**
+ * iio_ring_enabled() helper function to test if any form of ring enabled
+ **/
+static inline bool iio_ring_enabled(struct iio_dev *dev_info)
+{
+ return dev_info->currentmode
+ & (INDIO_RING_TRIGGERED
+ | INDIO_RING_HARDWARE_BUFFER);
+};
+
+struct idr;
+
+int iio_get_new_idr_val(struct idr *this_idr);
+void iio_free_idr_val(struct idr *this_idr, int id);
+#endif /* _INDUSTRIAL_IO_H_ */
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
new file mode 100644
index 0000000..660a9c1
--- /dev/null
+++ b/drivers/staging/iio/industrialio-core.c
@@ -0,0 +1,851 @@
+/* The industrial I/O core
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Based on elements of hwmon and input subsystems.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+#include "iio.h"
+#include "trigger_consumer.h"
+
+#define IIO_ID_PREFIX "device"
+#define IIO_ID_FORMAT IIO_ID_PREFIX "%d"
+
+/* IDR to assign each registered device a unique id*/
+static DEFINE_IDR(iio_idr);
+
+/* IDR for general event identifiers */
+static DEFINE_IDR(iio_event_idr);
+/* IDR to allocate character device minor numbers */
+static DEFINE_IDR(iio_chrdev_idr);
+/* Lock used to protect both of the above */
+static DEFINE_SPINLOCK(iio_idr_lock);
+
+dev_t iio_devt;
+EXPORT_SYMBOL(iio_devt);
+
+#define IIO_DEV_MAX 256
+/* Place all IIO chrdev nodes in an "iio/" subdirectory of /dev */
+static char *iio_nodename(struct device *d)
+{
+	return kasprintf(GFP_KERNEL, "iio/%s", dev_name(d));
+}
+
+/* Sysfs class under which all IIO devices and event chrdevs are grouped */
+struct class iio_class = {
+	.name = "iio",
+	.nodename = iio_nodename,
+};
+EXPORT_SYMBOL(iio_class);
+
+/* Rewrite an already-queued event in place - used to escalate an event
+ * (new code and timestamp) before userspace has consumed it. */
+void __iio_change_event(struct iio_detected_event_list *ev,
+			int ev_code,
+			s64 timestamp)
+{
+	ev->ev.id = ev_code;
+	ev->ev.timestamp = timestamp;
+}
+EXPORT_SYMBOL(__iio_change_event);
+
+/* Used both in the interrupt line put events and the ring buffer ones */
+
+/* Note that in its current form someone has to be listening before events
+ * are queued. Hence a client MUST open the chrdev before the ring buffer is
+ * switched on.
+ */
+int __iio_push_event(struct iio_event_interface *ev_int,
+		     int ev_code,
+		     s64 timestamp,
+		     struct iio_shared_ev_pointer *
+		     shared_pointer_p)
+{
+	struct iio_detected_event_list *ev;
+	int ret = 0;
+
+	/* Does anyone care? */
+	mutex_lock(&ev_int->event_list_lock);
+	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags)) {
+		/* Queue full: silently drop the event.  Previously this
+		 * returned directly with event_list_lock still held. */
+		if (ev_int->current_events == ev_int->max_events)
+			goto error_unlock;
+		ev = kmalloc(sizeof(*ev), GFP_KERNEL);
+		if (ev == NULL) {
+			/* Previously jumped to a bare return with the
+			 * mutex still held - unlock on this path too. */
+			ret = -ENOMEM;
+			goto error_unlock;
+		}
+		ev->ev.id = ev_code;
+		ev->ev.timestamp = timestamp;
+		ev->shared_pointer = shared_pointer_p;
+		if (ev->shared_pointer)
+			shared_pointer_p->ev_p = ev;
+
+		list_add_tail(&ev->list, &ev_int->det_events.list);
+		ev_int->current_events++;
+		mutex_unlock(&ev_int->event_list_lock);
+		wake_up_interruptible(&ev_int->wait);
+	} else
+		mutex_unlock(&ev_int->event_list_lock);
+
+	return ret;
+
+error_unlock:
+	mutex_unlock(&ev_int->event_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(__iio_push_event);
+
+/* Queue an event on the chrdev of the given interrupt line.  Thin wrapper
+ * around __iio_push_event() with no shared event pointer. */
+int iio_push_event(struct iio_dev *dev_info,
+		   int ev_line,
+		   int ev_code,
+		   s64 timestamp)
+{
+	return __iio_push_event(&dev_info->event_interfaces[ev_line],
+				ev_code, timestamp, NULL);
+}
+EXPORT_SYMBOL(iio_push_event);
+
+/* Generic interrupt line interrupt handler */
+irqreturn_t iio_interrupt_handler(int irq, void *_int_info)
+{
+	struct iio_interrupt *int_info = _int_info;
+	struct iio_dev *dev_info = int_info->dev_info;
+	struct iio_event_handler_list *p;
+	s64 time_ns;
+	unsigned long flags;
+
+	spin_lock_irqsave(&int_info->ev_list_lock, flags);
+	if (list_empty(&int_info->ev_list)) {
+		/* No handlers registered on this line - not our interrupt */
+		spin_unlock_irqrestore(&int_info->ev_list_lock, flags);
+		return IRQ_NONE;
+	}
+
+	/* Take the timestamp once so all handlers see the same value */
+	time_ns = iio_get_time_ns();
+	/* detect single element list*/
+	if (list_is_singular(&int_info->ev_list)) {
+		disable_irq_nosync(irq);
+		p = list_first_entry(&int_info->ev_list,
+				     struct iio_event_handler_list,
+				     list);
+		/* single event handler - maybe shared */
+		p->handler(dev_info, 1, time_ns, !(p->refcount > 1));
+	} else
+		list_for_each_entry(p, &int_info->ev_list, list) {
+			/* NOTE(review): disable_irq_nosync() is invoked once
+			 * per handler - presumably only the first call
+			 * matters; confirm intent. */
+			disable_irq_nosync(irq);
+			p->handler(dev_info, 1, time_ns, 0);
+		}
+	spin_unlock_irqrestore(&int_info->ev_list_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/* Allocate an iio_interrupt with its handler list and lock initialized.
+ * Returns NULL on allocation failure. */
+static struct iio_interrupt *iio_allocate_interrupt(void)
+{
+	struct iio_interrupt *intr;
+
+	intr = kmalloc(sizeof(*intr), GFP_KERNEL);
+	if (intr != NULL) {
+		spin_lock_init(&intr->ev_list_lock);
+		INIT_LIST_HEAD(&intr->ev_list);
+	}
+	return intr;
+}
+
+/* Confirming the validity of supplied irq is left to drivers.*/
+int iio_register_interrupt_line(unsigned int irq,
+				struct iio_dev *dev_info,
+				int line_number,
+				unsigned long type,
+				const char *name)
+{
+	int ret;
+
+	dev_info->interrupts[line_number] = iio_allocate_interrupt();
+	if (dev_info->interrupts[line_number] == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	dev_info->interrupts[line_number]->line_number = line_number;
+	dev_info->interrupts[line_number]->irq = irq;
+	dev_info->interrupts[line_number]->dev_info = dev_info;
+
+	/* Possibly only request on demand?
+	 * Can see this may complicate the handling of interrupts.
+	 * However, with this approach we might end up handling lots of
+	 * events no-one cares about.*/
+	ret = request_irq(irq,
+			  &iio_interrupt_handler,
+			  type,
+			  name,
+			  dev_info->interrupts[line_number]);
+	if (ret) {
+		/* Don't leak the iio_interrupt when the irq request fails */
+		kfree(dev_info->interrupts[line_number]);
+		dev_info->interrupts[line_number] = NULL;
+	}
+
+error_ret:
+	return ret;
+}
+EXPORT_SYMBOL(iio_register_interrupt_line);
+
+/* This turns up an awful lot */
+/* Generic sysfs show callback: emit the constant string stored alongside
+ * the attribute (see to_iio_const_attr()). */
+ssize_t iio_read_const_attr(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string);
+}
+EXPORT_SYMBOL(iio_read_const_attr);
+
+/* Before this runs the interrupt generator must have been disabled */
+void iio_unregister_interrupt_line(struct iio_dev *dev_info, int line_number)
+{
+	/* make sure the interrupt handlers are all done */
+	flush_scheduled_work();
+	free_irq(dev_info->interrupts[line_number]->irq,
+		 dev_info->interrupts[line_number]);
+	kfree(dev_info->interrupts[line_number]);
+}
+EXPORT_SYMBOL(iio_unregister_interrupt_line);
+
+/* Reference counted add and remove */
+void iio_add_event_to_list(struct iio_event_handler_list *el,
+			   struct list_head *head)
+{
+	unsigned long flags;
+	struct iio_interrupt *inter = to_iio_interrupt(head);
+
+	/* take mutex to protect this element */
+	mutex_lock(&el->exist_lock);
+	if (el->refcount == 0) {
+		/* First user: actually link the element onto the interrupt
+		 * line's handler list.  Take the event list spin lock as the
+		 * list is also walked from iio_interrupt_handler(). */
+		spin_lock_irqsave(&inter->ev_list_lock, flags);
+		list_add(&el->list, head);
+		spin_unlock_irqrestore(&inter->ev_list_lock, flags);
+	}
+	el->refcount++;
+	mutex_unlock(&el->exist_lock);
+}
+EXPORT_SYMBOL(iio_add_event_to_list);
+
+/* Drop one reference on a shared handler; unlink it from the interrupt
+ * line's list only when the last reference is gone. */
+void iio_remove_event_from_list(struct iio_event_handler_list *el,
+				struct list_head *head)
+{
+	unsigned long flags;
+	struct iio_interrupt *inter = to_iio_interrupt(head);
+
+	mutex_lock(&el->exist_lock);
+	el->refcount--;
+	if (el->refcount == 0) {
+		/* Take the event list spin lock */
+		spin_lock_irqsave(&inter->ev_list_lock, flags);
+		list_del_init(&el->list);
+		spin_unlock_irqrestore(&inter->ev_list_lock, flags);
+	}
+	mutex_unlock(&el->exist_lock);
+}
+EXPORT_SYMBOL(iio_remove_event_from_list);
+
+/* Read one queued event out to userspace, blocking (unless O_NONBLOCK)
+ * until an event is available.  Returns the number of bytes copied or a
+ * negative error code. */
+ssize_t iio_event_chrdev_read(struct file *filep,
+			      char *buf,
+			      size_t count,
+			      loff_t *f_ps)
+{
+	struct iio_event_interface *ev_int = filep->private_data;
+	struct iio_detected_event_list *el;
+	int ret;
+	size_t len;
+
+	mutex_lock(&ev_int->event_list_lock);
+	if (list_empty(&ev_int->det_events.list)) {
+		if (filep->f_flags & O_NONBLOCK) {
+			ret = -EAGAIN;
+			goto error_mutex_unlock;
+		}
+		mutex_unlock(&ev_int->event_list_lock);
+		/* Blocking on device; waiting for something to be there */
+		ret = wait_event_interruptible(ev_int->wait,
+					       !list_empty(&ev_int
+							   ->det_events.list));
+		if (ret)
+			goto error_ret;
+		/* Single access device so noone else can get the data */
+		mutex_lock(&ev_int->event_list_lock);
+	}
+
+	el = list_first_entry(&ev_int->det_events.list,
+			      struct iio_detected_event_list,
+			      list);
+	len = sizeof el->ev;
+	if (copy_to_user(buf, &(el->ev), len)) {
+		ret = -EFAULT;
+		goto error_mutex_unlock;
+	}
+	list_del(&el->list);
+	ev_int->current_events--;
+	mutex_unlock(&ev_int->event_list_lock);
+	/*
+	 * Possible concurrency issue if an update of this event is on its way
+	 * through. May lead to new event being removed whilst the reported
+	 * event was the unescalated event. In typical use case this is not a
+	 * problem as userspace will say read half the buffer due to a 50%
+	 * full event which would make the correct 100% full incorrect anyway.
+	 */
+	if (el->shared_pointer) {
+		/* Test for a shared pointer BEFORE dereferencing it to take
+		 * the lock - previously the spinlock was taken through a
+		 * possibly NULL pointer ahead of the check. */
+		spin_lock(&el->shared_pointer->lock);
+		el->shared_pointer->ev_p = NULL;
+		spin_unlock(&el->shared_pointer->lock);
+	}
+
+	kfree(el);
+
+	return len;
+
+error_mutex_unlock:
+	mutex_unlock(&ev_int->event_list_lock);
+error_ret:
+
+	return ret;
+}
+
+/* Release the (single-open) event chrdev: clear the busy flag and discard
+ * any still-queued events so a subsequent open starts clean. */
+int iio_event_chrdev_release(struct inode *inode, struct file *filep)
+{
+	struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
+	struct iio_event_interface *ev_int = hand->private;
+	struct iio_detected_event_list *el, *t;
+
+	mutex_lock(&ev_int->event_list_lock);
+	clear_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags);
+	/*
+	 * In order to maintain a clean state for reopening,
+	 * clear out any awaiting events. The mask will prevent
+	 * any new __iio_push_event calls running.
+	 */
+	list_for_each_entry_safe(el, t, &ev_int->det_events.list, list) {
+		list_del(&el->list);
+		kfree(el);
+	}
+	mutex_unlock(&ev_int->event_list_lock);
+
+	return 0;
+}
+
+/* Open the event chrdev.  Only one concurrent opener is allowed; the busy
+ * bit enforces exclusivity and also enables event queueing. */
+int iio_event_chrdev_open(struct inode *inode, struct file *filep)
+{
+	struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
+	struct iio_event_interface *ev_int = hand->private;
+
+	mutex_lock(&ev_int->event_list_lock);
+	if (test_and_set_bit(IIO_BUSY_BIT_POS, &hand->flags)) {
+		/* Already open: drop the fops reference taken by the VFS */
+		fops_put(filep->f_op);
+		mutex_unlock(&ev_int->event_list_lock);
+		return -EBUSY;
+	}
+	filep->private_data = hand->private;
+	mutex_unlock(&ev_int->event_list_lock);
+
+	return 0;
+}
+
+/* Event chrdevs are read-only, single-open devices */
+static const struct file_operations iio_event_chrdev_fileops = {
+	.read = iio_event_chrdev_read,
+	.release = iio_event_chrdev_release,
+	.open = iio_event_chrdev_open,
+	.owner = THIS_MODULE,
+};
+
+/* Device-model release callback for an event interface: tear down the
+ * chrdev and return its minor number once the last reference is gone.
+ * (Also drops the stray ';' that followed the function body.) */
+static void iio_event_dev_release(struct device *dev)
+{
+	struct iio_event_interface *ev_int
+		= container_of(dev, struct iio_event_interface, dev);
+	cdev_del(&ev_int->handler.chrdev);
+	iio_device_free_chrdev_minor(MINOR(dev->devt));
+}
+
+/* Device type shared by all event interface devices */
+static struct device_type iio_event_type = {
+	.release = iio_event_dev_release,
+};
+
+/* Allocate a chrdev minor number from the shared idr.  Returns the minor
+ * on success or a negative error code. */
+int iio_device_get_chrdev_minor(void)
+{
+	int ret, val;
+
+idr_again:
+	if (unlikely(idr_pre_get(&iio_chrdev_idr, GFP_KERNEL) == 0))
+		return -ENOMEM;
+	spin_lock(&iio_idr_lock);
+	ret = idr_get_new(&iio_chrdev_idr, NULL, &val);
+	spin_unlock(&iio_idr_lock);
+	if (unlikely(ret == -EAGAIN))
+		goto idr_again;
+	else if (unlikely(ret))
+		return ret;
+	if (val > IIO_DEV_MAX) {
+		/* Out of minors: release the id we just allocated rather
+		 * than leaking it in the idr. */
+		iio_device_free_chrdev_minor(val);
+		return -ENOMEM;
+	}
+	return val;
+}
+
+/* Return a chrdev minor number to the shared idr */
+void iio_device_free_chrdev_minor(int val)
+{
+	spin_lock(&iio_idr_lock);
+	idr_remove(&iio_chrdev_idr, val);
+	spin_unlock(&iio_idr_lock);
+}
+
+/* Configure an event interface: register its struct device and create the
+ * backing chrdev.  All interface state is initialized before cdev_add()
+ * makes the node reachable from userspace. */
+int iio_setup_ev_int(struct iio_event_interface *ev_int,
+		     const char *name,
+		     struct module *owner,
+		     struct device *dev)
+{
+	int ret, minor;
+
+	/* Parent the event device on the iio device */
+	ev_int->dev.class = &iio_class;
+	ev_int->dev.parent = dev;
+	ev_int->dev.type = &iio_event_type;
+	device_initialize(&ev_int->dev);
+
+	minor = iio_device_get_chrdev_minor();
+	if (minor < 0) {
+		ret = minor;
+		goto error_device_put;
+	}
+	ev_int->dev.devt = MKDEV(MAJOR(iio_devt), minor);
+	dev_set_name(&ev_int->dev, "%s", name);
+
+	ret = device_add(&ev_int->dev);
+	if (ret)
+		goto error_free_minor;
+
+	cdev_init(&ev_int->handler.chrdev, &iio_event_chrdev_fileops);
+	ev_int->handler.chrdev.owner = owner;
+
+	mutex_init(&ev_int->event_list_lock);
+	/* discussion point - make this variable? */
+	ev_int->max_events = 10;
+	ev_int->current_events = 0;
+	INIT_LIST_HEAD(&ev_int->det_events.list);
+	init_waitqueue_head(&ev_int->wait);
+	ev_int->handler.private = ev_int;
+	ev_int->handler.flags = 0;
+
+	ret = cdev_add(&ev_int->handler.chrdev, ev_int->dev.devt, 1);
+	if (ret)
+		goto error_unreg_device;
+
+	return 0;
+
+error_unreg_device:
+	device_unregister(&ev_int->dev);
+error_free_minor:
+	iio_device_free_chrdev_minor(minor);
+error_device_put:
+	put_device(&ev_int->dev);
+
+	return ret;
+}
+
+/* Tear down an event interface set up by iio_setup_ev_int().
+ * NOTE(review): device_unregister() already drops a reference; the extra
+ * put_device() here presumably balances the initial reference from
+ * device_initialize() - confirm the refcounting is balanced. */
+void iio_free_ev_int(struct iio_event_interface *ev_int)
+{
+	device_unregister(&ev_int->dev);
+	put_device(&ev_int->dev);
+}
+
+/* Reserve the IIO character device major/minor region */
+static int __init iio_dev_init(void)
+{
+	int err;
+
+	err = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
+	if (err < 0)
+		printk(KERN_ERR "%s: failed to allocate char dev region\n",
+		       __FILE__);
+
+	return err;
+}
+
+/* Release the chrdev region if iio_dev_init() successfully reserved it */
+static void __exit iio_dev_exit(void)
+{
+	if (iio_devt)
+		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
+}
+
+/* Subsystem init: register the sysfs class, then the chrdev region */
+static int __init iio_init(void)
+{
+	int ret;
+
+	/* Create sysfs class */
+	ret = class_register(&iio_class);
+	if (ret < 0) {
+		printk(KERN_ERR
+		       "%s could not create sysfs class\n",
+		       __FILE__);
+		goto error_nothing;
+	}
+
+	ret = iio_dev_init();
+	if (ret < 0)
+		goto error_unregister_class;
+
+	return 0;
+
+error_unregister_class:
+	class_unregister(&iio_class);
+error_nothing:
+	return ret;
+}
+
+/* Subsystem teardown in reverse order of iio_init() */
+static void __exit iio_exit(void)
+{
+	iio_dev_exit();
+	class_unregister(&iio_class);
+}
+
+/* Create the driver-supplied attribute groups under the device kobject.
+ * NOTE(review): if the scan element group fails, the main group is left
+ * registered while an error is returned - confirm the caller's unwind
+ * handles this partial state. */
+static int iio_device_register_sysfs(struct iio_dev *dev_info)
+{
+	int ret = 0;
+
+	ret = sysfs_create_group(&dev_info->dev.kobj, dev_info->attrs);
+	if (ret) {
+		dev_err(dev_info->dev.parent,
+			"Failed to register sysfs hooks\n");
+		goto error_ret;
+	}
+
+	if (dev_info->scan_el_attrs) {
+		ret = sysfs_create_group(&dev_info->dev.kobj,
+					 dev_info->scan_el_attrs);
+		if (ret)
+			dev_err(&dev_info->dev,
+				"Failed to add sysfs scan els\n");
+	}
+
+error_ret:
+	return ret;
+}
+
+/* Remove the attribute groups created by iio_device_register_sysfs() */
+static void iio_device_unregister_sysfs(struct iio_dev *dev_info)
+{
+	if (dev_info->scan_el_attrs)
+		sysfs_remove_group(&dev_info->dev.kobj,
+				   dev_info->scan_el_attrs);
+
+	sysfs_remove_group(&dev_info->dev.kobj, dev_info->attrs);
+}
+
+/* Allocate a new id from the given idr, retrying on -EAGAIN.  All IIO
+ * idrs share iio_idr_lock.  Returns the id (>= 0) or a negative error. */
+int iio_get_new_idr_val(struct idr *this_idr)
+{
+	int ret;
+	int val;
+
+idr_again:
+	if (unlikely(idr_pre_get(this_idr, GFP_KERNEL) == 0))
+		return -ENOMEM;
+
+	spin_lock(&iio_idr_lock);
+	ret = idr_get_new(this_idr, NULL, &val);
+	spin_unlock(&iio_idr_lock);
+	if (unlikely(ret == -EAGAIN))
+		goto idr_again;
+	else if (unlikely(ret))
+		return ret;
+
+	return val;
+}
+EXPORT_SYMBOL(iio_get_new_idr_val);
+
+/* Return an id previously obtained via iio_get_new_idr_val() */
+void iio_free_idr_val(struct idr *this_idr, int id)
+{
+	spin_lock(&iio_idr_lock);
+	idr_remove(this_idr, id);
+	spin_unlock(&iio_idr_lock);
+}
+EXPORT_SYMBOL(iio_free_idr_val);
+
+/* Assign the device a unique id from the caller-supplied idr.
+ * (The this_idr parameter was previously ignored in favour of the global
+ * iio_idr; the only caller passes &iio_idr, so behaviour is unchanged.) */
+static int iio_device_register_id(struct iio_dev *dev_info,
+				  struct idr *this_idr)
+{
+	dev_info->id = iio_get_new_idr_val(this_idr);
+	if (dev_info->id < 0)
+		return dev_info->id;
+	return 0;
+}
+
+/* Release the device id allocated in iio_device_register_id() */
+static void iio_device_unregister_id(struct iio_dev *dev_info)
+{
+	iio_free_idr_val(&iio_idr, dev_info->id);
+}
+
+/* Add all configuration attributes for interrupt line i into that line's
+ * event attribute group, unwinding any already added on failure. */
+static inline int __iio_add_event_config_attrs(struct iio_dev *dev_info, int i)
+{
+	int ret;
+	/*p for adding, q for removing */
+	struct attribute **attrp, **attrq;
+
+	if (dev_info->event_conf_attrs && dev_info->event_conf_attrs[i].attrs) {
+		attrp = dev_info->event_conf_attrs[i].attrs;
+		while (*attrp) {
+			ret = sysfs_add_file_to_group(&dev_info->dev.kobj,
+						      *attrp,
+						      dev_info
+						      ->event_attrs[i].name);
+			if (ret)
+				goto error_ret;
+			attrp++;
+		}
+	}
+	return 0;
+
+error_ret:
+	/* Remove only the files added before the failure */
+	attrq = dev_info->event_conf_attrs[i].attrs;
+	while (attrq != attrp) {
+		sysfs_remove_file_from_group(&dev_info->dev.kobj,
+					     *attrq,
+					     dev_info->event_attrs[i].name);
+		attrq++;
+	}
+
+	return ret;
+}
+
+/* Remove all configuration attributes for interrupt line i from that
+ * line's event attribute group.  Always returns 0. */
+static inline int __iio_remove_event_config_attrs(struct iio_dev *dev_info,
+						  int i)
+{
+	struct attribute **attrq;
+
+	if (dev_info->event_conf_attrs
+	    && dev_info->event_conf_attrs[i].attrs) {
+		attrq = dev_info->event_conf_attrs[i].attrs;
+		while (*attrq) {
+			sysfs_remove_file_from_group(&dev_info->dev.kobj,
+						     *attrq,
+						     dev_info
+						     ->event_attrs[i].name);
+			attrq++;
+		}
+	}
+
+	return 0;
+}
+
+/* Set up one event chrdev, sysfs group and config attrs per interrupt
+ * line, unwinding everything on failure.  Returns 0 or a negative error. */
+static int iio_device_register_eventset(struct iio_dev *dev_info)
+{
+	int ret = 0, i, j;
+
+	if (dev_info->num_interrupt_lines == 0)
+		return 0;
+
+	dev_info->event_interfaces =
+		kzalloc(sizeof(struct iio_event_interface)
+			*dev_info->num_interrupt_lines,
+			GFP_KERNEL);
+	if (dev_info->event_interfaces == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	dev_info->interrupts = kzalloc(sizeof(struct iio_interrupt *)
+				       *dev_info->num_interrupt_lines,
+				       GFP_KERNEL);
+	if (dev_info->interrupts == NULL) {
+		ret = -ENOMEM;
+		goto error_free_event_interfaces;
+	}
+
+	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
+		dev_info->event_interfaces[i].owner = dev_info->driver_module;
+		/* idr values are >= 0; only negative returns are errors.
+		 * (Previously "if (ret)" wrongly treated every non-zero id,
+		 * i.e. all but the very first allocation, as a failure.) */
+		ret = iio_get_new_idr_val(&iio_event_idr);
+		if (ret < 0)
+			goto error_free_setup_ev_ints;
+		else
+			dev_info->event_interfaces[i].id = ret;
+
+		snprintf(dev_info->event_interfaces[i]._name, 20,
+			 "event_line%d",
+			 dev_info->event_interfaces[i].id);
+
+		ret = iio_setup_ev_int(&dev_info->event_interfaces[i],
+				       (const char *)(dev_info
+						      ->event_interfaces[i]
+						      ._name),
+				       dev_info->driver_module,
+				       &dev_info->dev);
+		if (ret) {
+			dev_err(&dev_info->dev,
+				"Could not get chrdev interface\n");
+			iio_free_idr_val(&iio_event_idr,
+					 dev_info->event_interfaces[i].id);
+			goto error_free_setup_ev_ints;
+		}
+	}
+
+	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
+		snprintf(dev_info->event_interfaces[i]._attrname, 20,
+			 "event_line%d_sources", i);
+		dev_info->event_attrs[i].name
+			= (const char *)
+			(dev_info->event_interfaces[i]._attrname);
+		ret = sysfs_create_group(&dev_info->dev.kobj,
+					 &dev_info->event_attrs[i]);
+		if (ret) {
+			dev_err(&dev_info->dev,
+				"Failed to register sysfs for event attrs");
+			goto error_remove_sysfs_interfaces;
+		}
+	}
+
+	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
+		ret = __iio_add_event_config_attrs(dev_info, i);
+		if (ret)
+			goto error_unregister_config_attrs;
+	}
+
+	return 0;
+
+error_unregister_config_attrs:
+	/* Remove the config attrs added so far (previously passed the loop
+	 * limit "i" instead of "j" to the remove helper). */
+	for (j = 0; j < i; j++)
+		__iio_remove_event_config_attrs(dev_info, j);
+	/* Everything below was fully set up, so unwind all lines
+	 * (previously num_interrupt_lines - 1, leaking the last one). */
+	i = dev_info->num_interrupt_lines;
+error_remove_sysfs_interfaces:
+	for (j = 0; j < i; j++)
+		sysfs_remove_group(&dev_info->dev.kobj,
+				   &dev_info->event_attrs[j]);
+	i = dev_info->num_interrupt_lines;
+error_free_setup_ev_ints:
+	for (j = 0; j < i; j++) {
+		/* Previously indexed the idr free with "i" - every iteration
+		 * freed the same (failing) interface's id. */
+		iio_free_idr_val(&iio_event_idr,
+				 dev_info->event_interfaces[j].id);
+		iio_free_ev_int(&dev_info->event_interfaces[j]);
+	}
+	kfree(dev_info->interrupts);
+error_free_event_interfaces:
+	kfree(dev_info->event_interfaces);
+error_ret:
+
+	return ret;
+}
+
+/* Tear down everything created by iio_device_register_eventset() */
+static void iio_device_unregister_eventset(struct iio_dev *dev_info)
+{
+	int i;
+
+	if (dev_info->num_interrupt_lines == 0)
+		return;
+	for (i = 0; i < dev_info->num_interrupt_lines; i++)
+		sysfs_remove_group(&dev_info->dev.kobj,
+				   &dev_info->event_attrs[i]);
+
+	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
+		iio_free_idr_val(&iio_event_idr,
+				 dev_info->event_interfaces[i].id);
+		iio_free_ev_int(&dev_info->event_interfaces[i]);
+	}
+	kfree(dev_info->interrupts);
+	kfree(dev_info->event_interfaces);
+}
+
+/* Device-model release callback: free the iio_dev and drop the module
+ * reference taken in iio_allocate_device(). */
+static void iio_dev_release(struct device *device)
+{
+	struct iio_dev *dev = to_iio_dev(device);
+
+	iio_put();
+	kfree(dev);
+}
+
+/* Device type shared by all iio_dev devices */
+static struct device_type iio_dev_type = {
+	.name = "iio_device",
+	.release = iio_dev_release,
+};
+
+/* Allocate and minimally initialize an iio_dev for a driver.  Takes a
+ * module reference on the core (dropped again in iio_dev_release()).
+ * Returns NULL on allocation failure. */
+struct iio_dev *iio_allocate_device(void)
+{
+	struct iio_dev *iiodev;
+
+	iiodev = kzalloc(sizeof(*iiodev), GFP_KERNEL);
+	if (iiodev == NULL)
+		return NULL;
+
+	iiodev->dev.type = &iio_dev_type;
+	iiodev->dev.class = &iio_class;
+	device_initialize(&iiodev->dev);
+	dev_set_drvdata(&iiodev->dev, (void *)iiodev);
+	mutex_init(&iiodev->mlock);
+	iio_get();
+
+	return iiodev;
+}
+EXPORT_SYMBOL(iio_allocate_device);
+
+/* Release a driver's reference on an iio_dev obtained from
+ * iio_allocate_device(); NULL is tolerated. */
+void iio_free_device(struct iio_dev *dev)
+{
+	if (dev != NULL)
+		iio_put_device(dev);
+}
+EXPORT_SYMBOL(iio_free_device);
+
+/* Register a driver-filled iio_dev with the subsystem: allocate an id,
+ * add the device, then the sysfs, event and (optional) trigger-consumer
+ * interfaces, unwinding in reverse order on failure. */
+int iio_device_register(struct iio_dev *dev_info)
+{
+	int ret;
+
+	ret = iio_device_register_id(dev_info, &iio_idr);
+	if (ret) {
+		dev_err(&dev_info->dev, "Failed to get id\n");
+		goto error_ret;
+	}
+	dev_set_name(&dev_info->dev, "device%d", dev_info->id);
+
+	ret = device_add(&dev_info->dev);
+	if (ret)
+		goto error_free_idr;
+	ret = iio_device_register_sysfs(dev_info);
+	if (ret) {
+		dev_err(dev_info->dev.parent,
+			"Failed to register sysfs interfaces\n");
+		goto error_del_device;
+	}
+	ret = iio_device_register_eventset(dev_info);
+	if (ret) {
+		dev_err(dev_info->dev.parent,
+			"Failed to register event set \n");
+		goto error_free_sysfs;
+	}
+	/* Trigger consumer setup only applies to triggered ring mode */
+	if (dev_info->modes & INDIO_RING_TRIGGERED)
+		iio_device_register_trigger_consumer(dev_info);
+
+	return 0;
+
+error_free_sysfs:
+	iio_device_unregister_sysfs(dev_info);
+error_del_device:
+	device_del(&dev_info->dev);
+error_free_idr:
+	iio_device_unregister_id(dev_info);
+error_ret:
+	return ret;
+}
+EXPORT_SYMBOL(iio_device_register);
+
+/* Unregister a device, tearing interfaces down in the reverse of the
+ * order iio_device_register() created them. */
+void iio_device_unregister(struct iio_dev *dev_info)
+{
+	if (dev_info->modes & INDIO_RING_TRIGGERED)
+		iio_device_unregister_trigger_consumer(dev_info);
+	iio_device_unregister_eventset(dev_info);
+	iio_device_unregister_sysfs(dev_info);
+	iio_device_unregister_id(dev_info);
+	device_unregister(&dev_info->dev);
+}
+EXPORT_SYMBOL(iio_device_unregister);
+
+/* Module reference counting: iio_get() is taken in iio_allocate_device()
+ * and iio_put() released in iio_dev_release(), so the core cannot be
+ * unloaded while any iio_dev exists. */
+void iio_put(void)
+{
+	module_put(THIS_MODULE);
+}
+
+void iio_get(void)
+{
+	__module_get(THIS_MODULE);
+}
+
+subsys_initcall(iio_init);
+module_exit(iio_exit);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
+MODULE_DESCRIPTION("Industrial I/O core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/industrialio-ring.c b/drivers/staging/iio/industrialio-ring.c
new file mode 100644
index 0000000..ebe5ccc
--- /dev/null
+++ b/drivers/staging/iio/industrialio-ring.c
@@ -0,0 +1,568 @@
+/* The industrial I/O core
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Handling of ring allocation / resizing.
+ *
+ *
+ * Things to look at here.
+ * - Better memory allocation techniques?
+ * - Alternative access techniques?
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/idr.h>
+
+#include "iio.h"
+#include "ring_generic.h"
+
+/* IDR for ring buffer identifier */
+static DEFINE_IDR(iio_ring_idr);
+/* IDR for ring event identifier */
+static DEFINE_IDR(iio_ring_event_idr);
+/* IDR for ring access identifier */
+static DEFINE_IDR(iio_ring_access_idr);
+
+/**
+ * iio_push_ring_event() - push an event to the ring buffer's event chrdev
+ * @ring_buf: ring buffer acting as the event source
+ * @event_code: event identification code
+ * @timestamp: time of the event
+ *
+ * Thin wrapper around __iio_push_event() using the ring buffer's own
+ * event interface and shared event pointer.
+ */
+int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
+ int event_code,
+ s64 timestamp)
+{
+ return __iio_push_event(&ring_buf->ev_int,
+ event_code,
+ timestamp,
+ &ring_buf->shared_ev_pointer);
+}
+EXPORT_SYMBOL(iio_push_ring_event);
+
+/**
+ * iio_push_or_escallate_ring_event() - escalate pending event or push new one
+ * @ring_buf: ring buffer acting as the event source
+ * @event_code: event identification code
+ * @timestamp: time of the event
+ *
+ * If an event from this ring is still unread (shared_ev_pointer.ev_p set),
+ * rewrite it in place with the new code rather than queueing a second one.
+ * NOTE(review): the check and the update are not performed under a common
+ * lock here — presumably callers serialize this; verify against callers.
+ */
+int iio_push_or_escallate_ring_event(struct iio_ring_buffer *ring_buf,
+ int event_code,
+ s64 timestamp)
+{
+ if (ring_buf->shared_ev_pointer.ev_p)
+ __iio_change_event(ring_buf->shared_ev_pointer.ev_p,
+ event_code,
+ timestamp);
+ else
+ return iio_push_ring_event(ring_buf,
+ event_code,
+ timestamp);
+ return 0;
+}
+EXPORT_SYMBOL(iio_push_or_escallate_ring_event);
+
+/**
+ * iio_ring_open() - chrdev file open for ring buffer access
+ *
+ * Stashes the ring buffer in filp->private_data for read/release and
+ * marks the ring in use (prevents resizing while open).
+ *
+ * This function relies on all ring buffer implementations having an
+ * iio_ring_buffer as their first element.
+ **/
+int iio_ring_open(struct inode *inode, struct file *filp)
+{
+ struct iio_handler *hand
+ = container_of(inode->i_cdev, struct iio_handler, chrdev);
+ struct iio_ring_buffer *rb = hand->private;
+
+ filp->private_data = hand->private;
+ if (rb->access.mark_in_use)
+ rb->access.mark_in_use(rb);
+
+ return 0;
+}
+
+/**
+ * iio_ring_release() - chrdev file close for ring buffer access
+ *
+ * Clears the busy flag (single-opener exclusion) and undoes the
+ * mark_in_use taken at open time.
+ *
+ * This function relies on all ring buffer implementations having an
+ * iio_ring_buffer as their first element.
+ **/
+int iio_ring_release(struct inode *inode, struct file *filp)
+{
+ struct cdev *cd = inode->i_cdev;
+ struct iio_handler *hand = iio_cdev_to_handler(cd);
+ struct iio_ring_buffer *rb = hand->private;
+
+ clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
+ if (rb->access.unmark_in_use)
+ rb->access.unmark_in_use(rb);
+
+ return 0;
+}
+
+/**
+ * iio_ring_rip_outer() - chrdev read for ring buffer access
+ *
+ * Asks the ring implementation for up to @count bytes (rip_lots),
+ * copies them to userspace skipping @dead_offset bytes of invalidated
+ * data at the front, then frees the kernel-side snapshot.
+ *
+ * NOTE(review): @buf is a userspace pointer and should carry the
+ * __user annotation for sparse checking — confirm against prototypes.
+ *
+ * This function relies on all ring buffer implementations having an
+ * iio_ring_buffer as their first element.
+ **/
+ssize_t iio_ring_rip_outer(struct file *filp,
+ char *buf,
+ size_t count,
+ loff_t *f_ps)
+{
+ struct iio_ring_buffer *rb = filp->private_data;
+ int ret, dead_offset, copied;
+ u8 *data;
+ /* rip lots must exist. */
+ if (!rb->access.rip_lots)
+ return -EINVAL;
+ copied = rb->access.rip_lots(rb, count, &data, &dead_offset);
+
+ if (copied < 0) {
+ ret = copied;
+ goto error_ret;
+ }
+ if (copy_to_user(buf, data + dead_offset, copied)) {
+ ret = -EFAULT;
+ goto error_free_data_cpy;
+ }
+ /* In clever ring buffer designs this may not need to be freed.
+ * When such a design exists I'll add this to ring access funcs.
+ */
+ kfree(data);
+
+ return copied;
+
+error_free_data_cpy:
+ kfree(data);
+error_ret:
+ return ret;
+}
+
+static const struct file_operations iio_ring_fileops = {
+ .read = iio_ring_rip_outer,
+ .release = iio_ring_release,
+ .open = iio_ring_open,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * __iio_request_ring_buffer_event_chrdev() - allocate ring event chrdev
+ * @buf: ring buffer whose event chrdev we are allocating
+ * @id: currently unused (callers pass 0); the id comes from the idr
+ * @owner: the module who owns the ring buffer (for ref counting)
+ * @dev: device with which the chrdev is associated
+ *
+ * Gets an id from iio_ring_event_idr, builds the "ring_event_lineN"
+ * name and sets up the event interface; the id is released on failure.
+ **/
+static inline int
+__iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
+ int id,
+ struct module *owner,
+ struct device *dev)
+{
+ int ret;
+ ret = iio_get_new_idr_val(&iio_ring_event_idr);
+ if (ret < 0)
+ goto error_ret;
+ else
+ buf->ev_int.id = ret;
+
+ snprintf(buf->ev_int._name, 20,
+ "ring_event_line%d",
+ buf->ev_int.id);
+ ret = iio_setup_ev_int(&(buf->ev_int),
+ buf->ev_int._name,
+ owner,
+ dev);
+ if (ret)
+ goto error_free_id;
+ return 0;
+
+error_free_id:
+ iio_free_idr_val(&iio_ring_event_idr, buf->ev_int.id);
+error_ret:
+ return ret;
+}
+
+/* Reverse of __iio_request_ring_buffer_event_chrdev(): tear down the
+ * event interface and return its id to the idr. */
+static inline void
+__iio_free_ring_buffer_event_chrdev(struct iio_ring_buffer *buf)
+{
+ iio_free_ev_int(&(buf->ev_int));
+ iio_free_idr_val(&iio_ring_event_idr, buf->ev_int.id);
+}
+
+/* Device release callback for the ring access device: delete the cdev
+ * and hand the chrdev minor back to the core allocator. */
+static void iio_ring_access_release(struct device *dev)
+{
+ struct iio_ring_buffer *buf
+ = access_dev_to_iio_ring_buffer(dev);
+ cdev_del(&buf->access_handler.chrdev);
+ iio_device_free_chrdev_minor(MINOR(dev->devt));
+}
+
+static struct device_type iio_ring_access_type = {
+ .release = iio_ring_access_release,
+};
+
+/**
+ * __iio_request_ring_buffer_access_chrdev() - create the data chrdev
+ * @buf: ring buffer the chrdev gives access to
+ * @id: currently unused (callers pass 0); the id comes from the idr
+ * @owner: module owning the ring buffer (set as cdev owner)
+ *
+ * Initializes the access device under the ring's device, obtains a
+ * chrdev minor and an idr id, adds the device as "ring_accessN" and
+ * registers the cdev with iio_ring_fileops.  Failures unwind via the
+ * error labels; put_device() triggers iio_ring_access_release().
+ **/
+static inline int
+__iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
+ int id,
+ struct module *owner)
+{
+ int ret, minor;
+
+ buf->access_handler.flags = 0;
+
+ buf->access_dev.parent = &buf->dev;
+ buf->access_dev.class = &iio_class;
+ buf->access_dev.type = &iio_ring_access_type;
+ device_initialize(&buf->access_dev);
+
+ minor = iio_device_get_chrdev_minor();
+ if (minor < 0) {
+ ret = minor;
+ goto error_device_put;
+ }
+ buf->access_dev.devt = MKDEV(MAJOR(iio_devt), minor);
+
+ ret = iio_get_new_idr_val(&iio_ring_access_idr);
+ if (ret < 0)
+ goto error_device_put;
+ else
+ buf->access_id = ret;
+ dev_set_name(&buf->access_dev, "ring_access%d", buf->access_id);
+ ret = device_add(&buf->access_dev);
+ if (ret < 0) {
+ printk(KERN_ERR "failed to add the ring access dev\n");
+ goto error_free_idr;
+ }
+
+ cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
+ buf->access_handler.chrdev.owner = owner;
+
+ ret = cdev_add(&buf->access_handler.chrdev, buf->access_dev.devt, 1);
+ if (ret) {
+ printk(KERN_ERR "failed to allocate ring access chrdev\n");
+ goto error_device_unregister;
+ }
+ return 0;
+error_device_unregister:
+ device_unregister(&buf->access_dev);
+error_free_idr:
+ iio_free_idr_val(&iio_ring_access_idr, buf->access_id);
+error_device_put:
+ put_device(&buf->access_dev);
+
+ return ret;
+}
+
+/* Reverse of __iio_request_ring_buffer_access_chrdev(): release the idr
+ * id and unregister the device (cdev cleanup happens in its release). */
+static void __iio_free_ring_buffer_access_chrdev(struct iio_ring_buffer *buf)
+{
+ iio_free_idr_val(&iio_ring_access_idr, buf->access_id);
+ device_unregister(&buf->access_dev);
+}
+
+/**
+ * iio_ring_buffer_init() - wire up a ring buffer to its iio device
+ * @ring: ring buffer to initialize
+ * @dev_info: owning iio device
+ *
+ * Flags an initial parameter change so the first enable updates the
+ * underlying storage, and points the event/access handlers back at the
+ * ring for use by the chrdev fops.
+ */
+void iio_ring_buffer_init(struct iio_ring_buffer *ring,
+ struct iio_dev *dev_info)
+{
+ if (ring->access.mark_param_change)
+ ring->access.mark_param_change(ring);
+ ring->indio_dev = dev_info;
+ ring->ev_int.private = ring;
+ ring->access_handler.private = ring;
+}
+EXPORT_SYMBOL(iio_ring_buffer_init);
+
+/**
+ * iio_ring_buffer_register() - register a ring buffer with the core
+ * @ring: ring buffer to register
+ *
+ * Allocates an id, adds the "ring_bufferN" device and creates the
+ * event and access chrdevs.  Failures unwind in reverse order.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int iio_ring_buffer_register(struct iio_ring_buffer *ring)
+{
+ int ret;
+ ret = iio_get_new_idr_val(&iio_ring_idr);
+ if (ret < 0)
+ goto error_ret;
+ else
+ ring->id = ret;
+
+ dev_set_name(&ring->dev, "ring_buffer%d", ring->id);
+ ret = device_add(&ring->dev);
+ if (ret)
+ goto error_free_id;
+
+ ret = __iio_request_ring_buffer_event_chrdev(ring,
+ 0,
+ ring->owner,
+ &ring->dev);
+ if (ret)
+ goto error_remove_device;
+
+ ret = __iio_request_ring_buffer_access_chrdev(ring,
+ 0,
+ ring->owner);
+
+ if (ret)
+ goto error_free_ring_buffer_event_chrdev;
+
+ return ret;
+error_free_ring_buffer_event_chrdev:
+ __iio_free_ring_buffer_event_chrdev(ring);
+error_remove_device:
+ device_del(&ring->dev);
+error_free_id:
+ iio_free_idr_val(&iio_ring_idr, ring->id);
+error_ret:
+ return ret;
+}
+EXPORT_SYMBOL(iio_ring_buffer_register);
+
+/**
+ * iio_ring_buffer_unregister() - reverse of iio_ring_buffer_register()
+ * @ring: ring buffer being removed
+ */
+void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
+{
+ __iio_free_ring_buffer_access_chrdev(ring);
+ __iio_free_ring_buffer_event_chrdev(ring);
+ device_del(&ring->dev);
+ iio_free_idr_val(&iio_ring_idr, ring->id);
+}
+EXPORT_SYMBOL(iio_ring_buffer_unregister);
+
+/* sysfs show: current ring length in datums; empty if the ring
+ * implementation does not provide get_length. */
+ssize_t iio_read_ring_length(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int len = 0;
+ struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+
+ if (ring->access.get_length)
+ len = sprintf(buf, "%d\n",
+ ring->access.get_length(ring));
+
+ return len;
+}
+EXPORT_SYMBOL(iio_read_ring_length);
+
+/* sysfs store: request a new ring length.  A write of the current
+ * length is a no-op; otherwise set_length is called and a parameter
+ * change is flagged so storage is reallocated on next enable. */
+ ssize_t iio_write_ring_length(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret;
+ ulong val;
+ struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (ring->access.get_length)
+ if (val == ring->access.get_length(ring))
+ return len;
+
+ if (ring->access.set_length) {
+ ring->access.set_length(ring, val);
+ if (ring->access.mark_param_change)
+ ring->access.mark_param_change(ring);
+ }
+
+ return len;
+}
+EXPORT_SYMBOL(iio_write_ring_length);
+
+/* sysfs show: bytes per datum of the ring; empty if the ring
+ * implementation does not provide get_bpd. */
+ssize_t iio_read_ring_bps(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int len = 0;
+ struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+
+ if (ring->access.get_bpd)
+ len = sprintf(buf, "%d\n",
+ ring->access.get_bpd(ring));
+
+ return len;
+}
+EXPORT_SYMBOL(iio_read_ring_bps);
+
+/**
+ * iio_store_ring_enable() - sysfs store enabling/disabling ring capture
+ *
+ * A leading '0' disables, anything else enables.  Runs the full
+ * preenable / update / mark_in_use / mode switch / postenable sequence
+ * (or the disable mirror of it) under the device mutex, restoring the
+ * previous mode on postenable failure.
+ */
+ssize_t iio_store_ring_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret;
+ bool requested_state, current_state;
+ int previous_mode;
+ struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+ struct iio_dev *dev_info = ring->indio_dev;
+
+ mutex_lock(&dev_info->mlock);
+ previous_mode = dev_info->currentmode;
+ requested_state = !(buf[0] == '0');
+ current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
+ /* Asking for the state we are already in is a harmless no-op */
+ if (current_state == requested_state) {
+ printk(KERN_INFO "iio-ring, current state requested again\n");
+ goto done;
+ }
+ if (requested_state) {
+ if (ring->preenable) {
+ ret = ring->preenable(dev_info);
+ if (ret) {
+ printk(KERN_ERR
+ "Buffer not started:"
+ "ring preenable failed\n");
+ goto error_ret;
+ }
+ }
+ /* Apply any pending length/bpd changes to the storage */
+ if (ring->access.request_update) {
+ ret = ring->access.request_update(ring);
+ if (ret) {
+ printk(KERN_INFO
+ "Buffer not started:"
+ "ring parameter update failed\n");
+ goto error_ret;
+ }
+ }
+ if (ring->access.mark_in_use)
+ ring->access.mark_in_use(ring);
+ /* Definitely possible for devices to support both of these.*/
+ if (dev_info->modes & INDIO_RING_TRIGGERED) {
+ if (!dev_info->trig) {
+ printk(KERN_INFO
+ "Buffer not started: no trigger\n");
+ ret = -EINVAL;
+ if (ring->access.unmark_in_use)
+ ring->access.unmark_in_use(ring);
+ goto error_ret;
+ }
+ dev_info->currentmode = INDIO_RING_TRIGGERED;
+ } else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER)
+ dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
+ else { /* should never be reached */
+ ret = -EINVAL;
+ goto error_ret;
+ }
+
+ if (ring->postenable) {
+
+ ret = ring->postenable(dev_info);
+ if (ret) {
+ printk(KERN_INFO
+ "Buffer not started:"
+ "postenable failed\n");
+ if (ring->access.unmark_in_use)
+ ring->access.unmark_in_use(ring);
+ /* Roll the mode back before postdisable */
+ dev_info->currentmode = previous_mode;
+ if (ring->postdisable)
+ ring->postdisable(dev_info);
+ goto error_ret;
+ }
+ }
+ } else {
+ if (ring->predisable) {
+ ret = ring->predisable(dev_info);
+ if (ret)
+ goto error_ret;
+ }
+ if (ring->access.unmark_in_use)
+ ring->access.unmark_in_use(ring);
+ dev_info->currentmode = INDIO_DIRECT_MODE;
+ if (ring->postdisable) {
+ ret = ring->postdisable(dev_info);
+ if (ret)
+ goto error_ret;
+ }
+ }
+done:
+ mutex_unlock(&dev_info->mlock);
+ return len;
+
+error_ret:
+ mutex_unlock(&dev_info->mlock);
+ return ret;
+}
+EXPORT_SYMBOL(iio_store_ring_enable);
+/* sysfs show: 1 if the device is currently in any ring capture mode */
+ssize_t iio_show_ring_enable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
+ & INDIO_ALL_RING_MODES));
+}
+EXPORT_SYMBOL(iio_show_ring_enable);
+
+/* sysfs show: whether this scan element is in the current scan mask */
+ssize_t iio_scan_el_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_scan_el *this_el = to_iio_scan_el(attr);
+
+ ret = iio_scan_mask_query(indio_dev, this_el->number);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", ret);
+}
+EXPORT_SYMBOL(iio_scan_el_show);
+
+/**
+ * iio_scan_el_store() - sysfs store adding/removing a scan element
+ *
+ * Rejected with -EBUSY while triggered capture is running.  Updates the
+ * scan mask and count only when the requested state differs from the
+ * current one, then gives the element's set_state callback (if any) a
+ * chance to program the hardware.
+ */
+ssize_t iio_scan_el_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret = 0;
+ bool state;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_scan_el *this_el = to_iio_scan_el(attr);
+
+ state = !(buf[0] == '0');
+ mutex_lock(&indio_dev->mlock);
+ if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
+ ret = -EBUSY;
+ goto error_ret;
+ }
+ ret = iio_scan_mask_query(indio_dev, this_el->number);
+ if (ret < 0)
+ goto error_ret;
+ if (!state && ret) {
+ ret = iio_scan_mask_clear(indio_dev, this_el->number);
+ if (ret)
+ goto error_ret;
+ indio_dev->scan_count--;
+ } else if (state && !ret) {
+ ret = iio_scan_mask_set(indio_dev, this_el->number);
+ if (ret)
+ goto error_ret;
+ indio_dev->scan_count++;
+ }
+ if (this_el->set_state)
+ ret = this_el->set_state(this_el, indio_dev, state);
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+
+}
+EXPORT_SYMBOL(iio_scan_el_store);
+
+/* sysfs show: whether a timestamp is included in each scan */
+ssize_t iio_scan_el_ts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", indio_dev->scan_timestamp);
+}
+EXPORT_SYMBOL(iio_scan_el_ts_show);
+
+/* sysfs store: enable/disable the per-scan timestamp.  Refused with
+ * -EBUSY while triggered capture is running. */
+ssize_t iio_scan_el_ts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ bool state;
+ state = !(buf[0] == '0');
+ mutex_lock(&indio_dev->mlock);
+ if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
+ ret = -EBUSY;
+ goto error_ret;
+ }
+ indio_dev->scan_timestamp = state;
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+EXPORT_SYMBOL(iio_scan_el_ts_store);
+
diff --git a/drivers/staging/iio/industrialio-trigger.c b/drivers/staging/iio/industrialio-trigger.c
new file mode 100644
index 0000000..693ebc4
--- /dev/null
+++ b/drivers/staging/iio/industrialio-trigger.c
@@ -0,0 +1,399 @@
+/* The industrial I/O core, trigger handling functions
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+
+#include "iio.h"
+#include "trigger.h"
+
+/* RFC - Question of approach
+ * Make the common case (single sensor single trigger)
+ * simple by starting trigger capture from when first sensors
+ * is added.
+ *
+ * Complex simultaneous start requires use of 'hold' functionality
+ * of the trigger. (not implemented)
+ *
+ * Any other suggestions?
+ */
+
+
+static DEFINE_IDR(iio_trigger_idr);
+static DEFINE_SPINLOCK(iio_trigger_idr_lock);
+
+/* Single list of all available triggers */
+static LIST_HEAD(iio_trigger_list);
+static DEFINE_MUTEX(iio_trigger_list_lock);
+
+/**
+ * iio_trigger_register_sysfs() - create a device for this trigger
+ * @trig_info: the trigger
+ *
+ * Also adds any control attribute registered by the trigger driver
+ **/
+static int iio_trigger_register_sysfs(struct iio_trigger *trig_info)
+{
+ int ret = 0;
+
+ /* Optional: a trigger driver may supply no control attributes */
+ if (trig_info->control_attrs)
+ ret = sysfs_create_group(&trig_info->dev.kobj,
+ trig_info->control_attrs);
+
+ return ret;
+}
+
+/* Reverse of iio_trigger_register_sysfs() */
+static void iio_trigger_unregister_sysfs(struct iio_trigger *trig_info)
+{
+ if (trig_info->control_attrs)
+ sysfs_remove_group(&trig_info->dev.kobj,
+ trig_info->control_attrs);
+}
+
+
+/**
+ * iio_trigger_register_id() - get a unique id for this trigger
+ * @trig_info: the trigger
+ *
+ * Standard two-phase idr allocation: pre-allocate outside the spinlock,
+ * retry on -EAGAIN if another allocator consumed the preallocation.
+ **/
+static int iio_trigger_register_id(struct iio_trigger *trig_info)
+{
+ int ret = 0;
+
+idr_again:
+ if (unlikely(idr_pre_get(&iio_trigger_idr, GFP_KERNEL) == 0))
+ return -ENOMEM;
+
+ spin_lock(&iio_trigger_idr_lock);
+ ret = idr_get_new(&iio_trigger_idr, NULL, &trig_info->id);
+ spin_unlock(&iio_trigger_idr_lock);
+ if (unlikely(ret == -EAGAIN))
+ goto idr_again;
+ else if (likely(!ret))
+ trig_info->id = trig_info->id & MAX_ID_MASK;
+
+ return ret;
+}
+
+/**
+ * iio_trigger_unregister_id() - free up unique id for use by another trigger
+ * @trig_info: the trigger
+ **/
+static void iio_trigger_unregister_id(struct iio_trigger *trig_info)
+{
+ spin_lock(&iio_trigger_idr_lock);
+ idr_remove(&iio_trigger_idr, trig_info->id);
+ spin_unlock(&iio_trigger_idr_lock);
+}
+
+/**
+ * iio_trigger_register() - register a trigger with the IIO core
+ * @trig_info: trigger to register
+ *
+ * Allocates an id, adds the "triggerN" device and its control sysfs
+ * group, then publishes the trigger on the global list so consumers can
+ * find it by name.  Failures unwind in reverse order.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int iio_trigger_register(struct iio_trigger *trig_info)
+{
+ int ret;
+
+ ret = iio_trigger_register_id(trig_info);
+ if (ret)
+ goto error_ret;
+ /* Set the name used for the sysfs directory etc */
+ dev_set_name(&trig_info->dev, "trigger%ld",
+ (unsigned long) trig_info->id);
+
+ ret = device_add(&trig_info->dev);
+ if (ret)
+ goto error_unregister_id;
+
+ ret = iio_trigger_register_sysfs(trig_info);
+ if (ret)
+ goto error_device_del;
+
+ /* Add to list of available triggers held by the IIO core */
+ mutex_lock(&iio_trigger_list_lock);
+ list_add_tail(&trig_info->list, &iio_trigger_list);
+ mutex_unlock(&iio_trigger_list_lock);
+
+ return 0;
+
+error_device_del:
+ device_del(&trig_info->dev);
+error_unregister_id:
+ iio_trigger_unregister_id(trig_info);
+error_ret:
+ return ret;
+}
+EXPORT_SYMBOL(iio_trigger_register);
+
+/**
+ * iio_trigger_unregister() - reverse of iio_trigger_register()
+ * @trig_info: trigger being removed
+ *
+ * Removes the trigger from the global list (if present) before tearing
+ * down sysfs, the id and the device itself.
+ */
+void iio_trigger_unregister(struct iio_trigger *trig_info)
+{
+ struct iio_trigger *cursor;
+
+ mutex_lock(&iio_trigger_list_lock);
+ list_for_each_entry(cursor, &iio_trigger_list, list)
+ if (cursor == trig_info) {
+ list_del(&cursor->list);
+ break;
+ }
+ mutex_unlock(&iio_trigger_list_lock);
+
+ iio_trigger_unregister_sysfs(trig_info);
+ iio_trigger_unregister_id(trig_info);
+ /* Possible issue in here */
+ device_unregister(&trig_info->dev);
+}
+EXPORT_SYMBOL(iio_trigger_unregister);
+
+/**
+ * iio_trigger_find_by_name() - look up a registered trigger by name
+ * @name: name to match (compared with strncmp over @len bytes)
+ * @len: number of bytes of @name to compare
+ *
+ * Returns the first matching trigger or NULL.
+ * NOTE(review): no reference is taken on the returned trigger here, and
+ * it may be unregistered once the list lock is dropped — callers must
+ * handle the lifetime; verify against callers.
+ */
+struct iio_trigger *iio_trigger_find_by_name(const char *name, size_t len)
+{
+ struct iio_trigger *trig;
+ bool found = false;
+
+ mutex_lock(&iio_trigger_list_lock);
+ list_for_each_entry(trig, &iio_trigger_list, list) {
+ if (strncmp(trig->name, name, len) == 0) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&iio_trigger_list_lock);
+
+ return found ? trig : NULL;
+};
+EXPORT_SYMBOL(iio_trigger_find_by_name);
+
+/**
+ * iio_trigger_poll() - run all poll functions attached to a trigger
+ * @trig: trigger that fired
+ *
+ * Immediate functions run first, then the main ones; use_count is
+ * incremented once per function called and dropped again by each
+ * consumer via iio_trigger_notify_done().
+ * NOTE(review): pollfunc_list is walked without pollfunc_list_lock —
+ * presumably safe because attach/detach disable the trigger first;
+ * confirm against iio_trigger_attach/dettach_poll_func.
+ */
+void iio_trigger_poll(struct iio_trigger *trig)
+{
+ struct iio_poll_func *pf_cursor;
+
+ list_for_each_entry(pf_cursor, &trig->pollfunc_list, list) {
+ if (pf_cursor->poll_func_immediate) {
+ pf_cursor->poll_func_immediate(pf_cursor->private_data);
+ trig->use_count++;
+ }
+ }
+ list_for_each_entry(pf_cursor, &trig->pollfunc_list, list) {
+ if (pf_cursor->poll_func_main) {
+ pf_cursor->poll_func_main(pf_cursor->private_data);
+ trig->use_count++;
+ }
+ }
+}
+EXPORT_SYMBOL(iio_trigger_poll);
+
+/**
+ * iio_trigger_notify_done() - a consumer finished handling this firing
+ * @trig: trigger whose use count is dropped
+ *
+ * When the last consumer is done, give the trigger a chance to re-arm;
+ * if try_reenable reports a missed firing, poll again immediately.
+ */
+void iio_trigger_notify_done(struct iio_trigger *trig)
+{
+ trig->use_count--;
+ if (trig->use_count == 0 && trig->try_reenable)
+ if (trig->try_reenable(trig)) {
+ /* Missed an interrupt so launch new poll now */
+ trig->timestamp = 0;
+ iio_trigger_poll(trig);
+ }
+}
+EXPORT_SYMBOL(iio_trigger_notify_done);
+
+/**
+ * iio_trigger_read_name() - sysfs show for the trigger's name
+ **/
+ssize_t iio_trigger_read_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_trigger *trig = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n", trig->name);
+}
+EXPORT_SYMBOL(iio_trigger_read_name);
+
+/* Trigger Consumer related functions */
+
+/* Complexity in here. With certain triggers (datardy) an acknowledgement
+ * may be needed if the pollfuncs do not include the data read for the
+ * triggering device.
+ * This is not currently handled. Alternative of not enabling trigger unless
+ * the relevant function is in there may be the best option.
+ */
+/* Worth protecting against double additions?*/
+/**
+ * iio_trigger_attach_poll_func() - attach a consumer poll func to a trigger
+ * @trig: trigger to attach to
+ * @pf: poll function to run when the trigger fires
+ *
+ * Adds @pf to the trigger's list under pollfunc_list_lock, then enables
+ * the trigger.  If enabling fails the entry is removed again — also
+ * under the lock: the previous unlocked list_del() here raced with
+ * other list users (iio_trigger_poll / detach) and could corrupt the
+ * list.
+ *
+ * Returns 0 on success or the error from set_trigger_state.
+ */
+int iio_trigger_attach_poll_func(struct iio_trigger *trig,
+ struct iio_poll_func *pf)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&trig->pollfunc_list_lock, flags);
+ list_add_tail(&pf->list, &trig->pollfunc_list);
+ spin_unlock_irqrestore(&trig->pollfunc_list_lock, flags);
+
+ if (trig->set_trigger_state)
+ ret = trig->set_trigger_state(trig, true);
+ if (ret) {
+ printk(KERN_ERR "set trigger state failed\n");
+ spin_lock_irqsave(&trig->pollfunc_list_lock, flags);
+ list_del(&pf->list);
+ spin_unlock_irqrestore(&trig->pollfunc_list_lock, flags);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(iio_trigger_attach_poll_func);
+
+/**
+ * iio_trigger_dettach_poll_func() - remove a poll func from a trigger
+ * @trig: trigger to detach from
+ * @pf: poll function previously attached
+ *
+ * Returns -EINVAL if @pf is not on the list.  When removing the last
+ * entry the trigger is first disabled; the spinlock must be dropped
+ * around set_trigger_state because it may sleep, and is reacquired
+ * before the list_del.
+ */
+int iio_trigger_dettach_poll_func(struct iio_trigger *trig,
+ struct iio_poll_func *pf)
+{
+ struct iio_poll_func *pf_cursor;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&trig->pollfunc_list_lock, flags);
+ list_for_each_entry(pf_cursor, &trig->pollfunc_list, list)
+ if (pf_cursor == pf) {
+ ret = 0;
+ break;
+ }
+ if (!ret) {
+ if (list_is_singular(&trig->pollfunc_list)
+ && trig->set_trigger_state) {
+ spin_unlock_irqrestore(&trig->pollfunc_list_lock,
+ flags);
+ /* May sleep hence cannot hold the spin lock */
+ ret = trig->set_trigger_state(trig, false);
+ if (ret)
+ goto error_ret;
+ spin_lock_irqsave(&trig->pollfunc_list_lock, flags);
+ }
+ /*
+ * Now we can delete safe in the knowledge that, if this is
+ * the last pollfunc then we have disabled the trigger anyway
+ * and so nothing should be able to call the pollfunc.
+ */
+ list_del(&pf_cursor->list);
+ }
+ spin_unlock_irqrestore(&trig->pollfunc_list_lock, flags);
+
+error_ret:
+ return ret;
+}
+EXPORT_SYMBOL(iio_trigger_dettach_poll_func);
+
+/**
+ * iio_trigger_read_current() - trigger consumer sysfs query which trigger
+ *
+ * For trigger consumers the current_trigger interface allows the trigger
+ * used by the device to be queried.  Shows nothing when no trigger is
+ * currently set.
+ **/
+static ssize_t iio_trigger_read_current(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ int len = 0;
+ if (dev_info->trig)
+ len = snprintf(buf,
+ IIO_TRIGGER_NAME_LENGTH,
+ "%s\n",
+ dev_info->trig->name);
+ return len;
+}
+
+/**
+ * iio_trigger_write_current() - trigger consumer sysfs set current trigger
+ *
+ * For trigger consumers the current_trigger interface allows the trigger
+ * used for this device to be specified at run time based on the trigger's
+ * name.  Rejected with -EBUSY while triggered capture is running.
+ *
+ * References are only adjusted when the trigger actually changes: the
+ * previous code called iio_get_trigger() unconditionally, so writing the
+ * name of the already-current trigger leaked one reference per write.
+ **/
+static ssize_t iio_trigger_write_current(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct iio_trigger *oldtrig = dev_info->trig;
+ struct iio_trigger *newtrig;
+ mutex_lock(&dev_info->mlock);
+ if (dev_info->currentmode == INDIO_RING_TRIGGERED) {
+ mutex_unlock(&dev_info->mlock);
+ return -EBUSY;
+ }
+ mutex_unlock(&dev_info->mlock);
+
+ len = len < IIO_TRIGGER_NAME_LENGTH ? len : IIO_TRIGGER_NAME_LENGTH;
+
+ newtrig = iio_trigger_find_by_name(buf, len);
+ if (newtrig == oldtrig)
+ return len;
+ dev_info->trig = newtrig;
+ if (oldtrig)
+ iio_put_trigger(oldtrig);
+ if (dev_info->trig)
+ iio_get_trigger(dev_info->trig);
+
+ return len;
+}
+
+DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
+ iio_trigger_read_current,
+ iio_trigger_write_current);
+
+static struct attribute *iio_trigger_consumer_attrs[] = {
+ &dev_attr_current_trigger.attr,
+ NULL,
+};
+
+static const struct attribute_group iio_trigger_consumer_attr_group = {
+ .name = "trigger",
+ .attrs = iio_trigger_consumer_attrs,
+};
+
+/* Device release callback: free the trigger and drop the module
+ * reference taken in iio_allocate_trigger(). */
+static void iio_trig_release(struct device *device)
+{
+ struct iio_trigger *trig = to_iio_trigger(device);
+ kfree(trig);
+ iio_put();
+}
+
+static struct device_type iio_trig_type = {
+ .release = iio_trig_release,
+};
+
+/**
+ * iio_allocate_trigger() - allocate and initialize an iio_trigger
+ *
+ * Sets up the embedded struct device (type, class, drvdata), the
+ * pollfunc list and lock, and pins the core module; everything is
+ * undone via iio_trig_release() when the last reference is put.
+ *
+ * Returns the new trigger or NULL on allocation failure.
+ */
+struct iio_trigger *iio_allocate_trigger(void)
+{
+ struct iio_trigger *trig;
+ trig = kzalloc(sizeof *trig, GFP_KERNEL);
+ if (trig) {
+ trig->dev.type = &iio_trig_type;
+ trig->dev.class = &iio_class;
+ device_initialize(&trig->dev);
+ dev_set_drvdata(&trig->dev, (void *)trig);
+ spin_lock_init(&trig->pollfunc_list_lock);
+ INIT_LIST_HEAD(&trig->list);
+ INIT_LIST_HEAD(&trig->pollfunc_list);
+ iio_get();
+ }
+ return trig;
+}
+EXPORT_SYMBOL(iio_allocate_trigger);
+
+/**
+ * iio_free_trigger() - drop the allocation reference on a trigger
+ * @trig: trigger to free; NULL is tolerated (no-op)
+ */
+void iio_free_trigger(struct iio_trigger *trig)
+{
+ if (trig)
+ put_device(&trig->dev);
+}
+EXPORT_SYMBOL(iio_free_trigger);
+
+/* Create the "trigger" sysfs group (current_trigger) on a device that
+ * consumes triggers. */
+int iio_device_register_trigger_consumer(struct iio_dev *dev_info)
+{
+ int ret;
+ ret = sysfs_create_group(&dev_info->dev.kobj,
+ &iio_trigger_consumer_attr_group);
+ return ret;
+}
+EXPORT_SYMBOL(iio_device_register_trigger_consumer);
+
+/* Reverse of iio_device_register_trigger_consumer(); always succeeds. */
+int iio_device_unregister_trigger_consumer(struct iio_dev *dev_info)
+{
+ sysfs_remove_group(&dev_info->dev.kobj,
+ &iio_trigger_consumer_attr_group);
+ return 0;
+}
+EXPORT_SYMBOL(iio_device_unregister_trigger_consumer);
+
diff --git a/drivers/staging/iio/light/Kconfig b/drivers/staging/iio/light/Kconfig
new file mode 100644
index 0000000..12af0c4
--- /dev/null
+++ b/drivers/staging/iio/light/Kconfig
@@ -0,0 +1,13 @@
+#
+# Light sensors
+#
+comment "Light sensors"
+
+config TSL2561
+ tristate "TAOS TSL2561 light-to-digital convertor"
+ depends on I2C
+ help
+ Say yes here to build support for the TAOS light to digital
+ convertor. This chip has two light sensors. One is broadband
+ including infrared whilst the other measures only infrared.
+ Provides direct access via sysfs.
diff --git a/drivers/staging/iio/light/Makefile b/drivers/staging/iio/light/Makefile
new file mode 100644
index 0000000..ccff151
--- /dev/null
+++ b/drivers/staging/iio/light/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for industrial I/O Light sensors
+#
+
+obj-$(CONFIG_TSL2561) += tsl2561.o
diff --git a/drivers/staging/iio/light/light.h b/drivers/staging/iio/light/light.h
new file mode 100644
index 0000000..f00f827
--- /dev/null
+++ b/drivers/staging/iio/light/light.h
@@ -0,0 +1,12 @@
+#include "../sysfs.h"
+
+/* Light to digital sensor attributes */
+
+#define IIO_DEV_ATTR_LIGHT_INFRARED(_num, _show, _addr) \
+ IIO_DEVICE_ATTR(light_infrared##_num, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_LIGHT_BROAD(_num, _show, _addr) \
+ IIO_DEVICE_ATTR(light_broadspectrum##_num, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_LIGHT_VISIBLE(_num, _show, _addr) \
+ IIO_DEVICE_ATTR(light_visible##_num, S_IRUGO, _show, NULL, _addr)
diff --git a/drivers/staging/iio/light/tsl2561.c b/drivers/staging/iio/light/tsl2561.c
new file mode 100644
index 0000000..ea8a5ef
--- /dev/null
+++ b/drivers/staging/iio/light/tsl2561.c
@@ -0,0 +1,276 @@
+/*
+ * tsl2561.c - Linux kernel modules for light to digital convertor
+ *
+ * Copyright (C) 2008-2009 Jonathan Cameron <jic23@cam.ac.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Some portions based upon the tsl2550 driver.
+ *
+ * This driver could probably be adapted easily to talk to the tsl2560 (smbus)
+ *
+ * Needs some work to support the events this can generate.
+ * Todo: Implement interrupt handling. Currently a hardware bug means
+ * this isn't available on my test board.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include "../iio.h"
+#include "../sysfs.h"
+#include "light.h"
+
+#define TSL2561_CONTROL_REGISTER 0x00
+#define TSL2561_TIMING_REGISTER 0x01
+#define TSL2561_THRESHLOW_LOW_REGISTER 0x02
+#define TSL2561_THRESHLOW_HIGH_REGISTER 0x03
+#define TSL2561_THRESHHIGH_LOW_REGISTER 0x04
+#define TSL2561_THRESHHIGH_HIGH_REGISTER 0x05
+#define TSL2561_INT_CONTROL_REGISTER 0x06
+
+#define TSL2561_INT_REG_INT_OFF 0x00
+#define TSL2561_INT_REG_INT_LEVEL 0x08
+#define TSL2561_INT_REG_INT_SMBUS 0x10
+#define TSL2561_INT_REG_INT_TEST 0x18
+
+#define TSL2561_ID_REGISTER 0x0A
+
+#define TSL2561_DATA_0_LOW 0x0C
+#define TSL2561_DATA_1_LOW 0x0E
+
+/* Control Register Values */
+#define TSL2561_CONT_REG_PWR_ON 0x03
+#define TSL2561_CONT_REG_PWR_OFF 0x00
+
+/**
+ * struct tsl2561_state - device specific state
+ * @indio_dev: the industrialio I/O info structure
+ * @client: i2c client
+ * @command_buf: single command buffer used for all operations
+ * @command_buf_lock: ensure unique access to command_buf
+ */
+struct tsl2561_state {
+ struct iio_dev *indio_dev;
+ struct i2c_client *client;
+ struct tsl2561_command *command_buf;
+ struct mutex command_buf_lock;
+};
+
+/**
+ * struct tsl2561_command - command byte for smbus
+ * @address: register address
+ * @block: is this a block r/w
+ * @word: is this a word r/w
+ * @clear: set to 1 to clear pending interrupt
+ * @cmd: select the command register - always 1.
+ */
+struct tsl2561_command {
+ unsigned int address:4;
+ unsigned int block:1;
+ unsigned int word:1;
+ unsigned int clear:1;
+ unsigned int cmd:1;
+};
+
+/* Reset the shared command byte to a plain single-byte register access
+ * (cmd bit set, all other fields cleared). */
+static inline void tsl2561_init_command_buf(struct tsl2561_command *buf)
+{
+ buf->address = 0;
+ buf->block = 0;
+ buf->word = 0;
+ buf->clear = 0;
+ buf->cmd = 1;
+}
+
+/* sysfs show: read the 16-bit channel value whose register address is
+ * carried in the attribute (TSL2561_DATA_0_LOW / TSL2561_DATA_1_LOW).
+ * NOTE(review): the command struct is reinterpreted as its raw command
+ * byte via the char cast — assumes the bitfield layout matches the
+ * device's command byte format; confirm on the target ABI. */
+static ssize_t tsl2561_read_val(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret = 0, data;
+ ssize_t len = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tsl2561_state *st = indio_dev->dev_data;
+
+ mutex_lock(&st->command_buf_lock);
+ st->command_buf->cmd = 1;
+ st->command_buf->word = 1;
+ st->command_buf->address = this_attr->address;
+
+ data = i2c_smbus_read_word_data(st->client, *(char *)(st->command_buf));
+ if (data < 0) {
+ ret = data;
+ goto error_ret;
+ }
+ len = sprintf(buf, "%u\n", data);
+
+error_ret:
+ mutex_unlock(&st->command_buf_lock);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEV_ATTR_LIGHT_INFRARED(0, tsl2561_read_val, TSL2561_DATA_0_LOW);
+static IIO_DEV_ATTR_LIGHT_BROAD(0, tsl2561_read_val, TSL2561_DATA_1_LOW);
+
+static struct attribute *tsl2561_attributes[] = {
+ &iio_dev_attr_light_infrared0.dev_attr.attr,
+ &iio_dev_attr_light_broadspectrum0.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group tsl2561_attribute_group = {
+ .attrs = tsl2561_attributes,
+};
+
+/* Power the chip on and put the interrupt block in test mode.
+ * Returns 0 or the negative errno from the failing smbus write. */
+static int tsl2561_initialize(struct tsl2561_state *st)
+{
+ int err;
+
+ mutex_lock(&st->command_buf_lock);
+ st->command_buf->word = 0;
+ st->command_buf->block = 0;
+ st->command_buf->address = TSL2561_CONTROL_REGISTER;
+ err = i2c_smbus_write_byte_data(st->client, *(char *)(st->command_buf),
+ TSL2561_CONT_REG_PWR_ON);
+ if (err)
+ goto error_ret;
+
+ st->command_buf->address = TSL2561_INT_CONTROL_REGISTER;
+ err = i2c_smbus_write_byte_data(st->client, *(char *)(st->command_buf),
+ TSL2561_INT_REG_INT_TEST);
+
+error_ret:
+ mutex_unlock(&st->command_buf_lock);
+
+ return err;
+}
+
+/* Write power-off to the control register using a stack-local command
+ * byte (no state needed, callable after the state is freed). */
+static int tsl2561_powerdown(struct i2c_client *client)
+{
+ int err;
+ struct tsl2561_command Command = {
+ .cmd = 1,
+ .clear = 0,
+ .word = 0,
+ .block = 0,
+ .address = TSL2561_CONTROL_REGISTER,
+ };
+
+ err = i2c_smbus_write_byte_data(client, *(char *)(&Command),
+ TSL2561_CONT_REG_PWR_OFF);
+ return (err < 0) ? err : 0;
+}
+/* I2C probe: allocate driver state and the shared command buffer,
+ * register the iio device, then bring the chip up.  regdone selects
+ * between unregister and plain free on the error paths, since
+ * iio_device_register() consumes the device on success. */
+static int __devinit tsl2561_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0, regdone = 0;
+ struct tsl2561_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
+
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ i2c_set_clientdata(client, st);
+ st->client = client;
+ mutex_init(&st->command_buf_lock);
+
+ st->command_buf = kmalloc(sizeof(*st->command_buf), GFP_KERNEL);
+ if (st->command_buf == NULL) {
+ ret = -ENOMEM;
+ goto error_free_state;
+ }
+ tsl2561_init_command_buf(st->command_buf);
+
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_command_buf;
+ }
+ st->indio_dev->attrs = &tsl2561_attribute_group;
+ st->indio_dev->dev.parent = &client->dev;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_free_iiodev;
+ regdone = 1;
+ /* Intialize the chip */
+ ret = tsl2561_initialize(st);
+ if (ret)
+ goto error_unregister_iiodev;
+
+ return 0;
+error_unregister_iiodev:
+error_free_iiodev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_command_buf:
+ kfree(st->command_buf);
+error_free_state:
+ kfree(st);
+error_ret:
+ return ret;
+
+}
+
+/* I2C remove: unregister the iio device, free the driver state and
+ * power the chip down.  Also frees st->command_buf, which probe
+ * allocates separately — previously it was leaked here. */
+static int __devexit tsl2561_remove(struct i2c_client *client)
+{
+ struct tsl2561_state *st = i2c_get_clientdata(client);
+
+ iio_device_unregister(st->indio_dev);
+ kfree(st->command_buf);
+ kfree(st);
+
+ return tsl2561_powerdown(client);
+}
+
+static unsigned short normal_i2c[] = { 0x29, 0x39, 0x49, I2C_CLIENT_END };
+
+I2C_CLIENT_INSMOD;
+
+static const struct i2c_device_id tsl2561_id[] = {
+ { "tsl2561", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tsl2561_id);
+
+
+static struct i2c_driver tsl2561_driver = {
+ .driver = {
+ .name = "tsl2561",
+ },
+ .probe = tsl2561_probe,
+ .remove = __devexit_p(tsl2561_remove),
+ .id_table = tsl2561_id,
+};
+
+/* Module entry point: register the i2c driver */
+static __init int tsl2561_init(void)
+{
+ return i2c_add_driver(&tsl2561_driver);
+}
+module_init(tsl2561_init);
+
+/* Module exit point: unregister the i2c driver */
+static __exit void tsl2561_exit(void)
+{
+ i2c_del_driver(&tsl2561_driver);
+}
+module_exit(tsl2561_exit);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
+MODULE_DESCRIPTION("TSL2561 light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
new file mode 100644
index 0000000..d926189
--- /dev/null
+++ b/drivers/staging/iio/ring_generic.h
@@ -0,0 +1,283 @@
+/* The industrial I/O core - generic ring buffer interfaces.
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _IIO_RING_GENERIC_H_
+#define _IIO_RING_GENERIC_H_
+#include "iio.h"
+
+struct iio_handler;
+struct iio_ring_buffer;
+struct iio_dev;
+
+/**
+ * iio_push_ring_event() - ring buffer specific push to event chrdev
+ * @ring_buf: ring buffer that is the event source
+ * @event_code: event indentification code
+ * @timestamp: time of event
+ **/
+int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
+ int event_code,
+ s64 timestamp);
+/**
+ * iio_push_or_escallate_ring_event() - escalate or add as appropriate
+ *
+ * Typical usecase is to escalate a 50% ring full to 75% full if no one has
+ * yet read the first event. Clearly the 50% full is no longer of interest
+ * in the typical use case.
+ **/
+int iio_push_or_escallate_ring_event(struct iio_ring_buffer *ring_buf,
+ int event_code,
+ s64 timestamp);
+
+/**
+ * struct iio_ring_access_funcs - access functions for ring buffers.
+ * @mark_in_use: reference counting, typically to prevent module removal
+ * @unmark_in_use: reduce reference count when no longer using ring buffer
+ * @store_to: actually store stuff to the ring buffer
+ * @read_last: get the last element stored
+ * @rip_lots: try to get a specified number of elements (must exist)
+ * @mark_param_change: notify ring that some relevant parameter has changed
+ * Often this means the underlying storage may need to
+ * change.
+ * @request_update: if a parameter change has been marked, update underlying
+ * storage.
+ * @get_bpd: get current bytes per datum
+ * @set_bpd: set number of bytes per datum
+ * @get_length: get number of datums in ring
+ * @set_length: set number of datums in ring
+ * @is_enabled: query if ring is currently being used
+ * @enable: enable the ring
+ *
+ * The purpose of this structure is to make the ring buffer element
+ * modular as even for a given driver, different usecases may require
+ * different ring designs (space efficiency vs speed for example).
+ *
+ * It is worth noting that a given ring implementation may only support a small
+ * proportion of these functions. The core code 'should' cope fine with any of
+ * them not existing.
+ **/
+struct iio_ring_access_funcs {
+	void (*mark_in_use)(struct iio_ring_buffer *ring);
+	void (*unmark_in_use)(struct iio_ring_buffer *ring);
+
+	int (*store_to)(struct iio_ring_buffer *ring, u8 *data, s64 timestamp);
+	int (*read_last)(struct iio_ring_buffer *ring, u8 *data);
+	int (*rip_lots)(struct iio_ring_buffer *ring,
+			size_t count,
+			u8 **data,
+			int *dead_offset);
+
+	int (*mark_param_change)(struct iio_ring_buffer *ring);
+	int (*request_update)(struct iio_ring_buffer *ring);
+
+	int (*get_bpd)(struct iio_ring_buffer *ring);
+	int (*set_bpd)(struct iio_ring_buffer *ring, size_t bpd);
+	int (*get_length)(struct iio_ring_buffer *ring);
+	int (*set_length)(struct iio_ring_buffer *ring, int length);
+
+	int (*is_enabled)(struct iio_ring_buffer *ring);
+	int (*enable)(struct iio_ring_buffer *ring);
+};
+
+/**
+ * struct iio_ring_buffer - general ring buffer structure
+ * @dev:		ring buffer device struct
+ * @access_dev:		device struct for the chrdev access
+ * @indio_dev:		industrial I/O device the ring belongs to
+ * @owner:		module owning the ring (for reference counting)
+ * @id:			unique id number for this ring
+ * @access_id:		unique id number for the access chrdev
+ * @length:		[DEVICE]number of datums in ring
+ * @bpd:		[DEVICE]size of individual datum including timestamp
+ * @loopcount:		[INTERN]number of times the ring has looped
+ * @access_handler:	[INTERN]chrdev access handling
+ * @ev_int:		[INTERN]chrdev interface for the event chrdev
+ * @shared_ev_pointer:	[INTERN]the shared event pointer to allow escalation of
+ *			events
+ * @access:		[DRIVER]ring access functions associated with the
+ *			implementation.
+ * @preenable:		[DRIVER] function to run prior to marking ring enabled
+ * @postenable:		[DRIVER] function to run after marking ring enabled
+ * @predisable:		[DRIVER] function to run prior to marking ring disabled
+ * @postdisable:	[DRIVER] function to run after marking ring disabled
+ **/
+struct iio_ring_buffer {
+	struct device dev;
+	struct device access_dev;
+	struct iio_dev *indio_dev;
+	struct module *owner;
+	int id;
+	int access_id;
+	int length;
+	int bpd;
+	int loopcount;
+	struct iio_handler access_handler;
+	struct iio_event_interface ev_int;
+	struct iio_shared_ev_pointer shared_ev_pointer;
+	struct iio_ring_access_funcs access;
+	int (*preenable)(struct iio_dev *);
+	int (*postenable)(struct iio_dev *);
+	int (*predisable)(struct iio_dev *);
+	int (*postdisable)(struct iio_dev *);
+
+};
+void iio_ring_buffer_init(struct iio_ring_buffer *ring,
+			  struct iio_dev *dev_info);
+
+/**
+ * __iio_init_ring_buffer() - initialize common elements of ring buffers
+ * @ring:		ring buffer being initialized
+ * @bytes_per_datum:	size of an individual datum including timestamp
+ * @length:		number of datums the ring holds
+ **/
+static inline void __iio_init_ring_buffer(struct iio_ring_buffer *ring,
+					  int bytes_per_datum, int length)
+{
+	ring->bpd = bytes_per_datum;
+	ring->length = length;
+	ring->loopcount = 0;
+	ring->shared_ev_pointer.ev_p = NULL;
+	/* shared_ev_pointer is an embedded struct, not a pointer, and its
+	 * member is 'lock' - the previous '->loc' name was wrong (only
+	 * harmless because the macro merely uses the name for lockdep). */
+	ring->shared_ev_pointer.lock =
+		__SPIN_LOCK_UNLOCKED(ring->shared_ev_pointer.lock);
+}
+
+/**
+ * struct iio_scan_el - an individual element of a scan
+ * @dev_attr: control attribute (if directly controllable)
+ * @number: unique identifier of element (used for bit mask)
+ * @bit_count: number of bits in scan element
+ * @label: useful data for the scan el (often reg address)
+ * @set_state: for some devices datardy signals are generated
+ * for any enabled lines. This allows unwanted lines
+ * to be disabled and hence not get in the way.
+ **/
+struct iio_scan_el {
+ struct device_attribute dev_attr;
+ unsigned int number;
+ int bit_count;
+ unsigned int label;
+
+ int (*set_state)(struct iio_scan_el *scanel,
+ struct iio_dev *dev_info,
+ bool state);
+};
+
+/* Trailing semicolon removed so the macro can be used as an expression. */
+#define to_iio_scan_el(_dev_attr) \
+	container_of(_dev_attr, struct iio_scan_el, dev_attr)
+
+/**
+ * iio_scan_el_store() - sysfs scan element selection interface.
+ *
+ * A generic function used to enable various scan elements. In some
+ * devices explicit read commands for each channel mean this is merely
+ * a software switch. In others this must actively disable the channel.
+ * Complexities occur when this interacts with data ready type triggers
+ * which may not reset unless every channel that is enabled is explicitly
+ * read.
+ **/
+ssize_t iio_scan_el_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len);
+/**
+ * iio_scan_el_show() - sysfs interface to query whether a scan element
+ * is enabled or not.
+ **/
+ssize_t iio_scan_el_show(struct device *dev, struct device_attribute *attr,
+			 char *buf);
+ssize_t iio_scan_el_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
+/**
+ * IIO_SCAN_EL: - declare and initialize a scan element without control func
+ * @_name: identifying name. Resulting struct is iio_scan_el_##_name,
+ * sysfs element, scan_en_##_name.
+ * @_number: unique id number for the scan element.
+ * @_bits: number of bits in the scan element result (used in mixed bit
+ * length devices).
+ * @_label: identification variable used by drivers. Often a reg address.
+ **/
+#define IIO_SCAN_EL(_name, _number, _bits, _label) \
+	struct iio_scan_el iio_scan_el_##_name = { \
+		.dev_attr = __ATTR(scan_en_##_name, \
+				   S_IRUGO | S_IWUSR, \
+				   iio_scan_el_show, \
+				   iio_scan_el_store), \
+		.number = _number, \
+		.bit_count = _bits, \
+		.label = _label, \
+	}
+
+ssize_t iio_scan_el_ts_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len);
+
+ssize_t iio_scan_el_ts_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
+/**
+ * IIO_SCAN_EL_C: - declare and initialize a scan element with a control func
+ * @_name: identifying name; resulting struct is iio_scan_el_##_name
+ * @_number: unique id number for the scan element
+ * @_bits: number of bits in the scan element result
+ * @_label: identification variable used by drivers (often a reg address)
+ * @_controlfunc: function used to notify hardware of whether state changes
+ **/
+#define IIO_SCAN_EL_C(_name, _number, _bits, _label, _controlfunc) \
+	struct iio_scan_el iio_scan_el_##_name = { \
+		.dev_attr = __ATTR(scan_en_##_name, \
+				   S_IRUGO | S_IWUSR, \
+				   iio_scan_el_show, \
+				   iio_scan_el_store), \
+		.number = _number, \
+		.bit_count = _bits, \
+		.label = _label, \
+		.set_state = _controlfunc, \
+	}
+/**
+ * IIO_SCAN_EL_TIMESTAMP: - declare a special scan element for timestamps
+ *
+ * Odd one out. Handled slightly differently from other scan elements.
+ **/
+#define IIO_SCAN_EL_TIMESTAMP \
+ struct iio_scan_el iio_scan_el_timestamp = { \
+ .dev_attr = __ATTR(scan_en_timestamp, \
+ S_IRUGO | S_IWUSR, \
+ iio_scan_el_ts_show, \
+ iio_scan_el_ts_store), \
+ }
+
+/* Drop a reference to the ring buffer device (see iio_sw_rb_free). */
+static inline void iio_put_ring_buffer(struct iio_ring_buffer *ring)
+{
+	put_device(&ring->dev);
+}
+
+#define to_iio_ring_buffer(d) \
+ container_of(d, struct iio_ring_buffer, dev)
+#define access_dev_to_iio_ring_buffer(d) \
+ container_of(d, struct iio_ring_buffer, access_dev)
+int iio_ring_buffer_register(struct iio_ring_buffer *ring);
+void iio_ring_buffer_unregister(struct iio_ring_buffer *ring);
+
+ssize_t iio_read_ring_length(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+ssize_t iio_write_ring_length(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len);
+ssize_t iio_read_ring_bps(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+ssize_t iio_store_ring_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len);
+ssize_t iio_show_ring_enable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+#define IIO_RING_LENGTH_ATTR DEVICE_ATTR(length, S_IRUGO | S_IWUSR, \
+ iio_read_ring_length, \
+ iio_write_ring_length)
+#define IIO_RING_BPS_ATTR DEVICE_ATTR(bps, S_IRUGO | S_IWUSR, \
+ iio_read_ring_bps, NULL)
+#define IIO_RING_ENABLE_ATTR DEVICE_ATTR(ring_enable, S_IRUGO | S_IWUSR, \
+ iio_show_ring_enable, \
+ iio_store_ring_enable)
+
+#endif /* _IIO_RING_GENERIC_H_ */
diff --git a/drivers/staging/iio/ring_hw.h b/drivers/staging/iio/ring_hw.h
new file mode 100644
index 0000000..bb8cfd2
--- /dev/null
+++ b/drivers/staging/iio/ring_hw.h
@@ -0,0 +1,22 @@
+/*
+ * ring_hw.h - common functionality for iio hardware ring buffers
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Copyright (c) 2009 Jonathan Cameron <jic23@cam.ac.uk>
+ *
+ */
+
+/**
+ * struct iio_hw_ring_buffer - hardware ring buffer
+ * @buf:	generic ring buffer elements
+ * @private:	device specific data
+ *
+ * @buf must remain the first member so the container_of based cast
+ * below is valid for a plain struct iio_ring_buffer pointer.
+ */
+struct iio_hw_ring_buffer {
+	struct iio_ring_buffer buf;
+	void *private;
+};
+
+#define iio_to_hw_ring_buf(r) container_of(r, struct iio_hw_ring_buffer, buf)
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
new file mode 100644
index 0000000..359ff92
--- /dev/null
+++ b/drivers/staging/iio/ring_sw.c
@@ -0,0 +1,433 @@
+/* The industrial I/O simple minimally locked ring buffer.
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include "ring_sw.h"
+
+/* Reset all ring state and allocate the backing storage.
+ * Returns 0 on success, -EINVAL for a zero sized request and -ENOMEM
+ * if the storage allocation fails.
+ */
+static inline int __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
+					    int bytes_per_datum, int length)
+{
+	if (length == 0 || bytes_per_datum == 0)
+		return -EINVAL;
+
+	__iio_init_ring_buffer(&ring->buf, bytes_per_datum, length);
+	ring->use_lock = __SPIN_LOCK_UNLOCKED((ring)->use_lock);
+	ring->read_p = NULL;
+	ring->write_p = NULL;
+	ring->last_written_p = NULL;
+	ring->half_p = NULL;
+	ring->data = kmalloc(length * ring->buf.bpd, GFP_KERNEL);
+	if (ring->data == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+/* Free only the backing storage; the struct itself is released elsewhere
+ * (see iio_sw_rb_release). */
+static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
+{
+	kfree(ring->data);
+}
+
+/* Take a use reference on the ring so a concurrent resize
+ * (iio_request_update_sw_rb) cannot free the storage under us. */
+void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
+{
+	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
+	spin_lock(&ring->use_lock);
+	ring->use_count++;
+	spin_unlock(&ring->use_lock);
+}
+EXPORT_SYMBOL(iio_mark_sw_rb_in_use);
+
+/* Drop the use reference taken by iio_mark_sw_rb_in_use(). */
+void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
+{
+	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
+	spin_lock(&ring->use_lock);
+	ring->use_count--;
+	spin_unlock(&ring->use_lock);
+}
+EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);
+
+
+/* Ring buffer related functionality */
+/* Store to ring is typically called in the bh of a data ready interrupt handler
+ * in the device driver */
+/* Lock always held if there is a chance this may be called */
+/* Only one of these per ring may run concurrently - enforced by drivers */
+int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
+			 unsigned char *data,
+			 s64 timestamp)
+{
+	int ret = 0;
+	int code;
+	unsigned char *temp_ptr, *change_test_ptr;
+
+	/* initial store */
+	if (unlikely(ring->write_p == 0)) {
+		ring->write_p = ring->data;
+		/* Doesn't actually matter if this is out of the set
+		 * as long as the read pointer is valid before this
+		 * passes it - guaranteed as set later in this function.
+		 */
+		ring->half_p = ring->data - ring->buf.length*ring->buf.bpd/2;
+	}
+	/* Copy data to where ever the current write pointer says */
+	memcpy(ring->write_p, data, ring->buf.bpd);
+	barrier();
+	/* Update the pointer used to get most recent value.
+	 * Always valid as either points to latest or second latest value.
+	 * Before this runs it is null and read attempts fail with -EAGAIN.
+	 */
+	ring->last_written_p = ring->write_p;
+	barrier();
+	/* temp_ptr used to ensure we never have an invalid pointer
+	 * it may be slightly lagging, but never invalid
+	 */
+	temp_ptr = ring->write_p + ring->buf.bpd;
+	/* End of ring, back to the beginning */
+	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bpd)
+		temp_ptr = ring->data;
+	/* Update the write pointer
+	 * always valid as long as this is the only function able to write.
+	 * Care needed with smp systems to ensure more than one ring fill
+	 * is never scheduled.
+	 */
+	ring->write_p = temp_ptr;
+
+	if (ring->read_p == 0)
+		ring->read_p = ring->data;
+	/* Buffer full - move the read pointer and create / escalate
+	 * ring event */
+	/* Tricky case - if the read pointer moves before we adjust it.
+	 * Handle by not pushing if it has moved - may result in occasional
+	 * unnecessary buffer full events when it wasn't quite true.
+	 */
+	else if (ring->write_p == ring->read_p) {
+		change_test_ptr = ring->read_p;
+		temp_ptr = change_test_ptr + ring->buf.bpd;
+		if (temp_ptr
+		    == ring->data + ring->buf.length*ring->buf.bpd) {
+			temp_ptr = ring->data;
+		}
+		/* We are moving pointer on one because the ring is full. Any
+		 * change to the read pointer will be this or greater.
+		 */
+		if (change_test_ptr == ring->read_p)
+			ring->read_p = temp_ptr;
+
+		/* Lock serializes event escalation against the chrdev
+		 * reader consuming the shared event pointer. */
+		spin_lock(&ring->buf.shared_ev_pointer.lock);
+
+		ret = iio_push_or_escallate_ring_event(&ring->buf,
+			IIO_EVENT_CODE_RING_100_FULL,
+			timestamp);
+		spin_unlock(&ring->buf.shared_ev_pointer.lock);
+		if (ret)
+			goto error_ret;
+	}
+	/* investigate if our event barrier has been passed */
+	/* There are definite 'issues' with this and chances of
+	 * simultaneous read */
+	/* Also need to use loop count to ensure this only happens once */
+	ring->half_p += ring->buf.bpd;
+	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bpd)
+		ring->half_p = ring->data;
+	if (ring->half_p == ring->read_p) {
+		spin_lock(&ring->buf.shared_ev_pointer.lock);
+		code = IIO_EVENT_CODE_RING_50_FULL;
+		ret = __iio_push_event(&ring->buf.ev_int,
+				       code,
+				       timestamp,
+				       &ring->buf.shared_ev_pointer);
+		spin_unlock(&ring->buf.shared_ev_pointer.lock);
+	}
+error_ret:
+	return ret;
+}
+
+/* Read up to count bytes (a whole number of datums) out of the ring.
+ * A fresh buffer is kmalloc'd and returned via *data; the caller owns
+ * and must kfree it. *dead_offset reports how much of the front of the
+ * copy may have been invalidated by concurrent stores while copying.
+ * Returns the number of valid bytes, 0 if no data, or negative errno.
+ */
+int iio_rip_sw_rb(struct iio_ring_buffer *r,
+		  size_t count, u8 **data, int *dead_offset)
+{
+	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
+
+	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
+	int ret, max_copied;
+	int bytes_to_rip;
+
+	/* A userspace program has probably made an error if it tries to
+	 * read something that is not a whole number of bpds.
+	 * Return an error.
+	 */
+	if (count % ring->buf.bpd) {
+		ret = -EINVAL;
+		printk(KERN_INFO "Ring buffer read request not whole number of"
+		       "samples: Request bytes %zd, Current bpd %d\n",
+		       count, ring->buf.bpd);
+		goto error_ret;
+	}
+	/* Limit size to whole of ring buffer */
+	bytes_to_rip = min((size_t)(ring->buf.bpd*ring->buf.length), count);
+
+	*data = kmalloc(bytes_to_rip, GFP_KERNEL);
+	if (*data == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	/* build local copy */
+	initial_read_p = ring->read_p;
+	if (unlikely(initial_read_p == 0)) { /* No data here as yet */
+		ret = 0;
+		goto error_free_data_cpy;
+	}
+
+	initial_write_p = ring->write_p;
+
+	/* Need a consistent pair */
+	while ((initial_read_p != ring->read_p)
+	       || (initial_write_p != ring->write_p)) {
+		initial_read_p = ring->read_p;
+		initial_write_p = ring->write_p;
+	}
+	if (initial_write_p == initial_read_p) {
+		/* No new data available.*/
+		ret = 0;
+		goto error_free_data_cpy;
+	}
+
+	if (initial_write_p >= initial_read_p + bytes_to_rip) {
+		/* write_p is greater than necessary, all is easy */
+		max_copied = bytes_to_rip;
+		memcpy(*data, initial_read_p, max_copied);
+		end_read_p = initial_read_p + max_copied;
+	} else if (initial_write_p > initial_read_p) {
+		/*not enough data to cpy */
+		max_copied = initial_write_p - initial_read_p;
+		memcpy(*data, initial_read_p, max_copied);
+		end_read_p = initial_write_p;
+	} else {
+		/* going through 'end' of ring buffer */
+		max_copied = ring->data
+			+ ring->buf.length*ring->buf.bpd - initial_read_p;
+		memcpy(*data, initial_read_p, max_copied);
+		/* possible we are done if we align precisely with end */
+		if (max_copied == bytes_to_rip)
+			end_read_p = ring->data;
+		else if (initial_write_p
+			 > ring->data + bytes_to_rip - max_copied) {
+			/* enough data to finish */
+			memcpy(*data + max_copied, ring->data,
+			       bytes_to_rip - max_copied);
+			/* Fix: compute end_read_p from the wrapped amount
+			 * BEFORE max_copied is overwritten; the previous
+			 * ordering made end_read_p always equal ring->data. */
+			end_read_p = ring->data + (bytes_to_rip - max_copied);
+			max_copied = bytes_to_rip;
+		} else { /* not enough data */
+			memcpy(*data + max_copied, ring->data,
+			       initial_write_p - ring->data);
+			max_copied += initial_write_p - ring->data;
+			end_read_p = initial_write_p;
+		}
+	}
+	/* Now to verify which section was cleanly copied - i.e. how far
+	 * read pointer has been pushed */
+	current_read_p = ring->read_p;
+
+	if (initial_read_p <= current_read_p)
+		*dead_offset = current_read_p - initial_read_p;
+	else
+		*dead_offset = ring->buf.length*ring->buf.bpd
+			- (initial_read_p - current_read_p);
+
+	/* possible issue if the initial write has been lapped or indeed
+	 * the point we were reading to has been passed */
+	/* No valid data read.
+	 * In this case the read pointer is already correct having been
+	 * pushed further than we would look. */
+	if (max_copied - *dead_offset < 0) {
+		ret = 0;
+		goto error_free_data_cpy;
+	}
+
+	/* setup the next read position */
+	/* Beware, this may fail due to concurrency fun and games.
+	 * Possible that sufficient fill commands have run to push the read
+	 * pointer past where we would be after the rip. If this occurs, leave
+	 * it be.
+	 */
+	/* Tricky - deal with loops */
+
+	while (ring->read_p != end_read_p)
+		ring->read_p = end_read_p;
+
+	return max_copied - *dead_offset;
+
+error_free_data_cpy:
+	kfree(*data);
+error_ret:
+	return ret;
+}
+EXPORT_SYMBOL(iio_rip_sw_rb);
+
+/* Generic ring access wrapper: store one datum via the sw ring backend. */
+int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
+{
+	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
+	return iio_store_to_sw_ring(ring, data, timestamp);
+}
+EXPORT_SYMBOL(iio_store_to_sw_rb);
+
+/* Copy out the most recently written datum.
+ * Retries if a concurrent store moved last_written_p mid-copy; returns
+ * -EAGAIN if nothing has been stored yet.
+ */
+int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
+			       unsigned char *data)
+{
+	unsigned char *last_written_p_copy;
+
+	iio_mark_sw_rb_in_use(&ring->buf);
+again:
+	barrier();
+	last_written_p_copy = ring->last_written_p;
+	barrier(); /*unnessecary? */
+	/* Check there is anything here */
+	if (last_written_p_copy == 0) {
+		/* Fix: previously returned without dropping the in_use
+		 * reference, permanently blocking ring resizes. */
+		iio_unmark_sw_rb_in_use(&ring->buf);
+		return -EAGAIN;
+	}
+	memcpy(data, last_written_p_copy, ring->buf.bpd);
+
+	/* Fix: retry only when a store raced with us. The old '>=' test
+	 * also matched the unchanged (equal) case and so looped forever
+	 * whenever no concurrent write occurred. */
+	if (unlikely(ring->last_written_p != last_written_p_copy))
+		goto again;
+
+	iio_unmark_sw_rb_in_use(&ring->buf);
+	return 0;
+}
+
+/* Generic ring access wrapper for reading the newest stored datum. */
+int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
+			     unsigned char *data)
+{
+	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
+}
+EXPORT_SYMBOL(iio_read_last_from_sw_rb);
+
+/* If a length/bpd change has been flagged and the ring is idle,
+ * reallocate the backing storage with the new parameters.
+ * Returns -EAGAIN while the ring is still marked in use.
+ */
+int iio_request_update_sw_rb(struct iio_ring_buffer *r)
+{
+	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
+	int ret = 0;
+
+	spin_lock(&ring->use_lock);
+	if (ring->update_needed) {
+		if (ring->use_count) {
+			ret = -EAGAIN;
+		} else {
+			__iio_free_sw_ring_buffer(ring);
+			ret = __iio_init_sw_ring_buffer(ring, ring->buf.bpd,
+							ring->buf.length);
+		}
+	}
+	spin_unlock(&ring->use_lock);
+	return ret;
+}
+EXPORT_SYMBOL(iio_request_update_sw_rb);
+
+/* Report the current bytes-per-datum of the ring. */
+int iio_get_bpd_sw_rb(struct iio_ring_buffer *r)
+{
+	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
+	return ring->buf.bpd;
+}
+EXPORT_SYMBOL(iio_get_bpd_sw_rb);
+
+/* Set bytes-per-datum; on change, flag the ring for reallocation. */
+int iio_set_bpd_sw_rb(struct iio_ring_buffer *r, size_t bpd)
+{
+	if (r->bpd == bpd)
+		return 0;
+	r->bpd = bpd;
+	if (r->access.mark_param_change)
+		r->access.mark_param_change(r);
+	return 0;
+}
+EXPORT_SYMBOL(iio_set_bpd_sw_rb);
+
+/* Report how many datums the ring currently holds. */
+int iio_get_length_sw_rb(struct iio_ring_buffer *r)
+{
+	return r->length;
+}
+EXPORT_SYMBOL(iio_get_length_sw_rb);
+
+/* Set the ring length in datums; on change, flag for reallocation. */
+int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
+{
+	if (r->length == length)
+		return 0;
+	r->length = length;
+	if (r->access.mark_param_change)
+		r->access.mark_param_change(r);
+	return 0;
+}
+EXPORT_SYMBOL(iio_set_length_sw_rb);
+
+/* Record that a parameter changed; storage is reallocated lazily by
+ * iio_request_update_sw_rb(). */
+int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
+{
+	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
+	ring->update_needed = true;
+	return 0;
+}
+EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);
+
+/* Device release callback: frees the whole sw ring (struct allocated in
+ * iio_sw_rb_allocate).  NOTE(review): ring->data appears to be freed only
+ * via __iio_free_sw_ring_buffer, not here - confirm against callers. */
+static void iio_sw_rb_release(struct device *dev)
+{
+	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
+	kfree(iio_to_sw_ring(r));
+}
+
+static IIO_RING_ENABLE_ATTR;
+static IIO_RING_BPS_ATTR;
+static IIO_RING_LENGTH_ATTR;
+
+/* Standard set of ring buffer attributes */
+static struct attribute *iio_ring_attributes[] = {
+	&dev_attr_length.attr,
+	&dev_attr_bps.attr,
+	&dev_attr_ring_enable.attr,
+	NULL,
+};
+
+static struct attribute_group iio_ring_attribute_group = {
+	.attrs = iio_ring_attributes,
+};
+
+static const struct attribute_group *iio_ring_attribute_groups[] = {
+	&iio_ring_attribute_group,
+	NULL
+};
+
+/* Device type: auto-creates the attrs and frees the ring on last put. */
+static struct device_type iio_sw_ring_type = {
+	.release = iio_sw_rb_release,
+	.groups = iio_ring_attribute_groups,
+};
+
+/* Allocate and initialize a software ring buffer and its device.
+ * Returns NULL on allocation failure; release with iio_sw_rb_free().
+ */
+struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
+{
+	struct iio_ring_buffer *buf;
+	struct iio_sw_ring_buffer *ring;
+
+	ring = kzalloc(sizeof *ring, GFP_KERNEL);
+	if (!ring)
+		return NULL;	/* was a bare 0; use NULL for pointers */
+	buf = &ring->buf;
+
+	iio_ring_buffer_init(buf, indio_dev);
+	buf->dev.type = &iio_sw_ring_type;
+	device_initialize(&buf->dev);
+	buf->dev.parent = &indio_dev->dev;
+	buf->dev.class = &iio_class;
+	dev_set_drvdata(&buf->dev, buf);	/* cast to void * is implicit */
+
+	return buf;
+}
+EXPORT_SYMBOL(iio_sw_rb_allocate);
+
+/* Drop the device reference; iio_sw_rb_release frees the ring when the
+ * last reference goes away.  NULL is accepted and ignored. */
+void iio_sw_rb_free(struct iio_ring_buffer *r)
+{
+	if (r)
+		iio_put_ring_buffer(r);
+}
+EXPORT_SYMBOL(iio_sw_rb_free);
+MODULE_DESCRIPTION("Industrialio I/O software ring buffer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h
new file mode 100644
index 0000000..ae70ee0
--- /dev/null
+++ b/drivers/staging/iio/ring_sw.h
@@ -0,0 +1,189 @@
+/* The industrial I/O simple minimally locked ring buffer.
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This code is deliberately kept separate from the main industrialio I/O core
+ * as it is intended that in the future a number of different software ring
+ * buffer implementations will exist with different characteristics to suit
+ * different applications.
+ *
+ * This particular one was designed for a data capture application where it was
+ * particularly important that no userspace reads would interrupt the capture
+ * process. To this end the ring is not locked during a read.
+ *
+ * Comments on this buffer design welcomed. It's far from efficient and some of
+ * my understanding of the effects of scheduling on this are somewhat limited.
+ * Frankly, to my mind, this is the current weak point in the industrial I/O
+ * patch set.
+ */
+
+#ifndef _IIO_RING_SW_H_
+#define _IIO_RING_SW_H_
+/* NEEDS COMMENTS */
+/* The intention is that this should be a separate module from the iio core.
+ * This is a bit like supporting algorithms dependent on what the device
+ * driver requests - some may support multiple options */
+
+
+#include <linux/autoconf.h>
+#include "iio.h"
+#include "ring_generic.h"
+
+#if defined CONFIG_IIO_SW_RING || defined CONFIG_IIO_SW_RING_MODULE
+
+/**
+ * iio_create_sw_rb() software ring buffer allocation
+ * @r: pointer to ring buffer pointer
+ **/
+int iio_create_sw_rb(struct iio_ring_buffer **r);
+
+/**
+ * iio_init_sw_rb() initialize the software ring buffer
+ * @r: pointer to a software ring buffer created by an
+ * iio_create_sw_rb call.
+ **/
+int iio_init_sw_rb(struct iio_ring_buffer *r, struct iio_dev *indio_dev);
+/**
+ * iio_exit_sw_rb() reverse what was done in iio_init_sw_rb
+ **/
+void iio_exit_sw_rb(struct iio_ring_buffer *r);
+
+/**
+ * iio_free_sw_rb() free memory occupied by the core ring buffer struct
+ **/
+void iio_free_sw_rb(struct iio_ring_buffer *r);
+
+/**
+ * iio_mark_sw_rb_in_use() reference counting to prevent incorrect chances
+ **/
+void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r);
+
+/**
+ * iio_unmark_sw_rb_in_use() notify the ring buffer that we don't care anymore
+ **/
+void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r);
+
+/**
+ * iio_read_last_from_sw_rb() attempt to read the last stored datum from the rb
+ **/
+int iio_read_last_from_sw_rb(struct iio_ring_buffer *r, u8 *data);
+
+/**
+ * iio_store_to_sw_rb() store a new datum to the ring buffer
+ * @r:		pointer to ring buffer instance
+ * @data:	the datum to be stored including timestamp if relevant.
+ * @timestamp:	timestamp which will be attached to buffer events if relevant.
+ **/
+int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp);
+
+/**
+ * iio_rip_sw_rb() attempt to read data from the ring buffer
+ * @r: ring buffer instance
+ * @count: number of datum's to try and read
+ * @data: where the data will be stored.
+ * @dead_offset: how much of the stored data was possibly invalidated by
+ * the end of the copy.
+ **/
+int iio_rip_sw_rb(struct iio_ring_buffer *r,
+ size_t count,
+ u8 **data,
+ int *dead_offset);
+
+/**
+ * iio_request_update_sw_rb() update params if update needed
+ **/
+int iio_request_update_sw_rb(struct iio_ring_buffer *r);
+
+/**
+ * iio_mark_update_needed_sw_rb() tell the ring buffer it needs a param update
+ **/
+int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r);
+
+
+/**
+ * iio_get_bpd_sw_rb() get the datum size in bytes
+ **/
+int iio_get_bpd_sw_rb(struct iio_ring_buffer *r);
+
+/**
+ * iio_set_bpd_sw_rb() set the datum size in bytes
+ **/
+int iio_set_bpd_sw_rb(struct iio_ring_buffer *r, size_t bpd);
+
+/**
+ * iio_get_length_sw_rb() get how many datums the rb may contain
+ **/
+int iio_get_length_sw_rb(struct iio_ring_buffer *r);
+
+/**
+ * iio_set_length_sw_rb() set how many datums the rb may contain
+ **/
+int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length);
+
+/**
+ * iio_ring_sw_register_funcs() - fill an access table with the sw ring ops
+ * @ra:	access function table to populate
+ **/
+static inline void iio_ring_sw_register_funcs(struct iio_ring_access_funcs *ra)
+{
+	ra->mark_in_use = &iio_mark_sw_rb_in_use;
+	ra->unmark_in_use = &iio_unmark_sw_rb_in_use;
+
+	ra->store_to = &iio_store_to_sw_rb;
+	ra->read_last = &iio_read_last_from_sw_rb;
+	ra->rip_lots = &iio_rip_sw_rb;
+
+	ra->mark_param_change = &iio_mark_update_needed_sw_rb;
+	ra->request_update = &iio_request_update_sw_rb;
+
+	ra->get_bpd = &iio_get_bpd_sw_rb;
+	ra->set_bpd = &iio_set_bpd_sw_rb;
+
+	ra->get_length = &iio_get_length_sw_rb;
+	ra->set_length = &iio_set_length_sw_rb;
+}
+
+/**
+ * struct iio_sw_ring_buffer - software ring buffer
+ * @buf: generic ring buffer elements
+ * @data: the ring buffer memory
+ * @read_p: read pointer (oldest available)
+ * @write_p: write pointer
+ * @last_written_p: read pointer (newest available)
+ * @half_p: half buffer length behind write_p (event generation)
+ * @use_count: reference count to prevent resizing when in use
+ * @update_needed: flag to indicated change in size requested
+ * @use_lock: lock to prevent change in size when in use
+ *
+ * Note that the first element of all ring buffers must be a
+ * struct iio_ring_buffer.
+**/
+
+struct iio_sw_ring_buffer {
+ struct iio_ring_buffer buf;
+ unsigned char *data;
+ unsigned char *read_p;
+ unsigned char *write_p;
+ unsigned char *last_written_p;
+ /* used to act as a point at which to signal an event */
+ unsigned char *half_p;
+ int use_count;
+ int update_needed;
+ spinlock_t use_lock;
+};
+
+#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)
+
+struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev);
+void iio_sw_rb_free(struct iio_ring_buffer *ring);
+
+
+
+#else /* CONFIG_IIO_SW_RING */
+/* Stub when the sw ring is not configured; the guard above tests
+ * CONFIG_IIO_SW_RING, not CONFIG_IIO_RING_BUFFER as previously stated. */
+static inline void iio_ring_sw_register_funcs(struct iio_ring_access_funcs *ra)
+{}
+#endif /* !CONFIG_IIO_SW_RING */
+#endif /* _IIO_RING_SW_H_ */
diff --git a/drivers/staging/iio/sysfs.h b/drivers/staging/iio/sysfs.h
new file mode 100644
index 0000000..bfe4055
--- /dev/null
+++ b/drivers/staging/iio/sysfs.h
@@ -0,0 +1,293 @@
+/* The industrial I/O core
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * General attributes
+ */
+
+#ifndef _INDUSTRIAL_IO_SYSFS_H_
+#define _INDUSTRIAL_IO_SYSFS_H_
+
+#include "iio.h"
+
+/**
+ * struct iio_event_attr - event control attribute
+ * @dev_attr: underlying device attribute
+ * @mask: mask for the event when detecting
+ * @listel: list header to allow addition to list of event handlers
+*/
+struct iio_event_attr {
+ struct device_attribute dev_attr;
+ int mask;
+ struct iio_event_handler_list *listel;
+};
+
+#define to_iio_event_attr(_dev_attr) \
+ container_of(_dev_attr, struct iio_event_attr, dev_attr)
+
+/**
+ * struct iio_chrdev_minor_attr - simple attribute to allow reading of chrdev
+ * minor number
+ * @dev_attr: underlying device attribute
+ * @minor: the minor number
+ */
+struct iio_chrdev_minor_attr {
+ struct device_attribute dev_attr;
+ int minor;
+};
+
+void
+__init_iio_chrdev_minor_attr(struct iio_chrdev_minor_attr *minor_attr,
+ const char *name,
+ struct module *owner,
+ int id);
+
+/* No trailing semicolon: the container_of() expansion must be usable as an
+ * expression, matching the other to_iio_*_attr helpers in this header.
+ */
+#define to_iio_chrdev_minor_attr(_dev_attr) \
+ container_of(_dev_attr, struct iio_chrdev_minor_attr, dev_attr)
+
+/**
+ * struct iio_dev_attr - iio specific device attribute
+ * @dev_attr: underlying device attribute
+ * @address: associated register address
+ * @val2: secondary value associated with the attribute (set via IIO_ATTR_2)
+ */
+struct iio_dev_attr {
+ struct device_attribute dev_attr;
+ int address;
+ int val2;
+};
+
+#define to_iio_dev_attr(_dev_attr) \
+ container_of(_dev_attr, struct iio_dev_attr, dev_attr)
+
+/* sysfs show callback for iio_const_attr; presumably writes the constant
+ * string into the output buffer — see industrialio-core.c for the body.
+ * Third parameter is the sysfs output buffer, so name it buf, not len.
+ */
+ssize_t iio_read_const_attr(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+/**
+ * struct iio_const_attr - constant device specific attribute
+ * often used for things like available modes
+ * @string: constant string exposed via iio_read_const_attr()
+ * @dev_attr: underlying device attribute
+ */
+struct iio_const_attr {
+ const char *string;
+ struct device_attribute dev_attr;
+};
+
+#define to_iio_const_attr(_dev_attr) \
+ container_of(_dev_attr, struct iio_const_attr, dev_attr)
+
+/* Some attributes will be hard coded (device dependent) and not require an
+ address; in those cases pass a negative address value. */
+#define IIO_ATTR(_name, _mode, _show, _store, _addr) \
+ { .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .address = _addr }
+
+/* Variant carrying a second attribute-specific value in @val2. */
+#define IIO_ATTR_2(_name, _mode, _show, _store, _addr, _val2) \
+ { .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .address = _addr, \
+ .val2 = _val2 }
+
+#define IIO_DEVICE_ATTR(_name, _mode, _show, _store, _addr) \
+ struct iio_dev_attr iio_dev_attr_##_name \
+ = IIO_ATTR(_name, _mode, _show, _store, _addr)
+
+
+#define IIO_DEVICE_ATTR_2(_name, _mode, _show, _store, _addr, _val2) \
+ struct iio_dev_attr iio_dev_attr_##_name \
+ = IIO_ATTR_2(_name, _mode, _show, _store, _addr, _val2)
+
+/* Read-only attribute that always returns the given constant string. */
+#define IIO_CONST_ATTR(_name, _string) \
+ struct iio_const_attr iio_const_attr_##_name \
+ = { .string = _string, \
+ .dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)}
+
+/* Generic attributes of one type or another */
+
+/**
+ * IIO_DEV_ATTR_REV: revision number for the device
+ *
+ * Very much device dependent.
+ **/
+#define IIO_DEV_ATTR_REV(_show) \
+ IIO_DEVICE_ATTR(revision, S_IRUGO, _show, NULL, 0)
+/**
+ * IIO_DEV_ATTR_NAME: chip type dependent identifier
+ **/
+#define IIO_DEV_ATTR_NAME(_show) \
+ IIO_DEVICE_ATTR(name, S_IRUGO, _show, NULL, 0)
+
+/**
+ * IIO_DEV_ATTR_SAMP_FREQ: sets any internal clock frequency
+ **/
+#define IIO_DEV_ATTR_SAMP_FREQ(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(sampling_frequency, _mode, _show, _store, 0)
+
+/**
+ * IIO_DEV_ATTR_AVAIL_SAMP_FREQ: list available sampling frequencies.
+ *
+ * May be mode dependent on some devices
+ **/
+#define IIO_DEV_ATTR_AVAIL_SAMP_FREQ(_show) \
+ IIO_DEVICE_ATTR(available_sampling_frequency, S_IRUGO, _show, NULL, 0)
+
+/**
+ * IIO_CONST_ATTR_AVAIL_SAMP_FREQ: list available sampling frequencies.
+ *
+ * Constant version
+ **/
+#define IIO_CONST_ATTR_AVAIL_SAMP_FREQ(_string) \
+ IIO_CONST_ATTR(available_sampling_frequency, _string)
+/**
+ * IIO_DEV_ATTR_SCAN_MODE: select a scan mode
+ *
+ * This is used when only certain combinations of inputs may be read in one
+ * scan.
+ **/
+#define IIO_DEV_ATTR_SCAN_MODE(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(scan_mode, _mode, _show, _store, 0)
+/**
+ * IIO_DEV_ATTR_AVAIL_SCAN_MODES: list available scan modes
+ **/
+#define IIO_DEV_ATTR_AVAIL_SCAN_MODES(_show) \
+ IIO_DEVICE_ATTR(available_scan_modes, S_IRUGO, _show, NULL, 0)
+
+/**
+ * IIO_DEV_ATTR_SCAN: result of scan of multiple channels
+ *
+ * No trailing semicolon: users terminate the declaration, as with the
+ * other IIO_DEV_ATTR_* macros.
+ **/
+#define IIO_DEV_ATTR_SCAN(_show) \
+ IIO_DEVICE_ATTR(scan, S_IRUGO, _show, NULL, 0)
+
+/**
+ * IIO_DEV_ATTR_INPUT: direct read of a single input channel
+ **/
+#define IIO_DEV_ATTR_INPUT(_number, _show) \
+ IIO_DEVICE_ATTR(in##_number, S_IRUGO, _show, NULL, _number)
+
+
+/**
+ * IIO_DEV_ATTR_SW_RING_ENABLE: enable software ring buffer
+ *
+ * Success may depend on a trigger having been attached previously
+ **/
+#define IIO_DEV_ATTR_SW_RING_ENABLE(_show, _store) \
+ IIO_DEVICE_ATTR(sw_ring_enable, S_IRUGO | S_IWUSR, _show, _store, 0)
+
+/**
+ * IIO_DEV_ATTR_HW_RING_ENABLE: enable hardware ring buffer
+ *
+ * This is a different attribute from the software one as one can envision
+ * schemes where a combination of the two may be used.
+ **/
+#define IIO_DEV_ATTR_HW_RING_ENABLE(_show, _store) \
+ IIO_DEVICE_ATTR(hw_ring_enable, S_IRUGO | S_IWUSR, _show, _store, 0)
+
+/**
+ * IIO_DEV_ATTR_BPSE: set number of bits per scan element
+ **/
+#define IIO_DEV_ATTR_BPSE(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(bpse, _mode, _show, _store, 0)
+
+/**
+ * IIO_DEV_ATTR_BPSE_AVAILABLE: number of bits per scan element supported
+ **/
+#define IIO_DEV_ATTR_BPSE_AVAILABLE(_show) \
+ IIO_DEVICE_ATTR(bpse_available, S_IRUGO, _show, NULL, 0)
+
+/**
+ * IIO_DEV_ATTR_TEMP: many sensors have auxiliary temperature sensors
+ **/
+#define IIO_DEV_ATTR_TEMP(_show) \
+ IIO_DEVICE_ATTR(temp, S_IRUGO, _show, NULL, 0)
+/**
+ * IIO_EVENT_SH: generic shared event handler
+ *
+ * This is used in cases where more than one event may result from a single
+ * handler. Often the case that some alarm register must be read and multiple
+ * alarms may have been triggered.
+ *
+ * The list is initialized self-referencing (an empty list) and the
+ * exist_lock protects addition/removal of users.
+ **/
+#define IIO_EVENT_SH(_name, _handler) \
+ static struct iio_event_handler_list \
+ iio_event_##_name = { \
+ .handler = _handler, \
+ .refcount = 0, \
+ .exist_lock = __MUTEX_INITIALIZER(iio_event_##_name \
+ .exist_lock), \
+ .list = { \
+ .next = &iio_event_##_name.list, \
+ .prev = &iio_event_##_name.list, \
+ }, \
+ };
+/**
+ * IIO_EVENT_ATTR_SH: generic shared event attribute
+ *
+ * An attribute with an associated IIO_EVENT_SH
+ **/
+#define IIO_EVENT_ATTR_SH(_name, _ev_list, _show, _store, _mask) \
+ static struct iio_event_attr \
+ iio_event_attr_##_name \
+ = { .dev_attr = __ATTR(_name, S_IRUGO | S_IWUSR, \
+ _show, _store), \
+ .mask = _mask, \
+ .listel = &_ev_list };
+
+/**
+ * IIO_EVENT_ATTR: non shared event attribute
+ *
+ * Declares both the handler list entry and the attribute referencing it.
+ **/
+#define IIO_EVENT_ATTR(_name, _show, _store, _mask, _handler) \
+ static struct iio_event_handler_list \
+ iio_event_##_name = { \
+ .handler = _handler, \
+ }; \
+ static struct \
+ iio_event_attr \
+ iio_event_attr_##_name \
+ = { .dev_attr = __ATTR(_name, S_IRUGO | S_IWUSR, \
+ _show, _store), \
+ .mask = _mask, \
+ .listel = &iio_event_##_name };
+
+/**
+ * IIO_EVENT_ATTR_DATA_RDY: event driven by data ready signal
+ *
+ * Not typically implemented in devices where full triggering support
+ * has been implemented
+ **/
+#define IIO_EVENT_ATTR_DATA_RDY(_show, _store, _mask, _handler) \
+ IIO_EVENT_ATTR(data_rdy, _show, _store, _mask, _handler)
+
+/* Event code base values per subsystem; bases are spaced 100 apart so
+ * each class has room for per-class codes below the next base.
+ */
+#define IIO_EVENT_CODE_DATA_RDY 100
+#define IIO_EVENT_CODE_RING_BASE 200
+#define IIO_EVENT_CODE_ACCEL_BASE 300
+#define IIO_EVENT_CODE_GYRO_BASE 400
+#define IIO_EVENT_CODE_ADC_BASE 500
+#define IIO_EVENT_CODE_MISC_BASE 600
+
+#define IIO_EVENT_CODE_DEVICE_SPECIFIC 1000
+
+/**
+ * IIO_EVENT_ATTR_RING_50_FULL: ring buffer event to indicate 50% full
+ **/
+#define IIO_EVENT_ATTR_RING_50_FULL(_show, _store, _mask, _handler) \
+ IIO_EVENT_ATTR(ring_50_full, _show, _store, _mask, _handler)
+
+/**
+ * IIO_EVENT_ATTR_RING_50_FULL_SH: shared ring event to indicate 50% full
+ **/
+#define IIO_EVENT_ATTR_RING_50_FULL_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(ring_50_full, _evlist, _show, _store, _mask)
+
+/**
+ * IIO_EVENT_ATTR_RING_75_FULL_SH: shared ring event to indicate 75% full
+ **/
+#define IIO_EVENT_ATTR_RING_75_FULL_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(ring_75_full, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_CODE_RING_50_FULL IIO_EVENT_CODE_RING_BASE
+#define IIO_EVENT_CODE_RING_75_FULL (IIO_EVENT_CODE_RING_BASE + 1)
+#define IIO_EVENT_CODE_RING_100_FULL (IIO_EVENT_CODE_RING_BASE + 2)
+#endif /* _INDUSTRIAL_IO_SYSFS_H_ */
diff --git a/drivers/staging/iio/trigger.h b/drivers/staging/iio/trigger.h
new file mode 100644
index 0000000..8284098
--- /dev/null
+++ b/drivers/staging/iio/trigger.h
@@ -0,0 +1,151 @@
+/* The industrial I/O core, trigger handling functions
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _IIO_TRIGGER_H_
+#define _IIO_TRIGGER_H_
+#define IIO_TRIGGER_NAME_LENGTH 20
+#define IIO_TRIGGER_ID_PREFIX "iio:trigger"
+#define IIO_TRIGGER_ID_FORMAT IIO_TRIGGER_ID_PREFIX "%d"
+
+
+/**
+ * struct iio_trigger - industrial I/O trigger device
+ *
+ * @id: [INTERN] unique id number
+ * @name: [DRIVER] unique name
+ * @dev: [DRIVER] associated device (if relevant)
+ * @private_data: [DRIVER] device specific data
+ * @list: [INTERN] used in maintenance of global trigger list
+ * @alloc_list: [DRIVER] used for driver specific trigger list
+ * @pollfunc_list_lock: [INTERN] protection of the polling function list
+ * @pollfunc_list: [INTERN] list of functions to run on trigger.
+ * @control_attrs: [DRIVER] sysfs attributes relevant to trigger type
+ * @timestamp: [INTERN] timestamp used by some trigs (e.g. datardy)
+ * @owner: [DRIVER] used to monitor usage count of the trigger.
+ * @use_count: [INTERN] usage count — presumably managed by the core,
+ * confirm against industrialio-trigger.c
+ * @set_trigger_state: [DRIVER] switch on/off the trigger on demand
+ * @try_reenable: [DRIVER] called to attempt re-enabling the trigger
+ * — TODO confirm exact semantics with the core
+ **/
+struct iio_trigger {
+ int id;
+ const char *name;
+ struct device dev;
+
+ void *private_data;
+ struct list_head list;
+ struct list_head alloc_list;
+ spinlock_t pollfunc_list_lock;
+ struct list_head pollfunc_list;
+ const struct attribute_group *control_attrs;
+ s64 timestamp;
+ struct module *owner;
+ int use_count;
+
+ int (*set_trigger_state)(struct iio_trigger *trig, bool state);
+ int (*try_reenable)(struct iio_trigger *trig);
+};
+
+/* Convert an embedded struct device back to its containing trigger. */
+static inline struct iio_trigger *to_iio_trigger(struct device *d)
+{
+ return container_of(d, struct iio_trigger, dev);
+}
+
+/* Drop the device reference first, then release the module reference
+ * taken in iio_get_trigger().
+ */
+static inline void iio_put_trigger(struct iio_trigger *trig)
+{
+ put_device(&trig->dev);
+ module_put(trig->owner);
+}
+
+/* Pin the owning module and take a device reference on the trigger. */
+static inline void iio_get_trigger(struct iio_trigger *trig)
+{
+ __module_get(trig->owner);
+ get_device(&trig->dev);
+}
+
+/**
+ * iio_trigger_read_name() - sysfs access function to get the trigger name
+ **/
+ssize_t iio_trigger_read_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+/* No trailing semicolon inside the macro: users terminate the
+ * declaration themselves, as with DEVICE_ATTR() proper.
+ */
+#define IIO_TRIGGER_NAME_ATTR DEVICE_ATTR(name, S_IRUGO, \
+ iio_trigger_read_name, \
+ NULL)
+
+/**
+ * iio_trigger_find_by_name() - search global trigger list
+ **/
+struct iio_trigger *iio_trigger_find_by_name(const char *name, size_t len);
+
+/**
+ * iio_trigger_register() - register a trigger with the IIO core
+ * @trig_info: trigger to be registered
+ **/
+int iio_trigger_register(struct iio_trigger *trig_info);
+
+/**
+ * iio_trigger_unregister() - unregister a trigger from the core
+ **/
+void iio_trigger_unregister(struct iio_trigger *trig_info);
+
+/**
+ * iio_trigger_attach_poll_func() - add a function pair to be run on trigger
+ * @trig: trigger to which the function pair are being added
+ * @pf: poll function pair
+ **/
+int iio_trigger_attach_poll_func(struct iio_trigger *trig,
+ struct iio_poll_func *pf);
+
+/**
+ * iio_trigger_dettach_poll_func() - remove function pair from those to be
+ * run on trigger.
+ * @trig: trigger from which the function is being removed.
+ * @pf: poll function pair
+ *
+ * NOTE(review): "dettach" spelling matches the exported symbol; renaming
+ * it would break callers, so it is left as-is.
+ **/
+int iio_trigger_dettach_poll_func(struct iio_trigger *trig,
+ struct iio_poll_func *pf);
+
+/**
+ * iio_trigger_poll() - called on a trigger occurring
+ * Typically called in relevant hardware interrupt handler.
+ **/
+void iio_trigger_poll(struct iio_trigger *);
+void iio_trigger_notify_done(struct iio_trigger *);
+
+/**
+ * struct iio_poll_func - poll function pair
+ *
+ * @list: associate this with a triggers pollfunc_list
+ * @private_data: data specific to device (passed into poll func)
+ * @poll_func_immediate: function in here is run first. They should be
+ * extremely lightweight. Typically used for latch
+ * control on sensor supporting it.
+ * @poll_func_main: function in here is run after all immediates.
+ * Reading from sensor etc typically involves
+ * scheduling
+ * from here.
+ *
+ * The two stage approach used here is only important when multiple sensors
+ * are being triggered by a single trigger. This really comes into its own
+ * with simultaneous sampling devices where a simple latch command can be
+ * used to make the device store the values on all inputs.
+ **/
+struct iio_poll_func {
+ struct list_head list;
+ void *private_data;
+ void (*poll_func_immediate)(struct iio_dev *indio_dev);
+ void (*poll_func_main)(struct iio_dev *private_data);
+
+};
+
+struct iio_trigger *iio_allocate_trigger(void);
+
+void iio_free_trigger(struct iio_trigger *trig);
diff --git a/drivers/staging/iio/trigger/Kconfig b/drivers/staging/iio/trigger/Kconfig
new file mode 100644
index 0000000..fdd9301
--- /dev/null
+++ b/drivers/staging/iio/trigger/Kconfig
@@ -0,0 +1,21 @@
+#
+# Industrial I/O standalone triggers
+#
+comment "Triggers - standalone"
+
+if IIO_TRIGGER
+
+config IIO_PERIODIC_RTC_TRIGGER
+ tristate "Periodic RTC triggers"
+ depends on RTC_CLASS
+ help
+ Provides support for using periodic capable real time
+ clocks as IIO triggers.
+
+config IIO_GPIO_TRIGGER
+ tristate "GPIO trigger"
+ depends on GENERIC_GPIO
+ help
+ Provides support for using GPIO pins as IIO triggers.
+
+endif # IIO_TRIGGER
diff --git a/drivers/staging/iio/trigger/Makefile b/drivers/staging/iio/trigger/Makefile
new file mode 100644
index 0000000..e5f96d2
--- /dev/null
+++ b/drivers/staging/iio/trigger/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for triggers not associated with iio-devices
+#
+obj-$(CONFIG_IIO_PERIODIC_RTC_TRIGGER) += iio-trig-periodic-rtc.o
+obj-$(CONFIG_IIO_GPIO_TRIGGER) += iio-trig-gpio.o \ No newline at end of file
diff --git a/drivers/staging/iio/trigger/iio-trig-gpio.c b/drivers/staging/iio/trigger/iio-trig-gpio.c
new file mode 100644
index 0000000..539e416
--- /dev/null
+++ b/drivers/staging/iio/trigger/iio-trig-gpio.c
@@ -0,0 +1,202 @@
+/*
+ * Industrial I/O - gpio based trigger support
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Currently this is more of a functioning proof of concept that a fully
+ * fledged trigger driver.
+ *
+ * TODO:
+ *
+ * Add board config elements to allow specification of startup settings.
+ * Add configuration settings (irq type etc)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+
+#include "../iio.h"
+#include "../trigger.h"
+
+/* Global list of triggers allocated by this driver, and its lock. */
+LIST_HEAD(iio_gpio_trigger_list);
+DEFINE_MUTEX(iio_gpio_trigger_list_lock);
+
+/**
+ * struct iio_gpio_trigger_info - per-trigger private state
+ * @in_use: mutex — not taken anywhere in this file, TODO confirm intent
+ * @gpio: gpio number driving the trigger
+ */
+struct iio_gpio_trigger_info {
+ struct mutex in_use;
+ int gpio;
+};
+/*
+ * Need to reference count these triggers and only enable gpio interrupts
+ * as appropriate.
+ */
+
+/* So what functionality do we want in here?... */
+/* set high / low as interrupt type? */
+
+/* Interrupt handler: the cookie registered with request_irq() is the
+ * trigger itself; fire it and acknowledge the irq.
+ */
+static irqreturn_t iio_gpio_trigger_poll(int irq, void *private)
+{
+ struct iio_trigger *trig = private;
+
+ iio_trigger_poll(trig);
+ return IRQ_HANDLED;
+}
+
+/* Read-only "name" attribute exposed on each gpio trigger device. */
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *iio_gpio_trigger_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+
+static const struct attribute_group iio_gpio_trigger_attr_group = {
+ .attrs = iio_gpio_trigger_attrs,
+};
+
+/* Allocate and register one trigger per gpio listed in platform data
+ * (an array of gpio numbers terminated by an invalid gpio).
+ */
+static int iio_gpio_trigger_probe(struct platform_device *dev)
+{
+ int *pdata = dev->dev.platform_data;
+ struct iio_gpio_trigger_info *trig_info;
+ struct iio_trigger *trig, *trig2;
+ int i, irq, ret = 0;
+
+ if (!pdata) {
+ printk(KERN_ERR "No IIO gpio trigger platform data found\n");
+ /* Previously fell through with ret == 0, reporting success. */
+ ret = -ENODEV;
+ goto error_ret;
+ }
+ for (i = 0;; i++) {
+ if (!gpio_is_valid(pdata[i]))
+ break;
+ trig = iio_allocate_trigger();
+ if (!trig) {
+ ret = -ENOMEM;
+ goto error_free_completed_registrations;
+ }
+
+ trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
+ if (!trig_info) {
+ ret = -ENOMEM;
+ goto error_put_trigger;
+ }
+ trig->control_attrs = &iio_gpio_trigger_attr_group;
+ trig->private_data = trig_info;
+ trig_info->gpio = pdata[i];
+ trig->owner = THIS_MODULE;
+ trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ if (!trig->name) {
+ ret = -ENOMEM;
+ goto error_free_trig_info;
+ }
+ snprintf((char *)trig->name,
+ IIO_TRIGGER_NAME_LENGTH,
+ "gpiotrig%d",
+ pdata[i]);
+ ret = gpio_request(trig_info->gpio, trig->name);
+ if (ret)
+ goto error_free_name;
+
+ ret = gpio_direction_input(trig_info->gpio);
+ if (ret)
+ goto error_release_gpio;
+
+ irq = gpio_to_irq(trig_info->gpio);
+ if (irq < 0) {
+ ret = irq;
+ goto error_release_gpio;
+ }
+
+ ret = request_irq(irq, iio_gpio_trigger_poll,
+ IRQF_TRIGGER_RISING,
+ trig->name,
+ trig);
+ if (ret)
+ goto error_release_gpio;
+
+ ret = iio_trigger_register(trig);
+ if (ret)
+ goto error_release_irq;
+
+ /* NOTE(review): remove() takes iio_gpio_trigger_list_lock but
+ * this addition does not — confirm probe/remove cannot race.
+ */
+ list_add_tail(&trig->alloc_list, &iio_gpio_trigger_list);
+
+ }
+ return 0;
+
+/* First clean up the partly allocated trigger */
+error_release_irq:
+ free_irq(irq, trig);
+error_release_gpio:
+ gpio_free(trig_info->gpio);
+error_free_name:
+ kfree(trig->name);
+error_free_trig_info:
+ kfree(trig_info);
+error_put_trigger:
+ iio_put_trigger(trig);
+error_free_completed_registrations:
+ /* The rest should have been added to the iio_gpio_trigger_list */
+ list_for_each_entry_safe(trig,
+ trig2,
+ &iio_gpio_trigger_list,
+ alloc_list) {
+ trig_info = trig->private_data;
+ /* Unregister first: the core may still reference trig->name
+ * and private_data while the trigger is registered.
+ */
+ iio_trigger_unregister(trig);
+ free_irq(gpio_to_irq(trig_info->gpio), trig);
+ gpio_free(trig_info->gpio);
+ kfree(trig->name);
+ kfree(trig_info);
+ }
+
+error_ret:
+ return ret;
+}
+
+/* Tear down every trigger allocated in probe(). */
+static int iio_gpio_trigger_remove(struct platform_device *dev)
+{
+ struct iio_trigger *trig, *trig2;
+ struct iio_gpio_trigger_info *trig_info;
+
+ mutex_lock(&iio_gpio_trigger_list_lock);
+ list_for_each_entry_safe(trig,
+ trig2,
+ &iio_gpio_trigger_list,
+ alloc_list) {
+ trig_info = trig->private_data;
+ iio_trigger_unregister(trig);
+ free_irq(gpio_to_irq(trig_info->gpio), trig);
+ gpio_free(trig_info->gpio);
+ /* NOTE(review): name and private_data are freed before the
+ * final put — confirm the device release path does not
+ * dereference them.
+ */
+ kfree(trig->name);
+ kfree(trig_info);
+ iio_put_trigger(trig);
+ }
+ mutex_unlock(&iio_gpio_trigger_list_lock);
+
+ return 0;
+}
+
+/* Platform driver matched against devices named "iio_gpio_trigger". */
+static struct platform_driver iio_gpio_trigger_driver = {
+ .probe = iio_gpio_trigger_probe,
+ .remove = iio_gpio_trigger_remove,
+ .driver = {
+ .name = "iio_gpio_trigger",
+ .owner = THIS_MODULE,
+ },
+};
+
+/* Module init/exit: register/unregister the platform driver. */
+static int __init iio_gpio_trig_init(void)
+{
+ return platform_driver_register(&iio_gpio_trigger_driver);
+}
+module_init(iio_gpio_trig_init);
+
+static void __exit iio_gpio_trig_exit(void)
+{
+ platform_driver_unregister(&iio_gpio_trigger_driver);
+}
+module_exit(iio_gpio_trig_exit);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
+MODULE_DESCRIPTION("Example gpio trigger for the iio subsystem");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
new file mode 100644
index 0000000..e310dc00
--- /dev/null
+++ b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
@@ -0,0 +1,228 @@
+/* The industrial I/O periodic RTC trigger driver
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This is a heavily rewritten version of the periodic timer system in
+ * earlier version of industrialio. It supplies the same functionality
+ * but via a trigger rather than a specific periodic timer system.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include "../iio.h"
+#include "../trigger.h"
+
+/* Global list of periodic RTC triggers allocated here, and its lock. */
+LIST_HEAD(iio_prtc_trigger_list);
+DEFINE_MUTEX(iio_prtc_trigger_list_lock);
+
+/**
+ * struct iio_prtc_trigger_info - per-trigger private state
+ * @rtc: handle to the backing rtc device
+ * @frequency: currently programmed periodic irq frequency (0 = unset)
+ * @name: points at the trigger's name string (owned via trig->name)
+ * @task: rtc periodic irq callback registration
+ */
+struct iio_prtc_trigger_info {
+ struct rtc_device *rtc;
+ int frequency;
+ char *name;
+ struct rtc_task task;
+};
+
+/* Enable/disable the periodic rtc interrupt backing this trigger.
+ * Returns -EINVAL if no frequency has been configured yet.
+ */
+static int iio_trig_periodic_rtc_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_prtc_trigger_info *trig_info = trig->private_data;
+
+ if (trig_info->frequency == 0)
+ return -EINVAL;
+ /* Debug, not KERN_INFO: do not spam the log on every state change. */
+ dev_dbg(&trig->dev, "trigger frequency is %d\n", trig_info->frequency);
+ return rtc_irq_set_state(trig_info->rtc, &trig_info->task, state);
+}
+
+/* sysfs show for the "frequency" attribute. */
+static ssize_t iio_trig_periodic_read_freq(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_trigger *trig = dev_get_drvdata(dev);
+ struct iio_prtc_trigger_info *trig_info = trig->private_data;
+
+ /* frequency is a signed int: use %d, not %u. */
+ return sprintf(buf, "%d\n", trig_info->frequency);
+}
+
+/* sysfs store for the "frequency" attribute: parse, program the rtc
+ * periodic irq, and cache the value on success.
+ */
+static ssize_t iio_trig_periodic_write_freq(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_trigger *trig = dev_get_drvdata(dev);
+ struct iio_prtc_trigger_info *trig_info = trig->private_data;
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+
+ /* NOTE(review): val (unsigned long) is stored into an int below —
+ * presumably rtc_irq_set_freq rejects out-of-range values; confirm.
+ */
+ ret = rtc_irq_set_freq(trig_info->rtc, &trig_info->task, val);
+ if (ret)
+ goto error_ret;
+
+ trig_info->frequency = val;
+
+ return len;
+
+error_ret:
+ return ret;
+}
+
+/* sysfs show for the "name" attribute of a periodic rtc trigger. */
+static ssize_t iio_trig_periodic_read_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_trigger *trig;
+ struct iio_prtc_trigger_info *info;
+
+ trig = dev_get_drvdata(dev);
+ info = trig->private_data;
+ return sprintf(buf, "%s\n", info->name);
+}
+
+/* Per-trigger sysfs attributes: read-only name, read/write frequency. */
+static DEVICE_ATTR(name, S_IRUGO,
+ iio_trig_periodic_read_name,
+ NULL);
+static DEVICE_ATTR(frequency, S_IRUGO | S_IWUSR,
+ iio_trig_periodic_read_freq,
+ iio_trig_periodic_write_freq);
+
+static struct attribute *iio_trig_prtc_attrs[] = {
+ &dev_attr_frequency.attr,
+ &dev_attr_name.attr,
+ NULL,
+};
+static const struct attribute_group iio_trig_prtc_attr_group = {
+ .attrs = iio_trig_prtc_attrs,
+};
+
+/* rtc_task callback: the private data registered in probe() is the
+ * trigger itself.
+ */
+static void iio_prtc_trigger_poll(void *private_data)
+{
+ struct iio_trigger *trig = private_data;
+
+ iio_trigger_poll(trig);
+}
+
+/* Allocate and register one trigger per rtc named in platform data
+ * (a NULL-terminated array of rtc device name strings).
+ */
+static int iio_trig_periodic_rtc_probe(struct platform_device *dev)
+{
+ char **pdata = dev->dev.platform_data;
+ struct iio_prtc_trigger_info *trig_info;
+ struct iio_trigger *trig, *trig2;
+
+ int i, ret;
+
+ /* Previously a missing platform_data pointer was dereferenced below. */
+ if (!pdata)
+ return -ENODEV;
+
+ for (i = 0;; i++) {
+ if (pdata[i] == NULL)
+ break;
+ trig = iio_allocate_trigger();
+ if (!trig) {
+ ret = -ENOMEM;
+ goto error_free_completed_registrations;
+ }
+ list_add(&trig->alloc_list, &iio_prtc_trigger_list);
+
+ trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
+ if (!trig_info) {
+ ret = -ENOMEM;
+ goto error_put_trigger_and_remove_from_list;
+ }
+ trig->private_data = trig_info;
+ trig->owner = THIS_MODULE;
+ trig->set_trigger_state = &iio_trig_periodic_rtc_set_state;
+ trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ if (trig->name == NULL) {
+ ret = -ENOMEM;
+ goto error_free_trig_info;
+ }
+ snprintf((char *)trig->name,
+ IIO_TRIGGER_NAME_LENGTH,
+ "periodic%s",
+ pdata[i]);
+ trig_info->name = (char *)trig->name;
+ /* RTC access */
+ trig_info->rtc
+ = rtc_class_open(pdata[i]);
+ if (trig_info->rtc == NULL) {
+ ret = -EINVAL;
+ goto error_free_name;
+ }
+ trig_info->task.func = iio_prtc_trigger_poll;
+ trig_info->task.private_data = trig;
+ ret = rtc_irq_register(trig_info->rtc, &trig_info->task);
+ if (ret)
+ goto error_close_rtc;
+ trig->control_attrs = &iio_trig_prtc_attr_group;
+ ret = iio_trigger_register(trig);
+ if (ret)
+ goto error_unregister_rtc_irq;
+ }
+ return 0;
+error_unregister_rtc_irq:
+ rtc_irq_unregister(trig_info->rtc, &trig_info->task);
+error_close_rtc:
+ rtc_class_close(trig_info->rtc);
+error_free_name:
+ kfree(trig->name);
+error_free_trig_info:
+ kfree(trig_info);
+error_put_trigger_and_remove_from_list:
+ list_del(&trig->alloc_list);
+ iio_put_trigger(trig);
+error_free_completed_registrations:
+ list_for_each_entry_safe(trig,
+ trig2,
+ &iio_prtc_trigger_list,
+ alloc_list) {
+ trig_info = trig->private_data;
+ /* Unregister first: the core may still reference trig->name
+ * and private_data while the trigger is registered.
+ */
+ iio_trigger_unregister(trig);
+ rtc_irq_unregister(trig_info->rtc, &trig_info->task);
+ rtc_class_close(trig_info->rtc);
+ kfree(trig->name);
+ kfree(trig_info);
+ }
+ return ret;
+}
+
+/* Tear down every trigger allocated in probe(). */
+static int iio_trig_periodic_rtc_remove(struct platform_device *dev)
+{
+ struct iio_trigger *trig, *trig2;
+ struct iio_prtc_trigger_info *trig_info;
+ mutex_lock(&iio_prtc_trigger_list_lock);
+ list_for_each_entry_safe(trig,
+ trig2,
+ &iio_prtc_trigger_list,
+ alloc_list) {
+ trig_info = trig->private_data;
+ /* Unregister before freeing name/private_data: the core may
+ * still reference them while the trigger is registered.
+ */
+ iio_trigger_unregister(trig);
+ rtc_irq_unregister(trig_info->rtc, &trig_info->task);
+ rtc_class_close(trig_info->rtc);
+ kfree(trig->name);
+ kfree(trig_info);
+ }
+ mutex_unlock(&iio_prtc_trigger_list_lock);
+ return 0;
+}
+
+/* Platform driver matched against devices named "iio_prtc_trigger". */
+static struct platform_driver iio_trig_periodic_rtc_driver = {
+ .probe = iio_trig_periodic_rtc_probe,
+ .remove = iio_trig_periodic_rtc_remove,
+ .driver = {
+ .name = "iio_prtc_trigger",
+ .owner = THIS_MODULE,
+ },
+};
+
+/* Module init/exit: register/unregister the platform driver. */
+static int __init iio_trig_periodic_rtc_init(void)
+{
+ return platform_driver_register(&iio_trig_periodic_rtc_driver);
+}
+
+static void __exit iio_trig_periodic_rtc_exit(void)
+{
+ /* platform_driver_unregister() returns void; do not "return" it. */
+ platform_driver_unregister(&iio_trig_periodic_rtc_driver);
+}
+
+module_init(iio_trig_periodic_rtc_init);
+module_exit(iio_trig_periodic_rtc_exit);
+MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
+MODULE_DESCRIPTION("Periodic realtime clock trigger for the iio subsystem");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/trigger_consumer.h b/drivers/staging/iio/trigger_consumer.h
new file mode 100644
index 0000000..4c7f527
--- /dev/null
+++ b/drivers/staging/iio/trigger_consumer.h
@@ -0,0 +1,45 @@
+
+/* The industrial I/O core, trigger consumer handling functions
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _IIO_TRIGGER_CONSUMER_H_
+#define _IIO_TRIGGER_CONSUMER_H_
+
+#ifdef CONFIG_IIO_TRIGGER
+/**
+ * iio_device_register_trigger_consumer() - set up an iio_dev to use triggers.
+ * @dev_info: iio_dev associated with the device that will consume the trigger
+ **/
+int iio_device_register_trigger_consumer(struct iio_dev *dev_info);
+/**
+ * iio_device_unregister_trigger_consumer() - reverse the registration process.
+ * @dev_info: iio_dev associated with the device that consumed the trigger
+ **/
+int iio_device_unregister_trigger_consumer(struct iio_dev *dev_info);
+
+#else
+
+/* Stubs must be static inline: plain definitions in a header would cause
+ * duplicate symbols when included from more than one translation unit.
+ */
+static inline int
+iio_device_register_trigger_consumer(struct iio_dev *dev_info)
+{
+ return 0;
+}
+
+static inline int
+iio_device_unregister_trigger_consumer(struct iio_dev *dev_info)
+{
+ return 0;
+}
+
+#endif /* CONFIG_IIO_TRIGGER */
+
+#endif /* _IIO_TRIGGER_CONSUMER_H_ */